From 92d4af366c680726445538c80eec6b2f55678490 Mon Sep 17 00:00:00 2001
From: Mehdi Dogguy <mehdi@debian.org>
Date: Mon, 8 Sep 2014 21:40:08 +0200
Subject: [PATCH] Imported Upstream version 2.3.2

---
 BUILD.NOTES                                   |  107 +-
 COPYING                                       |    8 +-
 DISCLAIMER                                    |  151 +-
 META                                          |    8 +-
 Makefile.am                                   |    7 +-
 Makefile.in                                   |   17 +-
 NEWS                                          | 5226 ++---------------
 README => README.rst                          |    5 +-
 RELEASE_NOTES                                 |  440 +-
 RELEASE_NOTES_LLNL                            |   51 +-
 aclocal.m4                                    |    6 +-
 auxdir/Makefile.am                            |   10 +-
 auxdir/Makefile.in                            |   18 +-
 auxdir/ltmain.sh                              |    4 +-
 auxdir/slurm.m4                               |   23 +-
 auxdir/x_ac_bluegene.m4                       |   71 +-
 auxdir/x_ac_cray.m4                           |  103 +-
 auxdir/x_ac_debug.m4                          |   19 +
 auxdir/x_ac_lua.m4                            |    6 +-
 auxdir/x_ac_man2html.m4                       |   23 +
 auxdir/x_ac_munge.m4                          |    8 +-
 auxdir/x_ac_srun.m4                           |   32 +
 auxdir/x_ac_sun_const.m4                      |    2 +-
 config.h.in                                   |   28 +-
 configure                                     | 1396 ++++-
 configure.ac                                  |   62 +-
 contribs/Makefile.am                          |    2 +-
 contribs/Makefile.in                          |   10 +-
 contribs/README                               |   43 +
 contribs/arrayrun/Makefile.am                 |    4 +
 contribs/arrayrun/Makefile.in                 |  475 ++
 contribs/arrayrun/README                      |  132 +
 contribs/arrayrun/arrayrun                    |   69 +
 contribs/arrayrun/arrayrun_worker             |  255 +
 contribs/cray/Makefile.am                     |   40 +
 contribs/cray/Makefile.in                     |  532 ++
 contribs/cray/etc_init_d_munge                |  559 ++
 contribs/cray/etc_sysconfig_slurm             |   24 +
 contribs/cray/libalps_test_programs.tar.gz    |  Bin 0 -> 345034 bytes
 contribs/cray/munge_build_script.sh           |   81 +
 contribs/cray/opt_modulefiles_slurm           |   48 +
 contribs/cray/pam_job.c                       |  117 +
 contribs/cray/slurm-build-script.sh           |  144 +
 contribs/cray/srun.pl                         | 1103 ++++
 contribs/env_cache_builder.c                  |    2 +-
 contribs/lua/Makefile.am                      |    4 +
 contribs/lua/Makefile.in                      |  475 ++
 contribs/lua/job_submit.license.lua           |  105 +
 contribs/lua/job_submit.lua                   |   14 +-
 contribs/pam/Makefile.in                      |    8 +
 contribs/pam/pam_slurm.c                      |    7 +-
 contribs/perlapi/Makefile.in                  |    8 +
 contribs/perlapi/libslurm/Makefile.in         |    8 +
 contribs/perlapi/libslurm/perl/alloc.c        |   18 +-
 contribs/perlapi/libslurm/perl/block.c        |   86 +-
 contribs/perlapi/libslurm/perl/job.c          |    4 +
 contribs/perlapi/libslurm/perl/step.c         |    8 +-
 contribs/perlapi/libslurmdb/Makefile.in       |    8 +
 contribs/perlapi/libslurmdb/perl/Slurmdb.pm   |    4 +-
 contribs/phpext/Makefile.am                   |   14 +-
 contribs/phpext/Makefile.in                   |   19 +-
 contribs/phpext/slurm_php/AUTHORS             |    2 +
 contribs/phpext/slurm_php/DISCLAIMER          |   20 +
 contribs/phpext/slurm_php/LICENSE             |  339 ++
 contribs/phpext/slurm_php/README              |   48 +
 contribs/phpext/slurm_php/RELEASE_NOTES       |   38 +
 contribs/phpext/slurm_php/config.m4.in        |   48 +-
 contribs/phpext/slurm_php/slurm_php.c         |  911 ++-
 contribs/phpext/slurm_php/slurm_php.h         |  379 +-
 .../tests/slurm_array_to_hostlist_basic.phpt  |   29 +
 .../tests/slurm_array_to_hostlist_error.phpt  |   30 +
 ..._get_control_configuration_keys_basic.phpt |   35 +
 ...et_control_configuration_values_basic.phpt |   39 +
 .../slurm_get_node_element_by_name_basic.phpt |   34 +
 .../slurm_get_node_element_by_name_error.phpt |   24 +
 .../tests/slurm_get_node_elements_basic.phpt  |   33 +
 .../tests/slurm_get_node_names_basic.phpt     |   29 +
 .../slurm_get_node_state_by_name_basic.phpt   |   41 +
 .../slurm_get_node_state_by_name_error.phpt   |   24 +
 .../tests/slurm_get_node_states_basic.phpt    |   28 +
 .../slurm_get_partition_node_names_basic.phpt |   17 +
 .../slurm_get_partition_node_names_error.phpt |   30 +
 ...urm_get_specific_partition_info_basic.phpt |   37 +
 ...urm_get_specific_partition_info_error.phpt |   30 +
 .../tests/slurm_hostlist_to_array_basic.phpt  |   27 +
 .../tests/slurm_hostlist_to_array_error.phpt  |   24 +
 .../slurm_load_job_information_basic.phpt     |   32 +
 .../slurm_load_partition_jobs_basic.phpt      |   36 +
 .../slurm_load_partition_jobs_error.phpt      |   34 +
 .../slurm_php/tests/slurm_ping_basic.phpt     |   14 +
 .../slurm_php/tests/slurm_ping_error.phpt     |   35 +
 .../slurm_print_partition_names_basic.phpt    |   33 +
 .../tests/slurm_slurmd_status_basic.phpt      |   26 +
 .../slurm_php/tests/slurm_version_basic.phpt  |   30 +
 contribs/sjobexit/Makefile.in                 |    8 +
 contribs/sjobexit/sjobexitmod.pl              |   12 +-
 contribs/sjstat                               |  138 +-
 contribs/slurmdb-direct/Makefile.in           |    8 +
 contribs/time_login.c                         |    2 +-
 contribs/torque/Makefile.in                   |    8 +
 contribs/torque/mpiexec.pl                    |    2 +-
 contribs/torque/pbsnodes.pl                   |    2 +-
 contribs/torque/qdel.pl                       |    2 +-
 contribs/torque/qhold.pl                      |    2 +-
 contribs/torque/qrls.pl                       |    2 +-
 contribs/torque/qstat.pl                      |    2 +-
 contribs/torque/qsub.pl                       |   18 +-
 contribs/web_apps/chart_stats.cgi             |    2 +-
 doc/Makefile.in                               |    8 +
 doc/html/Makefile.am                          |   61 +-
 doc/html/Makefile.in                          |   66 +-
 doc/html/accounting.shtml                     |   19 +-
 doc/html/accounting_storageplugins.shtml      |   20 +-
 doc/html/big_sys.shtml                        |    2 +-
 doc/html/bluegene.shtml                       |  310 +-
 doc/html/bull.jpg                             |  Bin 0 -> 1555 bytes
 doc/html/checkpoint_plugins.shtml             |    1 -
 doc/html/configurator.html.in                 |   64 +-
 doc/html/cpu_management.shtml                 | 3543 +++++++++++
 doc/html/cray.shtml                           |  747 ++-
 doc/html/disclaimer.shtml                     |   90 +
 doc/html/dist_plane.shtml                     |    4 +-
 doc/html/documentation.shtml                  |   13 +-
 doc/html/download.shtml                       |   51 +-
 doc/html/faq.shtml                            |  157 +-
 doc/html/footer.txt                           |   21 +-
 doc/html/gang_scheduling.shtml                |    5 +-
 doc/html/gres.shtml                           |   32 +-
 doc/html/gres_design.shtml                    |  126 +
 doc/html/header.txt                           |   12 +-
 doc/html/help.shtml                           |   11 +-
 doc/html/job_exit_code.shtml                  |   28 +-
 doc/html/job_launch.shtml                     |  140 +
 doc/html/job_submit_plugins.shtml             |   19 +-
 doc/html/jobacct_gatherplugins.shtml          |   10 +-
 doc/html/man_index.shtml                      |   43 +
 doc/html/mc_support.shtml                     |   24 +-
 doc/html/meetings.shtml                       |   20 +
 doc/html/mpi_guide.shtml                      |    2 +-
 doc/html/news.shtml                           |   17 +-
 doc/html/overview.shtml                       |   13 +-
 doc/html/platforms.shtml                      |   11 +-
 doc/html/power_save.shtml                     |    4 +-
 doc/html/preempt.shtml                        |   24 +-
 doc/html/priority_multifactor.shtml           |   19 +-
 doc/html/proctrack_plugins.shtml              |   37 +-
 doc/html/publications.shtml                   |   94 +-
 doc/html/qos.shtml                            |    2 +
 doc/html/quickstart.shtml                     |    4 +-
 doc/html/quickstart_admin.shtml               |   12 +-
 doc/html/registration.shtml                   |   78 +
 doc/html/reservations.shtml                   |   45 +-
 doc/html/resource_limits.shtml                |   81 +-
 doc/html/review_release.html                  |    9 +-
 doc/html/select_design.shtml                  |  104 +
 doc/html/selectplugins.shtml                  |  555 +-
 doc/html/slurm.shtml                          |   36 +-
 doc/html/slurm_banner.gif                     |  Bin 23043 -> 17819 bytes
 doc/html/slurm_ug_2010/agenda.htm             |    2 +-
 doc/html/slurm_ug_2010/contact.htm            |    2 +-
 doc/html/slurm_ug_2010/directions.htm         |    2 +-
 doc/html/slurm_ug_2010/index.htm              |    4 +-
 doc/html/slurm_ug_2010/registration.htm       |    2 +-
 doc/html/slurm_ug_2010/submission.htm         |    2 +-
 doc/html/slurm_ug_agenda.shtml                |  462 ++
 doc/html/slurm_ug_cfp.shtml                   |   41 +
 doc/html/slurm_ug_registration.shtml          |  111 +
 doc/html/sun_const.shtml                      |    4 +-
 doc/html/taskplugins.shtml                    |   18 +-
 doc/html/team.shtml                           |   35 +-
 doc/html/testimonials.shtml                   |   38 +-
 doc/html/topology.shtml                       |   14 +-
 doc/html/topology_plugin.shtml                |   12 +-
 doc/man/Makefile.am                           |    8 +
 doc/man/Makefile.in                           |   16 +
 doc/man/man1/sacct.1                          |   64 +-
 doc/man/man1/sacctmgr.1                       |   81 +-
 doc/man/man1/salloc.1                         |   77 +-
 doc/man/man1/sattach.1                        |    8 +-
 doc/man/man1/sbatch.1                         |   80 +-
 doc/man/man1/sbcast.1                         |    2 +-
 doc/man/man1/scancel.1                        |    8 +-
 doc/man/man1/scontrol.1                       |  194 +-
 doc/man/man1/sinfo.1                          |   33 +-
 doc/man/man1/slurm.1                          |    6 +-
 doc/man/man1/smap.1                           |   16 +-
 doc/man/man1/sprio.1                          |   14 +-
 doc/man/man1/squeue.1                         |   52 +-
 doc/man/man1/sreport.1                        |    2 +-
 doc/man/man1/srun.1                           |  159 +-
 doc/man/man1/srun_cr.1                        |    2 +-
 doc/man/man1/sshare.1                         |    4 +-
 doc/man/man1/sstat.1                          |    8 +-
 doc/man/man1/strigger.1                       |   20 +-
 doc/man/man1/sview.1                          |   21 +-
 doc/man/man2html.py                           |  200 +
 doc/man/man3/slurm_allocate_resources.3       |    2 +-
 doc/man/man3/slurm_checkpoint_error.3         |    2 +-
 doc/man/man3/slurm_clear_trigger.3            |    2 +-
 doc/man/man3/slurm_complete_job.3             |    2 +-
 doc/man/man3/slurm_free_ctl_conf.3            |   12 +-
 doc/man/man3/slurm_free_front_end_info_msg.3  |  214 +
 doc/man/man3/slurm_free_job_info_msg.3        |   22 +-
 .../slurm_free_job_step_info_response_msg.3   |   10 +-
 doc/man/man3/slurm_free_node_info.3           |    2 +-
 doc/man/man3/slurm_free_partition_info.3      |    2 +-
 doc/man/man3/slurm_get_errno.3                |    2 +-
 doc/man/man3/slurm_hostlist_create.3          |    2 +-
 .../man3/slurm_init_update_front_end_msg.3    |    1 +
 doc/man/man3/slurm_job_step_create.3          |    2 +-
 doc/man/man3/slurm_kill_job.3                 |    2 +-
 doc/man/man3/slurm_load_front_end.3           |    1 +
 doc/man/man3/slurm_load_reservations.3        |    2 +-
 doc/man/man3/slurm_print_front_end_info_msg.3 |    1 +
 doc/man/man3/slurm_print_front_end_table.3    |    1 +
 doc/man/man3/slurm_reconfigure.3              |   59 +-
 doc/man/man3/slurm_resume.3                   |    2 +-
 doc/man/man3/slurm_slurmd_status.3            |    2 +-
 doc/man/man3/slurm_sprint_front_end_table.3   |    1 +
 doc/man/man3/slurm_step_ctx_create.3          |    2 +-
 doc/man/man3/slurm_step_launch.3              |    2 +-
 doc/man/man3/slurm_update_front_end.3         |    1 +
 doc/man/man3/slurm_update_job.3               |    2 +-
 doc/man/man5/bluegene.conf.5                  |  156 +-
 doc/man/man5/cgroup.conf.5                    |  266 +-
 doc/man/man5/cray.conf.5                      |  112 +
 doc/man/man5/gres.conf.5                      |   16 +-
 doc/man/man5/slurm.conf.5                     |  413 +-
 doc/man/man5/slurmdbd.conf.5                  |   20 +-
 doc/man/man5/topology.conf.5                  |    7 +-
 doc/man/man5/wiki.conf.5                      |    7 +-
 doc/man/man8/slurmctld.8                      |    2 +-
 doc/man/man8/slurmd.8                         |    6 +-
 doc/man/man8/slurmdbd.8                       |    2 +-
 doc/man/man8/slurmstepd.8                     |    2 +-
 doc/man/man8/spank.8                          |   14 +-
 etc/bluegene.conf.example                     |    2 +-
 etc/cgroup.conf.example                       |   69 +-
 etc/cgroup.release_agent                      |    2 -
 etc/cgroup.release_common.example             |  155 +
 etc/cgroup_allowed_devices_file.conf.example  |    6 +
 etc/init.d.slurm                              |   98 +-
 slurm.spec                                    |   97 +-
 slurm/slurm.h.in                              |  219 +-
 slurm/slurm_errno.h                           |   19 +-
 slurm/slurmdb.h                               |   15 +-
 slurm/spank.h                                 |    2 +-
 src/Makefile.am                               |   20 +-
 src/Makefile.in                               |   28 +-
 src/api/Makefile.am                           |    8 +-
 src/api/Makefile.in                           |   32 +-
 src/api/allocate.c                            |    4 +-
 src/api/allocate_msg.c                        |   26 +-
 src/api/block_info.c                          |   37 +-
 src/api/cancel.c                              |    4 +-
 src/api/checkpoint.c                          |    4 +-
 src/api/complete.c                            |    4 +-
 src/api/config_info.c                         |   87 +-
 src/api/front_end_info.c                      |  218 +
 src/api/init_msg.c                            |   45 +-
 src/api/job_info.c                            |   61 +-
 src/api/job_step_info.c                       |   27 +-
 src/api/node_info.c                           |   38 +-
 src/api/partition_info.c                      |   34 +-
 src/api/pmi.c                                 |    9 +-
 src/api/pmi_server.c                          |   14 +-
 src/api/pmi_server.h                          |    4 +-
 src/api/reconfigure.c                         |   46 +-
 src/api/reservation_info.c                    |   12 +-
 src/api/signal.c                              |  311 +-
 src/api/slurm_hostlist.c                      |    2 +-
 src/api/slurm_pmi.c                           |    7 +-
 src/api/slurm_pmi.h                           |    4 +-
 src/api/step_ctx.c                            |    4 +-
 src/api/step_ctx.h                            |    4 +-
 src/api/step_io.c                             |   25 +-
 src/api/step_io.h                             |    4 +-
 src/api/step_launch.c                         |   77 +-
 src/api/step_launch.h                         |    4 +-
 src/api/submit.c                              |    4 +-
 src/api/suspend.c                             |    4 +-
 src/api/topo_info.c                           |    4 +-
 src/api/triggers.c                            |    4 +-
 src/api/update_config.c                       |   16 +-
 src/common/Makefile.am                        |   11 +-
 src/common/Makefile.in                        |   76 +-
 src/common/arg_desc.c                         |    2 +-
 src/common/arg_desc.h                         |    2 +-
 src/common/assoc_mgr.c                        |   88 +-
 src/common/assoc_mgr.h                        |   13 +-
 src/common/basil_resv_conf.c                  |   74 -
 src/common/basil_resv_conf.h                  |   66 -
 src/common/bitstring.c                        |    4 +-
 src/common/bitstring.h                        |    2 +-
 src/common/checkpoint.c                       |    2 +-
 src/common/checkpoint.h                       |    2 +-
 src/common/daemonize.c                        |   12 +-
 src/common/daemonize.h                        |    2 +-
 src/common/eio.c                              |   11 +-
 src/common/eio.h                              |    2 +-
 src/common/env.c                              |  139 +-
 src/common/env.h                              |    4 +-
 src/common/fd.h                               |    9 +
 src/common/forward.c                          |   12 +-
 src/common/forward.h                          |    2 +-
 src/common/gres.c                             |  595 +-
 src/common/gres.h                             |   75 +-
 src/common/hostlist.c                         |  777 +--
 src/common/hostlist.h                         |   24 +-
 src/common/io_hdr.c                           |    2 +-
 src/common/io_hdr.h                           |    2 +-
 src/common/job_options.c                      |   15 +-
 src/common/job_options.h                      |    2 +-
 src/common/job_resources.c                    |  183 +-
 src/common/job_resources.h                    |   24 +-
 src/common/jobacct_common.c                   |    2 +-
 src/common/jobacct_common.h                   |    5 +-
 src/common/log.c                              |    2 +-
 src/common/macros.h                           |    8 +-
 src/common/mpi.c                              |   11 +-
 src/common/mpi.h                              |   10 +-
 src/common/net.c                              |    2 +-
 src/common/net.h                              |    2 +-
 src/common/node_conf.c                        |  183 +-
 src/common/node_conf.h                        |   30 +-
 src/common/node_select.c                      |  353 +-
 src/common/node_select.h                      |  512 +-
 src/common/optz.c                             |    5 +-
 src/common/optz.h                             |    2 +-
 src/common/pack.c                             |    7 +-
 src/common/pack.h                             |   16 +-
 src/common/parse_config.c                     |  119 +-
 src/common/parse_config.h                     |   24 +-
 src/common/parse_spec.c                       |    2 +-
 src/common/parse_spec.h                       |    2 +-
 src/common/parse_time.c                       |  104 +-
 src/common/parse_time.h                       |    2 +-
 src/common/plugin.c                           |   10 +-
 src/common/plugin.h                           |    4 +-
 src/common/plugrack.c                         |    2 +-
 src/common/plugrack.h                         |    2 +-
 src/common/plugstack.c                        |   10 +-
 src/common/plugstack.h                        |    2 +-
 src/common/print_fields.c                     |    2 +-
 src/common/print_fields.h                     |    4 +-
 src/common/proc_args.c                        |  115 +-
 src/common/proc_args.h                        |    9 +-
 src/common/read_config.c                      |  512 +-
 src/common/read_config.h                      |   63 +-
 src/common/safeopen.c                         |    2 +-
 src/common/safeopen.h                         |    2 +-
 src/common/slurm_accounting_storage.c         |   85 +-
 src/common/slurm_accounting_storage.h         |   30 +-
 src/common/slurm_auth.c                       |    5 +-
 src/common/slurm_auth.h                       |    2 +-
 src/common/slurm_cred.c                       |    8 +-
 src/common/slurm_cred.h                       |    2 +-
 src/common/slurm_errno.c                      |   14 +-
 src/common/slurm_jobacct_gather.c             |    6 +-
 src/common/slurm_jobacct_gather.h             |    8 +-
 src/common/slurm_jobcomp.c                    |    2 +-
 src/common/slurm_jobcomp.h                    |    2 +-
 src/common/slurm_priority.c                   |    2 +-
 src/common/slurm_priority.h                   |    2 +-
 src/common/slurm_protocol_api.c               |   91 +-
 src/common/slurm_protocol_api.h               |   98 +-
 src/common/slurm_protocol_common.h            |   11 +-
 src/common/slurm_protocol_defs.c              |  517 +-
 src/common/slurm_protocol_defs.h              |  254 +-
 src/common/slurm_protocol_interface.h         |    2 +-
 src/common/slurm_protocol_mongo_common.h      |    2 +-
 src/common/slurm_protocol_pack.c              | 2320 +++++++-
 src/common/slurm_protocol_pack.h              |   10 +-
 src/common/slurm_protocol_socket_common.h     |    2 +-
 .../slurm_protocol_socket_implementation.c    |    6 +-
 src/common/slurm_protocol_util.c              |   70 +-
 src/common/slurm_protocol_util.h              |    2 +-
 src/common/slurm_resource_info.c              |   13 +-
 src/common/slurm_resource_info.h              |    2 +-
 src/common/slurm_rlimits_info.c               |    2 +-
 src/common/slurm_rlimits_info.h               |    2 +-
 src/common/slurm_selecttype_info.c            |    2 +-
 src/common/slurm_selecttype_info.h            |    4 +-
 src/common/slurm_step_layout.c                |   95 +-
 src/common/slurm_step_layout.h                |    2 +-
 src/common/slurm_strcasestr.c                 |    2 +-
 src/common/slurm_strcasestr.h                 |    2 +-
 src/common/slurm_topology.c                   |   17 +-
 src/common/slurm_topology.h                   |   13 +-
 src/common/slurm_xlator.h                     |   20 +-
 src/common/slurmdb_defs.c                     |   28 +-
 src/common/slurmdb_defs.h                     |    4 +-
 src/common/slurmdb_pack.c                     |  234 +-
 src/common/slurmdb_pack.h                     |    4 +-
 src/common/slurmdbd_defs.c                    |  334 +-
 src/common/slurmdbd_defs.h                    |  130 +-
 src/common/stepd_api.c                        |    6 +-
 src/common/stepd_api.h                        |    2 +-
 src/common/switch.c                           |    2 +-
 src/common/switch.h                           |    2 +-
 src/common/timers.c                           |   23 +-
 src/common/timers.h                           |   23 +-
 src/common/uid.c                              |    2 +-
 src/common/uid.h                              |    2 +-
 src/common/unsetenv.c                         |    2 +-
 src/common/unsetenv.h                         |    2 +-
 src/common/working_cluster.c                  |   30 +-
 src/common/working_cluster.h                  |   13 +-
 src/common/write_labelled_message.c           |    2 +-
 src/common/write_labelled_message.h           |    2 +-
 src/common/xassert.c                          |    2 +-
 src/common/xassert.h                          |    2 +-
 src/common/xcgroup.c                          | 1114 ++++
 src/common/xcgroup.h                          |  317 +
 .../xcgroup_read_config.c}                    |  183 +-
 .../xcgroup_read_config.h}                    |   70 +-
 src/common/xcpuinfo.c                         |  976 +++
 .../proctrack/cgroup => common}/xcpuinfo.h    |   99 +-
 src/common/xmalloc.c                          |    2 +-
 src/common/xmalloc.h                          |    2 +-
 src/common/xsignal.c                          |    4 +-
 src/common/xsignal.h                          |    2 +-
 src/common/xstring.c                          |   14 +-
 src/common/xstring.h                          |    2 +-
 src/database/Makefile.in                      |    8 +
 src/database/mysql_common.c                   |    4 +-
 src/database/mysql_common.h                   |    6 +-
 src/database/pgsql_common.c                   |    4 +-
 src/database/pgsql_common.h                   |    6 +-
 src/db_api/Makefile.am                        |    3 +-
 src/db_api/Makefile.in                        |   11 +-
 src/db_api/account_functions.c                |    9 +-
 src/db_api/archive_functions.c                |    9 +-
 src/db_api/assoc_functions.c                  |    9 +-
 src/db_api/cluster_functions.c                |    9 +-
 src/db_api/cluster_report_functions.c         |    6 +-
 src/db_api/connection_functions.c             |    9 +-
 src/db_api/coord_functions.c                  |    9 +-
 src/db_api/extra_get_functions.c              |    9 +-
 src/db_api/job_report_functions.c             |    9 +-
 src/db_api/qos_functions.c                    |    9 +-
 src/db_api/resv_report_functions.c            |    9 +-
 src/db_api/usage_functions.c                  |    9 +-
 src/db_api/user_functions.c                   |    9 +-
 src/db_api/user_report_functions.c            |    9 +-
 src/db_api/wckey_functions.c                  |    9 +-
 src/plugins/Makefile.in                       |    8 +
 src/plugins/accounting_storage/Makefile.in    |    8 +
 .../accounting_storage/common/Makefile.in     |    8 +
 .../accounting_storage/common/common_as.c     |   19 +-
 .../accounting_storage/common/common_as.h     |    2 +-
 .../accounting_storage/filetxt/Makefile.in    |    8 +
 .../filetxt/accounting_storage_filetxt.c      |   15 +-
 .../filetxt/filetxt_jobacct_process.c         |   12 +-
 .../filetxt/filetxt_jobacct_process.h         |    2 +-
 .../accounting_storage/mysql/Makefile.in      |    8 +
 .../mysql/accounting_storage_mysql.c          |   83 +-
 .../mysql/accounting_storage_mysql.h          |    3 +-
 .../accounting_storage/mysql/as_mysql_acct.c  |    3 +-
 .../accounting_storage/mysql/as_mysql_acct.h  |    2 +-
 .../mysql/as_mysql_archive.c                  |    2 +-
 .../mysql/as_mysql_archive.h                  |    2 +-
 .../accounting_storage/mysql/as_mysql_assoc.c |   21 +-
 .../accounting_storage/mysql/as_mysql_assoc.h |    2 +-
 .../mysql/as_mysql_cluster.c                  |  115 +-
 .../mysql/as_mysql_cluster.h                  |    5 +-
 .../mysql/as_mysql_convert.c                  |    2 +-
 .../mysql/as_mysql_convert.h                  |    2 +-
 .../accounting_storage/mysql/as_mysql_job.c   |  125 +-
 .../accounting_storage/mysql/as_mysql_job.h   |    2 +-
 .../mysql/as_mysql_jobacct_process.c          |   34 +-
 .../mysql/as_mysql_jobacct_process.h          |    2 +-
 .../mysql/as_mysql_problems.c                 |    2 +-
 .../mysql/as_mysql_problems.h                 |    2 +-
 .../accounting_storage/mysql/as_mysql_qos.c   |   74 +-
 .../accounting_storage/mysql/as_mysql_qos.h   |    2 +-
 .../accounting_storage/mysql/as_mysql_resv.c  |    5 +-
 .../accounting_storage/mysql/as_mysql_resv.h  |    2 +-
 .../mysql/as_mysql_rollup.c                   |  720 +--
 .../mysql/as_mysql_rollup.h                   |    2 +-
 .../accounting_storage/mysql/as_mysql_txn.c   |    2 +-
 .../accounting_storage/mysql/as_mysql_txn.h   |    2 +-
 .../accounting_storage/mysql/as_mysql_usage.c |    2 +-
 .../accounting_storage/mysql/as_mysql_usage.h |    2 +-
 .../accounting_storage/mysql/as_mysql_user.c  |    2 +-
 .../accounting_storage/mysql/as_mysql_user.h  |    2 +-
 .../accounting_storage/mysql/as_mysql_wckey.c |   11 +-
 .../accounting_storage/mysql/as_mysql_wckey.h |    2 +-
 .../accounting_storage/none/Makefile.in       |    8 +
 .../none/accounting_storage_none.c            |   15 +-
 .../accounting_storage/pgsql/Makefile.in      |    8 +
 .../pgsql/accounting_storage_pgsql.c          |   15 +-
 .../pgsql/accounting_storage_pgsql.h          |    2 +-
 .../accounting_storage/pgsql/as_pg_acct.c     |    2 +-
 .../accounting_storage/pgsql/as_pg_acct.h     |    2 +-
 .../accounting_storage/pgsql/as_pg_archive.c  |    2 +-
 .../accounting_storage/pgsql/as_pg_archive.h  |    2 +-
 .../accounting_storage/pgsql/as_pg_assoc.c    |    2 +-
 .../accounting_storage/pgsql/as_pg_assoc.h    |    2 +-
 .../accounting_storage/pgsql/as_pg_cluster.c  |    2 +-
 .../accounting_storage/pgsql/as_pg_cluster.h  |    2 +-
 .../accounting_storage/pgsql/as_pg_common.c   |    2 +-
 .../accounting_storage/pgsql/as_pg_common.h   |    2 +-
 .../accounting_storage/pgsql/as_pg_event.c    |    2 +-
 .../accounting_storage/pgsql/as_pg_event.h    |    2 +-
 .../accounting_storage/pgsql/as_pg_get_jobs.c |    3 +-
 .../accounting_storage/pgsql/as_pg_job.c      |   35 +-
 .../accounting_storage/pgsql/as_pg_job.h      |    2 +-
 .../accounting_storage/pgsql/as_pg_problem.c  |    2 +-
 .../accounting_storage/pgsql/as_pg_problem.h  |    2 +-
 .../accounting_storage/pgsql/as_pg_qos.c      |    2 +-
 .../accounting_storage/pgsql/as_pg_qos.h      |    2 +-
 .../accounting_storage/pgsql/as_pg_resv.c     |    2 +-
 .../accounting_storage/pgsql/as_pg_resv.h     |    2 +-
 .../accounting_storage/pgsql/as_pg_rollup.c   |    2 +-
 .../accounting_storage/pgsql/as_pg_rollup.h   |    2 +-
 .../accounting_storage/pgsql/as_pg_txn.c      |    2 +-
 .../accounting_storage/pgsql/as_pg_txn.h      |    2 +-
 .../accounting_storage/pgsql/as_pg_usage.c    |    2 +-
 .../accounting_storage/pgsql/as_pg_usage.h    |    2 +-
 .../accounting_storage/pgsql/as_pg_user.c     |    2 +-
 .../accounting_storage/pgsql/as_pg_user.h     |    2 +-
 .../accounting_storage/pgsql/as_pg_wckey.c    |    5 +-
 .../accounting_storage/pgsql/as_pg_wckey.h    |    2 +-
 .../accounting_storage/slurmdbd/Makefile.in   |    8 +
 .../slurmdbd/accounting_storage_slurmdbd.c    |   93 +-
 src/plugins/auth/Makefile.in                  |    8 +
 src/plugins/auth/authd/Makefile.in            |    8 +
 src/plugins/auth/authd/auth_authd.c           |    4 +-
 src/plugins/auth/munge/Makefile.in            |    8 +
 src/plugins/auth/munge/auth_munge.c           |    4 +-
 src/plugins/auth/none/Makefile.in             |    8 +
 src/plugins/auth/none/auth_none.c             |    4 +-
 src/plugins/checkpoint/Makefile.am            |    2 +-
 src/plugins/checkpoint/Makefile.in            |   10 +-
 src/plugins/checkpoint/aix/Makefile.in        |    8 +
 src/plugins/checkpoint/aix/checkpoint_aix.c   |    7 +-
 src/plugins/checkpoint/blcr/Makefile.in       |    8 +
 src/plugins/checkpoint/blcr/checkpoint_blcr.c |    5 +-
 src/plugins/checkpoint/none/Makefile.in       |    8 +
 src/plugins/checkpoint/none/checkpoint_none.c |    7 +-
 src/plugins/checkpoint/ompi/Makefile.in       |    8 +
 src/plugins/checkpoint/ompi/checkpoint_ompi.c |    7 +-
 src/plugins/checkpoint/xlch/Makefile.am       |   17 -
 src/plugins/checkpoint/xlch/checkpoint_xlch.c |  714 ---
 src/plugins/crypto/Makefile.in                |    8 +
 src/plugins/crypto/munge/Makefile.in          |    8 +
 src/plugins/crypto/munge/crypto_munge.c       |    5 +-
 src/plugins/crypto/openssl/Makefile.in        |    8 +
 src/plugins/crypto/openssl/crypto_openssl.c   |    5 +-
 src/plugins/gres/Makefile.in                  |    8 +
 src/plugins/gres/gpu/Makefile.in              |    8 +
 src/plugins/gres/gpu/gres_gpu.c               |   16 +-
 src/plugins/gres/nic/Makefile.in              |    8 +
 src/plugins/gres/nic/gres_nic.c               |    6 +-
 src/plugins/job_submit/Makefile.in            |    8 +
 src/plugins/job_submit/cnode/Makefile.in      |    8 +
 .../job_submit/cnode/job_submit_cnode.c       |    4 +-
 src/plugins/job_submit/defaults/Makefile.in   |    8 +
 .../job_submit/defaults/job_submit_defaults.c |    4 +-
 src/plugins/job_submit/logging/Makefile.in    |    8 +
 .../job_submit/logging/job_submit_logging.c   |    7 +-
 src/plugins/job_submit/lua/Makefile.in        |    8 +
 src/plugins/job_submit/lua/job_submit_lua.c   |   18 +-
 src/plugins/job_submit/partition/Makefile.in  |    8 +
 .../partition/job_submit_partition.c          |    6 +-
 src/plugins/jobacct_gather/Makefile.in        |    8 +
 src/plugins/jobacct_gather/aix/Makefile.in    |    8 +
 .../jobacct_gather/aix/jobacct_gather_aix.c   |   26 +-
 src/plugins/jobacct_gather/linux/Makefile.in  |    8 +
 .../linux/jobacct_gather_linux.c              |   22 +-
 src/plugins/jobacct_gather/none/Makefile.in   |    8 +
 .../jobacct_gather/none/jobacct_gather_none.c |    4 +-
 src/plugins/jobcomp/Makefile.in               |    8 +
 src/plugins/jobcomp/filetxt/Makefile.in       |    8 +
 .../jobcomp/filetxt/filetxt_jobcomp_process.c |    2 +-
 .../jobcomp/filetxt/filetxt_jobcomp_process.h |    2 +-
 src/plugins/jobcomp/filetxt/jobcomp_filetxt.c |    2 +-
 src/plugins/jobcomp/mysql/Makefile.in         |    8 +
 src/plugins/jobcomp/mysql/jobcomp_mysql.c     |    2 +-
 .../jobcomp/mysql/mysql_jobcomp_process.c     |    2 +-
 .../jobcomp/mysql/mysql_jobcomp_process.h     |    2 +-
 src/plugins/jobcomp/none/Makefile.in          |    8 +
 src/plugins/jobcomp/none/jobcomp_none.c       |    5 +-
 src/plugins/jobcomp/pgsql/Makefile.in         |    8 +
 src/plugins/jobcomp/pgsql/jobcomp_pgsql.c     |    2 +-
 .../jobcomp/pgsql/pgsql_jobcomp_process.c     |    2 +-
 .../jobcomp/pgsql/pgsql_jobcomp_process.h     |    2 +-
 src/plugins/jobcomp/script/Makefile.in        |    8 +
 src/plugins/jobcomp/script/jobcomp_script.c   |    7 +-
 src/plugins/mpi/Makefile.in                   |    8 +
 src/plugins/mpi/lam/Makefile.in               |    8 +
 src/plugins/mpi/lam/lam.h                     |    2 +-
 src/plugins/mpi/lam/mpi_lam.c                 |    4 +-
 src/plugins/mpi/mpich1_p4/Makefile.in         |    8 +
 src/plugins/mpi/mpich1_p4/mpich1_p4.c         |    4 +-
 src/plugins/mpi/mpich1_shmem/Makefile.in      |    8 +
 src/plugins/mpi/mpich1_shmem/mpich1_shmem.c   |    4 +-
 src/plugins/mpi/mpichgm/Makefile.in           |    8 +
 src/plugins/mpi/mpichgm/mpi_mpichgm.c         |    4 +-
 src/plugins/mpi/mpichgm/mpichgm.c             |    4 +-
 src/plugins/mpi/mpichgm/mpichgm.h             |    2 +-
 src/plugins/mpi/mpichmx/Makefile.in           |    8 +
 src/plugins/mpi/mpichmx/mpi_mpichmx.c         |    4 +-
 src/plugins/mpi/mpichmx/mpichmx.c             |    4 +-
 src/plugins/mpi/mpichmx/mpichmx.h             |    2 +-
 src/plugins/mpi/mvapich/Makefile.in           |    8 +
 src/plugins/mpi/mvapich/mpi_mvapich.c         |    4 +-
 src/plugins/mpi/mvapich/mvapich.c             |    9 +-
 src/plugins/mpi/mvapich/mvapich.h             |    2 +-
 src/plugins/mpi/none/Makefile.in              |    8 +
 src/plugins/mpi/none/mpi_none.c               |    4 +-
 src/plugins/mpi/openmpi/Makefile.in           |    8 +
 src/plugins/mpi/openmpi/mpi_openmpi.c         |    4 +-
 src/plugins/preempt/Makefile.in               |    8 +
 src/plugins/preempt/none/Makefile.in          |    8 +
 src/plugins/preempt/none/preempt_none.c       |    5 +-
 .../preempt/partition_prio/Makefile.in        |    8 +
 .../partition_prio/preempt_partition_prio.c   |    8 +-
 src/plugins/preempt/qos/Makefile.in           |    8 +
 src/plugins/preempt/qos/preempt_qos.c         |   10 +-
 src/plugins/priority/Makefile.in              |    8 +
 src/plugins/priority/basic/Makefile.in        |    8 +
 src/plugins/priority/basic/priority_basic.c   |   25 +-
 src/plugins/priority/multifactor/Makefile.in  |    8 +
 .../multifactor/priority_multifactor.c        |  365 +-
 src/plugins/proctrack/Makefile.in             |    8 +
 src/plugins/proctrack/aix/Makefile.in         |    8 +
 src/plugins/proctrack/aix/proctrack_aix.c     |   56 +-
 src/plugins/proctrack/cgroup/Changelog        |   20 -
 src/plugins/proctrack/cgroup/Makefile.am      |   11 +-
 src/plugins/proctrack/cgroup/Makefile.in      |   33 +-
 .../proctrack/cgroup/proctrack_cgroup.c       |  749 ++-
 src/plugins/proctrack/cgroup/xcgroup.c        |  985 ----
 src/plugins/proctrack/cgroup/xcgroup.h        |  237 -
 src/plugins/proctrack/cgroup/xcpuinfo.c       |  312 -
 src/plugins/proctrack/linuxproc/Makefile.in   |    8 +
 src/plugins/proctrack/linuxproc/kill_tree.c   |   11 +-
 src/plugins/proctrack/linuxproc/kill_tree.h   |    2 +-
 .../proctrack/linuxproc/proctrack_linuxproc.c |   39 +-
 src/plugins/proctrack/lua/Makefile.in         |    8 +
 src/plugins/proctrack/lua/proctrack_lua.c     |   88 +-
 src/plugins/proctrack/pgid/Makefile.in        |    8 +
 src/plugins/proctrack/pgid/proctrack_pgid.c   |   56 +-
 src/plugins/proctrack/rms/Makefile.in         |    8 +
 src/plugins/proctrack/rms/proctrack_rms.c     |   51 +-
 src/plugins/proctrack/sgi_job/Makefile.in     |    8 +
 .../proctrack/sgi_job/proctrack_sgi_job.c     |   38 +-
 src/plugins/sched/Makefile.in                 |    8 +
 src/plugins/sched/backfill/Makefile.in        |    8 +
 src/plugins/sched/backfill/backfill.c         |  139 +-
 src/plugins/sched/backfill/backfill.h         |    2 +-
 src/plugins/sched/backfill/backfill_wrapper.c |    5 +-
 src/plugins/sched/builtin/Makefile.in         |    8 +
 src/plugins/sched/builtin/builtin.c           |   35 +-
 src/plugins/sched/builtin/builtin.h           |    2 +-
 src/plugins/sched/builtin/builtin_wrapper.c   |    5 +-
 src/plugins/sched/hold/Makefile.in            |    8 +
 src/plugins/sched/hold/hold_wrapper.c         |    4 +-
 src/plugins/sched/wiki/Makefile.in            |    8 +
 src/plugins/sched/wiki/cancel_job.c           |    4 +-
 src/plugins/sched/wiki/get_jobs.c             |    4 +-
 src/plugins/sched/wiki/get_nodes.c            |   25 +-
 src/plugins/sched/wiki/hostlist.c             |    2 +-
 src/plugins/sched/wiki/job_modify.c           |    2 +-
 src/plugins/sched/wiki/msg.c                  |   12 +-
 src/plugins/sched/wiki/msg.h                  |    5 +-
 src/plugins/sched/wiki/resume_job.c           |    2 +-
 src/plugins/sched/wiki/sched_wiki.c           |    7 +-
 src/plugins/sched/wiki/start_job.c            |    2 +-
 src/plugins/sched/wiki/suspend_job.c          |    2 +-
 src/plugins/sched/wiki2/Makefile.in           |    8 +
 src/plugins/sched/wiki2/cancel_job.c          |    4 +-
 src/plugins/sched/wiki2/event.c               |    2 +-
 src/plugins/sched/wiki2/get_jobs.c            |    5 +-
 src/plugins/sched/wiki2/get_nodes.c           |   25 +-
 src/plugins/sched/wiki2/hostlist.c            |    2 +-
 src/plugins/sched/wiki2/initialize.c          |    2 +-
 src/plugins/sched/wiki2/job_add_task.c        |    2 +-
 src/plugins/sched/wiki2/job_modify.c          |    2 +-
 src/plugins/sched/wiki2/job_notify.c          |    2 +-
 src/plugins/sched/wiki2/job_release_task.c    |    2 +-
 src/plugins/sched/wiki2/job_requeue.c         |    5 +-
 src/plugins/sched/wiki2/job_signal.c          |    6 +-
 src/plugins/sched/wiki2/job_will_run.c        |    2 +-
 src/plugins/sched/wiki2/msg.c                 |   12 +-
 src/plugins/sched/wiki2/msg.h                 |    5 +-
 src/plugins/sched/wiki2/resume_job.c          |    2 +-
 src/plugins/sched/wiki2/sched_wiki.c          |    4 +-
 src/plugins/sched/wiki2/start_job.c           |    2 +-
 src/plugins/sched/wiki2/suspend_job.c         |    2 +-
 src/plugins/select/Makefile.am                |    8 +-
 src/plugins/select/Makefile.in                |   15 +-
 src/plugins/select/bgq/Makefile.am            |   15 -
 src/plugins/select/bgq/select_bgq.cc          |  427 --
 src/plugins/select/bluegene/Makefile.am       |   87 +-
 src/plugins/select/bluegene/Makefile.in       |  445 +-
 src/plugins/select/bluegene/ba/Makefile.am    |   35 +
 .../{block_allocator => ba}/Makefile.in       |   77 +-
 .../{block_allocator => ba}/block_allocator.c | 2870 ++-------
 .../state_test.h => ba/block_allocator.h}     |   52 +-
 .../{block_allocator => ba}/wire_test.c       |   78 +-
 .../select/bluegene/ba_bgq/Makefile.am        |   38 +
 .../select/bluegene/ba_bgq/Makefile.in        |  672 +++
 .../select/bluegene/ba_bgq/block_allocator.c  | 2082 +++++++
 .../block_allocator.h}                        |   38 +-
 .../select/bluegene/ba_bgq/wire_test.c        |  233 +
 src/plugins/select/bluegene/ba_common.c       | 1587 +++++
 src/plugins/select/bluegene/ba_common.h       |  563 ++
 src/plugins/select/bluegene/bg_core.c         |  604 ++
 src/plugins/select/bluegene/bg_core.h         |   87 +
 .../defined_block.c => bg_defined_block.c}    |  306 +-
 .../defined_block.h => bg_defined_block.h}    |    4 +-
 .../dynamic_block.c => bg_dynamic_block.c}    |  497 +-
 .../dynamic_block.h => bg_dynamic_block.h}    |    7 +-
 src/plugins/select/bluegene/bg_enums.h        |  173 +
 .../{plugin/jobinfo.c => bg_job_info.c}       |  362 +-
 .../{plugin/jobinfo.h => bg_job_info.h}       |   44 +-
 .../bluegene/{plugin => }/bg_job_place.c      |  826 ++-
 .../bluegene/{plugin => }/bg_job_place.h      |    2 +-
 .../select/bluegene/{plugin => }/bg_job_run.c |  947 +--
 .../select/bluegene/{plugin => }/bg_job_run.h |   12 +-
 .../select/bluegene/bg_list_functions.c       |  224 +
 .../select/bluegene/bg_list_functions.h       |   64 +
 .../{plugin/nodeinfo.c => bg_node_info.c}     |  118 +-
 .../{plugin/nodeinfo.h => bg_node_info.h}     |   12 +-
 src/plugins/select/bluegene/bg_read_config.c  |  851 +++
 src/plugins/select/bluegene/bg_read_config.h  |   79 +
 .../{plugin => }/bg_record_functions.c        | 1107 ++--
 .../select/bluegene/bg_record_functions.h     |   90 +
 src/plugins/select/bluegene/bg_status.c       |  322 +
 .../{bgq/bgq.h => bluegene/bg_status.h}       |   51 +-
 .../bg_record_functions.h => bg_structs.h}    |  143 +-
 src/plugins/select/bluegene/bl/Makefile.am    |   14 +
 src/plugins/select/bluegene/bl/Makefile.in    |  603 ++
 .../select/bluegene/bl/bridge_linker.c        | 2522 ++++++++
 .../state_test.c => bl/bridge_status.c}       |  508 +-
 .../select/bluegene/bl/bridge_status.h        |   48 +
 .../bridge_switch_connections.c}              |  264 +-
 .../bridge_switch_connections.h}              |   27 +-
 .../select/bluegene/bl_bgq/Makefile.am        |   15 +
 .../{bgq => bluegene/bl_bgq}/Makefile.in      |  131 +-
 .../select/bluegene/bl_bgq/bridge_helper.cc   |  318 +
 .../select/bluegene/bl_bgq/bridge_helper.h    |   82 +
 .../select/bluegene/bl_bgq/bridge_linker.cc   | 1067 ++++
 .../select/bluegene/bl_bgq/bridge_status.cc   |  550 ++
 .../select/bluegene/bl_bgq/bridge_status.h    |   47 +
 .../bluegene/block_allocator/Makefile.am      |   29 -
 .../block_allocator/block_allocator.h         |  530 --
 .../bluegene/block_allocator/bridge_linker.c  |  750 ---
 .../{block_allocator => }/bridge_linker.h     |  128 +-
 src/plugins/select/bluegene/configure_api.c   |  371 ++
 src/plugins/select/bluegene/configure_api.h   |  182 +
 .../bluegene/{plugin => }/libsched_if64.c     |    5 +-
 .../select/bluegene/plugin/Makefile.am        |   82 -
 src/plugins/select/bluegene/plugin/Manifest   |   21 -
 .../select/bluegene/plugin/bg_block_info.c    |  706 ---
 .../select/bluegene/plugin/block_sys.c        | 1281 ----
 src/plugins/select/bluegene/plugin/bluegene.c | 1719 ------
 src/plugins/select/bluegene/plugin/bluegene.h |  195 -
 .../select/bluegene/plugin/select_bluegene.c  | 1483 -----
 src/plugins/select/bluegene/runjob_plugin.cc  |  319 +
 src/plugins/select/bluegene/select_bluegene.c | 2766 +++++++++
 src/plugins/select/bluegene/sfree/Makefile.am |   17 +
 .../bluegene/sfree}/Makefile.in               |  168 +-
 .../select/bluegene/{plugin => sfree}/opts.c  |    2 +-
 .../select/bluegene/{plugin => sfree}/sfree.c |    8 +-
 .../select/bluegene/{plugin => sfree}/sfree.h |   15 +-
 .../bluegene/{plugin => }/slurm_epilog.c      |    5 +-
 .../bluegene/{plugin => }/slurm_prolog.c      |   18 +-
 src/plugins/select/bluegene/wrap_rm_api.h     |   85 -
 src/plugins/select/cons_res/Makefile.in       |    8 +
 src/plugins/select/cons_res/dist_tasks.c      |   22 +-
 src/plugins/select/cons_res/dist_tasks.h      |    2 +-
 src/plugins/select/cons_res/job_test.c        |  107 +-
 src/plugins/select/cons_res/job_test.h        |    7 +-
 src/plugins/select/cons_res/select_cons_res.c |  673 ++-
 src/plugins/select/cons_res/select_cons_res.h |    7 +-
 src/plugins/select/cray/Makefile.am           |   30 +-
 src/plugins/select/cray/Makefile.in           |  297 +-
 src/plugins/select/cray/basil_alps.h          |  630 ++
 src/plugins/select/cray/basil_interface.c     |  942 +++
 src/plugins/select/cray/basil_interface.h     |  105 +
 src/plugins/select/cray/cray_config.c         |  192 +
 src/plugins/select/cray/cray_config.h         |   97 +
 src/plugins/select/cray/libalps/Makefile.am   |   29 +
 src/plugins/select/cray/libalps/Makefile.in   |  741 +++
 src/plugins/select/cray/libalps/atoul.c       |   54 +
 .../cray/libalps/basil_mysql_routines.c       |  300 +
 .../select/cray/libalps/basil_request.c       |  184 +
 src/plugins/select/cray/libalps/do_confirm.c  |   46 +
 src/plugins/select/cray/libalps/do_query.c    |  194 +
 src/plugins/select/cray/libalps/do_release.c  |  101 +
 src/plugins/select/cray/libalps/do_reserve.c  |  166 +
 src/plugins/select/cray/libalps/do_switch.c   |   60 +
 .../select/cray/libalps/memory_handling.c     |  150 +
 .../select/cray/libalps/memory_handling.h     |   46 +
 .../select/cray/libalps/parser_basil_1.0.c    |  133 +
 .../select/cray/libalps/parser_basil_1.1.c    |  186 +
 .../select/cray/libalps/parser_basil_3.1.c    |  294 +
 .../select/cray/libalps/parser_basil_4.0.c    |  362 ++
 .../select/cray/libalps/parser_common.c       |  716 +++
 .../select/cray/libalps/parser_internal.h     |  111 +
 src/plugins/select/cray/libalps/popen2.c      |  150 +
 .../select/cray/libemulate/Makefile.am        |   16 +
 .../select/cray/libemulate/Makefile.in        |  618 ++
 .../select/cray/libemulate/alps_emulate.c     |  609 ++
 src/plugins/select/cray/libemulate/hilbert.c  |   88 +
 src/plugins/select/cray/libemulate/hilbert.h  |   44 +
 src/plugins/select/cray/nodespec.c            |  151 +
 src/plugins/select/cray/other_select.c        |  162 +-
 src/plugins/select/cray/other_select.h        |   69 +-
 src/plugins/select/cray/parser_common.h       |  163 +
 src/plugins/select/cray/select_cray.c         |  319 +-
 src/plugins/select/linear/Makefile.in         |    8 +
 src/plugins/select/linear/select_linear.c     | 1189 +++-
 src/plugins/select/linear/select_linear.h     |    2 +-
 src/plugins/switch/Makefile.in                |    8 +
 src/plugins/switch/elan/Makefile.in           |    8 +
 src/plugins/switch/elan/qsw.c                 |    4 +-
 src/plugins/switch/elan/qsw.h                 |    2 +-
 src/plugins/switch/elan/switch_elan.c         |    4 +-
 src/plugins/switch/federation/Makefile.in     |    8 +
 src/plugins/switch/federation/federation.c    |    7 +-
 src/plugins/switch/federation/federation.h    |    2 +-
 .../switch/federation/federation_keys.h       |    2 +-
 .../switch/federation/switch_federation.c     |    4 +-
 src/plugins/switch/none/Makefile.in           |    8 +
 src/plugins/switch/none/switch_none.c         |    4 +-
 src/plugins/task/Makefile.am                  |    2 +-
 src/plugins/task/Makefile.in                  |   10 +-
 src/plugins/task/affinity/Makefile.in         |    8 +
 src/plugins/task/affinity/affinity.c          |    2 +-
 src/plugins/task/affinity/affinity.h          |    4 +-
 src/plugins/task/affinity/cpuset.c            |    2 +-
 src/plugins/task/affinity/dist_tasks.c        |  101 +-
 src/plugins/task/affinity/dist_tasks.h        |    2 +-
 src/plugins/task/affinity/numa.c              |    2 +-
 src/plugins/task/affinity/schedutils.c        |    2 +-
 src/plugins/task/affinity/task_affinity.c     |   10 +-
 src/plugins/task/cgroup/Makefile.am           |   17 +
 .../plugin => task/cgroup}/Makefile.in        |  337 +-
 src/plugins/task/cgroup/task_cgroup.c         |  319 +
 src/plugins/task/cgroup/task_cgroup.h         |   46 +
 src/plugins/task/cgroup/task_cgroup_cpuset.c  |  749 +++
 src/plugins/task/cgroup/task_cgroup_cpuset.h  |   61 +
 src/plugins/task/cgroup/task_cgroup_devices.c |  509 ++
 src/plugins/task/cgroup/task_cgroup_devices.h |   58 +
 src/plugins/task/cgroup/task_cgroup_memory.c  |  424 ++
 src/plugins/task/cgroup/task_cgroup_memory.h  |   58 +
 src/plugins/task/none/Makefile.in             |    8 +
 src/plugins/task/none/task_none.c             |   12 +-
 src/plugins/topology/3d_torus/Makefile.in     |    8 +
 src/plugins/topology/3d_torus/hilbert_slurm.c |   87 +-
 .../topology/3d_torus/topology_3d_torus.c     |   31 +-
 src/plugins/topology/Makefile.in              |    8 +
 src/plugins/topology/node_rank/Makefile.in    |    8 +
 .../topology/node_rank/topology_node_rank.c   |   88 +-
 src/plugins/topology/none/Makefile.in         |    8 +
 src/plugins/topology/none/topology_none.c     |   16 +-
 src/plugins/topology/tree/Makefile.in         |    8 +
 src/plugins/topology/tree/topology_tree.c     |   26 +-
 src/sacct/Makefile.in                         |    8 +
 src/sacct/options.c                           |    2 +-
 src/sacct/print.c                             |   44 +-
 src/sacct/process.c                           |    2 +-
 src/sacct/sacct.c                             |    4 +-
 src/sacct/sacct.h                             |    4 +-
 src/sacctmgr/Makefile.in                      |    8 +
 src/sacctmgr/account_functions.c              |    2 +-
 src/sacctmgr/archive_functions.c              |   11 +-
 src/sacctmgr/association_functions.c          |    2 +-
 src/sacctmgr/cluster_functions.c              |  184 +-
 src/sacctmgr/common.c                         |   57 +-
 src/sacctmgr/config_functions.c               |    2 +-
 src/sacctmgr/event_functions.c                |    6 +-
 src/sacctmgr/file_functions.c                 |   22 +-
 src/sacctmgr/job_functions.c                  |   10 +-
 src/sacctmgr/problem_functions.c              |    7 +-
 src/sacctmgr/qos_functions.c                  |   80 +-
 src/sacctmgr/sacctmgr.c                       |   33 +-
 src/sacctmgr/sacctmgr.h                       |    9 +-
 src/sacctmgr/txn_functions.c                  |    6 +-
 src/sacctmgr/user_functions.c                 |    2 +-
 src/sacctmgr/wckey_functions.c                |    6 +-
 src/salloc/Makefile.am                        |    4 +
 src/salloc/Makefile.in                        |   16 +-
 src/salloc/opt.c                              |  101 +-
 src/salloc/opt.h                              |    8 +-
 src/salloc/salloc.c                           |  144 +-
 src/salloc/salloc.h                           |    2 +-
 src/sattach/Makefile.in                       |    8 +
 src/sattach/attach.c                          |    2 +-
 src/sattach/opt.c                             |    2 +-
 src/sattach/opt.h                             |    4 +-
 src/sattach/sattach.c                         |   27 +-
 src/sbatch/Makefile.in                        |    8 +
 src/sbatch/mult_cluster.c                     |    2 +-
 src/sbatch/mult_cluster.h                     |    2 +-
 src/sbatch/opt.c                              |  159 +-
 src/sbatch/opt.h                              |    6 +-
 src/sbatch/sbatch.c                           |   53 +-
 src/sbcast/Makefile.in                        |    8 +
 src/sbcast/agent.c                            |    4 +-
 src/sbcast/opts.c                             |    2 +-
 src/sbcast/sbcast.c                           |    4 +-
 src/sbcast/sbcast.h                           |    8 +-
 src/scancel/Makefile.in                       |    8 +
 src/scancel/opt.c                             |   17 +-
 src/scancel/scancel.c                         |   60 +-
 src/scancel/scancel.h                         |    2 +-
 src/scontrol/Makefile.in                      |    8 +
 src/scontrol/create_res.c                     |    9 +-
 src/scontrol/info_block.c                     |    2 +-
 src/scontrol/info_job.c                       |    8 +-
 src/scontrol/info_node.c                      |  180 +-
 src/scontrol/info_part.c                      |    2 +-
 src/scontrol/info_res.c                       |    2 +-
 src/scontrol/scontrol.c                       |  366 +-
 src/scontrol/scontrol.h                       |   14 +-
 src/scontrol/update_job.c                     |  123 +-
 src/scontrol/update_node.c                    |  162 +-
 src/scontrol/update_part.c                    |   32 +-
 src/scontrol/update_step.c                    |   65 +-
 src/sinfo/Makefile.in                         |    8 +
 src/sinfo/opts.c                              |   31 +-
 src/sinfo/print.c                             |   43 +-
 src/sinfo/print.h                             |   14 +-
 src/sinfo/sinfo.c                             |   55 +-
 src/sinfo/sinfo.h                             |   12 +-
 src/sinfo/sort.c                              |  120 +-
 src/slurmctld/Makefile.am                     |   23 +-
 src/slurmctld/Makefile.in                     |   30 +-
 src/slurmctld/acct_policy.c                   |  414 +-
 src/slurmctld/acct_policy.h                   |    2 +-
 src/slurmctld/agent.c                         |  117 +-
 src/slurmctld/agent.h                         |    4 +-
 src/slurmctld/backup.c                        |   25 +-
 src/slurmctld/basil_interface.c               |  184 -
 src/slurmctld/basil_interface.h               |   73 -
 src/slurmctld/controller.c                    |   59 +-
 src/slurmctld/front_end.c                     |  823 +++
 src/slurmctld/front_end.h                     |  131 +
 src/slurmctld/gang.c                          |   13 +-
 src/slurmctld/gang.h                          |    5 +-
 src/slurmctld/groups.c                        |   25 +-
 src/slurmctld/groups.h                        |    4 +-
 src/slurmctld/job_mgr.c                       | 1836 ++++--
 src/slurmctld/job_scheduler.c                 |  250 +-
 src/slurmctld/job_scheduler.h                 |    2 +-
 src/slurmctld/job_submit.c                    |    8 +-
 src/slurmctld/job_submit.h                    |    4 +-
 src/slurmctld/licenses.c                      |   76 +-
 src/slurmctld/licenses.h                      |   16 +-
 src/slurmctld/locks.c                         |    2 +-
 src/slurmctld/locks.h                         |    2 +-
 src/slurmctld/node_mgr.c                      |  564 +-
 src/slurmctld/node_scheduler.c                |  228 +-
 src/slurmctld/node_scheduler.h                |    5 +-
 src/slurmctld/partition_mgr.c                 |  124 +-
 src/slurmctld/ping_nodes.c                    |  125 +-
 src/slurmctld/ping_nodes.h                    |    2 +-
 src/slurmctld/port_mgr.c                      |    2 +-
 src/slurmctld/port_mgr.h                      |    2 +-
 src/slurmctld/power_save.c                    |    2 +-
 src/slurmctld/preempt.c                       |   55 +-
 src/slurmctld/preempt.h                       |   10 +-
 src/slurmctld/proc_req.c                      |  541 +-
 src/slurmctld/proc_req.h                      |    2 +-
 src/slurmctld/read_config.c                   |  107 +-
 src/slurmctld/read_config.h                   |    2 +-
 src/slurmctld/reservation.c                   |  219 +-
 src/slurmctld/reservation.h                   |    5 +-
 src/slurmctld/sched_plugin.c                  |    2 +-
 src/slurmctld/sched_plugin.h                  |    6 +-
 src/slurmctld/slurmctld.h                     |  175 +-
 src/slurmctld/srun_comm.c                     |   60 +-
 src/slurmctld/srun_comm.h                     |   12 +-
 src/slurmctld/state_save.c                    |   30 +-
 src/slurmctld/state_save.h                    |   10 +-
 src/slurmctld/step_mgr.c                      |  762 ++-
 src/slurmctld/trigger_mgr.c                   |  138 +-
 src/slurmctld/trigger_mgr.h                   |    7 +-
 src/slurmd/Makefile.am                        |    2 +-
 src/slurmd/Makefile.in                        |   10 +-
 src/slurmd/common/Makefile.am                 |   18 +
 src/slurmd/common/Makefile.in                 |  608 ++
 src/slurmd/common/proctrack.c                 |   42 +-
 src/slurmd/common/proctrack.h                 |   19 +-
 src/slurmd/common/reverse_tree.h              |    2 +-
 src/slurmd/common/run_script.c                |    2 +-
 src/slurmd/common/run_script.h                |    2 +-
 src/slurmd/common/set_oomadj.c                |   27 +-
 src/slurmd/common/set_oomadj.h                |    2 +-
 src/slurmd/common/setproctitle.c              |    2 +-
 src/slurmd/common/setproctitle.h              |    2 +-
 src/slurmd/common/slurmstepd_init.c           |    4 +-
 src/slurmd/common/slurmstepd_init.h           |    2 +-
 src/slurmd/common/task_plugin.c               |  215 +-
 src/slurmd/common/task_plugin.h               |   44 +-
 src/slurmd/slurmd/Makefile.am                 |   18 +-
 src/slurmd/slurmd/Makefile.in                 |  124 +-
 src/slurmd/slurmd/get_mach_stat.c             |  690 +--
 src/slurmd/slurmd/get_mach_stat.h             |    7 +-
 src/slurmd/slurmd/read_proc.c                 |    4 +-
 src/slurmd/slurmd/req.c                       |   81 +-
 src/slurmd/slurmd/req.h                       |    2 +-
 src/slurmd/slurmd/reverse_tree_math.c         |    2 +-
 src/slurmd/slurmd/reverse_tree_math.h         |    2 +-
 src/slurmd/slurmd/slurmd.c                    |  131 +-
 src/slurmd/slurmd/slurmd.h                    |    2 +-
 src/slurmd/slurmd/xcpu.c                      |    2 +-
 src/slurmd/slurmd/xcpu.h                      |    2 +-
 src/slurmd/slurmstepd/Makefile.am             |   20 +-
 src/slurmd/slurmstepd/Makefile.in             |  125 +-
 src/slurmd/slurmstepd/fname.c                 |    2 +-
 src/slurmd/slurmstepd/fname.h                 |    2 +-
 src/slurmd/slurmstepd/io.c                    |   24 +-
 src/slurmd/slurmstepd/io.h                    |    2 +-
 src/slurmd/slurmstepd/mgr.c                   |  425 +-
 src/slurmd/slurmstepd/mgr.h                   |    2 +-
 src/slurmd/slurmstepd/multi_prog.c            |    2 +-
 src/slurmd/slurmstepd/multi_prog.h            |    2 +-
 src/slurmd/slurmstepd/pam_ses.c               |    4 +-
 src/slurmd/slurmstepd/pam_ses.h               |    2 +-
 src/slurmd/slurmstepd/pdebug.c                |    8 +-
 src/slurmd/slurmstepd/pdebug.h                |    2 +-
 src/slurmd/slurmstepd/req.c                   |   35 +-
 src/slurmd/slurmstepd/req.h                   |    2 +-
 src/slurmd/slurmstepd/slurmstepd.c            |   19 +-
 src/slurmd/slurmstepd/slurmstepd.h            |    2 +-
 src/slurmd/slurmstepd/slurmstepd_job.c        |   13 +-
 src/slurmd/slurmstepd/slurmstepd_job.h        |    4 +-
 .../slurmstepd/step_terminate_monitor.c       |    2 +-
 .../slurmstepd/step_terminate_monitor.h       |    2 +-
 src/slurmd/slurmstepd/task.c                  |   64 +-
 src/slurmd/slurmstepd/task.h                  |    6 +-
 src/slurmd/slurmstepd/ulimits.c               |    9 +-
 src/slurmd/slurmstepd/ulimits.h               |    2 +-
 src/slurmdbd/Makefile.in                      |    8 +
 src/slurmdbd/agent.c                          |    2 +-
 src/slurmdbd/agent.h                          |    2 +-
 src/slurmdbd/backup.c                         |    2 +-
 src/slurmdbd/backup.h                         |    2 +-
 src/slurmdbd/proc_req.c                       |  149 +-
 src/slurmdbd/proc_req.h                       |    4 +-
 src/slurmdbd/read_config.c                    |   21 +-
 src/slurmdbd/read_config.h                    |    4 +-
 src/slurmdbd/rpc_mgr.c                        |   14 +-
 src/slurmdbd/rpc_mgr.h                        |    2 +-
 src/slurmdbd/slurmdbd.c                       |   66 +-
 src/slurmdbd/slurmdbd.h                       |    2 +-
 src/smap/Makefile.am                          |   18 +-
 src/smap/Makefile.in                          |   40 +-
 src/smap/configure_functions.c                | 1615 +++--
 src/smap/grid_functions.c                     |  419 +-
 src/smap/job_functions.c                      |  134 +-
 src/smap/opts.c                               |   65 +-
 src/smap/partition_functions.c                |  431 +-
 src/smap/reservation_functions.c              |   43 +-
 src/smap/smap.c                               |  349 +-
 src/smap/smap.h                               |   58 +-
 src/sprio/Makefile.in                         |    8 +
 src/sprio/opts.c                              |   36 +-
 src/sprio/print.c                             |   10 +-
 src/sprio/print.h                             |    4 +-
 src/sprio/sprio.c                             |   29 +-
 src/sprio/sprio.h                             |    4 +-
 src/squeue/Makefile.in                        |    8 +
 src/squeue/opts.c                             |   64 +-
 src/squeue/print.c                            |   82 +-
 src/squeue/print.h                            |   14 +-
 src/squeue/sort.c                             |   34 +-
 src/squeue/squeue.c                           |    4 +-
 src/squeue/squeue.h                           |    5 +-
 src/sreport/Makefile.in                       |    8 +
 src/sreport/assoc_reports.c                   |    2 +-
 src/sreport/assoc_reports.h                   |    2 +-
 src/sreport/cluster_reports.c                 |   14 +-
 src/sreport/cluster_reports.h                 |    2 +-
 src/sreport/common.c                          |    2 +-
 src/sreport/job_reports.c                     |    8 +-
 src/sreport/job_reports.h                     |    2 +-
 src/sreport/resv_reports.c                    |    6 +-
 src/sreport/resv_reports.h                    |    2 +-
 src/sreport/sreport.c                         |    2 +-
 src/sreport/sreport.h                         |    7 +-
 src/sreport/user_reports.c                    |    2 +-
 src/sreport/user_reports.h                    |    2 +-
 src/srun/Makefile.in                          |    8 +
 src/srun/allocate.c                           |  105 +-
 src/srun/allocate.h                           |    4 +-
 src/srun/debugger.c                           |    2 +-
 src/srun/debugger.h                           |    4 +-
 src/srun/fname.c                              |    2 +-
 src/srun/fname.h                              |    2 +-
 src/srun/multi_prog.c                         |    2 +-
 src/srun/multi_prog.h                         |    2 +-
 src/srun/opt.c                                |  249 +-
 src/srun/opt.h                                |   12 +-
 src/srun/srun.c                               |   45 +-
 src/srun/srun.h                               |    2 +-
 src/srun/srun_job.c                           |   23 +-
 src/srun/srun_job.h                           |    4 +-
 src/srun/srun_pty.c                           |    4 +-
 src/srun/srun_pty.h                           |    2 +-
 src/srun/task_state.c                         |    2 +-
 src/srun/task_state.h                         |    2 +-
 src/srun_cr/Makefile.in                       |    8 +
 src/srun_cr/srun_cr.c                         |    6 +-
 src/sshare/Makefile.in                        |    8 +
 src/sshare/process.c                          |    2 +-
 src/sshare/sshare.c                           |    7 +-
 src/sshare/sshare.h                           |    4 +-
 src/sstat/Makefile.in                         |    8 +
 src/sstat/options.c                           |    4 +-
 src/sstat/print.c                             |    2 +-
 src/sstat/process.c                           |    2 +-
 src/sstat/sstat.c                             |    2 +-
 src/sstat/sstat.h                             |    2 +-
 src/strigger/Makefile.in                      |    8 +
 src/strigger/opts.c                           |   15 +-
 src/strigger/strigger.c                       |   20 +-
 src/strigger/strigger.h                       |   11 +-
 src/sview/Makefile.am                         |    8 +-
 src/sview/Makefile.in                         |   45 +-
 src/sview/admin_info.c                        |    2 +-
 src/sview/block_info.c                        |  238 +-
 src/sview/common.c                            |  202 +-
 src/sview/config_info.c                       |    2 +-
 src/sview/defaults.c                          |   96 +-
 src/sview/front_end_info.c                    | 1087 ++++
 src/sview/grid.c                              |  463 +-
 src/sview/job_info.c                          | 1175 ++--
 src/sview/node_info.c                         |  312 +-
 src/sview/part_info.c                         |  793 ++-
 src/sview/popups.c                            |  250 +-
 src/sview/resv_info.c                         |  187 +-
 src/sview/submit_info.c                       |    2 +-
 src/sview/sview.c                             |  345 +-
 src/sview/sview.h                             |   89 +-
 testsuite/Makefile.in                         |    8 +
 testsuite/expect/Makefile.am                  |    9 +
 testsuite/expect/Makefile.in                  |   17 +
 testsuite/expect/README                       |   23 +-
 testsuite/expect/globals                      |  129 +-
 testsuite/expect/globals_accounting           |    2 +-
 testsuite/expect/pkill                        |    2 +-
 testsuite/expect/regression                   |    2 +-
 testsuite/expect/regression.py                |    2 +-
 testsuite/expect/test1.1                      |    2 +-
 testsuite/expect/test1.10                     |    7 +-
 testsuite/expect/test1.11                     |    7 +-
 testsuite/expect/test1.12                     |    8 +-
 testsuite/expect/test1.13                     |    7 +-
 testsuite/expect/test1.14                     |    7 +-
 testsuite/expect/test1.15                     |   11 +-
 testsuite/expect/test1.16                     |    2 +-
 testsuite/expect/test1.17                     |    2 +-
 testsuite/expect/test1.18                     |    2 +-
 testsuite/expect/test1.19                     |   14 +-
 testsuite/expect/test1.2                      |   19 +-
 testsuite/expect/test1.20                     |    2 +-
 testsuite/expect/test1.21                     |   14 +-
 testsuite/expect/test1.22                     |   11 +-
 testsuite/expect/test1.23                     |   80 +-
 testsuite/expect/test1.24                     |    2 +-
 testsuite/expect/test1.25                     |   10 +-
 testsuite/expect/test1.26                     |   25 +-
 testsuite/expect/test1.27                     |   11 +-
 testsuite/expect/test1.28                     |    2 +-
 testsuite/expect/test1.29                     |    2 +-
 testsuite/expect/test1.29.prog.c              |    2 +-
 testsuite/expect/test1.3                      |    2 +-
 testsuite/expect/test1.30                     |    2 +-
 testsuite/expect/test1.31                     |   23 +-
 testsuite/expect/test1.32                     |    7 +-
 testsuite/expect/test1.32.prog.c              |    2 +-
 testsuite/expect/test1.33                     |   12 +-
 testsuite/expect/test1.34                     |    2 +-
 testsuite/expect/test1.34.prog.c              |    2 +-
 testsuite/expect/test1.35                     |   50 +-
 testsuite/expect/test1.36                     |   22 +-
 testsuite/expect/test1.37                     |    8 +-
 testsuite/expect/test1.38                     |    8 +-
 testsuite/expect/test1.4                      |    4 +-
 testsuite/expect/test1.40                     |   34 +-
 testsuite/expect/test1.41                     |    7 +-
 testsuite/expect/test1.42                     |   57 +-
 testsuite/expect/test1.43                     |    7 +-
 testsuite/expect/test1.44                     |    9 +-
 testsuite/expect/test1.45                     |   15 +-
 testsuite/expect/test1.46                     |   11 +-
 testsuite/expect/test1.47                     |    8 +-
 testsuite/expect/test1.48                     |    2 +-
 testsuite/expect/test1.49                     |   16 +-
 testsuite/expect/test1.5                      |   14 +-
 testsuite/expect/test1.50                     |    2 +-
 testsuite/expect/test1.51                     |    2 +-
 testsuite/expect/test1.52                     |    2 +-
 testsuite/expect/test1.53                     |    6 +-
 testsuite/expect/test1.54                     |    4 +-
 testsuite/expect/test1.55                     |    9 +-
 testsuite/expect/test1.56                     |    2 +-
 testsuite/expect/test1.57                     |    2 +-
 testsuite/expect/test1.58                     |    9 +-
 testsuite/expect/test1.59                     |    2 +-
 testsuite/expect/test1.6                      |    2 +-
 testsuite/expect/test1.60                     |   28 +-
 testsuite/expect/test1.61                     |    8 +-
 testsuite/expect/test1.62                     |    2 +-
 testsuite/expect/test1.63                     |    7 +-
 testsuite/expect/test1.64                     |    7 +-
 testsuite/expect/test1.7                      |    2 +-
 testsuite/expect/test1.8                      |    8 +-
 testsuite/expect/test1.80                     |    2 +-
 testsuite/expect/test1.81                     |    7 +-
 testsuite/expect/test1.82                     |    7 +-
 testsuite/expect/test1.83                     |    2 +-
 testsuite/expect/test1.84                     |   10 +-
 testsuite/expect/test1.86                     |    2 +-
 testsuite/expect/test1.87                     |    2 +-
 testsuite/expect/test1.88                     |    2 +-
 testsuite/expect/test1.88.prog.c              |    2 +-
 testsuite/expect/test1.89                     |    4 +-
 testsuite/expect/test1.89.prog.c              |    2 +-
 testsuite/expect/test1.9                      |   14 +-
 testsuite/expect/test1.90                     |    4 +-
 testsuite/expect/test1.90.prog.c              |    2 +-
 testsuite/expect/test1.91                     |    4 +-
 testsuite/expect/test1.91.prog.c              |    2 +-
 testsuite/expect/test1.92                     |    8 +-
 testsuite/expect/test1.93                     |    2 +-
 testsuite/expect/test10.1                     |    2 +-
 testsuite/expect/test10.10                    |    2 +-
 testsuite/expect/test10.11                    |    2 +-
 testsuite/expect/test10.12                    |    6 +-
 testsuite/expect/test10.13                    |   45 +-
 testsuite/expect/test10.2                     |    2 +-
 testsuite/expect/test10.3                     |    2 +-
 testsuite/expect/test10.4                     |    2 +-
 testsuite/expect/test10.5                     |    2 +-
 testsuite/expect/test10.6                     |    2 +-
 testsuite/expect/test10.7                     |    2 +-
 testsuite/expect/test10.8                     |    2 +-
 testsuite/expect/test10.9                     |    2 +-
 testsuite/expect/test11.1                     |    2 +-
 testsuite/expect/test11.2                     |    2 +-
 testsuite/expect/test11.3                     |    2 +-
 testsuite/expect/test11.4                     |    2 +-
 testsuite/expect/test11.5                     |    7 +-
 testsuite/expect/test11.6                     |    2 +-
 testsuite/expect/test11.7                     |    2 +-
 testsuite/expect/test12.1                     |    2 +-
 testsuite/expect/test12.2                     |    8 +-
 testsuite/expect/test12.2.prog.c              |    2 +-
 testsuite/expect/test12.4                     |    2 +-
 testsuite/expect/test12.5                     |    2 +-
 testsuite/expect/test13.1                     |    2 +-
 testsuite/expect/test14.1                     |    2 +-
 testsuite/expect/test14.2                     |    2 +-
 testsuite/expect/test14.3                     |    2 +-
 testsuite/expect/test14.4                     |    2 +-
 testsuite/expect/test14.5                     |    2 +-
 testsuite/expect/test14.6                     |    2 +-
 testsuite/expect/test14.7                     |    2 +-
 testsuite/expect/test14.8                     |    2 +-
 testsuite/expect/test15.1                     |    2 +-
 testsuite/expect/test15.10                    |    2 +-
 testsuite/expect/test15.11                    |    2 +-
 testsuite/expect/test15.12                    |    2 +-
 testsuite/expect/test15.13                    |    2 +-
 testsuite/expect/test15.14                    |   72 +-
 testsuite/expect/test15.15                    |    2 +-
 testsuite/expect/test15.16                    |    2 +-
 testsuite/expect/test15.17                    |    7 +-
 testsuite/expect/test15.18                    |    2 +-
 testsuite/expect/test15.19                    |   13 +-
 testsuite/expect/test15.2                     |    2 +-
 testsuite/expect/test15.20                    |    6 +-
 testsuite/expect/test15.21                    |    2 +-
 testsuite/expect/test15.22                    |    2 +-
 testsuite/expect/test15.23                    |    2 +-
 testsuite/expect/test15.24                    |   17 +-
 testsuite/expect/test15.25                    |    8 +-
 testsuite/expect/test15.26                    |   17 +-
 testsuite/expect/test15.3                     |    2 +-
 testsuite/expect/test15.4                     |    2 +-
 testsuite/expect/test15.5                     |   24 +-
 testsuite/expect/test15.6                     |    2 +-
 testsuite/expect/test15.7                     |    2 +-
 testsuite/expect/test15.8                     |    2 +-
 testsuite/expect/test15.9                     |    2 +-
 testsuite/expect/test16.1                     |    2 +-
 testsuite/expect/test16.2                     |    2 +-
 testsuite/expect/test16.3                     |    2 +-
 testsuite/expect/test16.4                     |   36 +-
 testsuite/expect/test16.4.prog.c              |    2 +-
 testsuite/expect/test17.1                     |    2 +-
 testsuite/expect/test17.10                    |    2 +-
 testsuite/expect/test17.11                    |    7 +-
 testsuite/expect/test17.12                    |    2 +-
 testsuite/expect/test17.13                    |    2 +-
 testsuite/expect/test17.14                    |    2 +-
 testsuite/expect/test17.15                    |    2 +-
 testsuite/expect/test17.15.prog.c             |    2 +-
 testsuite/expect/test17.16                    |    2 +-
 testsuite/expect/test17.17                    |    7 +-
 testsuite/expect/test17.18                    |   69 +-
 testsuite/expect/test17.19                    |    2 +-
 testsuite/expect/test17.2                     |    2 +-
 testsuite/expect/test17.20                    |    2 +-
 testsuite/expect/test17.21                    |    2 +-
 testsuite/expect/test17.22                    |    2 +-
 testsuite/expect/test17.23                    |    2 +-
 testsuite/expect/test17.24                    |    2 +-
 testsuite/expect/test17.25                    |    2 +-
 testsuite/expect/test17.26                    |    2 +-
 testsuite/expect/test17.27                    |    6 +-
 testsuite/expect/test17.28                    |    2 +-
 testsuite/expect/test17.29                    |    2 +-
 testsuite/expect/test17.3                     |    2 +-
 testsuite/expect/test17.31                    |    2 +-
 testsuite/expect/test17.32                    |   19 +-
 testsuite/expect/test17.33                    |    2 +-
 testsuite/expect/test17.4                     |    2 +-
 testsuite/expect/test17.5                     |    2 +-
 testsuite/expect/test17.6                     |   19 +-
 testsuite/expect/test17.7                     |    2 +-
 testsuite/expect/test17.8                     |    2 +-
 testsuite/expect/test17.9                     |    2 +-
 testsuite/expect/test19.1                     |    2 +-
 testsuite/expect/test19.2                     |    2 +-
 testsuite/expect/test19.3                     |    2 +-
 testsuite/expect/test19.4                     |    2 +-
 testsuite/expect/test19.5                     |    2 +-
 testsuite/expect/test19.6                     |    2 +-
 testsuite/expect/test19.7                     |    2 +-
 testsuite/expect/test2.1                      |    2 +-
 testsuite/expect/test2.10                     |    2 +-
 testsuite/expect/test2.11                     |    7 +-
 testsuite/expect/test2.12                     |    2 +-
 testsuite/expect/test2.13                     |    9 +-
 testsuite/expect/test2.14                     |    8 +-
 testsuite/expect/test2.15                     |  152 +
 testsuite/expect/test2.2                      |    2 +-
 testsuite/expect/test2.3                      |    2 +-
 testsuite/expect/test2.4                      |    2 +-
 testsuite/expect/test2.5                      |   56 +-
 testsuite/expect/test2.6                      |    2 +-
 testsuite/expect/test2.7                      |    2 +-
 testsuite/expect/test2.8                      |   27 +-
 testsuite/expect/test2.9                      |    2 +-
 testsuite/expect/test20.1                     |    2 +-
 testsuite/expect/test20.2                     |    2 +-
 testsuite/expect/test20.3                     |    2 +-
 testsuite/expect/test20.4                     |    2 +-
 testsuite/expect/test21.1                     |    2 +-
 testsuite/expect/test21.10                    |    2 +-
 testsuite/expect/test21.11                    |    2 +-
 testsuite/expect/test21.12                    |    2 +-
 testsuite/expect/test21.13                    |    2 +-
 testsuite/expect/test21.14                    |    2 +-
 testsuite/expect/test21.15                    |    2 +-
 testsuite/expect/test21.16                    |    2 +-
 testsuite/expect/test21.17                    |    2 +-
 testsuite/expect/test21.18                    |    2 +-
 testsuite/expect/test21.19                    |    2 +-
 testsuite/expect/test21.2                     |    2 +-
 testsuite/expect/test21.20                    |    2 +-
 testsuite/expect/test21.21                    |    2 +-
 testsuite/expect/test21.22                    |    2 +-
 testsuite/expect/test21.23                    |    2 +-
 testsuite/expect/test21.24                    |    2 +-
 testsuite/expect/test21.25                    |    2 +-
 testsuite/expect/test21.26                    |    2 +-
 testsuite/expect/test21.27                    |    2 +-
 testsuite/expect/test21.28                    |    2 +-
 testsuite/expect/test21.29                    |    2 +-
 testsuite/expect/test21.3                     |    2 +-
 testsuite/expect/test21.4                     |    2 +-
 testsuite/expect/test21.5                     |    2 +-
 testsuite/expect/test21.6                     |    2 +-
 testsuite/expect/test21.7                     |    2 +-
 testsuite/expect/test21.8                     |    2 +-
 testsuite/expect/test21.9                     |    2 +-
 testsuite/expect/test22.1                     |    2 +-
 testsuite/expect/test22.2                     |    2 +-
 testsuite/expect/test23.1                     |    2 +-
 testsuite/expect/test23.2                     |    2 +-
 testsuite/expect/test24.1                     |    2 +-
 testsuite/expect/test24.1.prog.c              |    6 +-
 testsuite/expect/test24.2                     |    2 +-
 testsuite/expect/test25.1                     |    2 +-
 testsuite/expect/test26.1                     |  163 +
 testsuite/expect/test26.2                     |   99 +
 testsuite/expect/test3.1                      |    2 +-
 testsuite/expect/test3.10                     |    2 +-
 testsuite/expect/test3.11                     |    2 +-
 testsuite/expect/test3.12                     |  195 +
 testsuite/expect/test3.2                      |    2 +-
 testsuite/expect/test3.3                      |   49 +-
 testsuite/expect/test3.4                      |   15 +-
 testsuite/expect/test3.5                      |    2 +-
 testsuite/expect/test3.6                      |    2 +-
 testsuite/expect/test3.7                      |    2 +-
 testsuite/expect/test3.7.prog.c               |    2 +-
 testsuite/expect/test3.8                      |    2 +-
 testsuite/expect/test3.9                      |    7 +-
 testsuite/expect/test4.1                      |    2 +-
 testsuite/expect/test4.10                     |    2 +-
 testsuite/expect/test4.11                     |    2 +-
 testsuite/expect/test4.12                     |   28 +-
 testsuite/expect/test4.2                      |    2 +-
 testsuite/expect/test4.3                      |    2 +-
 testsuite/expect/test4.4                      |    2 +-
 testsuite/expect/test4.5                      |   16 +-
 testsuite/expect/test4.6                      |    2 +-
 testsuite/expect/test4.7                      |    2 +-
 testsuite/expect/test4.8                      |    2 +-
 testsuite/expect/test4.9                      |    2 +-
 testsuite/expect/test5.1                      |    2 +-
 testsuite/expect/test5.2                      |    2 +-
 testsuite/expect/test5.3                      |    2 +-
 testsuite/expect/test5.4                      |    4 +-
 testsuite/expect/test5.5                      |    2 +-
 testsuite/expect/test5.6                      |    4 +-
 testsuite/expect/test5.7                      |    2 +-
 testsuite/expect/test5.8                      |    2 +-
 testsuite/expect/test6.1                      |    2 +-
 testsuite/expect/test6.10                     |    2 +-
 testsuite/expect/test6.11                     |    2 +-
 testsuite/expect/test6.12                     |    7 +-
 testsuite/expect/test6.13                     |    2 +-
 testsuite/expect/test6.14                     |    2 +-
 testsuite/expect/test6.2                      |    2 +-
 testsuite/expect/test6.3                      |    2 +-
 testsuite/expect/test6.4                      |    2 +-
 testsuite/expect/test6.5                      |    2 +-
 testsuite/expect/test6.6                      |    2 +-
 testsuite/expect/test6.7                      |    4 +-
 testsuite/expect/test6.8                      |    2 +-
 testsuite/expect/test6.9                      |    2 +-
 testsuite/expect/test7.1                      |    2 +-
 testsuite/expect/test7.10                     |    2 +-
 testsuite/expect/test7.11                     |   10 +-
 testsuite/expect/test7.11.prog.c              |    2 +-
 testsuite/expect/test7.12                     |    3 +-
 testsuite/expect/test7.12.prog.c              |    2 +-
 testsuite/expect/test7.13                     |    6 +-
 testsuite/expect/test7.14                     |   14 +-
 testsuite/expect/test7.2                      |   17 +-
 testsuite/expect/test7.2.prog.c               |    2 +-
 testsuite/expect/test7.3                      |    2 +-
 testsuite/expect/test7.3.io.c                 |    2 +-
 testsuite/expect/test7.3.prog.c               |    2 +-
 testsuite/expect/test7.4                      |   15 +-
 testsuite/expect/test7.4.prog.c               |    2 +-
 testsuite/expect/test7.5                      |    7 +-
 testsuite/expect/test7.5.prog.c               |    2 +-
 testsuite/expect/test7.6                      |   10 +-
 testsuite/expect/test7.6.prog.c               |    2 +-
 testsuite/expect/test7.7                      |    2 +-
 testsuite/expect/test7.7.prog.c               |    2 +-
 testsuite/expect/test7.8                      |    2 +-
 testsuite/expect/test7.8.prog.c               |    2 +-
 testsuite/expect/test7.9                      |    2 +-
 testsuite/expect/test7.9.prog.c               |    2 +-
 testsuite/expect/test8.1                      |   17 +-
 testsuite/expect/test8.2                      |   14 +-
 testsuite/expect/test8.20                     |  258 +
 testsuite/expect/test8.21                     |  339 ++
 testsuite/expect/test8.21.bash                |   43 +
 testsuite/expect/test8.22                     |  163 +
 testsuite/expect/test8.23                     |  283 +
 testsuite/expect/test8.3                      |    2 +-
 testsuite/expect/test8.4                      |    2 +-
 testsuite/expect/test8.4.prog.c               |    2 +-
 testsuite/expect/test8.5                      |    6 +-
 testsuite/expect/test8.6                      |    4 +-
 testsuite/expect/test8.7                      |    2 +-
 testsuite/expect/test8.7.prog.c               |    2 +-
 testsuite/expect/test8.8                      |   22 +-
 testsuite/expect/test9.1                      |   16 +-
 testsuite/expect/test9.2                      |   16 +-
 testsuite/expect/test9.3                      |   16 +-
 testsuite/expect/test9.4                      |   18 +-
 testsuite/expect/test9.5                      |   17 +-
 testsuite/expect/test9.6                      |   17 +-
 testsuite/expect/test9.7                      |    2 +-
 testsuite/expect/test9.7.bash                 |    2 +-
 testsuite/expect/test9.8                      |   14 +-
 testsuite/expect/usleep                       |    2 +-
 testsuite/slurm_unit/Makefile.in              |    8 +
 testsuite/slurm_unit/api/Makefile.in          |    8 +
 testsuite/slurm_unit/api/manual/Makefile.in   |    8 +
 testsuite/slurm_unit/api/manual/cancel-tst.c  |    2 +-
 .../slurm_unit/api/manual/complete-tst.c      |    2 +-
 .../slurm_unit/api/manual/job_info-tst.c      |    2 +-
 .../slurm_unit/api/manual/node_info-tst.c     |    2 +-
 .../api/manual/partition_info-tst.c           |    2 +-
 .../slurm_unit/api/manual/reconfigure-tst.c   |    2 +-
 testsuite/slurm_unit/api/manual/submit-tst.c  |    2 +-
 .../slurm_unit/api/manual/update_config-tst.c |    2 +-
 testsuite/slurm_unit/common/Makefile.in       |    8 +
 1505 files changed, 83515 insertions(+), 35100 deletions(-)
 rename README => README.rst (96%)
 create mode 100644 auxdir/x_ac_man2html.m4
 create mode 100644 auxdir/x_ac_srun.m4
 create mode 100644 contribs/arrayrun/Makefile.am
 create mode 100644 contribs/arrayrun/Makefile.in
 create mode 100644 contribs/arrayrun/README
 create mode 100644 contribs/arrayrun/arrayrun
 create mode 100644 contribs/arrayrun/arrayrun_worker
 create mode 100644 contribs/cray/Makefile.am
 create mode 100644 contribs/cray/Makefile.in
 create mode 100644 contribs/cray/etc_init_d_munge
 create mode 100644 contribs/cray/etc_sysconfig_slurm
 create mode 100644 contribs/cray/libalps_test_programs.tar.gz
 create mode 100644 contribs/cray/munge_build_script.sh
 create mode 100644 contribs/cray/opt_modulefiles_slurm
 create mode 100644 contribs/cray/pam_job.c
 create mode 100644 contribs/cray/slurm-build-script.sh
 create mode 100755 contribs/cray/srun.pl
 create mode 100644 contribs/lua/Makefile.am
 create mode 100644 contribs/lua/Makefile.in
 create mode 100644 contribs/lua/job_submit.license.lua
 create mode 100644 contribs/phpext/slurm_php/AUTHORS
 create mode 100644 contribs/phpext/slurm_php/DISCLAIMER
 create mode 100644 contribs/phpext/slurm_php/LICENSE
 create mode 100644 contribs/phpext/slurm_php/README
 create mode 100644 contribs/phpext/slurm_php/RELEASE_NOTES
 create mode 100644 contribs/phpext/slurm_php/tests/slurm_array_to_hostlist_basic.phpt
 create mode 100644 contribs/phpext/slurm_php/tests/slurm_array_to_hostlist_error.phpt
 create mode 100644 contribs/phpext/slurm_php/tests/slurm_get_control_configuration_keys_basic.phpt
 create mode 100644 contribs/phpext/slurm_php/tests/slurm_get_control_configuration_values_basic.phpt
 create mode 100644 contribs/phpext/slurm_php/tests/slurm_get_node_element_by_name_basic.phpt
 create mode 100644 contribs/phpext/slurm_php/tests/slurm_get_node_element_by_name_error.phpt
 create mode 100644 contribs/phpext/slurm_php/tests/slurm_get_node_elements_basic.phpt
 create mode 100644 contribs/phpext/slurm_php/tests/slurm_get_node_names_basic.phpt
 create mode 100644 contribs/phpext/slurm_php/tests/slurm_get_node_state_by_name_basic.phpt
 create mode 100644 contribs/phpext/slurm_php/tests/slurm_get_node_state_by_name_error.phpt
 create mode 100644 contribs/phpext/slurm_php/tests/slurm_get_node_states_basic.phpt
 create mode 100644 contribs/phpext/slurm_php/tests/slurm_get_partition_node_names_basic.phpt
 create mode 100644 contribs/phpext/slurm_php/tests/slurm_get_partition_node_names_error.phpt
 create mode 100644 contribs/phpext/slurm_php/tests/slurm_get_specific_partition_info_basic.phpt
 create mode 100644 contribs/phpext/slurm_php/tests/slurm_get_specific_partition_info_error.phpt
 create mode 100644 contribs/phpext/slurm_php/tests/slurm_hostlist_to_array_basic.phpt
 create mode 100644 contribs/phpext/slurm_php/tests/slurm_hostlist_to_array_error.phpt
 create mode 100644 contribs/phpext/slurm_php/tests/slurm_load_job_information_basic.phpt
 create mode 100644 contribs/phpext/slurm_php/tests/slurm_load_partition_jobs_basic.phpt
 create mode 100644 contribs/phpext/slurm_php/tests/slurm_load_partition_jobs_error.phpt
 create mode 100644 contribs/phpext/slurm_php/tests/slurm_ping_basic.phpt
 create mode 100644 contribs/phpext/slurm_php/tests/slurm_ping_error.phpt
 create mode 100644 contribs/phpext/slurm_php/tests/slurm_print_partition_names_basic.phpt
 create mode 100644 contribs/phpext/slurm_php/tests/slurm_slurmd_status_basic.phpt
 create mode 100644 contribs/phpext/slurm_php/tests/slurm_version_basic.phpt
 create mode 100755 doc/html/bull.jpg
 create mode 100644 doc/html/cpu_management.shtml
 create mode 100644 doc/html/disclaimer.shtml
 create mode 100644 doc/html/gres_design.shtml
 create mode 100644 doc/html/job_launch.shtml
 create mode 100644 doc/html/man_index.shtml
 create mode 100644 doc/html/meetings.shtml
 create mode 100644 doc/html/registration.shtml
 create mode 100644 doc/html/select_design.shtml
 create mode 100644 doc/html/slurm_ug_agenda.shtml
 create mode 100644 doc/html/slurm_ug_cfp.shtml
 create mode 100644 doc/html/slurm_ug_registration.shtml
 create mode 100755 doc/man/man2html.py
 create mode 100644 doc/man/man3/slurm_free_front_end_info_msg.3
 create mode 100644 doc/man/man3/slurm_init_update_front_end_msg.3
 create mode 100644 doc/man/man3/slurm_load_front_end.3
 create mode 100644 doc/man/man3/slurm_print_front_end_info_msg.3
 create mode 100644 doc/man/man3/slurm_print_front_end_table.3
 create mode 100644 doc/man/man3/slurm_sprint_front_end_table.3
 create mode 100644 doc/man/man3/slurm_update_front_end.3
 create mode 100644 doc/man/man5/cray.conf.5
 delete mode 100644 etc/cgroup.release_agent
 create mode 100644 etc/cgroup.release_common.example
 create mode 100644 etc/cgroup_allowed_devices_file.conf.example
 create mode 100644 src/api/front_end_info.c
 delete mode 100644 src/common/basil_resv_conf.c
 delete mode 100644 src/common/basil_resv_conf.h
 create mode 100644 src/common/xcgroup.c
 create mode 100644 src/common/xcgroup.h
 rename src/{plugins/proctrack/cgroup/read_config.c => common/xcgroup_read_config.c} (56%)
 rename src/{plugins/proctrack/cgroup/read_config.h => common/xcgroup_read_config.h} (70%)
 create mode 100644 src/common/xcpuinfo.c
 rename src/{plugins/proctrack/cgroup => common}/xcpuinfo.h (57%)
 delete mode 100644 src/plugins/checkpoint/xlch/Makefile.am
 delete mode 100644 src/plugins/checkpoint/xlch/checkpoint_xlch.c
 delete mode 100644 src/plugins/proctrack/cgroup/Changelog
 delete mode 100644 src/plugins/proctrack/cgroup/xcgroup.c
 delete mode 100644 src/plugins/proctrack/cgroup/xcgroup.h
 delete mode 100644 src/plugins/proctrack/cgroup/xcpuinfo.c
 delete mode 100644 src/plugins/select/bgq/Makefile.am
 delete mode 100644 src/plugins/select/bgq/select_bgq.cc
 create mode 100644 src/plugins/select/bluegene/ba/Makefile.am
 rename src/plugins/select/bluegene/{block_allocator => ba}/Makefile.in (90%)
 rename src/plugins/select/bluegene/{block_allocator => ba}/block_allocator.c (56%)
 rename src/plugins/select/bluegene/{plugin/state_test.h => ba/block_allocator.h} (64%)
 rename src/plugins/select/bluegene/{block_allocator => ba}/wire_test.c (81%)
 create mode 100644 src/plugins/select/bluegene/ba_bgq/Makefile.am
 create mode 100644 src/plugins/select/bluegene/ba_bgq/Makefile.in
 create mode 100644 src/plugins/select/bluegene/ba_bgq/block_allocator.c
 rename src/plugins/select/bluegene/{plugin/bg_boot_time.h => ba_bgq/block_allocator.h} (69%)
 create mode 100644 src/plugins/select/bluegene/ba_bgq/wire_test.c
 create mode 100644 src/plugins/select/bluegene/ba_common.c
 create mode 100644 src/plugins/select/bluegene/ba_common.h
 create mode 100644 src/plugins/select/bluegene/bg_core.c
 create mode 100644 src/plugins/select/bluegene/bg_core.h
 rename src/plugins/select/bluegene/{plugin/defined_block.c => bg_defined_block.c} (63%)
 rename src/plugins/select/bluegene/{plugin/defined_block.h => bg_defined_block.h} (96%)
 rename src/plugins/select/bluegene/{plugin/dynamic_block.c => bg_dynamic_block.c} (66%)
 rename src/plugins/select/bluegene/{plugin/dynamic_block.h => bg_dynamic_block.h} (94%)
 create mode 100644 src/plugins/select/bluegene/bg_enums.h
 rename src/plugins/select/bluegene/{plugin/jobinfo.c => bg_job_info.c} (67%)
 rename src/plugins/select/bluegene/{plugin/jobinfo.h => bg_job_info.h} (81%)
 rename src/plugins/select/bluegene/{plugin => }/bg_job_place.c (72%)
 rename src/plugins/select/bluegene/{plugin => }/bg_job_place.h (98%)
 rename src/plugins/select/bluegene/{plugin => }/bg_job_run.c (52%)
 rename src/plugins/select/bluegene/{plugin => }/bg_job_run.h (90%)
 create mode 100644 src/plugins/select/bluegene/bg_list_functions.c
 create mode 100644 src/plugins/select/bluegene/bg_list_functions.h
 rename src/plugins/select/bluegene/{plugin/nodeinfo.c => bg_node_info.c} (83%)
 rename src/plugins/select/bluegene/{plugin/nodeinfo.h => bg_node_info.h} (91%)
 create mode 100644 src/plugins/select/bluegene/bg_read_config.c
 create mode 100644 src/plugins/select/bluegene/bg_read_config.h
 rename src/plugins/select/bluegene/{plugin => }/bg_record_functions.c (63%)
 create mode 100644 src/plugins/select/bluegene/bg_record_functions.h
 create mode 100644 src/plugins/select/bluegene/bg_status.c
 rename src/plugins/select/{bgq/bgq.h => bluegene/bg_status.h} (70%)
 rename src/plugins/select/bluegene/{plugin/bg_record_functions.h => bg_structs.h} (54%)
 create mode 100644 src/plugins/select/bluegene/bl/Makefile.am
 create mode 100644 src/plugins/select/bluegene/bl/Makefile.in
 create mode 100644 src/plugins/select/bluegene/bl/bridge_linker.c
 rename src/plugins/select/bluegene/{plugin/state_test.c => bl/bridge_status.c} (59%)
 create mode 100644 src/plugins/select/bluegene/bl/bridge_status.h
 rename src/plugins/select/bluegene/{plugin/bg_switch_connections.c => bl/bridge_switch_connections.c} (78%)
 rename src/plugins/select/bluegene/{plugin/bg_block_info.h => bl/bridge_switch_connections.h} (77%)
 create mode 100644 src/plugins/select/bluegene/bl_bgq/Makefile.am
 rename src/plugins/select/{bgq => bluegene/bl_bgq}/Makefile.in (81%)
 create mode 100644 src/plugins/select/bluegene/bl_bgq/bridge_helper.cc
 create mode 100644 src/plugins/select/bluegene/bl_bgq/bridge_helper.h
 create mode 100644 src/plugins/select/bluegene/bl_bgq/bridge_linker.cc
 create mode 100644 src/plugins/select/bluegene/bl_bgq/bridge_status.cc
 create mode 100644 src/plugins/select/bluegene/bl_bgq/bridge_status.h
 delete mode 100644 src/plugins/select/bluegene/block_allocator/Makefile.am
 delete mode 100644 src/plugins/select/bluegene/block_allocator/block_allocator.h
 delete mode 100644 src/plugins/select/bluegene/block_allocator/bridge_linker.c
 rename src/plugins/select/bluegene/{block_allocator => }/bridge_linker.h (66%)
 create mode 100644 src/plugins/select/bluegene/configure_api.c
 create mode 100644 src/plugins/select/bluegene/configure_api.h
 rename src/plugins/select/bluegene/{plugin => }/libsched_if64.c (93%)
 delete mode 100644 src/plugins/select/bluegene/plugin/Makefile.am
 delete mode 100644 src/plugins/select/bluegene/plugin/Manifest
 delete mode 100644 src/plugins/select/bluegene/plugin/bg_block_info.c
 delete mode 100755 src/plugins/select/bluegene/plugin/block_sys.c
 delete mode 100644 src/plugins/select/bluegene/plugin/bluegene.c
 delete mode 100644 src/plugins/select/bluegene/plugin/bluegene.h
 delete mode 100644 src/plugins/select/bluegene/plugin/select_bluegene.c
 create mode 100644 src/plugins/select/bluegene/runjob_plugin.cc
 create mode 100644 src/plugins/select/bluegene/select_bluegene.c
 create mode 100644 src/plugins/select/bluegene/sfree/Makefile.am
 rename src/plugins/{checkpoint/xlch => select/bluegene/sfree}/Makefile.in (80%)
 rename src/plugins/select/bluegene/{plugin => sfree}/opts.c (98%)
 rename src/plugins/select/bluegene/{plugin => sfree}/sfree.c (96%)
 rename src/plugins/select/bluegene/{plugin => sfree}/sfree.h (89%)
 rename src/plugins/select/bluegene/{plugin => }/slurm_epilog.c (98%)
 rename src/plugins/select/bluegene/{plugin => }/slurm_prolog.c (93%)
 delete mode 100644 src/plugins/select/bluegene/wrap_rm_api.h
 create mode 100644 src/plugins/select/cray/basil_alps.h
 create mode 100644 src/plugins/select/cray/basil_interface.c
 create mode 100644 src/plugins/select/cray/basil_interface.h
 create mode 100644 src/plugins/select/cray/cray_config.c
 create mode 100644 src/plugins/select/cray/cray_config.h
 create mode 100644 src/plugins/select/cray/libalps/Makefile.am
 create mode 100644 src/plugins/select/cray/libalps/Makefile.in
 create mode 100644 src/plugins/select/cray/libalps/atoul.c
 create mode 100644 src/plugins/select/cray/libalps/basil_mysql_routines.c
 create mode 100644 src/plugins/select/cray/libalps/basil_request.c
 create mode 100644 src/plugins/select/cray/libalps/do_confirm.c
 create mode 100644 src/plugins/select/cray/libalps/do_query.c
 create mode 100644 src/plugins/select/cray/libalps/do_release.c
 create mode 100644 src/plugins/select/cray/libalps/do_reserve.c
 create mode 100644 src/plugins/select/cray/libalps/do_switch.c
 create mode 100644 src/plugins/select/cray/libalps/memory_handling.c
 create mode 100644 src/plugins/select/cray/libalps/memory_handling.h
 create mode 100644 src/plugins/select/cray/libalps/parser_basil_1.0.c
 create mode 100644 src/plugins/select/cray/libalps/parser_basil_1.1.c
 create mode 100644 src/plugins/select/cray/libalps/parser_basil_3.1.c
 create mode 100644 src/plugins/select/cray/libalps/parser_basil_4.0.c
 create mode 100644 src/plugins/select/cray/libalps/parser_common.c
 create mode 100644 src/plugins/select/cray/libalps/parser_internal.h
 create mode 100644 src/plugins/select/cray/libalps/popen2.c
 create mode 100644 src/plugins/select/cray/libemulate/Makefile.am
 create mode 100644 src/plugins/select/cray/libemulate/Makefile.in
 create mode 100644 src/plugins/select/cray/libemulate/alps_emulate.c
 create mode 100644 src/plugins/select/cray/libemulate/hilbert.c
 create mode 100644 src/plugins/select/cray/libemulate/hilbert.h
 create mode 100644 src/plugins/select/cray/nodespec.c
 create mode 100644 src/plugins/select/cray/parser_common.h
 create mode 100644 src/plugins/task/cgroup/Makefile.am
 rename src/plugins/{select/bluegene/plugin => task/cgroup}/Makefile.in (55%)
 create mode 100644 src/plugins/task/cgroup/task_cgroup.c
 create mode 100644 src/plugins/task/cgroup/task_cgroup.h
 create mode 100644 src/plugins/task/cgroup/task_cgroup_cpuset.c
 create mode 100644 src/plugins/task/cgroup/task_cgroup_cpuset.h
 create mode 100644 src/plugins/task/cgroup/task_cgroup_devices.c
 create mode 100644 src/plugins/task/cgroup/task_cgroup_devices.h
 create mode 100644 src/plugins/task/cgroup/task_cgroup_memory.c
 create mode 100644 src/plugins/task/cgroup/task_cgroup_memory.h
 delete mode 100644 src/slurmctld/basil_interface.c
 delete mode 100644 src/slurmctld/basil_interface.h
 create mode 100644 src/slurmctld/front_end.c
 create mode 100644 src/slurmctld/front_end.h
 create mode 100644 src/slurmd/common/Makefile.am
 create mode 100644 src/slurmd/common/Makefile.in
 create mode 100644 src/sview/front_end_info.c
 create mode 100755 testsuite/expect/test2.15
 create mode 100755 testsuite/expect/test26.1
 create mode 100755 testsuite/expect/test26.2
 create mode 100755 testsuite/expect/test3.12
 create mode 100755 testsuite/expect/test8.20
 create mode 100755 testsuite/expect/test8.21
 create mode 100755 testsuite/expect/test8.21.bash
 create mode 100755 testsuite/expect/test8.22
 create mode 100755 testsuite/expect/test8.23

diff --git a/BUILD.NOTES b/BUILD.NOTES
index 87a19e829..7c24d6cc7 100644
--- a/BUILD.NOTES
+++ b/BUILD.NOTES
@@ -23,9 +23,11 @@ then check-in the new Makefile.am and Makefile.in files
 
 Here is a step-by-step HOWTO for creating a new release of SLURM on a
 Linux cluster (See BlueGene and AIX specific notes below for some differences).
-0. svn co https://eris.llnl.gov/svn/slurm/trunk slurm
-   svn co https://eris.llnl.gov/svn/chaos/private/buildfarm/trunk buildfarm
-   put the buildfarm directory in your search path
+0. Get current copies of SLURM and buildfarm
+   > git clone https://<user_name>@github.com/chaos/slurm.git
+   > svn co https://eris.llnl.gov/svn/chaos/private/buildfarm/trunk buildfarm
+   place the buildfarm directory in your search path
+   > export PATH=~/buildfarm:$PATH
 1. Update NEWS and META files for the new release. In the META file,
    the API, Major, Minor, Micro, Version, and Release fields must all
    by up-to-date. **** DON'T UPDATE META UNTIL RIGHT BEFORE THE TAG ****
@@ -35,39 +37,37 @@ Linux cluster (See BlueGene and AIX specific notes below for some differences).
      files, but not to code.
    - this is a prerelease (Release = 0.preX)
 2. Tag the repository with the appropriate name for the new version.
-   svn copy https://eris.llnl.gov/svn/slurm/trunk \
-     https://eris.llnl.gov/svn/slurm/tags/slurm-1-2-0-0-pre3 \
-     -m "description"
+   > git tag -a slurm-2-3-0-0-pre5 -m "create tag v2.3.0-pre5"
+   > git push --tags
 3. Use the rpm make target to create the new RPMs. This requires a .rpmmacros 
    (.rpmrc for newer versions of rpmbuild) file containing:
 	%_slurm_sysconfdir      /etc/slurm
 	%_with_debug            1
 	%_with_sgijob		1
 	%_with_elan		1   (ONLY ON SYSTEMS WITH ELAN SWITCH)
-   I usually build with using the following syntax:
-   build  -s https://eris.llnl.gov/svn/slurm/tags/slurm-1-2-0-0-pre3
-4. Remove the RPMs that we don't want:
-   rm -f slurm-perlapi*rpm slurm-torque*rpm
-5. Move the RPMs to
-   /usr/local/admin/rpms/llnl/RPMS-RHEL4/x86_64 (odevi, or gauss)
-   /usr/local/admin/rpms/llnl/RPMS-RHEL4/i386/ (adevi)
-   /usr/local/admin/rpms/llnl/RPMS-RHEL4/ia64/ (tdevi)
-   send an announcement email (with the latest entry from the NEWS
-   file) out to linux-admin@lists.llnl.gov.
-6. Copy tagged bzip file (e.g. slurm-0.6.0-0.pre3.bz2) to FTP server
-   for external SLURM users.
-7. Copy bzip file and rpms (including src.rpm) to sourceforge.net:
-   ncftp upload.sf.net
-   cd upload
-   put filename
-   Use SourceForge admin tool to add new release, including changelog.
+   NOTE: build will make a tar-ball based upon ALL of the files in your current
+   local directory. If that includes scratch files, everyone will get those
+   files in the tar-ball. For that reason, it is a good idea to clone a clean
+   copy of the repository and build from that
+   > git clone https://<user_name>@github.com/chaos/slurm.git <local_dir>
+   Build using the following syntax:
+   > build  --snapshot   -s <local_dir>  OR
+   > build  --nosnapshot -s <local_dir>
+   --nosnapshot will name the tar-ball and RPMs based upon the META file
+   --snapshot will name the tar-ball and RPMs based upon the META file plus a
+   timestamp. Do this to make a tar-ball for a non-tagged release.
+   NOTE: <local_dir> should be a fully-qualified pathname
+4. scp the files to schedmd.com into ~/www/download/development or
+   ~/www/download/latest. Move the older files to ~/www/download/archive,
+   login to schedmd.com, cd to ~/download, and execute "php process.php" to
+   update the web pages.
 
 BlueGene build notes:
 0. If on a bgp system and you want sview export these variables
-    export CFLAGS="-I/opt/gnome/lib/gtk-2.0/include -I/opt/gnome/lib/glib-2.0/include $CFLAGS"
-    export LIBS="-L/usr/X11R6/lib64 $LIBS"
-    export CMD_LDFLAGS='-L/usr/X11R6/lib64'
-    export PKG_CONFIG_PATH="/opt/gnome/lib64/pkgconfig/:$PKG_CONFIG_PATH"
+   > export CFLAGS="-I/opt/gnome/lib/gtk-2.0/include -I/opt/gnome/lib/glib-2.0/include $CFLAGS"
+   > export LIBS="-L/usr/X11R6/lib64 $LIBS"
+   > export CMD_LDFLAGS='-L/usr/X11R6/lib64'
+   > export PKG_CONFIG_PATH="/opt/gnome/lib64/pkgconfig/:$PKG_CONFIG_PATH"
 1. Use the rpm make target to create the new RPMs. This requires a .rpmmacros
    (.rpmrc for newer versions of rpmbuild) file containing:
 	%_prefix                /usr
@@ -76,13 +76,17 @@ BlueGene build notes:
 	%_without_pam		1
 	%_with_debug            1
    Build on Service Node with using the following syntax
-   rpmbuild -ta slurm-...bz2
+   > rpmbuild -ta slurm-...bz2
    The RPM files get written to the directory
    /usr/src/packages/RPMS/ppc64
 
 To build and run on AIX:
-0. svn co https://eris.llnl.gov/svn/slurm/trunk slurm
-   svn co https://eris.llnl.gov/svn/buildfarm/trunk buildfarm
+0. Get current copies of SLURM and buildfarm
+   > git clone https://<user_name>@github.com/chaos/slurm.git
+   > svn co https://eris.llnl.gov/svn/chaos/private/buildfarm/trunk buildfarm
+   put the buildfarm directory in your search path
+   > export PATH=~/buildfarm:$PATH
+
    Put the buildfarm directory in your search path
    Also, you will need several commands to appear FIRST in your PATH:
 
@@ -93,10 +97,11 @@ To build and run on AIX:
    I do this by making symlinks to those commands in the buildfarm directory,
    then making the buildfarm directory the first one in my PATH.
    Also, make certain that the "proctrack" rpm is installed.
-1. export OBJECT_MODE=32
-   export PKG_CONFIG="/usr/bin/pkg-config"
+1. Export some environment variables
+   > export OBJECT_MODE=32
+   > export PKG_CONFIG="/usr/bin/pkg-config"
 2. Build with:
-   ./configure --enable-debug --prefix=/opt/freeware \
+   > ./configure --enable-debug --prefix=/opt/freeware \
 	--sysconfdir=/opt/freeware/etc/slurm \
 	--with-ssl=/opt/freeware --with-munge=/opt/freeware \
 	--with-proctrack=/opt/freeware
@@ -119,11 +124,23 @@ To build and run on AIX:
 	%with_munge             "--with-munge=/opt/freeware"
 	%with_proctrack         "--with-proctrack=/opt/freeware"
    Log in to the machine "uP".  uP is currently the lowest-common-denominator
-     AIX machine.
-   CC=/usr/bin/gcc build -s https://eris.llnl.gov/svn/slurm/tags/slurm-1-2-0-0-pre3
-4. export MP_RMLIB=./slurm_ll_api.so
-   export CHECKPOINT=yes
-5. poe hostname -rmpool debug
+   AIX machine.
+   NOTE: build will make a tar-ball based upon ALL of the files in your current
+   local directory. If that includes scratch files, everyone will get those
+   files in the tar-ball. For that reason, it is a good idea to clone a clean
+   copy of the repository and build from that
+   > git clone https://<user_name>@github.com/chaos/slurm.git <local_dir>
+   Build using the following syntax:
+   > export CC=/usr/bin/gcc
+   > build  --snapshot   -s <local_dir>  OR
+   > build  --nosnapshot -s <local_dir>
+   --nosnapshot will name the tar-ball and RPMs based upon the META file
+   --snapshot will name the tar-ball and RPMs based upon the META file plus a
+   timestamp. Do this to make a tar-ball for a non-tagged release.
+4. Test POE after telling POE where to find SLURM's LoadLeveler wrapper.
+   > export MP_RMLIB=./slurm_ll_api.so
+   > export CHECKPOINT=yes
+5. > poe hostname -rmpool debug
 6. To debug, set SLURM_LL_API_DEBUG=3 before running poe - will create a file
      /tmp/slurm.*
    It can also be helpful to use poe options "-ilevel 6 -pmdlog yes"
@@ -205,9 +222,17 @@ not change gnats bug state, but records source files associated
 with the bug.
 
 For memory leaks (for AIX use zerofault, zf; for linux use valgrind)
- - run configure with the option --enable-memory-leak-debug
- - valgrind --tool=memcheck --leak-check=yes --num-callers=6 --leak-resolution=med \
-   ./slurmctld -Dc >ctld.out 2>&1    (or similar like for slurmd)
+ - Run configure with the option "--enable-memory-leak-debug" to completely
+   release allocated memory when the daemons exit
+ - valgrind --tool=memcheck --leak-check=yes --num-callers=8 --leak-resolution=med \
+   ./slurmctld -Dc >valg.ctld.out 2>&1
+ - valgrind --tool=memcheck --leak-check=yes --num-callers=8 --leak-resolution=med \
+   ./slurmd -Dc >valg.slurmd.out 2>&1    (Probably only on one node of cluster)
+ - Run the regression test. In the globals.local file include:
+   "set enable_memory_leak_debug 1"
+ - Shutdown the daemons using "scontrol shutdown"
+ - Examine the end of the log files for leaks. pthread_create() and dlopen()
+   have small memory leaks on some systems, which do not grow over time
 
 Before new major release:
  - Test on ia64, i386, x86_64, BGL, AIX, OSX, XCPU
diff --git a/COPYING b/COPYING
index afd7942ba..0712e040f 100644
--- a/COPYING
+++ b/COPYING
@@ -45,7 +45,7 @@ advertising or product endorsement purposes.
 		       Version 2, June 1991
 
  Copyright (C) 1989, 1991 Free Software Foundation, Inc.
-                       51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA
  Everyone is permitted to copy and distribute verbatim copies
  of this license document, but changing it is not allowed.
 
@@ -211,7 +211,7 @@ access to copy from a designated place, then offering equivalent
 access to copy the source code from the same place counts as
 distribution of the source code, even though third parties are not
 compelled to copy the source along with the object code.
-
+
   4. You may not copy, modify, sublicense, or distribute the Program
 except as expressly provided under this License.  Any attempt
 otherwise to copy, modify, sublicense or distribute the Program is
@@ -268,7 +268,7 @@ impose that choice.
 
 This section is intended to make thoroughly clear what is believed to
 be a consequence of the rest of this License.
-
+
   8. If the distribution and/or use of the Program is restricted in
 certain countries either by patents or by copyrighted interfaces, the
 original copyright holder who places the Program under this License
@@ -321,7 +321,7 @@ PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
 POSSIBILITY OF SUCH DAMAGES.
 
 		     END OF TERMS AND CONDITIONS
-
+
 	    How to Apply These Terms to Your New Programs
 
   If you develop a new program, and you want it to be of the greatest
diff --git a/DISCLAIMER b/DISCLAIMER
index f6080c5a8..cd27e2782 100644
--- a/DISCLAIMER
+++ b/DISCLAIMER
@@ -1,53 +1,116 @@
-Copyright (C) 2008 Lawrence Livermore National Security and Hewlett-Packard.
-Copyright (C) 2002-2007 The Regents of the University of California, 
-   Linux NetworX, Hewlett-Packard and Bull.
-Produced at Lawrence Livermore National Laboratory, Hewlett-Packard, 
-Bull, Linux NetworX, and others.
+SLURM was produced at Lawrence Livermore National Laboratory in collaboration
+with various organizations.
+
+Copyright (C) 2011 Trinity Centre for High Performance Computing
+Copyright (C) 2010-2011 SchedMD LLC
+Copyright (C) 2009 CEA/DAM/DIF
+Copyright (C) 2009-2011 Centro Svizzero di Calcolo Scientifico (CSCS)
+Copyright (C) 2008-2011 Lawrence Livermore National Security
+Copyright (C) 2008 Vijay Ramasubramanian
+Copyright (C) 2007-2008 Red Hat, Inc.
+Copyright (C) 2007-2009 National University of Defense Technology, China
+Copyright (C) 2007-2011 Bull
+Copyright (C) 2005-2008 Hewlett-Packard Development Company, L.P.
+Copyright (C) 2004-2009, Marcus Holland-Moritz
+Copyright (C) 2002-2007 The Regents of the University of California
+Copyright (C) 2002-2003 Linux NetworX
+Copyright (C) 2002 University of Chicago
+Copyright (C) 2001, Paul Marquess
+Copyright (C) 2000 Markus Friedl
+Copyright (C) 1999, Kenneth Albanowski
+Copyright (C) 1998 Todd C. Miller <Todd.Miller@courtesan.com>
+Copyright (C) 1996-2003 Maximum Entropy Data Consultants Ltd,
+Copyright (C) 1995 Tatu Ylonen <ylo@cs.hut.fi>, Espoo, Finland
+Copyright (C) 1989-1994, 1996-1999, 2001 Free Software Foundation, Inc.
+Many other organizations contributed code and/or documentation without
+including a copyright notice.
 
 Written by:
-Don Albert <Don.Albert(at)bull.com>
-Ernest Artiaga <ernest.artiaga(at)bsc.es>
-Danny Auble <auble1(at)llnl.gov>
-Susanne Balle <susanne.balle(at)hp.com>
-Anton Blanchard <anton(at)samba.org>
-Hongjia Cao <hgcao(at)nudt.edu.cn>
-Chuck Clouston <Chuck.Clouston(at)bull.com>
-Daniel Christians <Daniel.Christians(at)hp.com>
-Gilles Civario <gilles.civario(at)bull.net>
-Chris Dunlap <cdunlap(at)llnl.gov>
-Joey Ekstrom <ekstrom1(at)llnl.gov>
-Jim Garlick <garlick(at)llnl.gov>
-Mark Grondona <mgrondona(at)llnl.gov>
-Christopher Holmes <cholmes(at)hp.com>
-Takao Hatazaki <takao.hatazaki(at)hp.com>
-Nathan Huff <nhuff(at)geekshanty.com>
-David Jackson <jacksond(at)clusterresources.com>
-Greg Johnson <gjohnson(at)lanl.gov>
-Morris Jette <jette1(at)llnl.gov>
-Jason King <king49(at)llnl.gov>
-Nancy Kritkausky <Nancy.Kritkausky(at)bull.com>
-Bernard Li <bli(at)bcgsc.ca>
-Puenlap Lee <Puen-Lap.Lee(at)bull.com>
-Donna Mecozzi <mecozzi1(at)llnl.gov>
-Chris Morrone <morrone2(at)llnl.gov>
-Bryan O'Sullivan <bos(at)pathscale.com>
-Gennaro Oliva <oliva.g(at)na.icar.cnr.it>
-Daniel Palermo <dan.palermo(at)hp.com>
-Dan Phung <phung4(at)llnl.gov>
-Ashley Pitman <ashley(at)quadrics.com>
-Andy Riebs <Andy.Riebs(at)hp.com>
-Asier Roa <asier.roa(at)bsc.es>
-Federico Sacerdoti <Federico.Sacerdoti(at)deshaw.com>
-Jeff Squyres <jsquyres(at)lam-mpi.org>
-Keven Tew <tew1(at)llnl.gov>
-Prashanth Tamraparni <prashanth.tamraparni(at)hp.com>
-Jay Windley <jwindley(at)lnxi.com>
-Ann-Marie Wunderlin<Anne-Marie.Wunderlin(at)Bull.com>
+Amjad Majid Ali (Colorado State University)
+Par Andersson (National Supercomputer Centre, Sweden)
+Don Albert (Bull)
+Ernest Artiaga (Barcelona Supercomputer Center, Spain)
+Danny Auble (LLNL, SchedMD LLC)
+Susanne Balle (HP)
+Anton Blanchard (Samba)
+Janne Blomqvist (Aalto University, Finland)
+David Bremer (LLNL)
+Jon Bringhurst (LANL)
+Bill Brophy (Bull)
+Hongjia Cao (National University of Defense Technology, China)
+Daniel Christians (HP)
+Gilles Civario (Bull)
+Chuck Clouston (Bull)
+Joseph Donaghy (LLNL)
+Chris Dunlap (LLNL)
+Joey Ekstrom (LLNL/Brigham Young University)
+Josh England (TGS Management Corporation)
+Kent Engstrom (National Supercomputer Centre, Sweden)
+Jim Garlick (LLNL)
+Didier Gazen (Laboratoire d'Aerologie, France)
+Raphael Geissert (Debian)
+Yiannis Georgiou (Bull)
+Andriy Grytsenko (Massive Solutions Limited, Ukraine)
+Mark Grondona (LLNL)
+Takao Hatazaki (HP, Japan)
+Matthieu Hautreux (CEA, France)
+Chris Holmes (HP)
+David Hoppner
+Nathan Huff (North Dakota State University)
+David Jackson (Adaptive Computing)
+Morris Jette (LLNL, SchedMD LLC)
+Klaus Joas (University Karlsruhe, Germany)
+Greg Johnson (LANL)
+Jason King (LLNL)
+Aaron Knister (Environmental Protection Agency)
+Nancy Kritkausky (Bull)
+Roman Kurakin (Institute of Natural Science and Ecology, Russia)
+Eric Lin (Bull)
+Don Lipari (LLNL)
+Puenlap Lee (Bull)
+Dennis Leepow
+Bernard Li (Genome Sciences Centre, Canada)
+Donald Lipari (LLNL)
+Steven McDougall (SiCortex)
+Donna Mecozzi (LLNL)
+Bjorn-Helge Mevik (University of Oslo, Norway)
+Chris Morrone (LLNL)
+Pere Munt (Barcelona Supercomputer Center, Spain)
+Michal Novotny (Masaryk University, Czech Republic)
+Bryan O'Sullivan (Pathscale)
+Gennaro Oliva (Institute of High Performance Computing and Networking, Italy)
+Alejandro Lucero Palau (Barcelona Supercomputer Center, Spain)
+Daniel Palermo (HP)
+Dan Phung (LLNL/Columbia University)
+Ashley Pittman (Quadrics, UK)
+Vijay Ramasubramanian (University of Maryland)
+Krishnakumar Ravi[KK] (HP)
+Petter Reinholdtsen (University of Oslo, Norway)
+Gerrit Renker (Swiss National Computer Centre)
+Andy Riebs (HP)
+Asier Roa (Barcelona Supercomputer Center, Spain)
+Miguel Ros (Barcelona Supercomputer Center, Spain)
+Beat Rubischon (DALCO AG, Switzerland)
+Dan Rusak (Bull)
+Eygene Ryabinkin (Kurchatov Institute, Russia)
+Federico Sacerdoti (D.E. Shaw)
+Rod Schultz (Bull)
+Tyler Strickland (University of Florida)
+Jeff Squyres (LAM MPI)
+Prashanth Tamraparni (HP, India)
+Jimmy Tang (Trinity College, Ireland)
+Kevin Tew (LLNL/Brigham Young University)
+Adam Todorski (Rensselaer Polytechnic Institute)
+Nathan Weeks (Iowa State University)
+Tim Wickberg (Rensselaer Polytechnic Institute)
+Ramiro Brito Willmersdorf (Universidade Federal de Pernambuco, Brazil)
+Jay Windley (Linux NetworX)
+Anne-Marie Wunderlin (Bull)
 
 CODE-OCEC-09-009. All rights reserved.
 
 This file is part of SLURM, a resource management program.
-For details, see <https://computing.llnl.gov/linux/slurm/>.
+For details, see <http://www.schedmd.com/slurmdocs/>.
 Please also read the supplied file: DISCLAIMER.
 
 SLURM is free software; you can redistribute it and/or modify it under
diff --git a/META b/META
index ea82c67c3..2ec6e12d1 100644
--- a/META
+++ b/META
@@ -1,11 +1,11 @@
   Api_age:       0
-  Api_current:   22
+  Api_current:   23
   Api_revision:  0
   Major:         2
   Meta:          1
-  Micro:         7
-  Minor:         2
+  Micro:         2
+  Minor:         3
   Name:          slurm
   Release:       1
   Release_tags:  dist
-  Version:       2.2.7
+  Version:       2.3.2
diff --git a/Makefile.am b/Makefile.am
index 4048bc6df..3436f6c5d 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -10,11 +10,12 @@ EXTRA_DIST =			\
 	etc/slurm.epilog.clean	\
 	etc/init.d.slurm	\
 	etc/init.d.slurmdbd	\
-	etc/cgroup.conf.example	\
-	etc/cgroup.release_agent \
+	etc/cgroup.conf.example \
+	etc/cgroup.release_common.example \
+	etc/cgroup_allowed_devices_file.conf.example \
 	autogen.sh		\
 	slurm.spec		\
-	README			\
+	README.rst		\
 	RELEASE_NOTES		\
 	DISCLAIMER		\
 	COPYING			\
diff --git a/Makefile.in b/Makefile.in
index adda65787..5957ece7c 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -36,7 +36,7 @@ build_triplet = @build@
 host_triplet = @host@
 target_triplet = @target@
 subdir = .
-DIST_COMMON = README $(am__configure_deps) $(pkginclude_HEADERS) \
+DIST_COMMON = $(am__configure_deps) $(pkginclude_HEADERS) \
 	$(srcdir)/Makefile.am $(srcdir)/Makefile.in \
 	$(srcdir)/config.h.in $(srcdir)/config.xml.in \
 	$(top_srcdir)/configure \
@@ -69,6 +69,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -79,6 +80,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -178,7 +180,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -215,6 +220,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -272,6 +278,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -307,6 +314,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
@@ -369,11 +377,12 @@ EXTRA_DIST = \
 	etc/slurm.epilog.clean	\
 	etc/init.d.slurm	\
 	etc/init.d.slurmdbd	\
-	etc/cgroup.conf.example	\
-	etc/cgroup.release_agent \
+	etc/cgroup.conf.example \
+	etc/cgroup.release_common.example \
+	etc/cgroup_allowed_devices_file.conf.example \
 	autogen.sh		\
 	slurm.spec		\
-	README			\
+	README.rst		\
 	RELEASE_NOTES		\
 	DISCLAIMER		\
 	COPYING			\
diff --git a/NEWS b/NEWS
index b7e40afd9..a8acda33b 100644
--- a/NEWS
+++ b/NEWS
@@ -1,6 +1,494 @@
 This file describes changes in recent versions of SLURM. It primarily
 documents those changes that are of interest to users and admins.
 
+* Changes in SLURM 2.3.2
+========================
+ -- Add configure option of "--without-rpath" which builds SLURM tools without
+    the rpath option, which will work if Munge and BlueGene libraries are in
+    the default library search path and make system updates easier.
+ -- Fixed issue where if a job ended with ESLURMD_UID_NOT_FOUND and
+    ESLURMD_GID_NOT_FOUND where slurm would be a little over zealous
+    in treating missing a GID or UID as a fatal error.
+ -- Backfill scheduling - Add SchedulerParameters configuration parameter of
+    "bf_res" to control the resolution in the backfill scheduler's data about
+    when jobs begin and end. Default value is 60 seconds (used to be 1 second).
+ -- Cray - Remove the "family" specification from the GPU reservation request.
+ -- Updated set_oomadj.c, replacing deprecated oom_adj reference with
+    oom_score_adj
+ -- Fix resource allocation bug, generic resources allocation was ignoring the
+    job's ntasks_per_node and cpus_per_task parameters. Patch from Carles
+    Fenoy, BSC.
+ -- Avoid orphan job step if slurmctld is down when a job step completes.
+ -- Fix Lua link order, patch from Pär Andersson, NSC.
+ -- Set SLURM_CPUS_PER_TASK=1 when user specifies --cpus-per-task=1.
+ -- Fix for fatal error managing GRES. Patch by Carles Fenoy, BSC.
+ -- Fixed race condition when using the DBD in accounting where if a job
+    wasn't started at the time the eligible message was sent but started
+    before the db_index was returned information like start time would be lost.
+ -- Fix issue in accounting where normalized shares could be updated
+    incorrectly when getting fairshare from the parent.
+ -- Fixed if not enforcing associations  but want QOS support for a default
+    qos on the cluster to fill that in correctly.
+ -- Fix in select/cons_res for "fatal: cons_res: sync loop not progressing"
+    with some configurations and job option combinations.
+ -- BLUEGENE - Fixed issue with handling HTC modes and rebooting.
+
+* Changes in SLURM 2.3.1
+========================
+ -- Do not remove the backup slurmctld's pid file when it assumes control, only
+    when it actually shuts down. Patch from Andriy Grytsenko (Massive Solutions
+    Limited).
+ -- Avoid clearing a job's reason from JobHeldAdmin or JobHeldUser when it is
+    otherwise updated using scontrol or sview commands. Patch based upon work
+    by Phil Eckert (LLNL).
+ -- BLUEGENE - Fix for if changing the defined blocks in the bluegene.conf and
+    jobs happen to be running on blocks not in the new config.
+ -- Many cosmetic modifications to eliminate warning message from GCC version
+    4.6 compiler.
+ -- Fix for sview reservation tab when finding correct reservation.
+ -- Fix for handling QOS limits per user on a reconfig of the slurmctld.
+ -- Do not treat the absence of a gres.conf file as a fatal error on systems
+    configured with GRES, but set GRES counts to zero.
+ -- BLUEGENE - Update correctly the state in the reason of a block if an
+    admin sets the state to error.
+ -- BLUEGENE - handle reason of blocks in error more correctly between
+    restarts of the slurmctld.
+ -- BLUEGENE - Fix minor potential memory leak when setting block error reason.
+ -- BLUEGENE - Fix if running in Static/Overlap mode and full system block
+    is in an error state, won't deny jobs.
+ -- Fix for accounting where your cluster isn't numbered in counting order
+    (i.e. 1-9,0 instead of 0-9).  The bug would cause 'sacct -N nodename' to
+    not give correct results on these systems.
+ -- Fix to GRES allocation logic when resources are associated with specific
+    CPUs on a node. Patch from Steve Trofinoff, CSCS.
+ -- Fix bugs in sched/backfill with respect to QOS reservation support and job
+    time limits. Patch from Alejandro Lucero Palau (Barcelona Supercomputer
+    Center).
+ -- BGQ - fix to set up corner correctly for sub block jobs.
+ -- Major re-write of the CPU Management User and Administrator Guide (web
+    page) by Martin Perry, Bull.
+ -- BLUEGENE - If removing blocks from system that once existed cleanup of old
+    block happens correctly now.
+ -- Prevent slurmctld crashing with configuration of MaxMemPerCPU=0.
+ -- Prevent job hold by operator or account coordinator of his own job from
+    being an Administrator Hold rather than User Hold by default.
+ -- Cray - Fix for srun.pl parsing to avoid adding spaces between option and
+    argument (e.g. "-N2" parsed properly without changing to "-N 2").
+ -- Major updates to cgroup support by Mark Grondona (LLNL) and Matthieu
+    Hautreux (CEA) and Sam Lang. Fixes timing problems with respect to the
+    task_epilog. Allows cgroup mount point to be configurable. Added new
+    configuration parameters MaxRAMPercent and MaxSwapPercent. Allow cgroup
+    configuration parameters that are percentages to be floating point.
+ -- Fixed issue where sview wasn't displaying correct nice value for jobs.
+ -- Fixed issue where sview wasn't displaying correct min memory per node/cpu
+    value for jobs.
+ -- Disable some SelectTypeParameters for select/linear that aren't compatible.
+ -- Move slurm_select_init to proper place to avoid loading multiple select
+    plugins in the slurmd.
+ -- BGQ - Include runjob_plugin.so in the bluegene rpm.
+ -- Report correct job "Reason" if needed nodes are DOWN, DRAINED, or
+    NOT_RESPONDING, "Resources" rather than "PartitionNodeLimit".
+ -- BLUEGENE - Fixed issues with running on a sub-midplane system.
+ -- Added some missing calls to allow older versions of SLURM to talk to newer.
+ -- BGQ - allow steps to be run.
+ -- Do not attempt to run HealthCheckProgram on powered down nodes. Patch from
+    Ramiro Alba, Centre Tecnològic de Transferència de Calor, Spain.
+
+* Changes in SLURM 2.3.0-2
+==========================
+ -- Fix for memory issue inside sview.
+ -- Fix issue where if a job was pending and the slurmctld was restarted a
+    variable wasn't initialized in the job structure making it so that job
+    wouldn't run.
+
+* Changes in SLURM 2.3.0
+========================
+ -- BLUEGENE - make sure we only set the jobinfo_select start_loc on a job
+    when we are on a small block, not a regular one.
+ -- BGQ - fix issue where not copying the correct amount of memory.
+ -- BLUEGENE - fix clean start if jobs were running when the slurmctld was
+    shutdown and then the system size changed.  This would probably only happen
+    if you were emulating a system.
+ -- Fix sview for calling a cray system from a non-cray system to get the
+    correct geometry of the system.
+ -- BLUEGENE - fix to correctly import previous version of block state file.
+ -- BLUEGENE - handle loading better when doing a clean start with static
+    blocks.
+ -- Add sinfo format and sort option "%n" for NodeHostName and "%o" for
+    NodeAddr.
+ -- If a job is deferred due to partition limits, then re-test those limits
+    after a partition is modified. Patch from Don Lipari.
+ -- Fix bug which would crash slurmctld if job's owner (not root) tries to clear
+    a job's licenses by setting value to "".
+ -- Cosmetic fix for printing out debug info in the priority plugin.
+ -- In sview when switching from a bluegene machine to a regular linux cluster
+    and vice versa the node->base partition lists will be displayed if setup
+    in your .slurm/sviewrc file.
+ -- BLUEGENE - Fix for creating full system static block on a BGQ system.
+ -- BLUEGENE - Fix deadlock issue if toggling between Dynamic and Static block
+    allocation with jobs running on blocks that don't exist in the static
+    setup.
+ -- BLUEGENE - Modify code to only give HTC states to BGP systems and not
+    allow them on Q systems.
+ -- BLUEGENE - Make it possible for an admin to define multiple dimension
+    conn_types in a block definition.
+ -- BGQ - Alter tools to output multiple dimensional conn_type.
+
+* Changes in SLURM 2.3.0.rc2
+============================
+ -- With sched/wiki or sched/wiki2 (Maui or Moab scheduler), insure that a
+    requeued job's priority is reset to zero.
+ -- BLUEGENE - fix to run steps correctly in a BGL/P emulated system.
+ -- Fixed issue where if there was a network issue between the slurmctld and
+    the DBD where both remained up but were disconnected the slurmctld would
+    get registered again with the DBD.
+ -- Fixed issue where if the DBD connection from the ctld goes away because of
+    a POLLERR the dbd_fail callback is called.
+ -- BLUEGENE - Fix to smap command-line mode display.
+ -- Change in GRES behavior for job steps: A job step's default generic
+    resource allocation will be set to that of the job. If a job step's --gres
+    value is set to "none" then none of the generic resources which have been
+    allocated to the job will be allocated to the job step.
+ -- Add srun environment value of SLURM_STEP_GRES to set default --gres value
+    for a job step.
+ -- Require SchedulerTimeSlice configuration parameter to be at least 5 seconds
+    to avoid thrashing slurmd daemon.
+ -- Cray - Fix to make nodes state in accounting consistent with state set by
+    ALPS.
+ -- Cray - A node DOWN to ALPS will be marked DOWN to SLURM only after reaching
+    SlurmdTimeout. In the interim, the node state will be NO_RESPOND. This
+    change makes SLURM's handling of the node DOWN state more consistent
+    with ALPS. This change affects only Cray systems.
+ -- Cray - Fix to work with 4.0.* instead of just 4.0.0
+ -- Cray - Modify srun/aprun wrapper to map --exclusive to -F exclusive and
+    --share to -F share. Note this does not consider the partition's Shared
+    configuration, so it is an imperfect mapping of options.
+ -- BLUEGENE - Added notice in the print config to tell if you are emulated
+    or not.
+ -- BLUEGENE - Fix job step scalability issue with large task count.
+ -- BGQ - Improved c-node selection when asked for a sub-block job that
+    cannot fit into the available shape.
+ -- BLUEGENE - Modify "scontrol show step" to show  I/O nodes (BGL and BGP) or
+    c-nodes (BGQ) allocated to each step. Change field name from "Nodes=" to
+    "BP_List=".
+ -- Code cleanup on step request to get the correct select_jobinfo.
+ -- Memory leak fixed for rolling up accounting with down clusters.
+ -- BGQ - fix issue where if first job step is the entire block and then the
+    next parallel step is run on a sub block, SLURM won't over subscribe cnodes.
+ -- Treat duplicate switch name in topology.conf as fatal error. Patch from Rod
+    Schultz, Bull
+ -- Minor update to documentation describing the AllowGroups option for a
+    partition in the slurm.conf.
+ -- Fix problem with _job_create() when not using qos's.  It makes
+    _job_create() consistent with similar logic in select_nodes().
+ -- GrpCPURunMins in a QOS flushed out.
+ -- Fix for squeue -t "CONFIGURING" to actually work.
+ -- CRAY - Add cray.conf parameter of SyncTimeout, maximum time to defer job
+    scheduling if SLURM node or job state are out of synchronization with ALPS.
+ -- If salloc was run as interactive, with job control, reset the foreground
+    process group of the terminal to the process group of the parent pid before
+    exiting. Patch from Don Albert, Bull.
+ -- BGQ - set up the corner of a sub block correctly based on a relative
+    position in the block instead of absolute.
+ -- BGQ - make sure the recently added select_jobinfo of a step launch request
+    isn't sent to the slurmd where environment variables would be overwritten
+    incorrectly.
+
+* Changes in SLURM 2.3.0.rc1
+============================
+ -- NOTE THERE HAVE BEEN NEW FIELDS ADDED TO THE JOB AND PARTITION STATE SAVE
+    FILES AND RPCS. PENDING AND RUNNING JOBS WILL BE LOST WHEN UPGRADING FROM
+    EARLIER VERSION 2.3 PRE-RELEASES AND RPCS WILL NOT WORK WITH EARLIER
+    VERSIONS.
+ -- select/cray: Add support for Accelerator information including model and
+    memory options.
+ -- Cray systems: Add support to suspend/resume salloc command to insure that
+    aprun does not get initiated when the job is suspended. Processes suspended
+    and resumed are determined by using process group ID and parent process ID,
+    so some processes may be missed. Since salloc runs as a normal user, its
+    ability to identify processes associated with a job is limited.
+ -- Cray systems: Modify smap and sview to display all nodes even if multiple
+    nodes exist at each coordinate.
+ -- Improve efficiency of select/linear plugin with topology/tree plugin
+    configured, Patch by Andriy Grytsenko (Massive Solutions Limited).
+ -- For front-end architectures on which job steps are run (emulated Cray and
+    BlueGene systems only), fix bug that would free memory still in use.
+ -- Add squeue support to display a job's license information. Patch by Andy
+    Roosen (University of Delaware).
+ -- Add flag to the select APIs for job suspend/resume indicating if the action
+    is for gang scheduling or an explicit job suspend/resume by the user. Only
+    an explicit job suspend/resume will reset the job's priority and make
+    resources exclusively held by the job available to other jobs.
+ -- Fix possible invalid memory reference in sched/backfill. Patch by Andriy
+    Grytsenko (Massive Solutions Limited).
+ -- Add select_jobinfo to the task launch RPC. Based upon patch by Andriy
+    Grytsenko (Massive Solutions Limited).
+ -- Add DefMemPerCPU/Node and MaxMemPerCPU/Node to partition configuration.
+    This improves flexibility when gang scheduling only specific partitions.
+ -- Added new enums to print out when a job is held by a QOS instead of an
+    association limit.
+ -- Enhancements to sched/backfill performance with select/cons_res plugin.
+    Patch from Bjørn-Helge Mevik, University of Oslo.
+ -- Correct job run time reported by smap for suspended jobs.
+ -- Improve job preemption logic to avoid preempting more jobs than needed.
+ -- Add contribs/arrayrun tool providing support for job arrays. Contributed by
+    Bjørn-Helge Mevik, University of Oslo. NOTE: Not currently packaged as RPM
+    and manual file editing is required.
+ -- When suspending a job, wait 2 seconds instead of 1 second between sending
+    SIGTSTP and SIGSTOP. Some MPI implementation were not stopping within the
+    1 second delay.
+ -- Add support for managing devices based upon Linux cgroup container. Based
+    upon patch by Yiannis Georgiou, Bull.
+ -- Fix memory buffering bug if a AllowGroups parameter of a partition has 100
+    or more users. Patch by Andriy Grytsenko (Massive Solutions Limited).
+ -- Fix bug in generic resource tracking of gres associated with specific CPUs.
+    Resources were being over-allocated.
+ -- On systems with front-end nodes (IBM BlueGene and Cray) limit batch jobs to
+    only one CPU of these shared resources.
+ -- Set SLURM_MEM_PER_CPU or SLURM_MEM_PER_NODE environment variables for both
+    interactive (salloc) and batch jobs if the job has a memory limit. For Cray
+    systems also set CRAY_AUTO_APRUN_OPTIONS environment variable with the
+    memory limit.
+ -- Fix bug in select/cons_res task distribution logic when tasks-per-node=0.
+    Patch from Rod Schultz, Bull.
+ -- Restore node configuration information (CPUs, memory, etc.) for powered
+    down nodes when the slurmctld daemon restarts rather than waiting for the
+    node to be restored to service and getting the information from the node
+    (NOTE: Only relevant if FastSchedule=0).
+ -- For Cray systems with the srun2aprun wrapper, rebuild the srun man page
+    identifying the srun options which are valid on that system.
+ -- BlueGene: Permit users to specify a separate connection type for each
+    dimension (e.g. "--conn-type=torus,mesh,torus").
+ -- Add the ability for a user to limit the number of leaf switches in a job's
+    allocation using the --switch option of salloc, sbatch and srun. There is
+    also a new SchedulerParameters value of max_switch_wait, which a SLURM
+    administrator can used to set a maximum job delay and prevent a user job
+    from blocking lower priority jobs for too long. Based on work by Rod
+    Schultz, Bull.
+
+* Changes in SLURM 2.3.0.pre6
+=============================
+ -- NOTE: THERE HAS BEEN A NEW FIELD ADDED TO THE CONFIGURATION RESPONSE RPC
+    AS SHOWN BY "SCONTROL SHOW CONFIG". THIS FUNCTION WILL ONLY WORK WHEN THE
+    SERVER AND CLIENT ARE BOTH RUNNING SLURM VERSION 2.3.0.pre6
+ -- Modify job expansion logic to support licenses, generic resources, and
+    currently running job steps.
+ -- Added an rpath if using the --with-munge option of configure.
+ -- Add support for multiple sets of DEFAULT node, partition, and frontend
+    specifications in slurm.conf so that default values can be changed multiple
+    times as the configuration file is read.
+ -- BLUEGENE - Improved logic to place small blocks in free space before freeing
+    larger blocks.
+ -- Add optional argument to srun's --kill-on-bad-exit so that user can set
+    its value to zero and override a SLURM configuration parameter of
+    KillOnBadExit.
+ -- Fix bug in GraceTime support for preempted jobs that prevented proper
+    operation when more than one job was being preempted. Based on patch from
+    Bill Brophy, Bull.
+ -- Fix for running sview from a non-bluegene cluster to a bluegene cluster.
+    Regression from pre5.
+ -- If job's TMPDIR environment is not set or is not usable, reset to "/tmp".
+    Patch from Andriy Grytsenko (Massive Solutions Limited).
+ -- Remove logic for defunct RPC: DBD_GET_JOBS.
+ -- Propagate DebugFlag changes by scontrol to the plugins.
+ -- Improve accuracy of REQUEST_JOB_WILL_RUN start time with respect to higher
+    priority pending jobs.
+ -- Add -R/--reservation option to squeue command as a job filter.
+ -- Add scancel support for --clusters option.
+ -- Note that scontrol and sprio can only support a single cluster at one time.
+ -- Add support to salloc for a new environment variable SALLOC_KILL_CMD.
+ -- Add scontrol ability to increment or decrement a job or step time limit.
+ -- Add support for SLURM_TIME_FORMAT environment variable to control time
+    stamp output format. Work by Gerrit Renker, CSCS.
+ -- Fix error handling in mvapich plugin that could cause srun to enter an
+    infinite loop under rare circumstances.
+ -- Add support for multiple task plugins. Patch from Andriy Grytsenko (Massive
+    Solutions Limited).
+ -- Addition of per-user node/cpu limits for QOS's. Patch from Aaron Knister,
+    UMBC.
+ -- Fix logic for multiple job resize operations.
+ -- BLUEGENE - many fixes to make things work correctly on a L/P system.
+ -- Fix bug in layout of job step with --nodelist option plus node count. Old
+    code could allocate too few nodes.
+
+* Changes in SLURM 2.3.0.pre5
+=============================
+ -- NOTE: THERE HAS BEEN A NEW FIELD ADDED TO THE JOB STATE FILE. UPGRADES FROM
+    VERSION 2.3.0-PRE4 WILL RESULT IN LOST JOBS UNLESS THE "orig_dependency"
+    FIELD IS REMOVED FROM JOB STATE SAVE/RESTORE LOGIC. ON CRAY SYSTEMS A NEW
+    "confirm_cookie" FIELD WAS ADDED AND HAS THE SAME EFFECT OF DISABLING JOB
+    STATE RESTORE.
+ -- BLUEGENE - Improve speed of start up when removing blocks at the beginning.
+ -- Correct init.d/slurm status to have non-zero exit code if ANY Slurm
+    daemon that should be running on the node is not running. Patch from Rod
+    Schulz, Bull.
+ -- Improve accuracy of response to "srun --test-only jobid=#".
+ -- Fix bug in front-end configurations which reports job_cnt_comp underflow
+    errors after slurmctld restarts.
+ -- Eliminate "error from _trigger_slurmctld_event in backup.c" due to lack of
+    event triggers.
+ -- Fix logic in BackupController to properly recover front-end node state and
+    avoid purging active jobs.
+ -- Added man pages to html pages and the new cpu_management.html page.
+    Submitted by Martin Perry / Rod Schultz, Bull.
+ -- Job dependency information will only show the currently active dependencies
+    rather than the original dependencies. From Dan Rusak, Bull.
+ -- Add RPCs to get the SPANK environment variables from the slurmctld daemon.
+    Patch from Andrej N. Gritsenko.
+ -- Updated plugins/task/cgroup/task_cgroup_cpuset.c to support newer
+    HWLOC_API_VERSION.
+ -- Do not build select/bluegene plugin if C++ compiler is not installed.
+ -- Add new configure option --with-srun2aprun to build an srun command
+    which is a wrapper over Cray's aprun command and supports many srun
+    options. Without this option, the srun command will advise the user
+    to use the aprun command.
+ -- Change container ID supported by proctrack plugin from 32-bit to 64-bit.
+ -- Added contribs/cray/libalps_test_programs.tar.gz with tools to validate
+    SLURM's logic used to support Cray systems.
+ -- Create RPM for srun command that is a wrapper for the Cray/ALPS aprun
+    command. Dependent upon .rpmmacros parameter of "%_with_srun2aprun".
+ -- Add configuration parameter MaxStepCount to limit effect of bad batch
+    scripts.
+ -- Moving to github
+ -- Fix for handling a 2.3 system talking to a 2.2 slurmctld.
+ -- Add contribs/lua/job_submit.license.lua script. Update job_submit and Lua
+    related documentation.
+ -- Test if _make_batch_script() is called with a NULL script.
+ -- Increase hostlist support from 24k to 64k nodes.
+ -- Renamed the Accounting Storage database's "DerivedExitString" job field to
+    "Comment".  Provided backward compatible support for "DerivedExitString" in
+    the sacctmgr tool.
+ -- Added the ability to save the job's comment field to the Accounting
+    Storage db (to the formerly named, "DerivedExitString" job field).  This
+    behavior is enabled by a new slurm.conf parameter:
+    AccountingStoreJobComment.
+ -- Test if _make_batch_script() is called with a NULL script.
+ -- Increase hostlist support from 24k to 64k nodes.
+ -- Fix srun to handle signals correctly when waiting for a step creation.
+ -- Preserve the last job ID across slurmctld daemon restarts even if the job
+    state file can not be fully recovered.
+ -- Made the hostlist functions be able to arbitrarily handle any size
+    dimension no matter what the size of the cluster is in dimensions.
+
+* Changes in SLURM 2.3.0.pre4
+=============================
+ -- Add GraceTime to Partition and QOS data structures. Preempted jobs will be
+    given this time interval before termination. Work by Bill Brophy, Bull.
+ -- Add the ability for scontrol and sview to modify slurmctld DebugFlags
+    values.
+ -- Various Cray-specific patches:
+    - Fix a bug in distinguishing XT from XE.
+    - Avoids problems with empty nodenames on Cray.
+    - Check whether ALPS is hanging on to nodes, which happens if ALPS has not
+      yet cleaned up the node partition.
+    - Stops select/cray from clobbering node_ptr->reason.
+    - Perform 'safe' release of ALPS reservations using inventory and apkill.
+    - Compile-time sanity check for the apbasil and apkill files.
+    - Changes error handling in do_basil_release() (called by
+      select_g_job_fini()).
+    - Warn that salloc --no-shell option is not supported on Cray systems.
+ -- Add a reservation flag of "License_Only". If set, then jobs using the
+    reservation may use the licenses associated with it plus any compute nodes.
+    Otherwise the job is limited to the compute nodes associated with the
+    reservation.
+ -- Change slurm.conf node configuration parameter from "Procs" to "CPUs".
+    Both parameters will be supported for now.
+ -- BLUEGENE - fix for when user requests only midplane names with no count at
+    job submission time to process the node count correctly.
+ -- Fix job step resource allocation problem when both node and tasks counts
+    are specified. New logic selects nodes with larger CPU counts as needed.
+ -- BGQ - make it so srun wraps runjob (still under construction, but works
+    for most cases)
+ -- Permit a job's QOS and Comment field to both change in a single RPC. This
+    was previously disabled since Moab stored the QOS within the Comment field.
+ -- Add support for jobs to expand in size. Submit additional batch job with
+    the option "--dependency=expand:<jobid>". See web page "faq.html#job_size"
+    for details. Restrictions to be removed in the future.
+ -- Added --with-alps-emulation to configure, and also an optional cray.conf
+    to setup alps location and database information.
+ -- Modify PMI data types from 16-bits to 32-bits in order to support MPICH2
+    jobs with more than 65,536 tasks. Patch from Hongjia Cao, NUDT.
+ -- Set slurmd's soft process CPU limit equal to its hard limit and notify the
+    user if the limit is not infinite.
+ -- Added proctrack/cgroup and task/cgroup plugins from Matthieu Hautreux, CEA.
+ -- Fix slurmctld restart logic that could leave nodes in UNKNOWN state for a
+    longer time than necessary after restart.
+
+* Changes in SLURM 2.3.0.pre3
+=============================
+ -- BGQ - Appears to work correctly in emulation mode, no sub blocks just yet.
+ -- Minor typos fixed
+ -- Various bug fixes for Cray systems.
+ -- Fix bug that when setting a compute node to idle state, it was failing to
+    set the systems up_node_bitmap.
+ -- BLUEGENE - code reorder
+ -- BLUEGENE - Now only one select plugin for all Bluegene systems.
+ -- Modify srun to set the SLURM_JOB_NAME environment variable when srun is
+    used to create a new job allocation. Not set when srun is used to create a
+    job step within an existing job allocation.
+ -- Modify init.d/slurm script to start multiple slurmd daemons per compute
+    node if so configured. Patch from Matthieu Hautreux, CEA.
+ -- Change license data structure counters from uint16_t to uint32_t to support
+    larger license counts.
+
+* Changes in SLURM 2.3.0.pre2
+=============================
+ -- Log a job's requeue or cancellation due to preemption to that job's stderr:
+    "*** JOB 65547 CANCELLED AT 2011-01-21T12:59:33 DUE TO PREEMPTION ***".
+ -- Added new job termination state of JOB_PREEMPTED, "PR" or "PREEMPTED" to
+    indicate job termination was due to preemption.
+ -- Optimize advanced reservations resource selection for computer topology.
+    The logic has been added to select/linear and select/cons_res, but will
+    not be enabled until the other select plugins are modified.
+ -- Remove checkpoint/xlch plugin.
+ -- Disable deletion of partitions that have unfinished jobs (pending,
+    running or suspended states). Patch from Martin Perry, BULL.
+ -- In sview, disable the sorting of node records by name at startup for
+    clusters over 1000 nodes. Users can enable this by selecting the "Name"
+    tab. This change dramatically improves scalability of sview.
+ -- Report error when trying to change a node's state from scontrol for Cray
+    systems.
+ -- Do not attempt to read the batch script for non-batch jobs. This patch
+    eliminates some inappropriate error messages.
+ -- Preserve NodeHostName when reordering nodes due to system topology.
+ -- On Cray/ALPS systems  do node inventory before scheduling jobs.
+ -- Disable some salloc options on Cray systems.
+ -- Disable scontrol's wait_job command on Cray systems.
+ -- Disable srun command on native Cray/ALPS systems.
+ -- Updated configure option "--enable-cray-emulation" (still under
+    development) to emulate a cray XT/XE system, and auto-detect a real
+    Cray XT/XE systems (removed no longer needed --enable-cray configure
+    option).  Building on native Cray systems requires the
+    cray-MySQL-devel-enterprise rpm and expat XML parser library/headers.
+
+* Changes in SLURM 2.3.0.pre1
+=============================
+ -- Added that when a slurmctld closes the connection to the database it's
+    registered host and port are removed.
+ -- Added flag to slurmdbd.conf TrackSlurmctldDown where if set will mark idle
+    resources as down on a cluster when a slurmctld disconnects or is no
+    longer reachable.
+ -- Added support for more than one front-end node to run slurmd on
+    architectures where the slurmd does not execute on the compute nodes
+    (e.g. BlueGene). New configuration parameters FrontendNode and FrontendAddr
+    added. See "man slurm.conf" for more information.
+ -- With the scontrol show job command when using the --details option, show
+    a batch job's script.
+ -- Add ability to create reservations or partitions and submit batch jobs
+    using sview. Also add the ability to delete reservations and partitions.
+ -- Added new configuration parameter MaxJobId. Once reached, restart job ID
+    values at FirstJobId.
+ -- When restarting slurmctld with priority/basic, increment all job priorities
+    so the highest job priority becomes TOP_PRIORITY.
+
+* Changes in SLURM 2.2.8
+========================
+ -- Prevent background salloc disconnecting terminal at termination. Patch by
+    Don Albert, Bull.
+ -- Fixed issue where preempt mode is skipped when creating a QOS. Patch by
+    Bill Brophy, Bull.
+ -- Fixed documentation (html) for PriorityUsageResetPeriod to match that in the
+    man pages. Patch by Nancy Kritkausky, Bull.
 
 * Changes in SLURM 2.2.7
 ========================
@@ -81,6 +569,8 @@ documents those changes that are of interest to users and admins.
  -- Improve backfill scheduling logic when job specifies --ntasks-per-node and
     --mem-per-cpu options on a heterogeneous cluster. Patch from Bjorn-Helge
     Mevik, University of Oslo.
+ -- Print warning message if srun specifies --cpus-per-task larger than used
+    to create job allocation.
  -- Fix issue when changing a users name in accounting, if using wckeys would
     execute correctly, but bad memcopy would core the DBD.  No information
     would be lost or corrupted, but you would need to restart the DBD.
@@ -1594,4739 +2084,3 @@ documents those changes that are of interest to users and admins.
  -- Replaced many calls to getpwuid() with reentrant uid_to_string()
 -- The slurmstepd will now refresh its log file handle on a reconfig,
     previously if a log was rolled any output from the stepd was lost.
-
-* Changes in SLURM 2.1.0-pre9
-=============================
- -- Added the "scontrol update SlurmctldDebug" as the preferred alternative to
-    the "scontrol setdebug" command.
- -- BLUEGENE - made it so when removing a block in an error state the nodes in
-    the block are set correctly in accounting as not in error.
- -- Fixed issue where if slurmdbd is not up qos' are set up correctly for
-    associations off of cache.
- -- scontrol, squeue, sview all display the correct node, cpu count along with
-    correct corresponding nodelist on completing jobs.
- -- Patch (Mark Grondona) fixes serious security vulnerability in SLURM in
-    the spank_job_env functionality.
- -- Improve spank_job_env interface and documentation
- -- Add ESPANK_NOT_LOCAL error code to spank_err_t
- -- Made the #define DECAY_INTERVAL used in the priority/multifactor plugin
-    a slurm.conf variable (PriorityCalcPeriod)
- -- Added new macro SLURM_VERSION for use in autoconf scripts to determine
-    current version of slurm installed on system when building against the api.
- -- Patch from Matthieu Hautreux that adds an entry into the error file when
-    a job or step receives a TERM or KILL signal.
- -- Make it so env var SLURM_SRUN_COMM_HOST is overwritten if already in 
-    existence in the slurmd.
-
-* Changes in SLURM 2.1.0-pre8
-=============================
- -- Rearranged the "scontrol show job" output into functional groupings
- -- Change the salloc/sbatch/srun -P option to -d (dependency)
- -- Removed the srun -d option; must use srun --slurmd-debug instead
- -- When running the mysql plugin natively MUNGE errors are now eliminated 
-    when sending updates to slurmctlds.
- -- Check to make sure we have a default account before looking to 
-    fill in default association. 
- -- Accounting - Slurmctld and slurmdbd will now set uids of users which were 
-    created after the start of the daemons on reconfig.  Slurmdbd will 
-    attempt to set previously non-existent uids every hour.
- -- Patch from Aaron Knister and Mark Grondona, to parse correctly quoted 
-    #SBATCH options in a batch script.
- -- job_desc_msg_t - in, out, err have been changed to std_in, std_out, 
-    and std_err respectively.  Needed for PySLURM, since Python sees (in) 
-    as a keyword.
- -- Changed the type of addr to struct sockaddr_in in _message_socket_accept()
-    in sattach.c, step_launch.c, and allocate_msg.c, and moved the function 
-    into a common place for all the calls since the code was very similar.
- -- proctrack/lua support has been added see contribs/lua/protrack.lua
- -- replaced local gtk m4 test with AM_PATH_GTK_2_0
- -- changed AC_CHECK_LIB to AC_SEARCH_LIBS to avoid extra libs in
-    compile lines.
- -- Patch from Matthieu Hautreux to improve error message in slurmd/req.c
- -- Added support for split groups from (Matthieu Hautreux CEA)
- -- Patch from Mark Grondona to move blcr scripts into pkglibexecdir
- -- Patch from Doug Parisek to calculate a job's projected start time under the
-    builtin scheduler.
- -- Removed most global variables out of src/common/jobacct_common.h
-
-* Changes in SLURM 2.1.0-pre7
-=============================
- -- BLUEGENE - make 2.1 run correctly on a real bluegene cluster
- -- sacctmgr - Display better debug for when an admin specifies a non-existent 
-    parent account when changing parent accounts.
- -- Added a mechanism to the slurmd to defer the epilog from starting until
-    after a running prolog has finished.
- -- If a node reboots in between checking status the node is marked down unless 
-    ReturnToService=2
- -- Added -R option to slurmctld to recover partition state also when 
-    restarting or reconfiguring.
- 
-* Changes in SLURM 2.1.0-pre6
-=============================
- -- When getting information about nodes in hidden partitions, return a node
-    name of NULL rather than returning no information about the node so that 
-    node index information is still valid.
- -- When querying database for jobs in certain state and a time period is 
-    given only jobs in that state during the period will be returned,
-    previously if a time period was given in sacct jobs eligible to run or 
-    running would be displayed, which is still the default if no states are 
-    requested.
- -- One can now query jobs based on size (nodes and or cpus) (mysql plugin only)
- -- Applied patch from Mark Grondona that tests for a missing config file before
-    any other processing in spank_init().  This now prevents fatal errors from
-    being mistakenly treated as recoverable.
- -- --enable-debug no longer has to be stated at configure time to have 
-    the slurmctld or slurmstepd dump core on a seg fault.
- -- Moved the errant slurm_job_node_ready() declaration from job_info.h to
-    slurm.h and deleted job_info.h.
- -- Added the slurm_job_cpus_allocated_on_node_id() 
-    slurm_job_cpus_allocated_on_node() API for working with the 
-    job_resources_t structure.
- -- BLUEGENE - speed up start up for systems that have many blocks (100+)
-    configured on the system.
-
-* Changes in SLURM 2.1.0-pre5
-=============================
- -- Add squeue option "--start" to report expected start time of pending jobs.
- -- Sched/backfill plugin modified to set expected start time of pending jobs.
- -- Add SchedulerParameters option of "max_job_bf=#" to control how far down
-    the queue of pending jobs that SLURM searches in an attempt to backfill 
-    schedule them. The default value is 50 jobs.
- -- Fixed cause of squeue -o "%C" seg fault.
- -- Add -"-signal=<int>@<time>" option to salloc, sbatch and srun commands to
-    notify programs before reaching the end of their time limit.
- -- Add scontrol option to update a running job's EndTime (also resets the 
-    job's time limit).
- -- Add new job wait reason, ReqNodeNotAvail: Required node is not available 
-    (down or drained).
- -- Log when slurmctld or slurmd are started with small core file limit.
- -- Permit job's owner to change features, processor count, minimum and 
-    maximum node counts of pending jobs (the operation was previously 
-    restricted to user root)
- -- Applied patch from Chuck Clouston for scontrol man page with clarifications
-    and additional info
- -- Change slurm errno name from ESLURM_TOO_MANY_REQUESTED_NODES to 
-    ESLURM_INVALID_NODE_COUNT to better reflect its meaning.
- -- Fix bug in sched/backfill which could result in invalid memory reference
-    when trying to schedule jobs submitted with --exclude option.
- -- Fix for slurmctld deadlock at startup with PreemptMode=SUSPEND,GANG.
- -- Added preemption plugins to RPM.
- -- Completely disable logging of sched/wiki and sched/wiki2 (Maui & Moab) 
-    message traffic unless DebugFlag=Wiki is configured.
- -- Change scontrol show job info: ReqProcs (number of processors requested) 
-    is replaced by NumProcs (number of processors requested or actually 
-    allocated) and ReqNodes (number of nodes requested) is replaced by 
-    NumNodes (number of nodes requested or actually allocated).
- -- Fixed issue when max nodes wasn't specified and was later set by limit 
-    to not request that as the actual maximum.
- -- Move job preemption (for requeue, checkpoint and kill modes only) out of
-    gang scheduling module. Make identification of preemptable jobs an argument
-    to the select_g_job_test function rather than calling preempt plugin from
-    the select plugin. Make output of srun --test-only option include a list
-    of preempted job IDs. 
- -- Better record keeping for front end systems when registering.
- -- Enable memory allocation logic for jobs step (i.e. allocate resources
-    within the job's memory allocation and enforce limits).
- -- handle error state in sinfo
- -- sview and "scontrol show config" now report as SLURM_VERSION the version 
-    of slurmctld rather than that of the command.
- -- Change SuspendTime configuration parameter from 16-bits to 32-bits.
- -- Add environment variable support to sattach, salloc, sbatch and srun
-    to permit user control over exit codes so application exit codes can be
-    distinguished from those generated by SLURM. SLURM_EXIT_ERROR specifies the
-    exit code when a SLURM error occurs. SLURM_EXIT_IMMEDIATE specifies the 
-    exit code when the --immediate option is specified and resources are not
-    available. Any other non-zero exit code would be that of the application
-    run by SLURM.
- -- Added a Quality of Service (QOS) html page.
- -- In sched/wiki2, JOBWILLRUN command, add support for identification of 
-    preemptable and preempted jobs (both new and old format of commands are
-    supported).
- -- Remove contribs/python/hostlist files. Download the materials as needed
-    directly from http://www.nsc.liu.se/~kent/python-hostlist.
- -- BLUEGENE - Preemption now works on bluegene systems
- -- For salloc, sbatch and srun commands, ignore _maximum_ values for
-    --sockets-per-node, --cores-per-socket and --threads-per-core options.
-    Remove --mincores, --minsockets, --minthreads options (map them to 
-    minimum values of -sockets-per-node, --cores-per-socket and 
-    --threads-per-core for now).
-
-* Changes in SLURM 2.1.0-pre4
-=============================
- -- Move processing of node configuration information in slurm.conf and 
-    topology information in topology.conf from slurmctld into common and load 
-    that information into slurmd. Use it to set environment variables for jobs
-    SLURM_TOPOLOGY_ADDR and SLURM_TOPOLOGY_ADDR_PATTERN describing the network
-    topology for each task. Based upon patch from Matthieu Hautreux (CEA).
- -- Correction in computing a job's TotalProcs value when ThreadsPerCore>1 and
-    allocating by cores or sockets.
-
-* Changes in SLURM 2.1.0-pre3
-=============================
- -- Removed sched/gang plugin and moved the logic directly into the slurmctld
-    daemon so that job preemption and gang scheduling can be used with the
-    sched/backfill plugin. Added configuration parameter:
-    PreemptMode=gang|off|suspend|cancel|requeue|checkpoint 
-    to enable/disable gang scheduling and job preemption logic (both are 
-    disabled by default).
-    (NOTE: There are some problems with memory management which could prevent a
-    job from starting when memory would be freed by a job being requeued or 
-    otherwise removed, these are being worked on)
- -- Added PreemptType configuration parameter to identify preemptable jobs.
-    Former users of SchedType=sched/gang should set SchedType=sched/backfill,
-    PreemptType=preempt/partition_prio and PreemptMode=gang,suspend. See
-    web and slurm.conf man page for other options.
-    PreemptType=preempt/qos uses Quality Of Service information in database.
- -- In select/linear, optimize job placement across partitions.
- -- If the --partition option is used with the sinfo or squeue command then
-    print information about even hidden partitions.
- -- Replaced misc cpu allocation members in job_info_t with select_job_res_t
-    which will only be populated when requested (show_flags & SHOW_DETAIL)
- -- Added a --detail option to "scontrol show job" to display the cpu/mem
-    allocation info on a node-by-node basis.
- -- Added logic to give correct request uid for individual steps that 
-    were cancelled.
- -- Created a spank_get_item() option (S_JOB_ALLOC_MEM) that conveys the memory
-    that the select/cons_res plugin has allocated to a job.
- -- BLUEGENE - blocks in error state are now handled correctly in accounting.
- -- Modify squeue to print job step information about a specific job ID using
-    the following syntax: "squeue -j <job_id> -s".
- -- BLUEGENE - scontrol delete block and update block can now remove blocks 
-    on dynamic laid out systems.
- -- BLUEGENE - Vastly improve Dynamic layout mode algorithm.
- -- Address some issues for SLURM support of Solaris.
- -- Applied patch from Doug Parisek (Doug.Parisek@bull.com) for speeding up 
-    start of sview by delaying to creation of tooltips until requested.
- -- Changed GtkToolTips to GtkToolTip for newer versions of GTK.
- -- Applied patch from Rod Schultz (Rod.Schultz@Bull.com) that eliminates
-    ambiguity in the documentation over use of the terms "CPU" and "socket".
- -- Modified get_resource_arg_range() to return full min/max values when input
-    string is null.  This fixes the srun -B option to function as documented.
- -- If the job, node, partition, reservation or trigger state file is missing 
-    or too small, automatically try using the previously saved state (file 
-    name with ".old" suffix).
- -- Set a node's power_up/configuring state flag while PrologSlurmctld is
-    running for a job allocated to that node.
- -- If PrologSlurmctld has a non-zero exit code, requeue the job or kill it.
- -- Added sacct ability to use --format NAME%LENGTH similar to sacctmgr.
- -- Improve hostlist logic for multidimensional systems.
- -- The pam_slurm Pluggable Authentication Module for SLURM previously
-    distributed separately has been moved within the main SLURM distribution
-    and is packaged as a separate RPM.
- -- Added configuration parameter MaxTasksPerNode.
- -- Remove configuration parameter SrunIOTimeout.
- -- Added functionality for sacctmgr show problems.  Current problems include
-    Accounts/Users with no associations, Accounts with no users or subaccounts
-    attached in a cluster, and Users with No UID on the system.
- -- Added new option for sacctmgr list assoc and list cluster WOLimits.  This 
-    gives a smaller default format without the limit information.  This may 
-    be the new default for list associations and list clusters.
- -- Users are now required to have an association with their default account.
-    Sacctmgr will now complain when you try to modify a users default account
-    which they are not associated anywhere.
- -- Fix select/linear bug resulting in run_job_cnt underflow message if a 
-    suspended job is cancelled.
- -- Add test for fsync() error for state save files. Log and retry as needed.
- -- Log fatal errors from slurmd and slurmctld to syslog.
- -- Added error detection and cleanup for the case in which a compute node is 
-    rebooted and restarts its slurmd before its "down" state is noticed.
- -- BLUEGENE systems only - remove vestigial start location from jobinfo.
- -- Add reservation flag of "OVERLAP" to permit a new reservation to use
-    nodes already in another reservation.
- -- Fix so "scontrol update jobid=# nice=0" can clear previous nice value.
- -- BLUEGENE - env vars such as SLURM_NNODES, SLURM_JOB_NUM_NODES, and
-    SLURM_JOB_CPUS_PER_NODE now reference cnode counts instead of midplane
-    counts.  SLURM_NODELIST still references midplane names.
- -- Added qos support to salloc/sbatch/srun/squeue
- -- Added to scancel the ability to select jobs by account and qos
- -- Recycled the "-A" argument indicate "account" for all the commands that
-    accept the --account argument (srun -A to allocate is no longer supported.)
- -- Change sbatch response from "sbatch: Submitted batch job #" written to 
-    stderr to "Submitted batch job #" written to stdout.
- -- Made shutdown and cleanup a little safer for the mvapich and mpich1_p4
-    plugins.
- -- QOS support added with limits, priority and preemption
-    (no documentation yet).
- -- If a slurmd does not have a node listed in its slurm.conf (slurm.conf's 
-    should be kept the same on all nodes) an error message is printed in the 
-    slurmctld log along with the message already being printed in the slurmd 
-    log for easier debugging.
-
-* Changes in SLURM 2.1.0-pre2
-=============================
- -- Added support for smap to query off node name for display.
- -- Slurmdbd modified to set user ID and group ID to SlurmUser if started as 
-    user root.
- -- Configuration parameter ControlMachine changed to  accept multiple comma-
-    separated hostnames for support of some high-availability architectures.
- -- ALTERED API CALL slurm_get_job_steps 0 has been changed to NO_VAL for both
-    job and step id to receive all jobs/steps.  Please make adjustments to
-    your code.
- -- salloc's --wait=<secs> option deprecated by --immediate=<secs> option to 
-    match the srun command.
- -- Add new slurmctld list for node features with node bitmaps for simplified
-    scheduling logic.
- -- Multiple features can be specified when creating a reservation. Use "&" 
-    (AND) or "|" (OR) separators between the feature names.
- -- Changed internal node name caching so that front-end mode would work with
-    multiple lines of node name definitions. 
- -- Add node state flag for power-up/configuring. Represented by "#" suffix
-    on the node state name (e.g. "ALLOCATED#") for command output.
- -- Add CONFIGURING/CF job state flag for node power-up/configuring.
- -- Modify job step cancel logic for scancel and srun (on receipt of SIGTERM 
-    or three SIGINT) to immediately send SIGKILL to spawned tasks.  Previous 
-    logic would send SIGCONT, SIGTERM, wait KillWait seconds, SIGKILL.
- -- Created a spank_get_item() option (S_JOB_ALLOC_CORES) that conveys the cpus
-    that the select/cons_res plugin has allocated to a job.
- -- Improve sview performance (outrageously) on very large machines.
- -- Add support for licenses in resource reservation.
- -- BLUEGENE - Jobs waiting for a block to boot will now be in Configuring
-    state. 
- -- bit_fmt now does not return brackets surrounding any set of data.
-
-* Changes in SLURM 2.1.0-pre1
-=============================
- -- Slurmd notifies slurmctld of node boot time to better clean up after node
-    reboots.
- -- Slurmd sends node registration information repeatedly until successful
-    transmit.
- -- Change job_state in job structure to dedicate 8-bits to state flags. 
-    Added macros to get state information (IS_JOB_RUNNING(job_ptr), etc.)
- -- Added macros to get node state information (IS_NODE_DOWN(node_ptr), etc).
- -- Added support for Solaris. Patch from David Hoppner.
- -- Rename "slurm-aix-federation-<version>.rpm" to just 
-    "slurm-aix-<version>.rpm" (federation switch plugin may not be present).
- -- Eliminated the redundant squeue output format and sort options of 
-    "%o" and "%b". Use "%D" and "%S" formats respectively. Also eliminated 
-    "%X" and "%Y" and "%Z" formats. Use "%z" instead.
- -- Added mechanism for SPANK plugins to set environment variables for
-    Prolog, Epilog, PrologSlurmctld and EpilogSlurmctld programs using
-    the functions spank_get_job_env, spank_set_job_env, and 
-    spank_unset_job_env. See "man spank" for more information.
- -- Completed the work begun in 2.0.0 to standardize on using '-Q' as the
-    --quiet flag for all the commands.
- -- BLUEGENE - sinfo and sview now display correct cpu counts for partitions
- -- Cleaned up the cons_res plugin.  It now uses a ptr to a part_record
-    instead of having to do strcmp's to find the correct one.
- -- Pushed most all the plugin specific info in src/common/node_select.c 
-    into the respected plugin.
- -- BLUEGENE - closed some corner cases where a block could had been removed 
-    while a job was waiting for it to become ready because an underlying 
-    part of the block was put into an error state.
- -- Modify sbcast logic to prevent a user from moving files to nodes they
-    have not been allocated (this would be possible in previous versions
-    only by hacking the sbcast code).
- -- Add contribs/sjstat script (Perl tool to report job state information).
-    Put into new RPM: sjstat.
- -- Add sched/wiki2 (Moab) JOBMODIFY command support for VARIABLELIST option
-    to set supplemental environment variables for pending batch jobs.
- -- BLUEGENE - add support for scontrol show blocks.
- -- Added support for job step time limits.
-
-* Changes in SLURM 2.0.10
-=========================
-
-* Changes in SLURM 2.0.9
-========================
- -- When running the mysql plugin natively MUNGE errors are now eliminated 
-    when sending updates to slurmctlds.
- -- Check to make sure we have a default account before looking to 
-    fill in default association. 
- -- Fix to make it so sched/wiki2 can modify a job's partition or hostlist of 
-    non-pending jobs.
- -- Applied slurmctld prolog bug fix from Dennis Leepow to backfill.c
- -- fixed quite a few typos (needed for debian packages)
- -- make it so slurmctld will core dump without --enable-debug set
- -- Fix issue when doing a rollup on reservations before a cluster has been 
-    added. 
- -- MySQL plugin - When doing archiving end time is now decreased by 1 
-    which should be more correct.
- -- BLUEGENE - Fixed issue where --no-rotate didn't work correctly on job
-    submissions.
- -- BLUEGENE - made the buffer longer when submitting jobs to get the entire 
-    line.  Previously the line could be shortened prematurely.
- -- BLUEGENE - Fix to make sure we don't erroneously set a connection type
-    to SMALL.
- -- Type cast a negative uint64_t to int64_t to avoid confusion when doing 
-    arithmetic with it in accounting dealing with over commit time.
-
-* Changes in SLURM 2.0.8
-========================
- -- BLUEGENE - added dub2 of stderr to put error messages sent from underlying 
-    libraries of the bridge api to the bridgeapi.log
- -- Fixed issue with sacctmgr when modifying a user and specifying 'where'
-    after giving the user name also.
- -- -L, --allclusters now works with sacct
- -- Modified job table to use 32bit u/gids for those with ids greater 
-    than 16 bits.
- -- Made minor changes for slurm to compile cleanly under gcc 4.4.1
- -- Fixed issue with task/affinity when an allocation would run multiple sruns 
-    with the --exclusive flag in an allocation with more than 1 node.  
-    Previously when the first node was filled up no tasks would ever run 
-    on the other nodes.
- -- Fixed sview and sacct to display correct run time and suspend time when 
-    job has been suspended.
- -- Applied patch from Mark Grondona that fixes the validation of the
-    PluginDir to support the colon separated list of directories as documented.
- -- BLUEGENE - squeue -o %R now prints more readable info for small blocks
- -- sacct - fixed garbage being printed out on uninitialized variable.
- -- Fix for mysql plugin when used without slurmdbd to register the 
-    slurmctld properly.
- -- Fix for mysql plugin putting correct hostnames in for running steps.
-
-* Changes in SLURM 2.0.7
-========================
- -- Fix bug in select/cons_res when nodes are configured in more than one 
-    partition and those partitions have different priorities and sched/gang
-    is not configured. CPUs were previously over-allocated.
- -- Fix core of smap when specifying -i option with invalid argument.
- -- Fix issue when using srun --test-only to not put an entry of test 
-    job into accounting.
- -- For OpenMPI use of SLURM reserved ports. If any of the tasks fails to 
-    acquire a reserved port and has an exit code of 108 then srun will 
-    kill all remaining tasks and respawn the tasks. Previous code waited 
-    for tasks to exit.
- -- MySQL plugin - When doing archiving we now get a correct end time.  
-    Previously it would grab an extra day to archive.
- -- BLUEGENE - Handle initial state correctly, previously was setting initial 
-    state to IDLE if UNKNOWN which would make it not set a registration 
-    message to accounting, which could lead to nodes not being listed as up 
-    when they really were.
- -- Fixed buffer size issue with scontrol show hostlist.
- -- Fixed issue with copy in smap -Dc previously command wouldn't work.
- -- BLUEGENE - Update documentation about small blocks in the bluegene.conf 
-    file.
- -- In sched/wiki plugin (for Maui) fix possible message truncation on very 
-    large cluster.
- -- BLUEGENE - Fix for handling undocumented Deallocating to Configuring to 
-    Free block transition state.
- -- BLUEGENE - Fix for overlap mode loading blocks when midplane is in an
-    error state.
- -- Add range check for SuspendTime configuration parameter.
- -- Moved unzipped python-hostname tarball out and the tarball in.
- -- BLUEGENE - Patched memory leak when running state test.
- -- BLUEGENE - fixed slow down generated by slow call rm_get_BG 
-    and polling thread.
-
-* Changes in SLURM 2.0.6
-========================
- -- Fixed seg fault when "scontrol listpids" is invoked for a specific job step
-    on a node on which a stepd is not running.
- -- Fix bug in sched/backfill which could result in invalid memory reference 
-    when trying to schedule jobs submitted with --exclude option.
-
-* Changes in SLURM 2.0.5
-========================
- -- BLUEGENE - Added support for emulating systems with a X-dimension of 4.
- -- BLUEGENE - When a nodecard goes down on a non-Dynamic system SLURM will 
-    now only drain blocks under 1 midplane, if no such block exists then SLURM 
-    will drain the entire midplane and not mark any block in error state.  
-    Previously SLURM would drain every overlapping block of the nodecard 
-    making it possible for a large block to make other blocks not work since 
-    they overlap some other part of the block that really isn't bad.
- -- BLUEGENE - Handle L3 errors on boot better.
- -- Don't revoke a pending batch launch request from the slurmctld if the
-    job is immediately suspended (a normal event with gang scheduling).
- -- BLUEGENE - Fixed issue with restart of slurmctld would allow error block 
-    nodes to be considered for building new blocks when testing if a job would 
-    run.  This is a visual bug only, jobs would never run on new block, but 
-    the block would appear in slurm tools.
- -- Better responsiveness when starting new allocations when running with the 
-    slurmdbd.
- -- Fixed race condition when reconfiguring the slurmctld and using the 
-    consumable resources plugin which would cause the controller to core.
- -- Fixed race condition that sometimes caused jobs to stay in completing
-    state longer than necessary after being terminated.
- -- Fixed issue where if a parent account has a qos added and then a child
-    account has the qos removed the users still get the qos.
- -- BLUEGENE - New blocks in dynamic mode will only be made in the system
-    when the block is actually needed for a job, not when testing.
- -- BLUEGENE - Don't remove larger block used for small block until job starts.
- -- Add new squeue output format and sort option of "%L" to print a job's time 
-    left (time limit minus time used).
- -- BLUEGENE - Fixed draining state count for sinfo/sview.
- -- Fix for sview to not core when viewing nodes allocated to a partition 
-    and the all jobs finish.
- -- Fix cons_res to not core dump when finishing a job running on a 
-    defunct partition.
- -- Don't require a node to have --ntasks-per-node CPUs for use when the 
-    --overcommit option is also used.
- -- Increase the maximum number of tasks which can be launched by a job step
-    per node from 64 to 128. 
- -- sview - make right click on popup window title show sorted list.
- -- scontrol now displays correct units for job min memory and min tmp disk.
- -- better support for salloc/sbatch arbitrary layout for setting correct 
-    SLURM_TASKS_PER_NODE
- -- Env var SLURM_CPUS_ON_NODE is now set correctly depending on the 
-    FastSchedule configuration parameter.
- -- Correction to topology/3d_torus plugin calculation when coordinate value 
-    exceeds "9" (i.e. a hex value).
- -- In sched/wiki2 - Strip single and double quotes out of a node's reason 
-    string to avoid confusing Moab's parser.
- -- Modified scancel to cancel any pending jobs before cancelling any other
- -- Updated sview config info
- -- Fix a couple of bugs with respect to scheduling with overlapping 
-    reservations (one with a flag of "Maintenance").
- -- Fix bug when updating a pending job's nice value after explicitly setting
-    its priority.
- -- We no longer add blank QOS'
- -- Fix task affinity for systems running fastschedule!=0 and they have less 
-    resources configured than in existence.
- -- Slurm.pm loads without warning now on AIX systems
- -- modified pmi code to do strncpy's on the correct len
- -- Fix for filling in a qos structure to return SLURM_SUCCESS on success.
- -- BLUEGENE - Added SLURM_BG_NUM_NODES with cnode count of allocation, 
-    SLURM_JOB_NUM_NODES represents midplane counts until 2.1.
- -- BLUEGENE - Added fix for if a block is in error state and the midplane 
-    containing the block is also set to drain/down.  This previously 
-    prevented dynamic creation of new blocks when this state was present.
- -- Fixed bug where a users association limits were not enforced, only 
-    parent limits were being enforced.
- -- For OpenMPI use of SLURM reserved ports, reserve a count of ports equal to 
-    the maximum task count on any node plus one (the plus one is a correction).
- -- Do not reset SLURM_TASKS_PER_NODE when srun --preserve-env option is used
-    (needed by OpenMPI).
- -- Fix possible assert failure in task/affinity if a node is configured with
-    more resources than physically exist.
- -- Sview can now resize columns.
- -- Avoid clearing a drained node's reason field when state is changed from
-    down (i.e. returned to service). Note the drain state flag stays set.
-
-* Changes in SLURM 2.0.4
-========================
- -- Permit node suspend/resume logic to be enabled through "scontrol reconfig"
-    given appropriate changes to slurm configuration file.
- -- Check for return codes on functions with warn_unused_result set.
- -- Fix memory leak in getting step information (as used by squeue -s).
- -- Better logging for when job's request bad output file locations.
- -- Fix issue where if user specified non-existent file to write to slurmstepd
-    will regain privileges before sending batch script ended to the controller.
- -- Fix bug when using the priority_multifactor plugin with no associations 
-    yet.
- -- BLUEGENE - we no longer check for the images to sync state.  This was 
-    needed long ago when rebooting blocks wasn't a possibility and should 
-    had been removed when that functionality was available.
- -- Added message about no connection with the database for sacctmgr.
- -- On BlueGene, let srun or salloc exit on SIGINT if slurmctld dies while
-    booting its block.
- -- In select/cons_res fix bug that could result in invalid memory pointer
-    if node configurations in slurm.conf contains 8 or more distinct 
-    socket/core/thread counts.
- -- Modify select/cons_res to recognize updated memory size upon node startup
-    if FastSchedule=0.
- -- Fixed bug if not enforcing associations, but running with them and the 
-    priority/multifactor, the slurmctld will not core dump on processing usage.
- -- QOS will not be reset to the default when added back a previously deleted
-    association.
- -- Do not set a job's virtual memory limit based upon the job's specified
-    memory limit (which should be a real memory limit, not virtual).
- -- BLUEGENE - fix for sinfo/sview for displaying proper node count for nodes 
-    in draining state.
- -- Fix for sview when viewing a certain part of a group (like 1 job) so it 
-    doesn't core when the part is gone.
- -- BLUEGENE - Changed order of SYNC's to be on the front of the list to
-    avoid having a job terminated with a TERM before the SYNC of the 
-    job happens.
- -- Validate configured PluginDir value is a valid directory before trying to
-    use it.
- -- Fix to resolve agent_queue_request symbol from some checkpoint plugins.
- -- Fix possible execve error for sbatch script read from stdin.
- -- Modify handling of user ID/name and group ID/name in the slurm.conf file
-    to properly handle user names that contain all digits. Return error code 
-    from uid_from_string() and gid_from_string() functions rather than a uid of
-    -1, which might be a valid uid or gid on some systems.
- -- Fix in re-calculation of job priorities due to DOWN or DRAINED nodes.
-
-* Changes in SLURM 2.0.3
-========================
- -- Add reservation creation/update flag of Ignore_Jobs to enable the creation
-    of a reservation that overlaps jobs expected to still be running when
-    the reservation starts. This would be especially useful to reserve all 
-    nodes for system maintenance without adjusting time limits of running
-    jobs before creating the reservation. Without this flag, nodes allocated
-    jobs expected to be running when the reservation begins can not be placed 
-    into a reservation.
- -- In task/affinity plugin, add layer of abstraction to logic translating
-    block masks to physical machine masks. Patch from Matthieu Hautreux, CEA.
- -- Fix for setting the node_bitmap in a job to NULL if the job does not 
-    start correctly when expected to start.
- -- Fixed bug in srun --pty logic. Output from the task was split up 
-    arbitrarily into stdout and stderr streams, and sometimes was printed 
-    out of order.
- -- If job requests minimum and maximum node count range with select/cons_res,
-    try to satisfy the higher value (formerly only allocated the minimum).
- -- Fix for checking for a non-existent job when querying steps.
- -- For job steps with the --exclusive option, base initial wait time in 
-    part upon the process ID for better performance with many job steps 
-    started at the same time. Maintain exponential back-off as needed.
- -- Fix for correct step ordering in sview.
- -- Support optional argument to srun and salloc --immediate option. Specify
-    timeout value in seconds for job or step to be allocated resources.
-
-* Changes in SLURM 2.0.2
-========================
- -- Fix, don't remove job details when a job is cancelled while pending.
- -- Do correct type for mktime so garbage isn't returned on 64bit systems 
-    for accounting archival.
- -- Better checking in sacctmgr to avoid infinite loops.
- -- Fix minor memory leak in fake_slurm_step_layout_create()
- -- Fix node weight (scheduling priority) calculation for powered down
-    nodes. Patch from Hongjia Cao, NUDT.
- -- Fix node suspend/resume rate calculations. Patch from Hongjia Cao, NUDT.
- -- Change calculations using ResumeRate and SuspendRate to provide higher
-    resolution.
- -- Log the IP address for incoming messages having an invalid protocol 
-    version number.
- -- Fix for sacct to show jobs that start the same second as the sacct
-    command is issued.
- -- BLUEGENE - Fix for -n option to work on correct cpu counts for each 
-    midplane instead of treating -n as a c-node count.
- -- salloc now sets SLURM_NTASKS_PER_NODE if --ntasks-per-node option is set.
- -- Fix select/linear to properly set a job's count of allocated processors
-    (all processors on the allocated nodes).
- -- Fix select/cons_res to allocate proper CPU count when --ntasks-per-node
-    option is used without a task count in the job request.
- -- Insure that no node is allocated to a job for which the CPU count is less
-    than --ntasks-per-node * --cpus-per-task.
- -- Correct AllocProcs reported by "scontrol show node" when ThreadsPerCore
-    is greater than 1 and select/cons_res is used.
- -- Fix scontrol show config for accounting information when values are 
-    not set in the slurm.conf.
- -- Added a set of SBATCH_CPU_BIND* and SBATCH_MEM_BIND* env variables to keep
-    jobsteps launched from within a batch script from inheriting the CPU and
-    memory affinity that was applied to the batch script. Patch from Matthieu
-    Hautreux, CEA.
- -- Ignore the extra processors on a node above configured size if either 
-    sched/gang or select/cons_res is configured.
- -- Fix bug in tracking memory allocated on a node for select/cons_res plugin.
- -- Fixed a race condition when writing labelled output with a file per task
-    or per node, which potentially closed a file before all data was written.
- -- BLUEGENE - Fix, for if a job comes in spanning both less than and 
-    over 1 midplane in size we check the connection type appropriately.
- -- Make sched/backfill properly schedule jobs with constraints having node 
-    counts. NOTE: Backfill of jobs with constraints having exclusive OR 
-    operators are not fully supported.  
- -- If srun is cancelled by SIGINT, set the job state to cancelled, not 
-    failed.
- -- BLUEGENE - Fix, for if you are setting a subbp into an error mode 
-    where the subbp stated isn't the first ionode in a nodecard.
- -- Fix for backfill to not core when checking shared nodes.
- -- Fix for scontrol to not core when hitting just return in interactive mode.
- -- Improve sched/backfill logic with respect to shared nodes (multiple jobs
-    per node).
- -- In sched/wiki (Maui interface) add job info fields QOS, RCLASS, DMEM and
-    TASKSPERNODE. Patch from Bjorn-Helge Mevik, University of Oslo.
-
-* Changes in SLURM 2.0.1
-========================
- -- Fix, truncate time of start and end for job steps in sacct.
- -- Initialize all messages to slurmdbd. Previously uninitialized string could
-    cause slurmctld to fail with invalid memory reference.
- -- BLUEGENE - Fix, for when trying to finish a torus on a block already 
-    visited.  Even though this may be possible electrically this isn't valid
-    in the underlying infrastructure.
- -- Fix, in mysql plugins change mediumints to int to support full 32bit 
-    numbers.
- -- Add sinfo node state filtering support for NO_RESPOND, POWER_SAVE, FAIL, 
-    MAINT, DRAINED and DRAINING states. The state filter of DRAIN still maps
-    to any node in either DRAINED or DRAINING state.
- -- Fix reservation logic when job requests specific nodes that are already
-    in some reservation the job can not use.
- -- Fix recomputation of a job's end time when allocated nodes which are
-    being powered up. The end time would be set in the past if the job's
-    time limit was INFINITE, resulting in it being prematurely terminated.
- -- Permit regular user to change the time limit of his pending jobs up to
-    the partition's limit.
- -- Fix "-Q" (quiet) option for salloc and sbatch which was previously 
-    ignored.
- -- BLUEGENE - fix for finding odd shaped blocks in dynamic mode.
- -- Fix logic supporting SuspendRate and ResumeRate configuration parameters.
-    Previous logic was changing state of one too many nodes per minute.
- -- Save new reservation state file on shutdown (even if no changes).
- -- Fix, when partitions are deleted the sched and select plugins are notified.
- -- Fix for slurmdbd to create wckeyid's when they don't exist
- -- Fix linking problem that prevented checkpoint/aix from working.
-
-* Changes in SLURM 2.0.0
-========================
- -- Fix for bluegene systems to be able to create 32 node blocks with only 
-    16 psets defined in dynamic layout mode.
- -- Improve srun_cr handling of child srun forking. Patch from Hongjia Cao, 
-    NUDT.
- -- Configuration parameter ResumeDelay replaced by SuspendTimeout and 
-    ResumeTimeout.
- -- BLUEGENE - sview/sinfo now displays correct cnode numbers for drained nodes
-    or blocks in error state.
- -- Fix some batch job launch bugs when powering up suspended nodes.
- -- Added option '-T' for sacct to truncate time of start and end and set
-    default of --starttime to Midnight of current day.
-
-* Changes in SLURM 2.0.0-rc2
-============================
- -- Change fanout logic to start on calling node instead of first node in 
-    message nodelist.
- -- Fix bug so that smap builds properly on Sun Constellation system.
- -- Filter white-space out from node feature specification.
- -- Fixed issue with duration not being honored when updating start time in 
-    reservations.
- -- Fix bug in sched/wiki and sched/wiki2 plugins for reporting job resource
-    allocation properly when node names are configured out of sort order 
-    with more than one numeric suffix (e.g. "tux10-1" is configured after 
-    "tux5-1").
- -- Avoid re-use of job_id (if specified at submit time) when the existing
-    job is in completing state (possible race condition with Moab).
- -- Added SLURM_DISTRIBUTION to env for salloc.
- -- Add support for "scontrol takeover" command for backup controller to 
-    assume control immediately. Patch from Matthieu Hautreux, CEA.
- -- If srun is unable to communicate with the slurmd tasks are now marked as 
-    failed with the controller.
- -- Fixed issues with requeued jobs not being accounted for correctly in 
-    the accounting.
- -- Clear node's POWER_SAVE flag if configuration changes to one lacking a
-    ResumeProgram.
- -- Extend a job's time limit as appropriate due to delays powering up nodes.
- -- If sbatch is used to launch a job step within an existing allocation (as
-    used by LSF) and the required node is powered down, print the message
-    "Job step creation temporarily disabled, retrying", sleep, and retry.
- -- Configuration parameter ResumeDelay added to control how much time must 
-    pass after a node has been suspended before resuming it (e.g. powering it 
-    up).
- -- Fix CPU binding for batch program. Patch from Matthieu Hautreux, CEA.
- -- Fix for front end systems non-responding nodes now show up correctly in
-    sinfo.
-
-* Changes in SLURM 2.0.0-rc1
-============================
- -- Fix bug in preservation of advanced reservations when slurmctld restarts.
- -- Updated perlapi to match correctly with slurm.h structures
- -- Do not install the srun command on BlueGene systems (mpirun must be used to
-    launch tasks).
- -- Corrections to scheduling logic for topology/tree in configurations where 
-    nodes are configured in multiple leaf switches.
- -- Patch from Matthieu Hautreux for backup mysql daemon support.
- -- Changed DbdBackup to DbdBackupHost for slurmdbd.conf file
- -- Add support for spank_strerror() function and improve error handling in
-    general for SPANK plugins.
- -- Added configuration parameter SrunIOTimeout to optionally ping srun's tasks
-    for better fault tolerance (e.g. killed and restarted SLURM daemons on 
-    compute node).
- -- Add slurmctld and slurmd binding to appropriate communications address
-    based upon NodeAddr, ControllerAddr and BackupAddr configuration 
-    parameters. Based upon patch from Matthieu Hautreux, CEA.
-    NOTE: Fails when SlurmDBD is configured with some configurations.
-    NOTE: You must define BIND_SPECIFIC_ADDR to enable this option.
- -- Avoid using powered down nodes when scheduling work if possible. 
-    Fix possible invalid memory reference in power save logic.
-
-* Changes in SLURM 1.4.0-pre13
-==============================
- -- Added new partition option AllocNodes which controls the hosts from 
-    which jobs can be submitted to this partition. From Matthieu Hautreux, CEA.
- -- Better support the --contiguous option for job allocations.
- -- Add new scontrol option: show topology (reports contents of topology.conf 
-    file via RPC if topology/tree plugin is configured).
- -- Add advanced reservation display to smap command.
- -- Replaced remaining references to SLURM_JOBID with SLURM_JOB_ID - except
-    when needed for backwards compatibility.
- -- Fix logic to properly excise a DOWN node from the allocation of a job
-    with the --no-kill option.
- -- The MySQL and PgSQL plugins for accounting storage and job completion are
-    now only built if the underlying database libraries exists (previously
-    the plugins were built to produce a fatal error when used).
- -- BLUEGENE - scontrol show config will now display bluegene.conf information.
-
-* Changes in SLURM 1.4.0-pre12
-==============================
- -- Added support for hard time limit by associations with added configuration 
-    option PriorityUsageResetPeriod. This specifies the interval at which to 
-    clear the record of time used. This is currently only available with the 
-    priority/multifactor plugin.
- -- Added SLURM_SUBMIT_DIR to sbatch's output environment variables.
- -- Backup slurmdbd support implemented.
- -- Update to checkpoint/xlch logic from Hongjia Cao, NUDT.
- -- Added configuration parameter AccountingStorageBackupHost.
-
-* Changes in SLURM 1.4.0-pre11
-==============================
- -- Fix slurm.spec file for RPM build.
-
-* Changes in SLURM 1.4.0-pre10
-==============================
- -- Critical bug fix in task/affinity when the CoresPerSocket is greater
-    than the ThreadsPerCore (invalid memory reference).
- -- Add DebugFlag parameter of "Wiki" to log sched/wiki and wiki2 
-    communications in greater detail.
- -- Add "-d <slurmstepd_path>" as an option to the slurmd daemon to
-    specify a non-standard slurmstepd file, used for testing purposes.
- -- Minor cleanup to crypto/munge plugin.
-    - Restrict uid allowed to decode job credentials in crypto/munge
-    - Get slurm user id early in crypto/munge
-    - Remove buggy error code handling in crypto/munge
- -- Added sprio command - works only with the priority/multifactor plugin
- -- Add real topology plugin infrastructure (it was initially added 
-    directly into slurmctld code). To specify topology information,
-    set TopologyType=topology/tree and add configuration information
-    to a new file called topology.conf. See "man topology.conf" or
-    topology.html web page for details.
- -- Set "/proc/self/oom_adj" for slurmd and slurmstepd daemons based upon
-    the values of SLURMD_OOM_ADJ and SLURMSTEPD_OOM_ADJ environment 
-    variables. This can be used to prevent daemons being killed when
-    a node's memory is exhausted. Based upon patch by Hongjia  Cao, NUDT.
- -- Fix several bugs in task/affinity: cpuset logic was broken and 
-    --cpus-per-task option not properly handled.
- -- Ensure slurmctld adopts SlurmUser GID as well as UID on startup.
-
-* Changes in SLURM 1.4.0-pre9
-=============================
- -- OpenMPI users only: Add srun logic to automatically recreate and 
-    re-launch a job step if the step fails with a reserved port conflict.
- -- Added TopologyPlugin configuration parameter.
- -- Added switch topology data structure to slurmctld (for use by select 
-    plugin) and load it based upon new slurm.conf parameters: SwitchName, 
-    Nodes, Switches and LinkSpeed.
- -- Modify select/linear and select/cons_res plugins to optimize resource
-    allocation with respect to network topology.
- -- Added  support for new configuration parameter EpilogSlurmctld (executed 
-    by slurmctld daemon).
- -- Added checkpoint/blcr plugin, SLURM now support job checkpoint/restart 
-    using BLCR. Patch from Hongjia Cao, NUDT, China.
- -- Made a variety of new environment variables available to PrologSlurmctld
-    and EpilogSlurmctld. See the "Prolog and Epilog Scripts" section of the 
-    slurm.conf man page for details.
- -- NOTE: Cold-start (without preserving state) required for upgrade from 
-    version 1.4.0-pre8.
-
-* Changes in SLURM 1.4.0-pre8
-=============================
- -- In order to create a new partition using the scontrol command, use
-    the "create" option rather than "update" (which will only operate
-    upon partitions that already exist).
- -- Added environment variable SLURM_RESTART_COUNT to batch jobs to
-    indicate the count of job restarts made.
- -- Added sacctmgr command "show config".
- -- Added the scancel option --nodelist to cancel any jobs running on a
-    given list of nodes.
- -- Add partition-specific DefaultTime (default time limit for jobs, 
-    if not specified use MaxTime for the partition. Patch from Par
-    Andersson, National Supercomputer Centre, Sweden.
- -- Add support for the scontrol command to be able change the Weight
-    associated with nodes. Patch from Krishnakumar Ravi[KK] (HP).
- -- Add DebugFlag configuration option of "CPU_Bind" for detailed CPU
-    binding information to be logged.
- -- Fix some significant bugs in task binding logic (possible infinite loops
-    and memory corruption).
- -- Add new node state flag of NODE_STATE_MAINT indicating the node is in
-    a reservation of type MAINT.
- -- Modified task/affinity plugin to automatically bind tasks to sockets,
-    cores, or threads as appropriate based upon resource allocation and
-    task count. User can override with srun's --cpu_bind option. 
- -- Fix bug in backfill logic for select/cons_res plugin, resulted in 
-    error "cons_res:_rm_job_from_res: node_state mis-count".
- -- Add logic to bind a batch job to the resources allocated to that job.
- -- Add configuration parameter MpiParams for (future) OpenMPI port 
-    management. Add resv_port_cnt and resv_ports fields to the job step 
-    data structures. Add environment variable SLURM_STEP_RESV_PORTS to
-    show what ports are reserved for a job step.
- -- Add support for SchedulerParameters=interval=<sec> to control the time
-    interval between executions of the backfill scheduler logic.
- -- Preserve record of last job ID in use even when doing a cold-start unless
-    there is no job state file or there is a change in its format (which only 
-    happens when there is a change in SLURM's major or minor version number: 
-    v1.3 -> v1.4).
- -- Added new configuration parameter KillOnBadExit to kill a job step as soon
-    as any task of a job step exits with a non-zero exit code. Patch based
-    on work from Eric Lin, Bull.
- -- Add spank plugin calls for use by salloc and sbatch command, see 
-    "man spank" for details.
- -- NOTE: Cold-start (without preserving state) required for upgrade from 
-    version 1.4.0-pre7.
-
-* Changes in SLURM 1.4.0-pre7
-=============================
- -- Bug fix for preemption with select/cons_res when there are no idle nodes.
- -- Bug fix for use of srun options --exclusive and --cpus-per-task together
-    for job step resource allocation (tracking of cpus in use was bad).
- -- Added the srun option --preserve-env to pass the current values of 
-    environment variables SLURM_NNODES and SLURM_NPROCS through to the 
-    executable, rather than computing them from commandline parameters.
- -- For select/cons_res or sched/gang only: Validate a job's resource 
-    allocation socket and core count on each allocated node. If the node's
-    configuration has been changed, then abort the job.
- -- For select/cons_res or sched/gang only: Disable updating a node's 
-    processor count if FastSchedule=0. Administrators must set a valid
-    processor count although the memory and disk space configuration can
-    be loaded from the compute node when it starts.
- -- Add configure option "--disable-iso8601" to disable SLURM use of ISO 8601
-    time format at the time of SLURM build. Default output for all commands
-    is now ISO 8601 (yyyy-mm-ddThh:mm:ss).
- -- Add support for scontrol to explicity power a node up or down using the
-    configured SuspendProg and ResumeProg programs.
- -- Fix bug in select/cons_res logic for tracking the number of allocated
-    CPUs on a node when a partition's Shared value is YES or FORCE.
- -- Added configure options "--enable-cray-xt" and "--with-apbasil=PATH" for
-    eventual support of Cray-XT systems.
-
-* Changes in SLURM 1.4.0-pre6
-=============================
- -- Fix job preemption when sched/gang and select/linear are configured with
-    non-sharing partitions.
- -- In select/cons_res insure that required nodes have available resources.
-
-* Changes in SLURM 1.4.0-pre5
-=============================
- -- Correction in setting of SLURM_CPU_BIND environment variable.
- -- Rebuild slurmctld's job select_jobinfo->node_bitmap on restart/reconfigure
-    of the daemon rather than restoring the bitmap since the nodes in a system
-    can change (be added or removed).
- -- Add configuration option "--with-cpusetdir=PATH" for non-standard 
-    locations.
- -- Get new multi-core data structures working on BlueGene systems.
- -- Modify PMI_Get_clique_ranks() to return an array of integers rather 
-    than a char * to satisfy PMI standard. Correct logic in 
-    PMI_Get_clique_size() for when srun --overcommit option is used.
- -- Fix bug in select/cons_res, allocated a job all of the processors on a 
-    node when the --exclusive option is specified as a job submit option.
- -- Add NUMA cpu_bind support to the task affinity plugin. Binds tasks to
-    a set of CPUs that belong to a NUMA locality domain with the appropriate
-    --cpu-bind option (ldoms, rank_ldom, map_ldom, and mask_ldom), see
-    "man srun" for more information.
-
-* Changes in SLURM 1.4.0-pre4
-=============================
- -- For task/affinity, force jobs to use a particular task binding by setting
-    the TaskPluginParam configuration parameter rather than slurmd's
-    SLURM_ENFORCED_CPU_BIND environment variable.
- -- Enable full preemption of jobs by partition with select/cons_res 
-    (cons_res_preempt.patch from Chris Holmes, HP).
- -- Add configuration parameter DebugFlags to provide detailed logging for
-    specific subsystems (steps and triggers so far).
- -- srun's --no-kill option is passed to slurmctld so that a job step is 
-    killed even if the node where srun executes goes down (unless the 
-    --no-kill option is used, previous termination logic would fail if 
-    srun was not responding).
- -- Transfer a job step's core bitmap from the slurmctld to the slurmd
-    within the job step credential.
- -- Add cpu_bind, cpu_bind_type, mem_bind and mem_bind_type to job allocation
-    request and job_details structure in slurmctld. Add support to --cpu_bind
-    and --mem_bind options from salloc and sbatch commands.
-
-* Changes in SLURM 1.4.0-pre3
-=============================
- -- Internal changes: CPUs per node changed from 32-bit to 16-bit size.
-    Node count fields changed from 16-bit to 32-bit size in some structures.
- -- Remove select plugin functions select_p_get_extra_jobinfo(),
-    select_p_step_begin() and select_p_step_fini().
- -- Remove the following slurmctld job structure fields: num_cpu_groups,
-    cpus_per_node, cpu_count_reps, alloc_lps_cnt, alloc_lps, and used_lps.
-    Use equivalent fields in new "select_job" structure, which is filled
-    in by the select plugins.
- -- Modify mem_per_task in job step request from 16-bit to 32-bit size.
-    Use new "select_job" structure for the job step's memory management.
- -- Add core_bitmap_job to slurmctld's job step structure to identify
-    which specific cores are allocated to the step.
- -- Add new configuration option OverTimeLimit to permit jobs to exceed 
-    their (soft) time limit by a configurable amount. Backfill scheduling
-    will be based upon the soft time limit.
- -- Remove select_g_get_job_cores(). That data is now within the slurmctld's
-    job structure.
-
-* Changes in SLURM 1.4.0-pre2
-=============================
- -- Remove srun's --ctrl-comm-ifhn-addr option (for PMI/MPICH2). It is no
-    longer needed.
- -- Modify power save mode so that nodes can be powered off when idle. See
-    https://computing.llnl.gov/linux/slurm/power_save.html or 
-    "man slurm.conf" (SuspendProgram and related parameters) for more 
-    information.
- -- Added configuration parameter PrologSlurmctld, which can be used to boot
-    nodes into a particular state for each job. See "man slurm.conf" for 
-    details.
- -- Add configuration parameter CompleteTime to control how long to wait for 
-    a job's completion before allocating already released resources to pending
-    jobs. This can be used to reduce fragmentation of resources. See
-    "man slurm.conf" for details.
- -- Make default CryptoType=crypto/munge. OpenSSL is now completely optional.
- -- Make default AuthType=auth/munge rather than auth/none.
- -- Change output format of "sinfo -R" from "%35R %N" to "%50R %N".
-
-* Changes in SLURM 1.4.0-pre1
-=============================
- -- Save/restore a job's task_distribution option on slurmctld restart.
-    NOTE: SLURM must be cold-started on conversion from version 1.3.x.
- -- Remove task_mem from job step credential (only job_mem is used now).
- -- Remove --task-mem and --job-mem options from salloc, sbatch and srun
-    (use --mem-per-cpu or --mem instead).
- -- Remove DefMemPerTask from slurm.conf (use DefMemPerCPU or DefMemPerNode
-    instead).
- -- Modify slurm_step_launch API call. Move launch host from function argument
-    to element in the data structure slurm_step_launch_params_t, which is
-    used as a function argument.
- -- Add state_reason_string to job state with optional details about why
-    a job is pending.
- -- Make "scontrol show node" output match scontrol input for some fields
-    ("Cores" changed to "CoresPerSocket", etc.).
- -- Add support for a new node state "FUTURE" in slurm.conf. These node records
-    are created in SLURM tables for future use without a reboot of the SLURM
-    daemons, but are not reported by any SLURM commands or APIs.
-
-* Changes in SLURM 1.3.17
-=========================
- -- Fix bug in configure script that can clear user specified LIBS.
-
-* Changes in SLURM 1.3.16
-=========================
- -- Fix memory leak in forward logic of tree message passing.
- -- Fix job exit code recorded for srun job allocation.
- -- Bluegene - Bug fix for too many parameters being passed to a debug statement
- -- Bluegene - Bug fix for systems running more than 8 in the X dim running
-    Dynamic mode.
-
-* Changes in SLURM 1.3.15
-=========================
- -- Fix bug in squeue command with sort on job name ("-S j" option) for jobs
-    that lack a name. Previously generated an invalid memory reference.
- -- Permit the TaskProlog to write to the job's standard output by writing
-    a line containing the prefix "print " to it's standard output.
- -- Fix for making the slurmdbd agent thread start up correctly when 
-    stopped and then started again.
- -- Add squeue option to report jobs by account (-U or --account). Patch from
-    Par Andersson, National Supercomputer Centre, Sweden.
- -- Add -DNUMA_VERSION1_COMPATIBILITY to Makefile CFLAGS for proper behavior
-    when building with NUMA version 2 APIs.
- -- BLUEGENE - slurm works on a BGP system.
- -- BLUEGENE - slurm handles HTC blocks
- -- BLUEGENE - Added option DenyPassthrough in the bluegene.conf.  Can be set
-    to any combination of X,Y,Z to not allow passthroughs when running in 
-    dynamic layout mode.
- -- Fix bug in logic to remove a job's dependency, could result in abort.
- -- Add new error message to sched/wiki and sched/wiki2 (Maui and Moab) for
-    STARTJOB request: "TASKLIST includes non-responsive nodes".
- -- Fix bug in select/linear when used with sched/gang that can result in a 
-    job's required or excluded node specification being ignored.
- -- Add logic to handle message connect timeouts (timed-out.patch from 
-    Chuck Clouston, Bull).
- -- BLUEGENE - CFLAGS=-m64 is no longer required in configure
- -- Update python-hostlist code from Kent Engström (NSC) to v1.5
-    - Add hostgrep utility to search for lines matching a hostlist.
-    - Make each "-" on the command line count as one hostlist argument.
-      If multiple hostslists are given on stdin they are combined to a
-      union hostlist before being used in the way requested by the
-      options.
- -- When using -j option in sacct no user restriction will be applied unless
-    specified with the -u option.
- -- For sched/wiki and sched/wiki2, change logging of wiki message traffic
-    from debug() to debug2(). Only seen if SlurmctldDebug is configured to
-    6 or higher.
- -- Significant speed up for association based reports in sreport
- -- BLUEGENE - fix for checking if job can run with downed nodes.  Previously 
-    sbatch etc would tell you node configuration not available; now jobs are 
-    accepted but held until nodes are back up.
- -- Fix in accounting so if any nodes are removed from the system when they 
-    were previously down will be recorded correctly.
- -- For sched/wiki2 (Moab), add flag to note if job is restartable and
-    prevent deadlock if job requeue fails.
- -- Modify squeue to return non-zero exit code on failure. Patch from
-    Par Andersson (NSC).
- -- Correct logic in select/cons_res to allocate a job the maximum node
-    count from a range rather than minimum (e.g. "sbatch -N1-4 my.sh").
- -- In accounting_storage/filetxt and accounting_storage/pgsql fix 
-    possible invalid memory reference when a job lacks a name.
- -- Give srun command an exit code of 1 if the prolog fails.
- -- BLUEGENE - allows for checking nodecard states in the system instead 
-    of midplane state so as to not down an entire midplane if you don't 
-    have to.
- -- BLUEGENE - fix creation of MESH blocks 
- -- BLUEGENE - on job cancellation we call jm_cancel_job and then wait until
-    the system cleans up the job.  Before we would send a SIGKILL right 
-    at the beginning. 
- -- BLUEGENE - if a user specifies a node count that can not be met the job 
-    will be refused instead of before the plugin would search for the next 
-    larger size that could be created.  This prevents users asking for 
-    things that can't be created, and then getting something back they might 
-    not be expecting.
-
-* Changes in SLURM 1.3.14
-=========================
- -- SECURITY BUG: Fix in sbcast logic that permits users to write files based
-    upon supplemental groups of the slurmd daemon. Similar logic for event
-    triggers if slurmctld is run as user root (not typical).
-
-* Changes in SLURM 1.3.13
-=========================
- -- Added ability for slurmdbd to archive and purge step and/or job records.
- -- Added DefaultQOS as an option to slurmdbd.conf for when clusters are 
-    added the default will be set to this if none is given in the sacctmgr line.
- -- Added configure option --enable-sun-const for Sun Constellation system with
-    3D torus interconnect. Supports proper smap and sview displays for 3-D
-    topology. Node names are automatically put into Hilbert curve order given
-    a one-line nodelist definition in slurm.conf (e.g. NodeNames=sun[000x533]). 
- -- Fixed bug in parsing time for sacct and sreport to pick the correct year if
-    none is specified.
- -- Provide better scheduling with overlapping partitions (when a job can not
-    be scheduled due to insufficient resources, reserve the specific nodes
-    associated with that partition rather than blocking all partitions with
-    any overlapping nodes).
- -- Correct logic to log in a job's stderr that it was "CANCELLED DUE TO 
-    NODE FAILURE" rather than just "CANCELLED".
- -- Fix to crypto/openssl plugin that could result in job launch requests
-    being spoofed through the use of an improperly formed credential. This bug 
-    could permit a user to launch tasks on compute nodes not allocated for 
-    their use, but will NOT permit them to run tasks as another user. For more 
-    information see http://www.ocert.org/advisories/ocert-2008-016.html
-
-* Changes in SLURM 1.3.12
-=========================
- -- Added support for Workload Characteristic Key (WCKey) in accounting.  The
-    WCkey is something that can be used in accounting to group associations
-    together across clusters or within clusters that are not related.  Use 
-    the --wckey option in srun, sbatch or salloc or set the SLURM_WCKEY env
-    var to have this set. Use sreport with the wckey option to view reports.
-    THIS CHANGES THE RPC LEVEL IN THE SLURMDBD.  YOU MUST UPGRADE YOUR SLURMDBD
-    BEFORE YOU UPGRADE THE REST OF YOUR CLUSTERS.  THE NEW SLURMDBD WILL TALK 
-    TO OLDER VERSIONS OF SLURM FINE.
- -- Added configuration parameter BatchStartTimeout to control how long to 
-    allow for a batch job prolog and environment loading (for Moab) to run.
-    Previously if job startup took too long, a batch job could be cancelled
-    before fully starting with a SlurmctldLog message of "Master node lost 
-    JobId=#, killing it".  See "man slurm.conf" for details.
- -- For a job step, add support for srun's --nodelist and --exclusive options
-    to be used together.
- -- On slurmstepd failure, set node state to DRAIN rather than DOWN.
- -- Fix bug in select/cons_res that would incorrectly satisfy a task's
-    --cpus-per-task specification by allocating the task CPUs on more than
-    one node.
- -- Add support for hostlist expressions containing up to two numeric 
-    expressions (e.g. "rack[0-15]_blade[0-41]").
- -- Fix bug in slurmd message forwarding which left file open in the case of
-    some communication failures.
- -- Correction to sinfo node state information on BlueGene systems. DRAIN
-    state was replaced with ALLOC or IDLE under some situations.
- -- For sched/wiki2 (Moab), strip quotes embedded within job names from the
-    name reported.
- -- Fix bug in jobcomp/script that could cause the slurmctld daemon to exit
-    upon reconfiguration ("scontrol reconfig" or SIGHUP).
- -- Fix to sinfo, don't print a node's memory size or tmp_disk space with 
-    suffix of "K" or "M" (thousands or millions of megabytes).
- -- Improve efficiency of scheduling jobs into partitions which do not overlap.
- -- Fixed sreport user top report to only display the limit specified 
-    instead of all users.
-
-* Changes in SLURM 1.3.11
-=========================
- -- Bluegene/P support added (minimally tested, but builds correctly).
- -- Fix infinite loop when using accounting_storage/mysql plugin either from
-    the slurmctld or slurmdbd daemon.
- -- Added more thread safety for assoc_mgr in the controller.
- -- For sched/wiki2 (Moab), permit clearing of a job's dependencies with the 
-    JOB_MODIFY option "DEPEND=0".
- -- Do not set a running or pending job's EndTime when changing its time 
-    limit.
- -- Fix bug in use of "include" parameter within the plugstack.conf file.
- -- Fix bug in the parsing of negative numeric values in configuration files.
- -- Propagate --cpus-per-task parameter from salloc or sbatch input line to
-    the SLURM_CPUS_PER_TASK environment variable in the spawned shell for 
-    srun to use.
- -- Add support for srun --cpus_per_task=0. This can be used to spawn tasks
-    without allocating resources for the job step from the job's allocation
-    when running multiple job steps with the --exclusive option.
- -- Remove registration messages from saved messages when bringing down cluster.
-    Without this, a deadlock occurs if the wrong cluster name is given.
- -- Correction to build for srun debugger (export symbols).
- -- sacct will now display more properly allocations made with salloc with only 
-    one step.
- -- Altered sacctmgr, sreport to look at complete option before applying. 
-    Before we would only look at the first determined significant characters.
- -- BLUGENE - in overlap mode marking a block to error state will now end
-    jobs on overlapping blocks and free them.
- -- Give a batch job 20 minutes to start before considering it missing and 
-    killing it (long delay could result from slurmd being paged out). Changed
-    the log message from "Master node lost JobId=%u, killing it" to "Batch 
-    JobId=%u missing from master node, killing it".
- -- Avoid "Invalid node id" error when a job step within an existing job 
-    allocation specifies a node count which is less than the node count
-    allocated in order to satisfy the task count specification (e.g. 
-    "srun -n16 -N1 hostname" on allocation of 16 one-CPU nodes).
- -- For sched/wiki2 (Moab) disable changing a job's name after it has begun
-    execution.
-
-* Changes in SLURM 1.3.10
-=========================
- -- Fix several bugs in the hostlist functions:
-    - Fix hostset_insert_range() to do proper accounting of hl->nhosts (count).
-    - Avoid assertion failure when calling hostset_create(NULL).
-    - Fix return type of hostlist and hostset string functions from size_t to
-      ssize_t.
-    - Add check for NULL return from hostlist_create().
-    - Rewrite of hostrange_hn_within(), avoids reporting "tst0" in the hostlist
-      "tst".
- -- Modify squeue to accept "--nodes=<hostlist>" rather than 
-    "--node=<node_name>" and report all jobs with any allocated nodes from set
-    of nodes specified. From Par Anderson, National Supercomputer Centre, 
-    Sweden.
- -- Fix bug preventing use of TotalView debugger with TaskProlog configured or 
-    or srun's --task-prolog option.
- -- Improve reliability of batch job requeue logic in the event that the slurmd
-    daemon is temporarily non-responsive (for longer than the configured
-    MessageTimeout value but less than the SlurmdTimeout value).
- -- In sched/wiki2 (Moab) report a job's MAXNODES (maximum number of permitted
-    nodes).
- -- Fixed SLURM_TASKS_PER_NODE to live up more to its name on an allocation. 
-    Will now contain the number of tasks per node instead of the number of CPUs
-    per node.  This is only for a resource allocation. Job steps already have 
-    the environment variable set correctly.
- -- Configuration parameter PropagateResourceLimits has new option of "NONE".
- -- User's --propagate options take precedence over PropagateResourceLimits
-    configuration parameter in both srun and sbatch commands.
- -- When Moab is in use (salloc or sbatch is executed with the --get-user-env
-    option to be more specific), load the user's default resource limits rather
-    than propagating the Moab daemon's limits.
- -- Fix bug in slurmctld restart logic for recovery of batch jobs that are
-    initiated as a job step rather than an independent job (used for LSF).
- -- Fix bug that can cause slurmctld restart to fail, bug introduced in SLURM
-    version 1.3.9. From Eygene Ryabinkin, Kurchatov Institute, Russia.
- -- Permit slurmd configuration parameters to be set to new values from 
-    previously unset values.
-
-* Changes in SLURM 1.3.9
-========================
- -- Fix jobs being cancelled by ctrl-C to have correct cancelled state in 
-    accounting.
- -- Slurmdbd will only cache user data, made for faster start up
- -- Improved support for job steps in FRONT_END systems
- -- Added support to dump and load association information in the controller
-    on start up if slurmdbd is unresponsive
- -- BLUEGENE - Added support for sched/backfill plugin
- -- sched/backfill modified to initiate multiple jobs per cycle.
- -- Increase buffer size in srun to hold task list expressions. Critical 
-    for jobs with 16k tasks or more.
- -- Added support for eligible jobs and downed nodes to be sent to accounting
-    from the controller the first time accounting is turned on.
- -- Correct srun logic to support --tasks-per-node option without task count.
- -- Logic in place to handle multiple versions of RPCs within the slurmdbd. 
-    THE SLURMDBD MUST BE UPGRADED TO THIS VERSION BEFORE UPGRADING THE 
-    SLURMCTLD OR THEY WILL NOT TALK.  
-    Older versions of the slurmctld will continue to talk to the new slurmdbd.
- -- Add support for new job dependency type: singleton. Only one job from a 
-    given user with a given name will execute with this dependency type.
-    From Matthieu Hautreux, CEA.
- -- Updated contribs/python/hostlist to version 1.3: See "CHANGES" file in
-    that directory for details. From Kent Engström, NSC.
- -- Add SLURM_JOB_NAME environment variable for jobs submitted using sbatch.
-    In order to prevent the job steps from all having the same name as the 
-    batch job that spawned them, the SLURM_JOB_NAME environment variable is
-    ignored when setting the name of a job step from within an existing 
-    resource allocation.
- -- For use with sched/wiki2 (Moab only), set salloc's default shell based 
-    upon the user who the job runs as rather than the user submitting the job 
-    (user root).
- -- Fix to sched/backfill when job specifies no time limit and the partition
-    time limit is INFINITE.
- -- Validate a job's constraints (node features) at job submit or modification 
-    time. Major re-write of resource allocation logic to support more complex
-    job feature requests.
- -- For sched/backfill, correct logic to support job constraint specification
-    (e.g. node features).
- -- Correct power save logic to avoid trying to wake DOWN node. From Matthieu
-    Hautreux, CEA.
- -- Cancel a job step when one of its nodes goes DOWN based upon the job 
-    step's --no-kill option, by default the step is killed (previously the 
-    job step remained running even without the --no-kill option).
- -- Fix bug in logic to remove whitespace from plugstack.conf.
- -- Add new configuration parameter SallocDefaultCommand to control what 
-    shell that salloc launches by default.
- -- When enforcing PrivateData configuration parameter, failures return 
-    "Access/permission denied" rather than "Invalid user id".
- -- From sbatch and srun, if the --dependency option is specified then set 
-    the environment variable SLURM_JOB_DEPENDENCY to the same value.
- -- In plugin jobcomp/filetxt, use ISO8601 formats for time by default (e.g. 
-    YYYY-MM-DDTHH:MM:SS rather than MM/DD-HH:MM:SS). This restores the default
-    behavior from Slurm version 1.2. Change the value of USE_ISO8601 in
-    src/plugins/jobcomp/filetxt/jobcomp_filetxt.c to revert the behavior.
- -- Add support for configuration option of ReturnToService=2, which will 
-    return a DOWN node to use if the node was previously set DOWN for any reason.
- -- Removed Gold accounting plugin.  This plugin was to be used for accounting 
-    but has not been maintained and is no longer needed.  If using this
-    please contact slurm-dev@llnl.gov.
- -- When not enforcing associations and running accounting if a user 
-    submits a job to an account that does not have an association on the 
-    cluster the account will be changed to the default account to help 
-    avoid trash in the accounting system.  If the users default account 
-    does not have an association on the cluster the requested account 
-    will be used.
- -- Add configuration parameter "--have-front-end" to define HAVE_FRONT_END 
-    in config.h and run slurmd only on a front end (suitable only for SLURM
-    development and testing).
-
-* Changes in SLURM 1.3.8
-========================
- -- Added PrivateData flags for Users, Usage, and Accounts to Accounting. 
-    If using slurmdbd, set in the slurmdbd.conf file. Otherwise set in the 
-    slurm.conf file.  See "man slurm.conf" or "man slurmdbd.conf" for details.
- -- Reduce frequency of resending job kill RPCs. Helpful in the event of 
-    network problems or down nodes.
- -- Fix memory leak caused under heavy load when running with select/cons_res
-    plus sched/backfill.
- -- For salloc, if no local command is specified, execute the user's default
-    shell.
- -- BLUEGENE - patch to make sure when starting a job blocks required to be
-    freed are checked to make sure no job is running on them.  If one is found
-    we will requeue the new job.  No job will be lost.
- -- BLUEGENE - Set MPI environment variables from salloc.
- -- BLUEGENE - Fix threading issue for overlap mode
- -- Reject batch scripts containing DOS linebreaks.
- -- BLUEGENE - Added wait for block boot to salloc
-
-* Changes in SLURM 1.3.7
-========================
- -- Add jobid/stepid to MESSAGE_TASK_EXIT to address race condition when 
-    a job step is cancelled, another is started immediately (before the 
-    first one completely terminates) and ports are reused. 
-    NOTE: This change requires that SLURM be updated on all nodes of the
-    cluster at the same time. There will be no impact upon currently running
-    jobs (they will ignore the jobid/stepid at the end of the message).
- -- Added Python module to process hostlists as used by SLURM. See
-    contribs/python/hostlist. Supplied by Kent Engstrom, National
-    Supercomputer Centre, Sweden.
- -- Report task termination due to signal (restored functionality present
-    in slurm v1.2).
- -- Remove sbatch test for script size being no larger than 64k bytes.
-    The current limit is 4GB.
- -- Disable FastSchedule=0 use with SchedulerType=sched/gang. Node 
-    configuration must be specified in slurm.conf for gang scheduling now.
- -- For sched/wiki and sched/wiki2 (Maui or Moab scheduler) disable the ability
-    of a non-root user to change a job's comment field (used by Maui/Moab for
-    storing scheduler state information).
- -- For sched/wiki (Maui) add pending job's future start time to the state
-    info reported to Maui.
- -- Improve reliability of job requeue logic on node failure.
- -- Add logic to ping non-responsive nodes even if SlurmdTimeout=0. This permits
-    the node to be returned to use when it starts responding rather than 
-    remaining in a non-usable state.
- -- Honor HealthCheckInterval values that are smaller than SlurmdTimeout.
- -- For non-responding nodes, log them all on a single line with a hostlist 
-    expression rather than one line per node. Frequency of log messages is 
-    dependent upon SlurmctldDebug value from 300 seconds at SlurmctldDebug<=3
-    to 1 second at SlurmctldDebug>=5.
- -- If a DOWN node is resumed, set its state to IDLE & NOT_RESPONDING and 
-    ping the node immediately to clear the NOT_RESPONDING flag.
- -- Log that a job's time limit is reached, but don't send SIGXCPU.
- -- Fixed gid to be set in slurmstepd when run by root
- -- Changed getpwent to getpwent_r in the slurmctld and slurmd
- -- Increase timeout on most slurmdbd communications to 60 secs (time for
-    substantial database updates).
- -- Treat srun option of --begin= with a value of now without a numeric
-    component as a failure (e.g. "--begin=now+hours").
- -- Eliminate a memory leak associated with notifying srun of allocated
-    nodes having failed.
- -- Add scontrol shutdown option of "slurmctld" to just shutdown the 
-    slurmctld daemon and leave the slurmd daemons running.
- -- Do not require JobCredentialPrivateKey or JobCredentialPublicCertificate
-    in slurm.conf if using CryptoType=crypto/munge.
- -- Remove SPANK support from sbatch. 
-
-* Changes in SLURM 1.3.6
-========================
- -- Add new function to get information for a single job rather than always
-    getting information for all jobs. Improved performance of some commands. 
-    NOTE: This new RPC means that the slurmctld daemons should be updated
-    before or at the same time as the compute nodes in order to process it.
- -- In salloc, sbatch, and srun replace --task-mem options with --mem-per-cpu
-    (--task-mem will continue to be accepted for now, but is not documented).
-    Replace DefMemPerTask and MaxMemPerTask with DefMemPerCPU, DefMemPerNode,
-    MaxMemPerCPU and MaxMemPerNode in slurm.conf (old options still accepted
-    for now, but mapped to "PerCPU" parameters and not documented). Allocate
-    a job's memory at the same time that processors are allocated based
-    upon the --mem or --mem-per-cpu option rather than when job steps are
-    initiated.
- -- Altered QOS in accounting to be a list of admin defined states, an
-    account or user can have multiple QOS's now.  They need to be defined using
-    'sacctmgr add qos'.  They are no longer an enum.  If none are defined
-    Normal will be the QOS for everything.  Right now this is only for use 
-    with MOAB.  Does nothing outside of that.
- -- Added spank_get_item support for field S_STEP_CPUS_PER_TASK.
- -- Make corrections in spank_get_item for field S_JOB_NCPUS, previously 
-    reported task count rather than CPU count.
- -- Convert configuration parameter PrivateData from on/off flag to have
-    separate flags for job, partition, and node data. See "man slurm.conf"
-    for details.
- -- Fix bug, failed to load DisableRootJobs configuration parameter.
- -- Altered sacctmgr to always return a non-zero exit code on error and send 
-    error messages to stderr.
-
-* Changes in SLURM 1.3.5
-========================
- -- Fix processing of auth/munge authentication key for messages originating 
-    in slurmdbd and sent to slurmctld. 
- -- If srun is allocating resources (not within sbatch or salloc) and MaxWait
-    is configured to a non-zero value then wait indefinitely for the resource
-    allocation rather than aborting the request after MaxWait time.
- -- For Moab only: add logic to reap defunct "su" processes that are spawned by
-    slurmd to load user's environment variables.
- -- Added more support for "dumping" account information to a flat file and 
-    read in again to protect data in case something bad happens to the database.
- -- Sacct will now report account names for job steps.
- -- For AIX: Remove MP_POERESTART_ENV environment variable, disabling 
-    poerestart command. User must explicitly set MP_POERESTART_ENV before 
-    executing poerestart.
- -- Put back notification that a job has been allocated resources when it was
-    pending.
-
-* Changes in SLURM 1.3.4
-========================
- -- Some updates to man page formatting from Gennaro Oliva, ICAR.
- -- Smarter loading of plugins (doesn't stat every file in the plugin dir)
- -- In sched/backfill avoid trying to schedule jobs on DOWN or DRAINED nodes.
- -- forward exit_code from step completion to slurmdbd
- -- Add retry logic to socket connect() call from client which can fail 
-    when the slurmctld is under heavy load.
- -- Fixed bug when adding associations to add correctly.
- -- Added support for associations for user root.
- -- For Moab, sbatch --get-user-env option processed by slurmd daemon
-    rather than the sbatch command itself to permit faster response
-    for Moab.
- -- IMPORTANT FIX: This only affects use of select/cons_res when allocating
-    resources by core or socket, not by CPU (default for SelectTypeParameter). 
-    We are not saving a pending job's task distribution, so after restarting
-    slurmctld, select/cons_res was over-allocating resources based upon an 
-    invalid task distribution value. Since we can't save the value without 
-    changing the state save file format, we'll just set it to the default 
-    value for now and save it in Slurm v1.4. This may result in a slight 
-    variation on how sockets and cores are allocated to jobs, but at least 
-    resources will not be over-allocated.
- -- Correct logic in accumulating resources by node weight when more than 
-    one job can run per node (select/cons_res or partition shared=yes|force).
- -- slurm.spec file updated to avoid creating empty RPMs. RPM now *must* be
-    built with correct specification of which packages to build or not build.
-    See the top of the slurm.spec file for information about how to control
-    package building specification.
- -- Set SLURM_JOB_CPUS_PER_NODE for jobs allocated using the srun command.
-    It was already set for salloc and sbatch commands.
- -- Fix to handle suspended jobs that were cancelled in accounting
- -- BLUEGENE - fix to only include bps given in a name from the bluegene.conf 
-    file.
- -- For select/cons_res: Fix record-keeping for core allocations when more 
-    than one partition uses a node or there is more than one socket per node.
- -- In output for "scontrol show job" change "StartTime" header to "EligibleTime"
-    for pending jobs to accurately describe what is reported.
- -- Add more slurmdbd.conf parameters: ArchiveScript, ArchiveAge, JobPurge, and
-    StepPurge (not fully implemented yet).
- -- Add slurm.conf parameter EnforcePartLimits to reject jobs which exceed a
-    partition's size and/or time limits rather than leaving them queued for a
-    later change in the partition's limits. NOTE: Not reported by
-    "scontrol show config" to avoid changing RPCs. It will be reported in 
-    SLURM version 1.4.
- -- Added idea of coordinator to accounting.  A coordinator can add associations
-    between existing users to the account or any sub-account they are 
-    coordinator to.  They can also add/remove other coordinators to those 
-    accounts.
- -- Add support for Hostname and NodeHostname in slurm.conf being fully 
-    qualified domain names (by Vijay Ramasubramanian, University of Maryland). 
-    For more information see "man slurm.conf".
-
-* Changes in SLURM 1.3.3
-========================
- -- Add mpi_openmpi plugin to the main SLURM RPM.
- -- Prevent invalid memory reference when using srun's --cpu_bind=cores option
-    (slurm-1.3.2-1.cea1.patch from Matthieu Hautreux, CEA).
- -- Task affinity plugin modified to support a particular cpu bind type: cores,
-    sockets, threads, or none. Accomplished by setting an environment variable
-    SLURM_ENFORCE_CPU_TYPE (slurm-1.3.2-1.cea2.patch from Matthieu Hautreux, 
-    CEA).
- -- For BlueGene only, log "Prolog failure" once per job not once per node.
- -- Reopen slurmctld log file after reconfigure or SIGHUP is received.
- -- In TaskPlugin=task/affinity, fix possible infinite loop for slurmd.
- -- Accounting rollup works for mysql plugin.  Automatic rollup when using 
-    slurmdbd.
- -- Copied job stat logic out of sacct into sstat in the future sacct -stat 
-    will be deprecated.
- -- Correct sbatch processing of --nice option with negative values.
- -- Add squeue formatted print option %Q to print a job's integer priority.
- -- In sched/backfill, fix bug that was changing a pending job's shared value
-    to zero (possibly changing a pending job's resource requirements from a 
-    processor on some node to the full node).
-
-* Changes in SLURM 1.3.2
-========================
- -- Get --ntasks-per-node option working for sbatch command.
- -- BLUEGENE: Added logic to give back a best block on overlapped mode 
-    in test_only mode
- -- BLUEGENE: Updated debug info and man pages for better help with the 
-    numpsets option and to fail correctly with bad image request for building
-    blocks.
- -- In sched/wiki and sched/wiki2 properly support Slurm license consumption
-    (job state reported as "Hold" when required licenses are not available).
- -- In sched/wiki2 JobWillRun command, don't return an error code if the job(s)
-    can not be started at that time. Just return an error message (from 
-    Doug Wightman, CRI).
- -- Fix bug if sched/wiki or sched/wiki2 are configured and no job comment is 
-    set.
- -- scontrol modified to report partition's "DisableRootJobs" value.
- -- Fix bug in setting host address for PMI communications (mpich2 only).
- -- Fix for memory size accounting on some architectures.
- -- In sbatch and salloc, change --dependency's one letter option from "-d"
-    to "-P" (continue to accept "-d", but change the documentation).
- -- Only check that task_epilog and task_prolog are runable by the job's
-    user, not as root.
- -- In sbatch, if specifying an alternate directory (--workdir/-D), then
-    input, output and error files are in that directory rather than the 
-    directory from which the command is executed
- -- NOTE: Fully operational with Moab version 5.2.3+. Change SUBMITCMD in
-    moab.cfg to be the location of sbatch rather than srun. Also set 
-    HostFormat=2 in SLURM's wiki.conf for improved performance.
- -- NOTE: We needed to change an RPC from version 1.3.1. You must upgrade 
-    all nodes in a cluster from v1.3.1 to v1.3.2 at the same time.
- -- Postgres plugin will work from job accounting, not for association 
-    management yet.
- -- For srun/sbatch --get-user-env option (Moab use only) look for "env"
-    command in both /bin and /usr/sbin (for Suse Linux).
- -- Fix bug in processing job feature requests with node counts (could fail
-    to schedule job if some nodes have not associated features).
- -- Added nodecnt and gid to jobcomp/script
- -- Ensure that nodes selected in "srun --will-run" command or the equivalent in
-    sched/wiki2 are in the job's partition.
- -- BLUGENE - changed partition Min|MaxNodes to represent c-node counts
-    instead of base partitions
- -- In sched/gang only, prevent possible invalid memory reference when 
-    slurmctld is reconfigured, e.g. "scontrol reconfig".
- -- In select/linear only, prevent invalid memory reference in log message when
-    nodes are added to slurm.conf and then "scontrol reconfig" is executed. 
-
-* Changes in SLURM 1.3.1
-========================
- -- Correct logic for processing batch job's memory limit enforcement.
- -- Fix bug that was setting a job's requeue value on any update of the 
-    job using the "scontrol update" command. The invalid value of an 
-    updated job prevents its recovery when slurmctld restarts.
- -- Add support for cluster-wide consumable resources. See "Licenses"
-    parameter in slurm.conf man page and "--licenses" option in salloc, 
-    sbatch and srun man pages.
- -- Major changes in select/cons_res to support FastSchedule=2 with more
-    resources configured than actually exist (useful for testing purposes).
- -- Modify srun --test-only response to include expected initiation time 
-    for a job as well as the nodes to be allocated and processor count
-    (for use by Moab).
- -- Correct sched/backfill to properly honor job dependencies.
- -- Correct select/cons_res logic to allocate CPUs properly if there is
-    more than one thread per core (previously failed to allocate all cores).
- -- Correct select/linear logic in shared job count (was off by 1).
- -- Add support for job preemption based upon partition priority (in sched/gang,
-    preempt.patch from Chris Holmes, HP).
- -- Added much better logic for mysql accounting.  
- -- Finished all basic functionality for sacctmgr.
- -- Added load file logic to sacctmgr for setting up a cluster in one step.
- -- NOTE: We needed to change an RPC from version 1.3.0. You must upgrade 
-    all nodes in a cluster from v1.3.0 to v1.3.1 at the same time.
- -- NOTE: Work is currently underway to improve placement of jobs for gang
-    scheduling and preemption.
- -- NOTE: Work is underway to provide additional tools for reporting 
-    accounting information.
-
-* Changes in SLURM 1.3.0
-========================
- -- In sched/wiki2, add processor count to JOBWILLRUN response.
- -- Add event trigger for node entering DRAINED state.
- -- Build properly without OpenSSL installed (OpenSSL is recommended, but not 
-    required).
- -- Added slurmdbd, and modified accounting_storage plugin to talk to it. 
-    Allowing multiple slurm systems to securely store and gather information
-    not only about jobs, but the system also. See accounting web page for more
-    information.    
-
-* Changes in SLURM 1.3.0-pre11
-==============================
- -- Restructure the sbcast RPC to take advantage of larger buffers available
-    in Slurm v1.3 RPCs.
- -- Fix several memory leaks.
- -- In scontrol, show job's Requeue value, permit change of Requeue and Comment
-    values.
- -- In slurmctld job record, add QOS (quality of service) value for accounting
-    purposes with Maui and Moab.
- -- Log to a job's stderr when it is being cancelled explicitly or upon reaching
-    its time limit.
- -- Only permit a job's account to be changed while that job is PENDING.
- -- Fix race condition in job suspend/resume (slurmd.sus_res.patch from HP).
-
-* Changes in SLURM 1.3.0-pre10
-==============================
- -- Add support for node-specific "arch" (architecture) and "os" (operating 
-    system) fields. These fields are set based upon values reported by the
-    slurmd daemon on each compute node using SLURM_ARCH and SLURM_OS environment 
-    variables (if set, the uname function otherwise) and are intended to support
-    changes in real time changes in operating system. These values are reported
-    by "scontrol show node" plus the sched/wiki and sched/wiki2 plugins for Maui
-    and Moab respectively.
- -- In sched/wiki and sched/wiki2: add HostFormat and HidePartitionJobs to 
-    "scontrol show config" SCHEDULER_CONF output.
- -- In sched/wiki2: accept hostname expression as input for GETNODES command.
- -- Add JobRequeue configuration parameter and --requeue option to the sbatch
-    command.
- -- Add HealthCheckInterval and HealthCheckProgram configuration parameters.
- -- Add SlurmDbdAddr, SlurmDbdAuthInfo and SlurmDbdPort configuration parameters.
- -- Modify select/linear to achieve better load leveling with gang scheduler.
- -- Develop the sched/gang plugin to support select/linear and
-    select/cons_res. If sched/gang is enabled and Shared=FORCE is configured
-    for a partition, this plugin will gang-schedule or "timeslice" jobs that
-    share common resources within the partition. Note that resources that are
-    shared across partitions are not gang-scheduled.
- -- Add EpilogMsgTime configuration parameter. See "man slurm.conf" for details.
- -- Increase default MaxJobCount configuration parameter from 2000 to 5000. 
- -- Move all database common files from src/common to new lib in src/database.
- -- Move sacct to src/accounting added sacctmgr for scontrol like operations 
-    to accounting infrastructure.
- -- Basic functions of sacctmgr in place to make for administration of 
-    accounting.
- -- Moved clusteracct_storage plugin to accounting_storage plugin,
-    jobacct_storage is still its own plugin for now.
- -- Added template for slurm php extension.
- -- Add infrastructure to support allocation of cluster-wide licenses to jobs.
-    Full support will be added some time after version 1.3.0 is released.
- -- In sched/wiki2 with select/bluegene, add support for WILLRUN command
-    to accept multiple jobs with start time specifications.
-
-* Changes in SLURM 1.3.0-pre9
-=============================
- -- Add spank support to sbatch. Note that spank_local_user() will be called 
-    with step_layout=NULL and gid=SLURM_BATCH_SCRIPT and spank_fini() will 
-    be called immediately afterwards.
- -- Made configure use mysql_config to find location of mysql database install
-    Removed bluegene specific information from the general database tables.
- -- Re-write sched/backfill to utilize new will-run logic in the select 
-    plugins. It now supports select/cons_res and all job options (required
-    nodes, excluded nodes, contiguous, etc.).
- -- Modify scheduling logic to better support overlapping partitions.
- -- Add --task-mem option and remove --job-mem option from srun, salloc, and 
-    sbatch commands. Enforce step memory limit, if specified and there is
-    no job memory limit specified (--mem). Also see DefMemPerTask and
-    MaxMemPerTask in "man slurm.conf". Enforcement is dependent upon job
-    accounting being enabled with non-zero value for JobAcctGatherFrequency.
- -- Change default node tmp_disk size to zero (for diskless nodes).
-
-* Changes in SLURM 1.3.0-pre8
-=============================
- -- Modify how strings are packed in the RPCs, Maximum string size 
-    increased from 64KB (16-bit size field) to 4GB (32-bit size field).
- -- Fix bug that prevented time value of "INFINITE" from being processed.
- -- Added new srun/sbatch option "--open-mode" to control how output/error 
-    files are opened ("t" for truncate, "a" for append).
- -- Added checkpoint/xlch plugin for use with XLCH (Hongjia Cao, NUDT).
- -- Added srun option --checkpoint-path for use with XLCH (Hongjia Cao, NUDT).
- -- Added new srun/salloc/sbatch option "--acctg-freq" for user control over 
-    accounting data collection polling interval.
- -- In sched/wiki2 add support for hostlist expression use in GETNODES command
-    with HostFormat=2 in the wiki.conf file.
- -- Added new scontrol option "setdebug" that can change the slurmctld daemons
-    debug level at any time (Hongjia Cao, NUDT).
- -- Track total suspend time for jobs and steps for accounting purposes.
- -- Add version information to partition state file.
- -- Added 'will-run' functionality to all of the select plugins (bluegene,
-    linear, and cons_res) to return node list and time job can start based 
-    on other jobs running.
- -- Major restructuring of node selection logic. select/linear now supports
-    partition max_share parameter and tries to match like size jobs on the 
-    same nodes to improve gang scheduling performance. Also supports treating 
-    memory as consumable resource for job preemption and  gang scheduling if 
-    SelectTypeParameter=CR_Memory in slurm.conf.
- -- BLUEGENE: Reorganized bluegene plugin for maintainability sake.
- -- Major restructuring of data structures in select/cons_res.
- -- Support job, node and partition names of arbitrary size.
- -- Fix bug that caused slurmd to hang when using select/linear with
-    task/affinity.
-
-* Changes in SLURM 1.3.0-pre7
-=============================
- -- Fix a bug in the processing of srun's --exclusive option for a job step.
-
-* Changes in SLURM 1.3.0-pre6
-=============================
- -- Add support for configurable number of jobs to share resources using the 
-    partition Shared parameter in slurm.conf (e.g. "Shared=FORCE:3" for two 
-    jobs to share the resources). From Chris Holmes, HP.
- -- Made salloc use api instead of local code for message handling.
-
-* Changes in SLURM 1.3.0-pre5
-=============================
- -- Add select_g_reconfigure() function to note changes in slurmctld configuration
-    that can impact node scheduling.
- -- scontrol to set/get partition's MaxTime and job's Timelimit in minutes plus
-    new formats: min:sec, hr:min:sec, days-hr:min:sec, days-hr, etc.
- -- scontrol "notify" command added to send message to stdout of srun for 
-    specified job id.
- -- For BlueGene, make alpha part of node location specification be case insensitive.
- -- Report scheduler-plugin specific configuration information with the 
-    "scontrol show configuration" command on the SCHEDULER_CONF line. This
-    information is not found in the "slurm.conf" file, but a scheduler plugin 
-    specific configuration (e.g. "wiki.conf").
- -- sview partition information reported now includes partition priority.
- -- Expand job dependency specification to support concurrent execution, 
-    testing of job exit status and multiple job IDs.
-
-* Changes in SLURM 1.3.0-pre4
-=============================
- -- Job step launch in srun is now done from the slurm api's all further
-    modifications to job launch should be done there.
- -- Add new partition configuration parameter Priority. Add job count to 
-    Shared parameter.
- -- Add new configuration parameters DefMemPerTask, MaxMemPerTask, and 
-    SchedulerTimeSlice.
- -- In sched/wiki2, return REJMESSAGE with details on why a job was 
-    requeued (e.g. what node failed).
-
-* Changes in SLURM 1.3.0-pre3
-=============================
- -- Remove slaunch command
- -- Added srun option "--checkpoint=time" for job step to automatically be 
-    checkpointed on a periodic basis.
- -- Change behavior of "scancel -s KILL <jobid>" to send SIGKILL to all job
-    steps rather than cancelling the job. This now matches the behavior of
-    all other signals. "scancel <jobid>" still cancels the job and all steps.
- -- Add support for new job step options --exclusive and --immediate. Permit
-    job steps to be queued when resources are not available within an existing 
-    job allocation to dedicate the resources to the job step. Useful for
-    executing simultaneous job steps. Provides resource management both at 
-    the level of jobs and job steps.
- -- Add support for feature count in job constraints, for example
-    srun --nodes=16 --constraint=graphics*4 ...
-    Based upon work by Kumar Krishna (HP, India).
- -- Add multi-core options to salloc and sbatch commands (sbatch.patch and
-    cleanup.patch from Chris Holmes, HP).
- -- In select/cons_res properly release resources allocated to job being 
-    suspended (rmbreak.patch, from Chris Holmes, HP).
- -- Removed database and jobacct plugin replaced with jobacct_storage 
-    and jobacct_gather for easier hooks for further expansion of the
-    jobacct plugin.
-
-* Changes in SLURM 1.3.0-pre2
-=============================
- -- Added new srun option --pty to start job with pseudo terminal attached 
-    to task 0 (all other tasks have I/O discarded)
- -- Disable user specifying jobid when sched/wiki2 configured (needed for 
-    Moab releases until early 2007).
- -- Report command, args and working directory for batch jobs with 
-    "scontrol show job".
-
-* Changes in SLURM 1.3.0-pre1
-=============================
- -- !!! SRUN CHANGES !!!
-    The srun options -A/--allocate, -b/--batch, and -a/--attach have been
-    removed!  That functionality is now available in the separate commands
-    salloc, sbatch, and sattach, respectively.
- -- Add new node state FAILING plus trigger for when node enters that state.
- -- Add new configuration parameter "PrivateData". This can be used to 
-    prevent a user from seeing jobs or job steps belonging to other users.
- -- Added configuration parameters for node power save mode: ResumeProgram
-    ResumeRate, SuspendExcNodes, SuspendExcParts, SuspendProgram and 
-    SuspendRate.
- -- Slurmctld maintains the IP address (rather than hostname) for srun 
-    communications. This fixes some possible network routing issues.
- -- Added global database plugin.  Job accounting and Job completion are the 
-    first to use it.  Follow documentation to add more to the plugin.
- -- Removed no-longer-needed jobacct/common/common_slurmctld.c since that is
-    replaced by the database plugin.
- -- Added new configuration parameter: CryptoType.
-    Moved existing digital signature logic into new plugin: crypto/openssl.
-    Added new support for crypto/munge (available with GPL license).
-
-* Changes in SLURM 1.2.36
-=========================
- -- For spank_get_item(S_JOB_ARGV) for batch job with script input via STDIN,
-    set argc value to 1 (rather than 2, argv[0] still set to path of generated
-    script).
- -- sacct will now display more properly allocations made with salloc with only 
-    one step.
-
-* Changes in SLURM 1.2.35
-=========================
- -- Permit SPANK plugins to dynamically register options at runtime based upon
-    configuration or other runtime checks.
- -- Add "include" keyword to SPANK plugstack.conf file to optionally include
-    other configuration files or directories of configuration files.
- -- Srun to wait indefinitely for resource allocation to be made. Used to
-    abort after two minutes.
-
-* Changes in SLURM 1.2.34
-=========================
- -- Permit the cancellation of a job that is in the process of being 
-    requeued.
- -- Ignore the show_flag when getting job, step, node or partition information
-    for user root.
- -- Convert some functions to thread-safe versions: getpwnam, getpwuid, 
-    getgrnam, and getgrgid to similar functions with "_r" suffix. While no
-    failures have been observed, a race condition would in the worst case
-    permit a user access to a partition not normally allowed due to the
-    AllowGroup specification or the wrong user identified in an accounting
-    record. The job would NOT be run as the wrong user.
- -- For PMI only (MPICH2/MVAPICH2) base address to send messages to (the srun)
-    upon the address from which slurmd gets the task launch request rather than
-    "hostname" where srun executes.
- -- Make test for StateSaveLocation directory more comprehensive.
- -- For jobcomp/script plugin, PROCS environment variable is now the actual
-    count of allocated processors rather than the count of processes to 
-    be started.
-
-* Changes in SLURM 1.2.33
-=========================
- -- Cancelled or Failed jobs will now report their job and step id on exit
- -- Add SPANK items available to get: SLURM_VERSION, SLURM_VERSION_MAJOR,
-    SLURM_VERSION_MINOR and SLURM_VERSION_MICRO.
- -- Fixed handling of SIGPIPE in srun. Abort job.
- -- Fix bug introduced to MVAPICH plugin preventing use of TotalView debugger.
- -- Modify slurmctld to get srun/salloc network address based upon the incoming
-    message rather than hostname set by the user command (backport of logic in
-    SLURM v1.3).
-
-* Changes in SLURM 1.2.32
-=========================
- -- LSF only: Enable scancel of job in RootOnly partition by the job's owner.
- -- Add support for sbatch --distribution and --network options.
- -- Correct pending job's wait reason to "Priority" rather than "Resources" if
-    required resources are being held in reserve for a higher priority job.
- -- In sched/wiki2 (Moab) report a node's state as "Drained" rather than 
-    "Draining" if it has no allocated work (An undocumented Moab wiki option, 
-    see CRI ticket #2394).
- -- Log to job's output when it is cancelled or reaches its time limit (ported
-    from existing code in slurm v1.3).
- -- Add support in salloc and sbatch commands for --network option.
- -- Add support for user environment variables that include '\n' (e.g. 
-    bash functions).
- -- Partial rewrite of mpi/mvapich plugin for improved scalability.
-
-* Changes in SLURM 1.2.31
-=========================
- -- For Moab only: If GetEnvTimeout=0 in slurm.conf then do not run "su" to get
-    the user's environment, only use the cache file.
- -- For sched/wiki2 (Moab), treat the lack of a wiki.conf file or the lack 
-    of a configured AuthKey as a fatal error (lacks effective security).
- -- For sched/wiki and sched/wiki2 (Maui or Moab) report a node's state as 
-    Busy rather than Running when allocated if SelectType=select/linear. Moab
-    was trying to schedule job's on nodes that were already allocated to jobs
-    that were hidden from it via the HidePartitionJobs in Slurm's wiki.conf.
- -- In select/cons_res improve the resource selection when a job has specified
-    a processor count along with a maximum node count.
- -- For an srun command with --ntasks-per-node option and *no* --ntasks count,
-    spawn a task count equal to the number of nodes selected multiplied by the 
-    --ntasks-per-node value.
- -- In jobcomp/script: Set TZ if set in slurmctld's environment.
- -- In srun with --verbose option properly format CPU allocation information 
-    logged for clusters with 1000+ nodes and 10+ CPUs per node.
- -- Process a job's --mail_type=end option on any type of job termination, not
-    just normal completion (e.g. all failure modes too).
-
-* Changes in SLURM 1.2.30
-=========================
- -- Fix for gold not to print out 720 error messages since they are
-    potentially harmful.
- -- In sched/wiki2 (Moab), permit changes to a pending job's required features:
-    CMD=CHANGEJOB ARG=<jobid> RFEATURES=<features>
- -- Fix for not aborting when node selection doesn't load, fatal error instead
- -- In sched/wiki and sched/wiki2 DO NOT report a job's state as "Hold" if its
-    dependencies have not been satisfied. This reverses a changed made in SLURM
-    version 1.2.29 (which was requested by Cluster Resources, but places jobs 
-    in a HELD state indefinitely).
-
-* Changes in SLURM 1.2.29
-=========================
- -- Modified global configuration option "DisableRootJobs" from number (0 or 1)
-    to boolean (YES or NO) to match partition parameter.
- -- Set "DisableRootJobs" for a partition to match the global parameters value 
-    for newly created partitions.
- -- In sched/wiki and sched/wiki2 report a node's updated features if changed
-    after startup using "scontrol update ..." command.
- -- In sched/wiki and sched/wiki2 report a job's state as "Hold" if its 
-    dependencies have not been satisfied.
- -- In sched/wiki and sched/wiki2 do not process incoming requests until
-    slurm configuration is completely loaded.
- -- In sched/wiki and sched/wiki2 do not report a job's node count after it 
-    has completed (slurm decrements the allocated node count when the nodes
-    transition from completing to idle state).
- -- If job prolog or epilog fail, log the program's exit code.
- -- In jobacct/gold map job names containing any non-alphanumeric characters 
-    to '_' to avoid MySQL parsing problems.
- -- In jobacct/linux correct parsing if command name contains spaces.
- -- In sched/wiki and sched/wiki2 make job info TASK count reflect the 
-    actual task allocation (not requested tasks) even after job terminates.
-    Useful for accounting purposes only.
-
-* Changes in SLURM 1.2.28
-=========================
- -- Added configuration option "DisableRootJobs" for parameter 
-    "PartitionName".  See "man slurm.conf" for details.
- -- Fix for faking a large system to correctly handle node_id in the task
-    affinity plugin for ia64 systems.
-
-* Changes in SLURM 1.2.27
-=========================
- -- Record job eligible time in accounting database (for jobacct/gold only).
- -- Prevent user root from executing a job step within a job allocation 
-    belonging to another user.
- -- Fixed limiting issue for strings larger than 4096 in xstrfmtcat
- -- Fix bug in how Slurm reports job state to Maui/Moab when a job is requeued
-    due to a node failure, but we can't terminate the job's spawned processes.
-    Job was being reported as PENDING when it was really still COMPLETING.
- -- Added patch from Jerry Smith for qstat -a output
- -- Fixed looking at the correct perl path for Slurm.pm in torque wrappers.
- -- Enhance job requeue on node failure to be more robust.
- -- Added configuration parameter "DisableRootJobs". See "man slurm.conf" 
-    for details.
- -- Fixed issue with account = NULL in Gold job accounting plugin
-
-* Changes in SLURM 1.2.26
-=========================
- -- Correct number of sockets/cores/threads reported by slurmd (from
-    Par Andersson, National Supercomputer Centre, Sweden).
- -- Update libpmi linking so that libslurm is not required for PMI use
-    (from Steven McDougal, SiCortex).
- -- In srun and sbatch, do not check the PATH env var if an absolute pathname 
-    of the program is specified (previously reported an error if no PATH).
- -- Correct output of "sinfo -o %C" (CPU counts by node state).
-
-* Changes in SLURM 1.2.25
-=========================
- -- Bug fix for setting exit code in accounting for batch script.
- -- Add salloc option, --no-shell (for LSF).
- -- Added new options for sacct output
- -- mvapich: Ensure MPIRUN_ID is unique for all job steps within a job.
-    (Fixes crashes when running multiple job steps within a job on one node)
- -- Prevent "scontrol show job" from failing with buffer overflow when a job 
-    has a very long Comment field.
- -- Make certain that a job step is purged when a job has been completed.
-    Previous versions could have the job step persist if an allocated node
-    went DOWN and the slurmctld restarted.
- -- Fix bug in sbcast that can cause communication problems for large files.
- -- Add sbcast option -t/--timeout and SBCAST_TIMEOUT environment variable 
-    to control message timeout.
- -- Add threaded agent to manage a queue of Gold update requests for 
-    performance reasons.
- -- Add salloc options --chdir and --get-user-env (for Moab).
- -- Modify scontrol update to support job comment changes.
- -- Do not clear a DRAINED node's reason field when slurmctld restarts.
- -- Do not cancel a pending job if Moab or Maui try to start it on unusable nodes.
-    Leave the job queued.
- -- Add --requeue option to srun and sbatch (these undocumented options have no
-    effect in slurm v1.2, but are legitimate options in slurm v1.3).
- 
-* Changes in SLURM 1.2.24
-=========================
- -- In sched/wiki and sched/wiki2, support non-zero UPDATE_TIME specification
-    for GETNODES and GETJOBS commands.
- -- Bug fix for sending accounting information multiple times for same 
-    info.  patch from Hongjia Cao (NUDT).
- -- BLUEGENE - try FILE pointer rotation logic to avoid core dump on 
-    bridge log rotate
- -- Spread out in time the EPILOG_COMPLETE messages from slurmd to slurmctld
-    to avoid message congestions and retransmission.
-
-* Changes in SLURM 1.2.23
-=========================
- -- Fix for libpmi to not export unneeded variables like xstr*
- -- BLUEGENE - added per partition dynamic block creation
- -- fix infinite loop bug in sview when there were multiple partitions
- -- Send message to srun command when a job is requeued due to node failure.
-    Note this will be overwritten in the output file unless JobFileAppend
-    is set in slurm.conf. In slurm version 1.3, srun's --open-mode=append
-    option will offer this control for each job.
- -- Change a node's default TmpDisk from 1MB to 0MB and change job's default 
-    disk space requirement from 1MB to 0MB.
- -- In sched/wiki (Maui scheduler) specify a QOS (quality of service) by 
-    specifying an account of the form "qos-name".
- -- In select/linear, fix bug in scheduling required nodes that already have
-    a job running on them (req.load.patch from Chris Holmes, HP).
- -- For use with Moab only: change timeout for srun/sbatch --get-user-env 
-    option to 2 secs, don't get DISPLAY environment variables, but explicitly 
-    set ENVIRONMENT=BATCH and HOSTNAME to the execution host of the batch script.
- -- Add configuration parameter GetEnvTimeout for use with Moab. See
-    "man slurm.conf" for details.
- -- Modify salloc and sbatch to accept both "--tasks" and "--ntasks" as 
-    equivalent options for compatibility with srun.
- -- If a partition's node list contains space separators, replace them with 
-    commas for easier parsing.
- -- BLUEGENE - fixed bug in geometry specs when creating a block.
- -- Add support for Moab and Maui to start jobs with select/cons_res plugin
-    and jobs requiring more than one CPU per task.
-
-* Changes in SLURM 1.2.22
-=========================
- -- In sched/wiki2, add support for MODIFYJOB option "MINSTARTTIME=<time>"
-    to modify a job's earliest start time.
- -- In sbcast, fix bug with large files and causing sbcast to die.
- -- In sched/wiki2, add support for COMMENT= option in STARTJOB and CANCELJOB
-    commands.
- -- Avoid printing negative job run time in squeue due to clock skew.
- -- In sched/wiki and sched/wiki2, add support for wiki.conf option
-    HidePartitionJobs (see man pages for details).
- -- Update to srun/sbatch --get-user-env option logic (needed by Moab).
- -- In slurmctld (for Moab) added job->details->reserved_resources field
-    to report resources that were kept in reserve for job while it was 
-    pending.
- -- In sched/wiki (for Maui scheduler) report a pending job's node feature 
-    requirements (from Miguel Roa, BSC).
- -- Permit a user to change a pending job's TasksPerNode specification 
-    using scontrol (from Miguel Roa, BSC).
- -- Add support for node UP/DOWN event logging in jobacct/gold plugin
-    WARNING: using the jobacct/gold plugin slows the system startup set the
-    MessageTimeout variable in the slurm.conf to around 20+.
- -- Added check at start of slurmctld to look for /tmp/slurm_gold_first if
-    there, and using the gold plugin slurm will make record of all nodes in
-    downed or drained state.
-
-* Changes in SLURM 1.2.21
-=========================
- -- Fixed torque wrappers to look in the correct spot for the perl api
- -- Do not treat user resetting his time limit to the current value as
-    an error.
- -- Set correct executable names for Totalview when --multi-prog option 
-    is used and more than one node is allocated to the job step.
- -- When a batch job gets requeued, record in accounting logs that 
-    the job was cancelled, the requeued job's submit time will be 
-    set to the time of its requeue so it looks like a different job.
- -- Prevent communication problems if the slurmd/slurmstepd have a 
-    different JobAcct plugin configured than slurmctld.
- -- Adding Gold plugin for job accounting
- -- In sched/wiki2, add support for MODIFYJOB option "JOBNAME=<name>"
-    to modify a job's name.
- -- Add configuration check for sys/syslog.h and include it as needed.
- -- Add --propagate option to sbatch for control over limit propagation.
- -- Added Gold interface to the jobacct plugin.  To configure in the config
-    file specify...  
-    JobAcctType=jobacct/gold
-    JobAcctLogFile=CLUSTER_NAME:GOLD_AUTH_KEY_FILE:GOLDD_HOST:GOLDD_PORT7112
- -- In slurmctld job record, set begin_time to time when all of a job's
-    dependencies are met.
-
-* Changes in SLURM 1.2.20
-=========================
- -- In switch/federation, fix small memory leak effecting slurmd.
- -- Add PMI_FANOUT_OFF_HOST environment variable to control how message 
-    forwarding is done for PMI (MPICH2). See "man srun" for details.
- -- From sbatch set SLURM_NTASKS_PER_NODE when --ntasks-per-node option is 
-    specified.
- -- BLUEGENE: Documented the prefix should always be lower case and the 3
-    digit suffix should be uppercase if any letters are used as digits. 
- -- In sched/wiki and sched/wiki2, add support for --cpus-per-task option.
-    From Miguel Ros, BSC.
- -- In sched/wiki2, prevent invalid memory pointer (and likely seg fault) 
-    for job associated with a partition that has since been deleted.
- -- In sched/wiki2 plus select/cons_res, prevent invalid memory pointer 
-    (and likely seg fault) when a job is requeued.
- -- In sched/wiki, add support for job suspend, resume, and modify.
- -- In sched/wiki, add support for processor allocation (not just node allocation)
-    with layout control.
- -- Prevent re-sending job termination RPC to a node that has already completed 
-    the job. Only send it to specific nodes which have not reported completion.
- -- Support larger environment variables 64K instead of BUFSIZ (8k on some 
-    systems).
- -- If a job is being requeued, job step create requests will print a 
-    warning and repeatedly retry rather than aborting.
- -- Add optional mode value to srun and sbatch --get-user-env option.
- -- Print error message and retry job submit commands when MaxJobCount 
-    is reached. From Don Albert, Bull.
- -- Treat invalid begin time specification as a fatal error in sbatch and 
-    srun. From Don Albert, Bull.
- -- Validate begin time specification to avoid hours >24, minutes >59, etc.
-
-* Changes in SLURM 1.2.19
-=========================
-*** NOTE IMPORTANT CHANGE IN RPM BUILD BELOW ****
- -- slurm.spec file (used to build RPMs) was updated in order to support Mock, a
-    chroot build environment. See https://hosted.fedoraproject.org/projects/mock/
-    for more information. The following RPMs are no longer built by default:
-    aix-federation, auth_none, authd, bluegene, sgijob, and switch-elan. Change 
-    the RPMs built using the following options in ~/rpmmacros: "%_with_authd 1", 
-    "%_without_munge 1", etc. See the slurm.spec file for more details.
- -- Print warning if non-privileged user requests negative "--nice" value on
-    job submission (srun, salloc, and sbatch commands).
- -- In sched/wiki and sched/wiki2, add support for srun's --ntasks-per-node 
-    option.
- -- In select/bluegene with Groups defined for Images, fix possible memory 
-    corruption. Other configurations are not affected. 
- -- BLUEGENE - Fix bug that prevented user specification of linux-image, 
-    mloader-image, and ramdisk-image on job submission.
- -- BLUEGENE - filter Groups specified for image not just by submitting 
-    user's current group, but all groups the user has access to.
- -- BLUEGENE - Add salloc options to specify images to be loaded (--blrts-image, 
-    --linux-image, --mloader-image, and --ramdisk-image).
- -- BLUEGENE - In bluegene.conf, permit Groups to be comma separated in addition 
-    to colon separators previously supported.
- -- sbatch will accept batch script containing "#SLURM" options and advise
-    changing to "#SBATCH".
- -- If srun --output or --error specification contains a task number rather 
-    than a file name, send stdout/err from specified task to srun's stdout/err
-    rather than to a file by the same name as the task's number.
- -- For srun --multi-prog option, verify configuration file before attempting 
-    to launch tasks, report clear explanation of any configuration file errors.
- -- For sched/wiki2, add optional timeout option to srun's --get-user-env
-    parameter, change default timeout for "su - <user> env" from 3 to 8 seconds.
-    On timeout, attempt to load env from file at StateSaveLocation/env_cache/<user>.
-    The format of this file is the same as output of "env" command. If there
-    is no env cache file, then abort the request.
- -- squeue modified for completing job to remove nodes that have already 
-    completed the job before applying node filter logic.
- -- squeue formatted output option added for job comment, "%q" (the obvious 
-    choices for letters are already in use).
- -- Added configure option --enable-load-env-no-login for use with Moab. If
-    set then the user job runs with the environment built without a login
-    ("su <user> env" rather than "su - <user> env").
- -- Fix output of "srun -o %C" (allocated CPU count) for running jobs. This was
-    broken in 1.2.18 for handling requeue of Moab jobs.
- -- Added logic to mpiexec wrapper to read in the MPIEXEC_TIMEOUT var
- -- Updated qstat wrapper to display information for partitions (-Q) option
- -- NOTE: SLURM should now work directly with Globus using the PBS GRAM.
-
-* Changes in SLURM 1.2.18
-=========================
- -- BLUEGENE - bug fix for smap stating passthroughs are used when they aren't
- -- Fixed bug in sview to be able to edit partitions correctly
- -- Fixed bug so in slurm.conf files where SlurmdPort isn't defined things
-    work correctly.
- -- In sched/wiki2 and sched/wiki add support for batch job being requeued
-    in Slurm either when nodes fail or upon request.
- -- In sched/wiki2 and sched/wiki with FastSchedule=2 configured and nodes 
-    configured with more CPUs than actually exist, return a value of TASKS 
-    equal to the number of configured CPUs that are allocated to a job rather 
-    than the number of physical CPUs allocated.
- -- For sched/wiki2, timeout "srun --get-user-env ..." command after 3 seconds 
-    if unable to perform pseudo-login and get user environment variables.
- -- Add contribs/time_login.c program to test how long pseudo-login takes
-    for specific users or all users. This can identify users for which Moab 
-    job submissions are unable to set the proper environment variables.
- -- Fix problem in parallel make of Slurm.
- -- Fixed bug in consumable resources when CR_Core_Memory is enabled
- -- Add delay in slurmctld for "scontrol shutdown" RPC to get propagated 
-    to slurmd daemons.
-
-* Changes in SLURM 1.2.17
-=========================
- -- In select/cons_res properly release resources allocated to job being 
-    suspended (rmbreak.patch, from Chris Holmes, HP).
- -- Fix AIX linking problem for PMI (mpich2) support.
- -- Improve PMI logic for greater scalability (up to 16k tasks run).
- -- Add srun support for SLURM_THREADS and PMI_FANOUT environment variables.
- -- Fix support in squeue for output format with left justification of 
-    reason (%r) and reason/node_list (%R) output.
- -- Automatically requeue a batch job when a node allocated to it fails
-    or the prolog fails (unless --no-requeue or --no-kill option used).
- -- In sched/wiki, enable use of wiki.conf parameter ExcludePartitions to
-    directly schedule selected partitions without Maui control.
- -- In sched/backfill, if a job requires specific nodes, schedule other jobs
-    ahead of it rather than completely stopping backfill scheduling for that
-    partition.
- -- BLUEGENE - corrected logic making block allocation work in a circular 
-    fashion instead of linear.
- 
-* Changes in SLURM 1.2.16
-=========================
- -- Add --overcommit option to the salloc command.
- -- Run task epilog from job's working directory rather than directory
-    where slurmd daemon started from.
- -- Log errors running task prolog or task epilog to srun's output.
- -- In sched/wiki2, fix bug processing condensed hostlist expressions.
- -- Release contribs/mpich1.slurm.patch without GPL license. 
- -- Fix bug in mvapich plugin for read/write calls that return EAGAIN.
- -- Don't start MVAPICH timeout logic until we know that srun is starting 
-    an MVAPICH program.
- -- Fix to srun only allocating number of nodes needed for requested task
-    count when combining allocation and step creation in srun.
- -- Execute task-prolog within proctrack container to insure that all 
-    child processes get terminated.
- -- Fixed job accounting to work with sgi_job proctrack plugin.
-
-* Changes in SLURM 1.2.15
-=========================
- -- In sched/wiki2, fix bug processing hostlist expressions where hosts
-    lack a numeric suffix.
- -- Fix bug in srun. When user did not specify time limit, it defaulted to 
-    INFINITE rather than partition's limit.
- -- In select/cons_res with SelectTypeParameters=CR_Socket_Memory, fix bug in 
-    memory allocation tracking, mem.patch from Chris Holmes, HP.
- -- Add --overcommit option to the sbatch command.
-
-* Changes in SLURM 1.2.14
-=========================
- -- Fix a couple of bugs in MPICH/MX support (from Asier Roa, BSC).
- -- Fix perl api for AIX
- -- Add wiki.conf parameter ExcludePartitions for selected partitions to 
-    be directly scheduled by Slurm without Moab control
- -- Optimize load leveling for shared nodes (alloc.patch, contributed 
-    by Chris Holmes, HP).
- -- Added PMI_TIME environment variable for user to control how PMI 
-    communications are spread out in time. See "man srun" for details.
- -- Added PMI timing information to srun debug mode to aid in tuning.
-    Use "srun -vv ..." to see the information.
- -- Added checkpoint/ompi (OpenMPI) plugin (still under development).
- -- Fix bug in load leveling logic added to v1.2.13 which can cause an 
-    infinite loop and hang slurmctld when sharing nodes between jobs.
- -- Added support for sbatch to read in #PBS options from a script
-
-* Changes in SLURM 1.2.13
-=========================
- -- Add slurm.conf parameter JobFileAppend.
- -- Fix for segv in "scontrol listpids" on nodes not in SLURM config.
- -- Add support for SCANCEL_CTLD env var.
- -- In mpi/mvapich plugin, add startup timeout logic. Time based upon 
-    SLURM_MVAPICH_TIMEOUT (value in seconds).
- -- Fixed pick_step_node logic to only pick the number of nodes requested
-    from the user when excluding nodes, to avoid an error message.
- -- Disable salloc, sbatch and srun -I/--immediate options with 
-    Moab scheduler.
- -- Added "contribs" directory with a Perl API and Torque wrappers for Torque 
-    to SLURM migration.  This directory should be used to put anything that 
-    is outside of SLURM proper such as a different API. Perl APIs contributed 
-    by Hongjia Cao (NUDT).
- -- In sched/wiki2: add support for tasklist with node name expressions 
-    and task counts (e.g. TASKLIST=tux[1-4]*2:tux[12-14]*4).
- -- In select/cons_res with sched/wiki2: fix bug in task layout logic.
- -- Removed all curses info from the bluegene plugin putting it into smap
-    where it belongs.  
- -- Add support for job time limit specification formats: min, min:sec, 
-    hour:min:sec, and days-hour:min:sec (formerly only supported minutes).
-    Applies to salloc, sbatch, and srun commands.
- -- Improve scheduling support for exclusive constraint list, nodes can 
-    now be in more than one constraint specified exclusively for a job
-    (e.g. "srun -C [rack1|rack2|rack3|rowB] srun")
- -- Create separate MPICH/MX plugin (split out from MPICH/GM plugin)
- -- Increase default MessageTimeout (in slurm.conf) from 5 to 10 secs.
- -- Fix bug in batch job requeue if node zero of allocation fails to respond 
-    to task launch request.
- -- Improve load leveling logic to more evenly distribute the workload 
-    (best_load.patch, contributed by Chris Holmes, HP).
- 
-* Changes in SLURM 1.2.12
-=========================
- -- Increase maximum message size from 1MB to 16MB (from Ernest Artiaga, BSC). 
- -- In PMI_Abort(), log the event and abort the entire job step.
- -- Add support for additional PMI functions: PMI_Get_clique_ranks and 
-    PMI_Get_clique_size (from Chuck Clouston, Bull).
- -- Report an error when a hostlist comes in appearing to be a box but not 
-    formatted in XYZxXYZ format.
- -- Add support for partition configuration "Shared=exclusive". This is 
-    equivalent to "srun --exclusive" when select/cons_res is configured.
- -- In sched/wiki2, report the reason for a node being unavailable for the 
-    GETNODES command using the CAT="<reason>" field.
- -- In sched/wiki2 with select/linear, duplicate hostnames in HOSTLIST, one
-    per allocated processor.
- -- Fix bug in scancel with specific signal and job lacks active steps.
- -- In sched/wiki2, add support for NOTIFYJOB ARG=<jobid> MSG=<message>.
-    This sends a message to an active srun command.
- -- salloc will now set SLURM_NPROCS to improve srun's behavior under salloc.
- -- In sched/wiki2 and select/cons_res: insure that Slurm's CPU allocation
-    is identical to Moab's (from Ernest Artiaga and Asier Roa, BSC).
- -- Added "scontrol show slurmd" command to status local slurmd daemon.
- -- Set node DOWN if prolog fails on node zero of batch job launch.
- -- Properly handle "srun --cpus-per-task" within a job allocation when 
-    SLURM_TASKS_PER_NODE environment variable is not set.
- -- Fixed return of slurm_send_rc_msg if msg->conn_fd is < 0 set errno ENOTCONN
-    and return SLURM_ERROR instead of return ENOTCONN
- -- Added read before we send anything down a socket to make sure the socket
-    is still there.
- -- Add slurm.conf variables UnkillableStepProgram and UnkillableStepTimeout.
- -- Enable nice file propagation from sbatch command.
- 
-* Changes in SLURM 1.2.11
-=========================
- -- Updated "etc/mpich1.slurm.patch" for direct srun launch of MPICH1_P4
-    tasks. See the "README" portion of the patch for details.
- -- Added new scontrol command "show hostlist <hostnames>" to translate a list 
-    of hostnames into a hostlist expression (e.g. "tux1,tux2" -> "tux[1-2]")
-    and "show hostnames <list>", returns a list of nodes (one node per line)
-    from SLURM hostlist expression or from SLURM_NODELIST environment variable 
-    if no hostlist specified.
- -- Add the sbatch option "--wrap".
- -- Add the sbatch option "--get-user-env".
- -- Added support for mpich-mx (use the mpichgm plugin).
- -- Make job's stdout and stderr file access rights be based upon user's umask
-    at job submit time.
- -- Add support for additional PMI functions: PMI_Parse_option,
-    PMI_Args_to_keyval, PMI_Free_keyvals and PMI_Get_options (from Puenlap Lee
-    and Nancy Kritkausky, Bull).
- -- Make default value of SchedulerPort (configuration parameter) be 7321.
- -- Use SLURM_UMASK environment variable (if set) at job submit time as umask 
-    for spawned job.
- -- Correct some format issues in the man pages (from Gennaro Oliva, ICAR).
- -- Added support for parallel make across an existing SLURM allocation
-    based upon GNU make-3.81. Patch is in "etc/make.slurm.patch".
- -- Added '-b' option to sbatch for easy MOAB transition to sbatch instead of
-    srun.  Option does nothing in sbatch.
- -- Changed wiki2's handling of a node state in Completing to return 'busy' 
-    instead of 'running' which matches slurm version 1.1
-
-* Changes in SLURM 1.2.10
-=========================
- -- Fix race condition in jobacct/linux with use of proctrack/pgid and a
-    realloc issue inside proctrack/linux
- -- Added MPICH1_P4 plugin for direct launch of mpich1/p4 tasks using srun
-    and a patched version of the mpi library. See "etc/mpich1.slurm.patch".
-    NOTE: This is still under development and not ready for production use.
-
-* Changes in SLURM 1.2.9
-========================
- -- Add new sinfo field to sort by "%E" sorts by the time associated with a 
-    node's state (from Prashanth Tamraparni, HP).
- -- In sched/wiki: fix logic for restarting backup slurmctld.
- -- Preload SLURM plugins early in the slurmstepd operation to avoid
-    multiple dlopens after forking (and to avoid a glibc bug
-    that leaves dlopen locks in a bad state after a fork).
- -- Added MPICH1_P4 patch to launch tasks using srun rather than rsh and
-    automatically generate mpirun's machinefile based upon the job's 
-    allocation.    See "etc/mpich1.slurm.patch".
- -- BLUEGENE - fix for overlap mode to mark all other base partitions as used
-    when creating a new block from the file to insure we only use the base 
-    partitions we are asking for.
-
-* Changes in SLURM 1.2.8
-========================
- -- Added mpi/mpich1_shmem plugin.
- -- Fix in proctrack/sgi_job plugin that could cause slurmstepd to seg_fault
-    preventing timely clean-up of batch jobs in some cases.
-
-* Changes in SLURM 1.2.7
-========================
- -- BLUEGENE - code to make it so you can make a 36x36x36 system.  
-    The wiring should be correct for a system with x-dim of 1,2,4,5,8,13
-    in emulation mode.  It will work with any real system no matter the size.
- -- Major re-write of jobcomp/script plugin: fix memory leak and 
-    general code clean-up.
- -- Add ability to change MaxNodes and ExcNodeList for pending job 
-    using scontrol.
- -- Purge zombie processes spawned via event triggers.
- -- Add support for power saving mode (experimental code to reduce voltage
-    and frequency on nodes that stay in the IDLE state, for more information 
-    see http://www.llnl.gov/linux/slurm/power_save.html). None of this
-    code is enabled by default.
-
-* Changes in SLURM 1.2.6
-========================
- -- Fix MPIRUN_PORT env variable in mvapich plugin
- -- Disable setting triggers by other than user SlurmUser unless SlurmUser
-    is root for improved security.
- -- Add event trigger for IDLE nodes.
-
-* Changes in SLURM 1.2.5
-========================
- -- Fix nodelist truncation in "scontrol show jobs" output
- -- In mpi/mpichgm, fix potential problem formatting GMPI_PORT, from
-    Ernest Artiaga, BSC.
- -- In sched/wiki2 - Report job's account, from Ernest Artiaga, BSC.
- -- Add sbatch option "--ntasks-per-node".
-
-* Changes in SLURM 1.2.4
-========================
- -- In select/cons_res - fix for function argument type mis-match in getting
-    CPU count for a job, from Ernest Artiaga, BSC.
- -- In sched/wiki2 - Report job's tasks_per_node requirement.
- -- In forward logic fix to check if the forwarding node receives a connection
-    but doesn't ever get the message from the sender (network issue or
-    something) also check to make sure if we get something back we make sure
-    we account for everything we sent out before we call it good.
- -- Another fix to make sure steps with requested nodes have correct cpus
-    accounted for and a fix to make sure the user can't allocate more 
-    cpus than they have requested.
-
-* Changes in SLURM 1.2.3
-========================
- -- Cpuset logic added to  task/affinity, from Don Albert (Bull) and
-    Moe Jette (LLNL).  The /dev/cpuset file system must be mounted and 
-    set "TaskPluginParam=cpusets" in slurm.conf to enable.
- -- In sched/wiki2, fix possible overflow in job's nodelist, from 
-    Ernest Artiaga, BSC.
- -- Defer creation of new job steps until a suspended job is resumed.
- -- In select/linear - fix for potential stack corruption bug.
-
-* Changes in SLURM 1.2.2
-========================
- -- Added new command "strigger" for event trigger management, a new 
-    capability. See "man strigger" for details.
- -- srun --get-user-env now sends su's stderr to /dev/null
- -- Fix in node_scheduling logic with multiple node_sets, from 
-    Ernest Artiaga, BSC.
- -- In select/cons_res, fix for function argument type mis-match in getting 
-    CPU count for a job.
-
-* Changes in SLURM 1.2.1
-========================
- -- MPICHGM support bug fixes from Ernest Artiaga, BSC.
- -- Support longer hostlist strings, from Ernest Artiaga, BSC.
-
-* Changes in SLURM 1.2.0
-========================
- -- Srun to use env vars for SLURM_PROLOG, SLURM_EPILOG, SLURM_TASK_PROLOG, 
-    and SLURM_TASK_EPILOG. patch.1.2.0-pre11.070201.envproepilog from 
-    Dan Palermo, HP.
- -- Documentation update. patch.1.2.0-pre11.070201.mchtml from Dan Palermo, HP.
- -- Set SLURM_DIST_CYCLIC = 1 (needed for HP MPI, slurm.hp.env.patch).
- 
-* Changes in SLURM 1.2.0-pre15
-==============================
- -- Fix for another spot where the backup controller calls switch/federation
-    code before switch/federation is initialized.
-
-* Changes in SLURM 1.2.0-pre14
-==============================
- -- In sched/wiki2, clear required nodes list when a job is requeued.
-    Note that the required node list is set to every node used when 
-    a job is started via sched/wiki2.
- -- BLUEGENE - Added display of deallocating blocks to smap and other tools. 
- -- Make slurmctld's working directory be same as SlurmctldLogFile (if any),
-    otherwise StateSaveDir (which is likely a shared directory, possibly 
-    making core file identification more difficult).
- -- Fix bug in switch/federation that results in the backup controller
-    aborting if it receives an epilog-complete message.
-
-* Changes in SLURM 1.2.0-pre13
-==============================
- -- Fix for --get-user-env.
-
-* Changes in SLURM 1.2.0-pre12
-==============================
- -- BLUEGENE - Added correct node info for sinfo and sview for viewing
-    allocated nodes in a partition.
- -- BLUEGENE - Added state save on slurmctld shutdown of blocks in an error 
-    state on real systems and total block config on emulation systems.
- -- Major update to Slurm's PMI internal logic for better scalability.
-    Communications now supported directly between application tasks via 
-    Slurm's PMI library. Srun sends single message to one task on each node
-    and that task forwards key-pairs to other tasks on that node. The old 
-    code sent key-pairs directly to each task. 
-    NOTE: PMI applications must re-link with this new library.
- -- For multi-core support: Fix task distribution bug and add automated 
-    tests, patch.1.2.0-pre11.070111.plane from Dan Palermo (HP).
-
-* Changes in SLURM 1.2.0-pre11
-==============================
- -- Add multi-core options to slurm_step_launch API.
- -- Add man pages for slurm_step_launch() and related functions.
- -- Jobacct plugin only looks at the proctrack list instead of the entire
-    list of processes running on the node. Cutting down a lot of unnecessary
-    file opens in linux and cutting down the time to query the procs by
-    more than half.
- -- Multi-core bug fix, mask re-use with multiple job steps,
-    patch.1.2.0-pre10.061214.affinity_stepid from Dan Palermo (HP).
- -- Modify jobacct/linux plugin to completely eliminate open /proc files.
- -- Added slurm_sched_plugin_reconfig() function to re-read config files.
- -- BLUEGENE - --reboot option to srun, salloc, and sbatch actually works.
- -- Modified step context and step launch APIs.
-
-* Changes in SLURM 1.2.0-pre10
-==============================
- -- Fix for sinfo node state counts by state (%A and %F output options).
- -- Add ability to change a node's features via "scontrol update". NOTE: 
-    Update slurm.conf also to preserve changes over slurmctld restart or 
-    reconfig.
-    NOTE: Job and node state information can not be preserved from earlier 
-          versions.
- -- Added new slurm.conf parameter TaskPluginParam.
- -- Fix for job requeue and credential revoke logic from Hongjia Cao (NUDT).
- -- Fix for incorrectly generated masks for task/affinity plugin,
-    patch.1.2.0-pre9.061207.bitfmthex from Dan Palermo (HP).
- -- Make mask_cpu options of srun and slaunch commands not require prefix
-    of "0x". patch.1.2.0-pre9.061208.srun_maskparse from Dan Palermo (HP).
- -- Add -c support to the -B automatic mask generation for multi-core 
-    support, patch.1.2.0-pre9.061208.mcore_cpuspertask from Dan Palermo (HP).
- -- Fix bug in MASK_CPU calculation, 
-    patch.1.2.0-pre9.061211.avail_cpuspertask from Dan Palermo (HP).
- -- BLUEGENE - Added --reboot option to srun, salloc, and sbatch commands.
- -- Add "scontrol listpids [JOBID[.STEPID]]" support.
- -- Multi-core support patches, fixed SEGV and clean up output for large 
-    task counts, patch.1.2.0-pre9.061212.cpubind_verbose from Dan Palermo (HP).
- -- Make sure jobacct plugin files are closed before exec of user tasks to 
-    prevent problems with job checkpoint/restart (based on work by 
-    Hongjia Cao, NUDT).
-
-* Changes in SLURM 1.2.0-pre9
-=============================
- -- Fix for select/cons_res state preservation over slurmctld restart,
-    patch.1.2.0-pre7.061130.cr_state from Dan Palermo.
- -- Validate product of socket*core*thread count on node registration rather 
-    than individual values. Correct values will need to be specified in slurm.conf 
-    with FastSchedule=1 for correct multi-core scheduling behavior.
-
-* Changes in SLURM 1.2.0-pre8
-=============================
- -- Modify job state "reason" field to report why a job failed (previously 
-    reported only the reason a job was waiting to run). Requires cold-start of 
-    slurmctld (-c option).
- -- For sched/wiki2 job state request, return REJMESSAGE= with reason for 
-    a job's failure.
- -- New FastSchedule configuration parameter option "2" means to base 
-    scheduling decisions upon the node's configuration as specified in 
-    slurm.conf and ignore the node's actual hardware configuration. This 
-    can be useful for testing. 
- -- Add sinfo output format option "%C" for CPUs (active/idle/other/total).
-    Based upon work by Anne-Marie Wunderlin (BULL).
- -- Assorted multi-core bug fixes (patch1.2.0-pre7.061128.mcorefixes).
- -- Report SelectTypeParameters from "scontrol show config".
- -- Build sched/wiki plugin for Maui Scheduler (based upon new sched/wiki2 
-    code for Moab Scheduler).
- -- BLUEGENE - changed way of keeping track of smaller partitions using 
-    ionode range instead of quarter nodecard notation. 
-    (i.e. bgl000[0-3] instead of bgl000.0.0)
- -- Patch from Hongjia Cao (EINPROGRESS error message change)
- -- Fix for correct requid for jobacct plugin
- -- Added subsec timing display for sacct
-
-* Changes in SLURM 1.2.0-pre7
-=============================
- -- BLUEGENE - added configurable images for bluegene block creation.
- -- Plug a bunch of memory leaks.
- -- Support processors, core, and physical IDs that are not in numeric 
-    order (in slurmd to gathering node state information, based on patch
-    by Don Albert, Bull).
- -- Fixed bug with aix not looking in the correct dir for the proctrack
-    include files
- -- Removed global_srun.* from common merged it into srun proper
- -- Added bluegene section to troubleshooting guide (web page). 
- -- NOTE: Requires cold-start when moving from 1.2.0-pre6, save state 
-    info for jobs changed.
- -- BLUEGENE - Changed logic for wiring bgl blocks to be more maintainable.
-    (Haven't tested on large system yet, works on 2 base partition system)
- -- Do not read the select/cons_res state save file if slurmctld is 
-    cold-started (with the "-c" option).
-
-* Changes in SLURM 1.2.0-pre6
-=============================
- -- Maintain actually job step run time with suspend/resume use.
- -- Allow slurm.conf options to appear multiple times.  SLURM will use the
-    last instance of any particular option.
- -- Add version number to node state save file. Will not recover node 
-    state information on restart from older version.
- -- Add logic to save/restore multi-core state information.
- -- Updated multi-core logic to use types uint16_t and uint32_t instead 
-    of just type int.
- -- Race condition for forwarding logic fix from Hongjia Cao
- -- Add support for Portable Linux Processor Affinity (PLPA, see
-    http://www.open-mpi.org/software/plpa).
- -- When a job epilog completes on all non-DOWN nodes, immediately purge
-    its job steps that lack switch windows. Needed for LSF operation. 
-    Based upon slurm.hp.node_fail.patch.
- -- Modify srun to ignore entries on --nodelist for job step creation 
-    if their count exceeds the task count. Based on slurm.hp.srun.patch.
-
-* Changes in SLURM 1.2.0-pre5
-=============================
- -- Patch from HP patch.1.2.0.pre4.061017.crcore_hints, supports cores as 
-    consumable resource.
-
-* Changes in SLURM 1.2.0-pre4
-=============================
- -- Added node_inx to job_step_info_t to get the node indices for mapping out
-    steps in a job by nodes.
- -- sview grid added
- -- BLUEGENE node_inx added to blocks for reference.
- -- Automatic CPU_MASK generation for task launch, new srun option -B.
- -- Automatic logical to physical processor identification and mapping.
- -- Added new srun options to --cpu_bind: sockets, cores, and threads
- -- Updated select/cons_res to operate as socket granularity.
- -- New srun task distribution options to -m: plane
- -- Multi-core support in sinfo, squeue, and scontrol.
- -- Memory can be treated as a consumable resource.
- -- New srun options --ntasks-per-[node|socket|core].
-
-* Changes in SLURM 1.2.0-pre3
-=============================
- -- Remove configuration parameter ShedulerAuth (defunct).
- -- Add NextJobId to "scontrol show config" output.
- -- Add new slurm.conf parameter MailProg.
- -- New forwarding logic.  New recieve_msg functions depending on what you
-    are expecting to get back.  No srun_node_id anymore passed around in
-    a slurm_msg_t
- -- Remove sched/wiki plugin (use sched/wiki2 for now)
- -- Disable pthread_create() for PMI_send when TotalView is running for 
-    better performance.
- -- Fixed certain tests in test suite to not run with bluegene or front-end 
-    systems
- -- Removed addresses from slurm_step_layout_t
- -- Added new job field, "comment". Set by srun, salloc and sbatch. See 
-    with "scontrol show job". Used in sched/wiki2.
- -- Report a job's exit status in "scontrol show job".
- -- In sched/wiki2: add support for JOBREQUEUE command.
- 
-* Changes in SLURM 1.2.0-pre2
-=============================
- -- Added function slurm_init_slurm_msg to be used to init any slurm_msg_t;
-    you no longer need to do any other type of initialization to the type.
-
-* Changes in SLURM 1.2.0-pre2
-=============================
- -- Fixed task dist to work with hostfile and warn about asking for more tasks 
-    than you have nodes for in arbitrary mode.
- -- Added "account" field to job and step accounting information and sacct output.
- -- Moved task layout to slurmctld instead of srun.  Job step create returns
-    step_layout structure with hostnames and addresses that correspond 
-    to those nodes. 
- -- Changed api slurm_lookup_allocation params, 
-    resource_allocation_response_msg_t changed to job_alloc_info_response_msg_t
-    this structure is being renamed so contents are the same.
- -- alter resource_allocation_response_msg_t see slurm.h.in
- -- remove old_job_alloc_msg_t and function slurm_confirm_alloc	
- -- Slurm configuration files now support an "Include" directive to
-    include other files inline.
- -- BLUEGENE New --enable-bluegene-emulation configure parameter to allow 
-    running system in bluegene emulation mode.  Only
-    really useful for developers.
- -- Added new tool sview, a GUI for displaying slurm info.
- -- fixed bug in step layout to lay out tasks correctly
-
-* Changes in SLURM 1.2.0-pre1
-=============================
- -- Fix bug that could run a job's prolog more than once
- -- Permit batch jobs to be requeued, scontrol requeue <jobid>
- -- Send overcommit flag from srun in RPCs and have slurmd set SLURM_OVERCOMMIT
-    flag at batch job launch time.
- -- Added new configuration parameter MessageTimeout (replaces #define in 
-    the code)
- -- Added support for OSX build.
-
-* Changes in SLURM 1.1.37
-=========================
- - In sched/wiki2: Add NAME to job record.
- - Changed -w (--nodelist) option to only read in number of nodes specified
-   by -N option unless nprocs was set and in Arbitrary layout mode.
- - Added some loops around pthread creates in case they fail and also fixed an
-   issue in srun to fail when the job has failed instead of waiting around
-   for threads that will never end.
- - Added fork handlers in the slurmstepd
- - In sched/wiki2: fix logic for restarting backup slurmctld.
- - In sched/wiki2: if job has no time limit specified, return the partition's 
-   time limit (which is the default for the job) rather than 365 days.
-
-* Changes in SLURM 1.1.36
-=========================
- - Permit node state specification of DRAIN in slurm.conf.
- - In jobcomp/script - fix bug that prevented UID and JOBID environment 
-   variables from being set.
-
-* Changes in SLURM 1.1.35
-=========================
- - In sched/wiki2: Add support for CMD=SIGNALJOB to accept option
-   of VALUE=SIGXXX in addition to VALUE=# and VALUE=XXX options.
- - In sched/wiki2: Add support for CMD=MODIFYJOB to accept option of
-   DEPEND=afterany:<jobid>, specify jobid=0 to clear. 
- - Correct logic for job allocation with task count (srun -n ...) AND
-   FastSchedule=0 AND low CPUs count in Slurm's node configuration.
- - Add new and undocumented scancel option, --ctld, to route signal 
-   requests through slurmctld rather than directly to slurmd daemons.
-   Useful for testing purposes.
- - Fixed issue with hostfile support not working in a job step.
- - Set supplemental groups for SlurmUser in slurmctld daemon, from
-   Anne Marie Wunderlin, Bull.
- - In jobcomp/script: Add ACCOUNT and PROCS (count) to environment 
-   variables set. Fix bug that prevented UID and JOBID from being 
-   overwritten.
-
-* Changes in SLURM 1.1.34
-=========================
- - Insure that slurm_signal_job_step() is defined in srun for mvapich
-   and mpichgm error conditions.
- - Modify /etc/init.d/slurm restart command to wait for daemon to terminate
-   before starting a new one
- - Permit job steps to be started on draining nodes that have already 
-   been allocated to that job.
- - Prevent backup slurmctld from purging pending batch job scripts when a 
-   SIGHUP is received.
- - BLUEGENE - check to make sure set_block_user works when the block
-   is in a ready state.
- - Fix to slurmstepd to not use local variables in a pthread create.
- - In sched/wiki2 - add wiki.conf parameter HostFormat specifying 
-   format of hostlists exchanged between Slurm and Moab (experimental).
- - mpi/mvapich: Support Adam Moody's fast MPI initialization protocol 
-   (MVAPICH protocol version 8).
-
-* Changes in SLURM 1.1.33
-=========================
- - sched/wiki2 - Do not wait for job completion before permitting 
-   additional jobs to be scheduled.
- - Add srun SLURM_EXCLUSIVE environment variable support, from 
-   Gilles Civario (Bull).
- - sched/wiki2 - Report job's node sharing options.
- - sched/wiki2 - If SchedulerPort is in use, retry opening it indefinitely.
- - sched/wiki2 - Add support for changing the size of a pending job.
- - BLUEGENE - Fix to correctly look at downed/drained nodes with picking 
-   a block to run a job and not confuse it with another running job.
-
-* Changes in SLURM 1.1.32
-=========================
- - If a job's stdout/err file names are unusable (bad path), use the 
-   default names.
- - sched/wiki2 - Fix logic to be compatible with select/cons_res plugin
-   for allocating individual processors within nodes.
- - Fix job end time calculation when changed from an initial value of 
-   INFINITE.
- 
-* Changes in SLURM 1.1.31
-=========================
- - Correctly identify a user's login shell when running "srun -b --uid"
-   as root.  Use the --uid field for the /etc/passwd lookup instead of
-   getuid().
-
-* Changes in SLURM 1.1.30
-=========================
- - Fix to make sure users don't include and exclude the same node in 
-   their srun line.
- - mpi/mvapich: Forcibly terminate job 60s after first MPI_Abort() 
-   to avoid waiting indefinitely for hung processes.
- - proctrack/sgi_job: Fix segv when destroying an active job container 
-   with processes still running.
- - Abort a job's stdout/err to srun if not processed within 5 minutes
-   (prevents node hanging in completing state if the srun is stopped).
-
-* Changes in SLURM 1.1.29
-=========================
- - Fix bug which could leave orphan process put into background from 
-   batch script.
-
-* Changes in SLURM 1.1.28
-=========================
- - BLUEGENE - Fixed issue with nodes that return to service outside of an
-   admin state is now updated in the bluegene plugin.
- - Fix for --get-user-env parsing of non-printing characters in users' logins.
- - Restore "squeue -n localhost" support.
- - Report lack of PATH env var as verbose message, not error in srun.
- 
-* Changes in SLURM 1.1.27
-=========================
- - Fix possible race condition for two simultaneous "scontrol show config" 
-   calls resulting in slurm_xfree() Error: from read_config.c:642 
- - BLUEGENE - Put back logic to make a block fail a boot 3 times before 
-   cancelling a users job.
- - Fix problem using srun --exclude option for a job step.
- - Fix problem generating slurmd error "Unrecognized request: 0" with 
-   some compilers.
-
-* Changes in SLURM 1.1.26
-=========================
- - In sched/wiki2, fixes for support of job features.
- - In sched/wiki2, add "FLAGS=INTERACTIVE;" to GETJOBS response for 
-   non-batch (not srun --batch) jobs.
-
-* Changes in SLURM 1.1.25
-=========================
- - switch/elan: Fix for "Failed to initialise stats structure" from
-   libelan when ELAN_STATKEY > MAX_INT.
- - Tune PMI support logic for better scalability and performance.
- - Fix for running a task on each node of an allocation if not specified.
- - In sched/wiki2, set TASKLIST for running jobs.
- - In sched/wiki2, set STARTDATE for pending jobs with deferred start.
- - Added srun --get-user-env option (for Moab scheduler).
-
-* Changes in SLURM 1.1.24
-=========================
- - In sched/wiki2, add support for direct "srun --dependency=" use.
- - mpi/mvapich: Add support for MVAPICH protocol version 6.
- - In sched/wiki2, change "JOBMODIFY" command to "MODIFYJOB".
- - In sched/wiki2, change "JOBREQUEUE" command to "REQUEUEJOB".
- - For sched/wiki2, permit normal user to specify arbitrary job id.
- - In sched/wiki2, set buffer pointer to NULL after free() to avoid 
-   possible memory corruption.
- - In sched/wiki2, report a job's exit code on completion.
- - For AIX, fix mail for job event notification.
- - Add documentation for propagation options in man srun and slurm.conf.
-
-* Changes in SLURM 1.1.23
-=========================
- - Fix bug in non-blocking connect() code affecting AIX.
-
-* Changes in SLURM 1.1.22
-=========================
- - Add squeue option to print a job step's task count (-o %A).
- - Initialize forward_struct to avoid trying to free a bad pointer,
-   patch from Anton Blanchard (SAMBA).
- - In sched/wiki2, fix fatal race condition on slurmctld startup.
- - Fix for displaying launching verbose messages for each node under the
-   tree instead of just the head one.
- - Fix job suspend bug, job accounting plugin would SEGV when given a 
-   bad job ID.
-
-* Changes in SLURM 1.1.21
-=========================
- - BLUEGENE - Wait on a fini to make sure all threads are finished before
-   cleaning up.
- - BLUEGENE - replacements to not destroy lists but just empty it to avoid
-   losing the pointer to the list in the block allocator.
- - BLUEGENE - added --enable-bluegene-emulation configure option to 1.1
- - In sched/wiki2, enclose a job's COMMENT value in double quotes.
- - In sched/wiki2, support newly defined SIGNALJOB command.
- - In sched/wiki2, maintain open event socket, don't open and close 
-   for each event.
- - In sched/wiki2, fix for scalability problem starting large jobs.
- - Fix logic to execute a batch job step (under an existing resource
-   allocation) as needed by LSF.
- - Patches from Hongjia Cao (pmi finalize issues and type declaration)
- - Delete pending job if its associated partition is deleted.
- - fix for handling batch steps completing correctly and setting the 
-   return code.
- - Altered ncurses check to make sure programs can link before saying we 
-   have a working curses lib and header.
- - Fixed an init issue with forward_struct_init not being set correctly in
-   a few locations in the slurmd.
- - Fix for user to use the NodeHostname (when specified in the slurm.conf file)
-   to start jobs on.
-
-* Changes in SLURM 1.1.20
-=========================
- - Added new SPANK plugin hook slurm_spank_local_user_init() called
-   from srun after node allocation.
- - Fixed bug with hostfile support not working on a direct srun
-
-* Changes in SLURM 1.1.19
-=========================
- - BLUEGENE - make sure the order of blocks read in from the bluegene.conf
-   are created in that order (static mode).
- - Fix logic in connect(), slurmctld fail-over was broken in v1.1.18.
- - Fix logic to calculate the correct timeout for fan out.
-
-* Changes in SLURM 1.1.18
-=========================
- - In sched/wiki2, add support for EHost and EHostBackup configuration 
-   parameters in wiki.conf file
- - In sched/wiki2, fix memory management bug for JOBWILLRUN command.
- - In sched/wiki2, consider job Busy while in Completing state for 
-   KillWait+10 seconds (used to be 30 seconds).
- - BLUEGENE - Fixes to allow full block creation on the system and not to add
-   passthrough nodes to the allocation when creating a block. 
- - BLUEGENE - Fix deadlock issue with starting and failing jobs at the same
-   time
- - Make connect() non-blocking and poll() with timeout to avoid huge 
-   waits under some conditions.
- - Set "ENVIRONMENT=BATCH" environment variable for "srun --batch" jobs only.
- - Add logic to save/restore select/cons_res state information.
- - BLUEGENE - make all sprintf's into snprintf's
- - Fix for "srun -A" segfault on a node failure.
-
-* Changes in SLURM 1.1.17
-=========================
- - BLUEGENE - fix to make dynamic partitioning not go create block where
-    there are nodes that are down or draining.
- - Fix srun's default node count with an existing allocation when neither
-   SLURM_NNODES nor -N are set.
- - Stop srun from setting SLURM_DISTRIBUTION under job steps when a
-   specific distribution was not explicitly requested by the user.
-
-* Changes in SLURM 1.1.16
-=========================
- - BLUEGENE - fix to make prolog run 5 minutes longer to make sure we have
-   enough time to free the overlapping blocks when starting a new job on a 
-   block.
- - BLUEGENE - edit to the libsched_if.so to read env and look at 
-   MPIRUN_PARTITION to see if we are in slurm or running mpirun natively.
- - Plugins are now dlopened RTLD_LAZY instead of RTLD_NOW.
-
-* Changes in SLURM 1.1.15
-=========================
- - BLUEGENE - fix to be able to create static partitions
- - Fixed fanout timeout logic.
- - Fix for slurmctld timeout on outgoing message (Hongjia Cao, NUDT.edu.cn).
-
-* Changes in SLURM 1.1.14
-=========================
- - In sched/wiki2: report job/node id and state only if no changes since 
-   time specified in request.
- - In sched/wiki2: include a job's exit code in job state information.
- - In sched/wiki2: add event notification logic on job submit and completion.
- - In sched/wiki2: add support for JOBWILLRUN command type.
- - In sched/wiki2: for job info, include required HOSTLIST if applicable.
- - In sched/wiki2: for job info, replace PARTITIONMASK with RCLASS (report
-   partition name associated with a job, but no task count)
- - In sched/wiki2: for job and node info, report all data if TS==0, 
-   volatile data if TS<=update_time, state only if TS>update_time
- - In sched/wiki2: add support for CMD=JOBSIGNAL ARG=jobid SIGNAL=name or #
- - In sched/wiki2: add support for CMD=JOBMODIFY ARG=jobid [BANK=name]
-   [TIMELIMIT=minutes] [PARTITION=name]
- - In sched/wiki2: add support for CMD=INITIALIZE ARG=[USEHOSTEXP=T|F]
-   [EPORT=#]; RESPONSE=EPORT=# USEHOSTEXP=T
- - In sched/wiki2: fix memory leak.
- - Fix sinfo node state filtering when asking for idle nodes that are also 
-   draining. 
- - Add Fortran extension to slurm_get_rem_time() API.
- - Fix bug when changing the time limit of a running job that has previously 
-   been suspended (formerly failed to account for suspend time in setting 
-   termination time).
- - fix for step allocation to be able to specify only a few nodes in a 
-   step and ask for more than specified.
- - patch from Hongjia Cao for forwarding logic
- - BLUEGENE - able to allocate specific nodes without locking up.
- - BLUEGENE - better tracking of blocks that are created dynamically, 
-   less hitting the db2.
-
-* Changes in SLURM 1.1.13
-=========================
- - Fix hang in sched/wiki2 if Moab stops responding when
-   response is outgoing. 
- - BLUEGENE - fix to make sure the block is good to go when picking it
- - BLUEGENE - add libsched_if.so so mpirun doesn't try to create a block
-   by itself.
- - Enable specification of srun --jobid=# option with --batch (for user root).
- - Verify that job actually starts when requested by sched/wiki2.
- - Add new wiki.conf parameters: EPort and JobAggregationTime for event 
-   notification logic (see wiki.conf man page for details)
-
-* Changes in SLURM 1.1.12
-=========================
- - Sched/wiki2 to report a job's account as COMMENT response to GETJOBS
-    request.
- - Add srun option "--comment" (maps to job account until slurm v1.2, 
-   needed for Moab scheduler functionality).
- - fixed some timeout issues in the controller hopefully stopping all the 
-   issues with excessive timeouts.
- - unit conversion (i.e. 1024 => 1k) only happens on bgl systems for node 
-   count.
- - Sched/wiki2 to report a job's COMPETETIME and SUSPENDTIME in GETJOBS 
-   response.
- - Added support for Mellanox's version of mvapich-0.9.7.
-
-* Changes in SLURM 1.1.11
-=========================
- - Update file headers adding permission to link with OpenSSL.
- - Enable sched/wiki2 message authentication.
- - Fix libpmi compilation issue.
- - Remove "gcc-c++ python" from slurm.spec BuildRequires.  It breaks
-   the AIX build, so we'll have to find another way to deal with that.
-
-* Changes in SLURM 1.1.10
-=========================
- -- task distribution fix for steps that are smaller than job allocation.
- -- BLUEGENE - fix to only send a success when block was created when trying
-    to allocate the block.
- -- fix so if slurm_send_recv_node_msg fails on the send the auth_cred returned
-    by the resp is NULL.
- -- Fix switch/federation plugin so backup controller can assume control 
-    repeatedly without leaking or corrupting memory.
- -- Add new error code (for Maui/Moab scheduler): ESLURM_JOB_HELD
- -- Tweak slurmctld's node ping logic to better handle failed nodes with 
-    hierarchical communications fail-over logic.
- -- Add support for sched/wiki specific configuration file "wiki.conf".
- -- Added sched/wiki2 plugin (new experimental wiki plugin).
- 
-* Changes in SLURM 1.1.9
-========================
- -- BLUEGENE - fix to handle a NO_VAL sent in as num procs in the job 
-    description.
- -- Fix bug in slurmstepd code for parsing --multi-prog command script.
-    Parser was failing for commands with no arguments.
- -- Fix bug to check unsigned ints correctly in bitstring.c
- -- Alter node count conversion to kilo to only convert numbers divisible by 
-    1024 or 512
-
-* Changes in SLURM 1.1.8
-========================
- -- Added bug fixes (fault-tolerance and memory leaks) from Hongjia Cao 
-    <hjcao@nudt.edu.cn>
- -- Fixed some potential BLUEGENE issues with the bridge log file not having
-    a mutex around the fclose and fopen.
- -- BLUEGENE - srun -n procs now registers correctly
- -- Fixed problem with reattach double allocating step_layout->tids
- -- BLUEGENE - fix race condition where job is finished before it starts.
-
-* Changes in SLURM 1.1.7
-========================
- -- BLUEGENE - fixed issue with doing an allocation for nodes since asking 
-    for 32,128, or 512 all mean 1 to the controller. 
- -- Add "Include" directive to slurm.conf files.  If "Include" is found
-    at the beginning of a line followed by whitespace and then
-    the full path to a file, that file is included inline with the current
-    slurm.conf file.
-
-* Changes in SLURM 1.1.6
-========================
- -- Improved task layout for relative positions
- -- Fixed heterogeneous cpu overcommit issue
- -- Fix bug where srun would hang if it ran on one node and that 
-    node's slurmd died
- -- Fix bug where srun task layout would be bad when min-max node range is 
-    specified (e.g. "srun -N1-4 ...")
- -- Made slurmctld_conf.node_prefix only be set on Bluegene systems.
- -- Fixed a race condition in the controller to make it so a plugin thread
-    wouldn't be able to access the slurmctld_conf structure before it was 
-    filled.
-
-* Changes in SLURM 1.1.5
-========================
- -- Ignore partition's MaxNodes for SlurmUser and root.
- -- Fix possible memory corruption with use of PMI_KVS_Create call.
- -- Fix race condition when multiple PMI_KVS_Barrier calls.
- -- Fix logic in which slurmctld outgoing RPC requests could get delayed.
- -- Fix logic for laying out steps without a hostlist.
-
-* Changes in SLURM 1.1.4
-========================
- -- Improve error handling in hierarchical communications logic.
-
-* Changes in SLURM 1.1.3
-========================
- -- Fix big-endian bug in the bitstring code which plagued AIX.
- -- Fix bug in handling srun's --multi-prog option, could go off end of buffer.
- -- Added support for job step completion (and switch window release) on 
-    subset of allocated nodes.
- -- BLUEGENE - removed configure option --with-bg-link bridge is linked with 
-    dlopen now no longer needing fake database so files on frontend node.
- -- BLUEGENE - implemented use of rm_get_partition_info instead of 
-    ...partitions_info which has made a much better design improving stability.
- -- Streamline PMI communications and increase timeouts for highly parallel 
-    jobs. Improves scalability of PMI.
-
-* Changes in SLURM 1.1.2
-========================
- -- Fix bug in jobcomp/filetxt plugin to report proper NodeCnt when a job 
-    fails due to a node failure.
- -- Fix Bluegene configure to work with the new 64bit libs.
- -- Fix bug in controller that causes it to segfault when hit with a malformed
-    message.
- -- For "srun --attach=X" to other users job, report an error and exit (it 
-    previously just hung).
- -- BLUEGENE - fix for doing correct small block logic on user error. 
- -- BLUEGENE - Added support in slurmd to create a fake libdb2.so if it
-    doesn't exist so smap won't seg fault
- -- BLUEGENE - "scontrol show job" reports "MaxProcs=None" and "Start=None"
-    if values are not specified at job submit time
- -- Add retry logic for PMI communications, may be needed for highly parallel
-    jobs.
- -- Fix bug in slurmd where variable is used in logging message after freed
-    (slurmstepd rank info).
- -- Fix bug in scontrol show daemons if NodeName=localhost will work now to
-    display slurmd as place where it is running.  
- -- Patch from HP for init nodes before init_bitmaps
- -- ctrl-c killed sruns will result in job state as cancelled instead of 
-    completed.
- -- BLUEGENE - added configure option --with-bg-link to choose dynamic linking
-    or static linking with the bridgeapi.
-
-* Changes in SLURM 1.1.1
-========================
- -- Fix bug in packing job suspend/resume RPC.
- -- If a user breaks out of srun before the allocation takes place, mark the 
-    job as CANCELLED rather than COMPLETED and change its start and end time 
-    to that time.
- -- Fix bug in PMI support that prevented use of second PMI_Barrier call.
-    This fix is needed for MVAPICH2 use.
- -- Add "-V" options to slurmctld and slurmd to print version number and exit.
- -- Fix scalability bug in sbcast.
- -- Fix bug in cons_res allocation strategy.
- -- Fix bug in forwarding with mpi
- -- Fix bug sacct forwarding with stat option
- -- Added nodeid to sacct stat information
- -- cleaned up the way slurm_send_recv_node_msg works; no more clearing errno
- -- Fix error handling bug in the networking code that causes the slurmd to
-    xassert if the server is not running when the slurmd tries to register.
-
-* Changes in SLURM 1.1.0
-========================
- -- Fix bug that could temporarily make nodes DOWN when they are really 
-    responding. 
- -- Fix bug preventing backup slurmctld from responding to PING RPCs.
- -- Set "CFLAGS=-DISO8601" before configuration to get ISO8601 format 
-    times for all SLURM commands. NOTE: This may break Moab, Maui, and/or 
-    LSF schedulers.
- -- Fix for srun -n and -O options when paired with -b.
- -- Added logic for fanout to failover to forward list if main node is 
-    unreachable 
- -- sacct also now keeps track of submitted, started and ending times of jobs
- -- reinit config file mutex at beginning of slurmstepd to avoid fork issues
-  
-* Changes in SLURM 1.1.0-pre8
-=============================
- -- Fix bug in enforcement of partition's MaxNodes limit.
- -- BLUEGENE - added support for srun -w option also fixed the geometry option
-    for srun.
-
-* Changes in SLURM 1.1.0-pre7
-=============================
- -- Accounting works for aix systems, use jobacct/aix
- -- Support large (over 2GB) files on 32-bit linux systems
- -- changed all writes to safe_write in srun
- -- added $float to globals.example in the testsuite
- -- Set job's num_proc correctly for jobs that do not have exclusive use 
-    of its allocated nodes.
- -- Change in support for test suite: 'testsuite/expect/globals.example'
-    is now 'testsuite/expect/globals' and you can override variable 
-    settings with a new file 'testsuite/expect/globals.local'.
- -- Job suspend now sends SIGTSTP, sleep(1), sends SIGSTOP for better
-    MPI support.
- -- Plug a bunch of memory leaks in various places.
- -- Bluegene - before assigning a job to a block the plugin will check the bps
-    to make sure they aren't in error state.
- -- Change time format in job completion logging (JobCompType=jobcomp/filetxt)
-    from "MM/DD HH:MM:SS" to "YYYY-MM-DDTHH:MM:SS", conforming with the ISO8601 
-    standard format.
- 
-* Changes in SLURM 1.1.0-pre6
-=============================
- -- Added logic to "stat" a running job with sacct option -S use -j to specify
-    job.step 
- -- removed jobacct/bluegene (no real need for this) meaning, I don't think 
-    there is a way to gather the data yet.
- -- Added support for mapping "%h" in configured SlurmdLog to the hostname.
- -- Add PropagatePrioProcess to control propagation of a user's nice value 
-    to spawned tasks (based upon work by Daniel Christians, HP).
- 
-* Changes in SLURM 1.1.0-pre5
-=============================
- -- Added step completion RPC logic
- -- Vastly changed sacct and the jobacct plugin.  Read documentation for full
-    details.
- -- Added jobacct plugin for AIX and BlueGene, they currently don't work, 
-    but infrastructure is in place.
- -- Add support for srun option --ctrl-comm-ifhn to set PMI communications
-    address (Hongjia Cao, National University of Defense Technology).
- -- Moved safe_read/write to slurm_protocol_defs.h removing multiple copies.
- -- Remove vestigial functions slurm_allocate_resources_and_run() and 
-    slurm_free_resource_allocation_and_run_response_msg().
- -- Added support for different executable files and arguments by task based
-    upon a configuration file. See srun's --multi-prog option (based upon 
-    work by Hongjia Cao, National University of Defense Technology).
- -- moved the way forward logic waited for fanout logic mostly eliminating 
-    problems with scalability issues.
- -- changed -l option in sacct to display different params see sacct/sacct.h
-    for details.
-
-* Changes in SLURM 1.1.0-pre4
-=============================
- -- Bluegene specific - Added support to set bluegene block state to 
-    free/error via scontrol update BlockName 
- -- Add needed symbol to select/bluegene in order to load plugin.
-
-* Changes in SLURM 1.1.0-pre3
-=============================
- -- Added framework for XCPU job launch support.
- -- New general configuration file parser and slurm.conf handling code.
-    Allows long lines to be continued on the next line by ending with a "\".
-    Whitespace is allowed between the key and "=", and between the "=" and
-    value.
-    WARNING: A NodeName may now occur only once in a slurm.conf file.
-             If you want to temporarily make nodes DOWN in the slurm.conf,
-             use the new DownNodes keyword (see "man slurm.conf").
- -- Gracefully handle request to submit batch job from within an existing 
-    batch job.
- -- Warn user attempting to create a job allocation from within an existing job
-    allocation.
- -- Add web page description for proctrack plugin.
- -- Add new function slurm_get_rem_time() for job's time limit.
- -- JobAcct plugin renamed from "log" to "linux" in preparation for support of 
-    new system types. 
-    WARNING: "JobAcctType=jobacct/log" is no longer supported.
- -- Removed vestigial 'bg' names from bluegene plugin and smap
- -- InactiveLimit parameter is not enforced for RootOnly partitions.
- -- Update select/cons_res web page (Susanne Balle, HP, 
-    cons_res_doc_patch_3_29_06).
- -- Build a "slurmd.test" along with slurmd. slurmd.test has the path to 
-    slurmstepd set allowing it to run unmodified out of the builddir for 
-    testing (Mark Grondona).
- 
-* Changes in SLURM 1.1.0-pre2
-=============================
- -- Added "bcast" command to transmit copies of a file to compute nodes
-    with message fanout.
- -- Bluegene specific - Added support for overlapping partitions and 
-    dynamic partitioning. 
- -- Bluegene specific - Added support for nodecard sized blocks.
- -- Added logic to accept 1k for 1024 and so on for --nodes option of srun. 
-    This logic is through display tools such as smap, sinfo, scontrol, and 
-    squeue.
- -- Added bluegene.conf man page.
- -- Added support for memory affinity, see srun --mem_bind option.
- 
-* Changes in SLURM 1.1.0-pre1
-=============================
- -- New --enable-multiple-slurmd configure parameter to allow running
-    more than one copy of slurmd on a node at the same time.  Only
-    really useful for developers.
- -- New communication is now branched on all processes to slurmd's from 
-    slurmctld and srun launch command.  This is done with a tree type 
-    algorithm.  Spawn and batch mode work the same as before.  New slurm.conf
-    variable TreeWidth=50 is default.  This is the number of threads per 
-    stop on the tree.  
- -- Configuration parameter HeartBeatInterval is deprecated. Half of
-    SlurmdTimeout and SlurmctldTimeout are now used for communications to
-    slurmd and slurmctld daemons respectively.
- -- Add hash tables for select/cons_res plugin (Susanne Balle, HP, 
-    patch_02222006).
- -- Remove some use of cr_enabled flag in slurmctld job record, use 
-    new flag "test_only" in select_g_job_test() instead.
-
-* Changes in SLURM 1.0.17
-=========================
- -- Set correct user groups for task epilogs.
- -- Add more debugging for tracking slow slurmd job initiations
-    (slurm.hp.replaydebug.patch).
-
-* Changes in SLURM 1.0.16
-=========================
- -- For "srun --attach=X" to other users job, report an error and exit (it
-    previously just hung).
- -- Make sure that "scancel -s KILL" terminates the job just like "scancel" 
-    including deletion of all job steps (Chris Holmes, HP, slurm,patch).
- -- Recognize ISO-8859 input to srun as a script (for non-English scripts).
- -- switch/elan: Fix bug in propagation of ELAN_STATKEY environment variable.
- -- Fix bug in slurmstepd IO code that can result in it spinning if a
-    certain error occurs.
- -- Remove nodes from srun's required node list if their count exceeds 
-    the number of requested tasks.
- -- sched/backfill to schedule around jobs that are hung in a completing 
-    state.
- -- Avoid possibly re-running the epilog for a job on slurmctld restart or 
-    reconfig by saving and restoring a hostlist of nodes still completing 
-    the job.
-
-* Changes in SLURM 1.0.15
-=========================
- -- In srun, reset stdin to blocking mode (if it was originally blocking before
-    we set it to O_NONBLOCK) on exit to avoid trouble with things like running
-    srun under a bash shell in an emacs *shell* buffer.
- -- Fix srun race condition that occasionally causes segfaults at shutdown
- -- Fix obscure locking issues in log.c code.
- -- Explicitly close IO related sockets.  If an srun gets "stuck", possibly
-    because of unkillable tasks in its job step, it will not hold many TCP
-    sockets in the CLOSE_WAIT state.
- -- Increase the SLURM protocol timeout from 5 seconds to 10 seconds.
-    (In 1.2 there will be a slurm.conf parameter for this, rather than having
-    it hardcoded.)
-
-* Changes in SLURM 1.0.14
-=========================
- -- Fix for bad xfree() call in auth/munge which can raise an assert().
- -- Fix installed fork handlers for the conf mutex for slurmd and slurmstepd.
-
-* Changes in SLURM 1.0.13
-=========================
- -- Fix for AllowGroups option to work when the /etc/group file doesn't 
-    contain all users in group by adding the uids of the names in /etc/passwd
-    that have a gid of that which we are looking for.
- -- Fix bug in InactiveLimit support that can potentially purge active jobs.
-    NOTE: This is highly unlikely except on very large AIX clusters.
- -- Fix bug for reiniting the config_lock around the control_file in 
-    slurm_protocol_api.c logic has changed in 1.1 so no need to merge
- 
-* Changes in SLURM 1.0.12
-=========================
- -- Report node state of DRAIN rather than DOWN if DOWN with DRAIN flag set.
- -- Initialize job->mail_type to 0 (NONE) for job submission.
- -- Fix for stalled task stdout/stderr when buffered I/O is used, and
-    a single line exceeds 4096 bytes.
- -- Memory leak fixes for maui plugin (hjcao@nudt.edu.cn)
- -- Fix for spinning srun when the terminal to which srun is talking
-    goes away.
- -- Don't set avail_node_bitmap for DRAINED nodes on slurmctld reconfig
-    (can schedule a job on drained node after reconfig).
- 
-
-* Changes in SLURM 1.0.11
-=========================
- -- Fix for slurmstepd hang when launching a task. (Needed to install
-    list library's atfork handlers).
- -- Fix memory leak on AIX (and possibly other architectures) due to
-    missing pthread_attr_destroy() calls.
- -- Fix rare task standard I/O setup bug.  When the bug hit, stdin, stdout,
-    or stderr could be an invalid file descriptor.
- -- General slurmstepd file descriptor cleanup.
- -- Fix memory leak in job accounting logic (Andy Riebs, HP, memory_leak.patch).
-
-* Changes in SLURM 1.0.10
-=========================
- -- Fix for job accounting logic submitted from Andy Riebs to handle issues
-    with suspending jobs and such. patch file named requeue.patch
- -- Make select/cons_res interoperate with mpi/lam plugin for task counts.
- -- Fix race condition where srun could seg-fault due to use of logging functions
-    within pthread after calling log_fini.
- -- Code changes for clean build with gcc 2.96 (gcc_2_96.patch, Takao Hatazaki, HP).
- -- Add CacheGroups configuration support in configurator.html (configurator.patch,
-    Takao Hatazaki, HP).
- -- Fix bug preventing use of mpich-gm plugin (mpichgm.patch, Takao Hatazaki, HP).
-
-* Changes in SLURM 1.0.9
-========================
- -- Fix job accounting logic to open new log file on slurmctld reconfig.
-    (Andy Riebs, slurm.hp.logfile.patch).
- -- Fix bug which allows a user to run a batch script on a node not allocated
-    by the slurmctld.
- -- Fix poe MP_HOSTFILE handling bug on AIX.
-
-* Changes in SLURM 1.0.8
-========================
- -- Fix to communication between slurmd and slurmstepd to allow for partial
-    reads and writes on their communication pipes.
-
-* Changes in SLURM 1.0.7
-========================
- -- Change in how AuthType=auth/dummy is handled for security testing.
- -- Fix for bluegene systems to allow full system partitions to stay booted 
-    when other jobs are submitted to the queue.
-
-* Changes in SLURM 1.0.6
-========================
- -- Prevent slurmstepd from crashing when srun attaches to batch job.
-
-* Changes in SLURM 1.0.5
-========================
- -- Restructure logic for scheduling BlueGene small block jobs. Added
-    "test_only" flag to select_p_job_test() in select plugin.
- -- Correct squeue "NODELIST" output for BlueGene small block jobs.
- -- Fix possible deadlock situations on BlueGene plugin on errors.
-
-* Changes in SLURM 1.0.4
-========================
- -- Release job allocation if step creation fails (especially for BlueGene).
- -- Fix bug select/bluegene warm start with changed bglblock layout.
- -- Fix bug for queuing full-system BlueGene jobs.
-
-* Changes in SLURM 1.0.3
-========================
- -- Fix bug that could refuse to queue batch jobs for BlueGene system.
- -- Add BlueGene plugin mutex lock for reconfig.
- -- Ignore BlueGene bgljobs in ERROR state (don't try to kill).
- -- Fix job accounting for batch jobs (Andy Riebs, HP, 
-    slurm.hp.jobacct_divby0a.patch).
- -- Added proctrack/linuxproc.so to the main RPM.
- -- Added mutex around bridge api file to avoid locking up the api.
- -- BlueGene mod: Terminate slurm_prolog and slurm_epilog immediately if 
-    SLURM_JOBID environment variable is invalid.
- -- Federation driver: allow selection of a specific switch interface
-    (sni0, sni1, etc.) with -euidevice/MP_EUIDEVICE.
- -- Return an error for "scontrol reconfig" if there is already one in
-    progress
- 
-* Changes in SLURM 1.0.2
-========================
- -- Correctly report DRAINED node state as type OTHER for "sinfo --summarize".
- -- Fixes in sacct use of malloc (Andy Riebs, HP, sacct_malloc.patch).
- -- Smap mods: eliminate screen flicker, fix window resize, report more clear
-    message if window too small (Dan Palermo, HP, patch.1.0.0.1.060126.smap).
- -- Sacct mods for inconsistent records (race condition) and replace --debug
-    option with --verbose (Andy Riebs, HP, slurm.hp.sacct_exp_vvv.patch).
- -- scancel of a job step will now send a job-step-completed message
-    to the controller after verifying that the step has completed on all nodes.
- -- Fix task layout bug in srun.
- -- Added times to node "Reason" field when set down for insufficient 
-    resources or if not responding.
- -- Validate operation with Elan switch and heterogeneous nodes.
-
-* Changes in SLURM 1.0.1
-========================
- -- Assorted updates and clarifications in documentation.
- -- Detect which munge installation to use 32/64 bit.
-
-* Changes in SLURM 1.0.0
-========================
- -- Fix sinfo filtering bug, especially "sinfo -R" output.
- -- Fix node state change bug, resuming down or drained nodes.
- -- Fix "scontrol show config" to display JobCredentialPrivateKey instead
-    of JobCredPrivateKey and JobCredentialPublicCertificate instead of
-    JobCredPublicKey.  They now match the options in the slurm.conf.
- -- Fix bug in job accounting for very long node list records (Andy Riebs,
-    HP, sacct_buf.patch).
- -- BLUEGENE SPECIFIC - added load function to smap to load an already 
-    existing bluegene.conf file.
- -- Fix bug in sacct: If user requests specific job or job step ID,
-    only the last one with that ID will be reported. If multiple 
-    nodes fail, the job has its state recorded as "JOB_TERMINATED...nf"
-    (Andy Riebs, HP, slurm.hp.sacct_dup.patch).
- -- Fix some inconsistencies in sacct's help message (Andy Riebs, HP, 
-    slurm.hp.sacct_help.patch).
- -- Validate input to sacct command and allows embedded spaces in 
-    arguments (Andy Riebs, HP, slurm.hp.sacct_validate.patch).
-
-* Changes in SLURM 0.7.0-pre8
-=============================
- -- BGL specific -- bug fix for smap configure function down configuration
- -- Add support for job suspend/resume.
- -- Add slurmd cache for group IDs (Takao Hatazaki, HP).
- -- Fix bug in processing of "#SLURM" batch script option parsing.
-
-* Changes in SLURM 0.7.0-pre7
-=============================
- -- Fix issue with NODE_STATE_COMPLETING, could start job on node before
-    epilog completed.
- -- Added some infrastructure for job suspend/resume (scontrol, api, and 
-    slurmctld stub).
- -- Set job's num_procs to the actual processor count allocated to the job.
- -- Fix bug in HAVE_FRONT_END support for cluster emulation.
-
-* Changes in SLURM 0.7.0-pre6
-=============================
- -- Added support for task affinity for binding tasks to CPUs (Daniel
-    Palermo, HP).
- -- Integrate task affinity support with configuration, add validation 
-    test.
-
-* Changes in SLURM 0.7.0-pre5
-=============================
- -- Enhanced performance and debugging for slurmctld reconfiguration.
- -- Add "scontrol update Jobid=# Nice=#" support.
- -- Basic slurmctld and tool functionality validated to 16k nodes.
- -- squeue and smap now display correct info for jobs in bluegene environment.
- -- Fix setting of SLURM_NODELIST for batch jobs.
- -- Add SubmitTime to job information available for display.
- -- API function slurm_confirm_allocation() has been marked OBSOLETE
-    and will go away in some future version of SLURM.  Use
-    slurm_allocation_lookup() instead.
- -- New API calls slurm_signal_job and slurm_signal_job_step to send
-    signals directly to the slurmds without triggering the shutdown sequence.
- -- remove "uid" from old_job_alloc_msg_t, no longer needed.
- -- Several bug fixes in maui scheduler plugin from Dave Jackon 
-    (Cluster Resources).
-
-* Changes in SLURM 0.7.0-pre4
-=============================
- -- Remove BNR library functions and add those for PMI (KVS and basic
-    MPI-1 functions only for now)
- -- Added Hostfile support for POE and srun.  MP_HOSTFILE env var to set
-    location of hostfile.  Tasks will run from list order in the file.  
- -- Removes the slurmd's use of SysV shared memory.  Instead the slurmd
-    communicates with the slurmstepd processes through the slurmstepd's
-    new named unix domain socket.  The "stepd_api" is used to talk to the
-    slurmstepd (src/slurmd/common/stepd_api.[ch]).
- -- Bluegene specific - bluegene block allocator will find most any 
-    partition size now.  Added support to start at any point in smap 
-    to request a partition instead of always starting at 000.
- -- Bluegene specific - Support to smap to down or bring up nodes in 
-    configure mode.  Added commands include allup, alldown, 
-    up [range], down [range]
- -- Time format in sinfo/squeue/smap/sacct changed from D:HH:MM:SS to 
-    D-HH:MM:SS per POSIX standards document.
- -- Treat scontrol update request without any requested changes as an 
-    error condition.
- -- Bluegene plugin renamed with BG instead of BGL.  partition_allocator moved 
-    into bluegene plugin and renamed block_allocator.  Format for bluegene.conf
-    file changed also.  Read bluegene html page.  Code is backwards compatible
-    smap will generate in new form
- -- Add srun option --nice to give user some control over job priority.
-
-* Changes in SLURM 0.7.0-pre3
-=============================
- -- Restructure node states: DRAINING and DRAINED states are replaced 
-    with a DRAIN flag. COMPLETING state is changed to a COMPLETING flag. 
- -- Test suite moved into testsuite/expect from separate repository.
- -- Added new document describing slurm APIs (doc/html/api.html).
- -- Permit nodes to be in multiple partitions simultaneously.
-
-* Changes in SLURM 0.7.0-pre2
-=============================
- -- New stdio protocol.  Now srun has just a single TCP stream to each node
-    of a job-step.  srun and slurmd communicate over the TCP stream using a
-    simple messaging protocol.
- -- Added task plugin and use task prolog/epilog(s).
- -- New slurmd_step functionality added.  Fork exec instead of using shared
-    memory.  Not completely tested.
- -- BGL small partition logic in place in plugin and smap.  Scheduler needs  
-    to be rewritten to handle multiple partitions on a single node. No 
-    documentation written on process yet.
- -- If running select/bluegene plugin without access to BGL DB2, then 
-    full-system bglblock is of system size defined in bluegene.conf.
-
-* Changes in SLURM 0.7.0-pre1
-=============================
- -- Support deferred initiation of job (e.g. srun --begin=11:30 ...).
- -- Add support for srun --cpus-per-task through task allocation in 
-    slurmctld.
- -- fixed partition_allocator to work without curses
- -- made change to srun to start message thread before other threads 
-    to make sure localtime doesn't interfere.   
- -- Added new RPCs for slurmctld REQUEST_TERMINATE_JOB or TASKS, 
-    REQUEST_KILL_JOB/TASKS changed to REQUEST_SIGNAL_JOB/TASKS.
- -- Add support for e-mail notification on job state changes.
- -- Some infrastructure added for task launch controls (slurm.conf:
-    TaskProlog, TaskEpilog, TaskPlugin; srun --task-prolog, --task-epilog).
-
-* Changes in SLURM 0.6.11
-=========================
- -- Fix bug in sinfo partition sorting order.
- -- Fix bugs in srun use of #SLURM options in batch script.
- -- Use full Elan credential space rather than re-using credentials as soon 
-    as job step completes (helps with fault-tolerance).
-
-* Changes in SLURM 0.6.10
-=========================
- -- Fix for slurmd job termination logic (could hang in COMPLETING state).
- -- Sacct bug fixes: Report correct user name for job step, show "uid.gid"
-    as fifth field of job step record (Andy Riebs, slurm.hp.sacct_uid.patch).
- -- Add job_id to maui scheduler plugin start job status message.
- -- Fix for srun's handling of null characters in stdout or stderr.
- -- Update job accounting for larger systems (Andy Riebs, uptodate.patch).
- -- Fixes for proctrack/linuxproc and mpich-gm support (Takao Hatazaki, HP).
- -- Fix bug in switch/elan for large task count job having irregular task 
-    distribution across nodes.
-
-* Changes in SLURM 0.6.9
-========================
- -- Fix bug in mpi plugin to set the ID correctly
- -- Accounting bug causing segv fixed (Andy Riebs, 14oct.jobacct.patch)
- -- Fix for failed launch of a debugged job (e.g. bad executable name).
- -- Wiki plugin fix for tracking allocated nodes (Ernest Artiaga, BSC).
- -- Fix memory leaks in slurmctld and federation plugin.
- -- Fix segfault in federation plugin function fed_libstate_clear().
- -- Align job accounting data (Andy Riebs, slurm.hp.unal_jobacct.patch)
- -- Restore switch state in backup controller restarts
-
-* Changes in SLURM 0.6.8
-========================
- -- Invalid AllowGroup value in slurm.conf to not cause seg fault.
- -- Fix bug that would cause slurmctld to seg-fault with select/cons_res
-    and batch job containing more than one step.
-
-* Changes in SLURM 0.6.7
-========================
- -- Make proctrack/linuxproc thread safe, could cause slurmd seg fault.
- -- Propagate umask from srun to spawned tasks.
- -- Fix problem in switch/elan error handling that could hang a slurmd 
-    step manager process.
- -- Build on AIX with -bmaxdata:0x70000000 for memory limit more than 256MB.
- -- Restore srun's return code support.
-
-* Changes in SLURM 0.6.6
-========================
- -- Fix for bad socket close() in the spawn-io code.
-
-* Changes in SLURM 0.6.5
-========================
- -- Sacct to report on job steps that never actually started.
- -- Added proctrack/rms to elan rpm.
- -- Restructure slurmctld/agent.c logic to insure timely reaping of 
-    terminating pthreads.
- -- Srun not to hang if job fails before task launches not all completed.
- -- Fix for consumable resources properly scheduling nodes that have more 
-    nodes than configured (Susanne Balle, HP, cons_res_patch.10.14.2005)
-
-* Changes in SLURM 0.6.4
-========================
- -- Bluegene plugin drains an entire bglblock on repeated boot failures
-    only if it has not identified a specific node as being bad.
-
-* Changes in SLURM 0.6.3
-========================
- -- Fix slurmctld mem leaks (step name and hostlist struct).
- -- Bluegene plugin sets end time for job terminated due to removed 
-    bglblock.
-
-* Changes in SLURM 0.6.2
-========================
- -- Fix sinfo and squeue formatting to properly handle slurm nodes, 
-    jobs, and other names containing "%".
-
-* Changes in SLURM 0.6.1
-========================
- -- Fixed smap -Db to display slurm partitions correctly (take 2).
- -- Add srun fork() retry logic for very heavily loaded system.
- -- Fix possible srun hang on task launch failure.
- -- Add support for mvapich v0.9.4, 0.9.5 and gen2.
-
-* Changes in SLURM 0.6.0
-========================
- -- Add documentation for ProctrackType=proctrack/rms.
- -- Make proctrack/rms be the default for switch/elan.
- -- Do not precede SIGKILL or SIGTERM to job step with (non-requested) SIGCONT.
- -- Fixed smap -Db to display slurm partitions correctly.  
- -- Explicitly disallow ProctrackType=proctrack/linuxproc with 
-    SwitchType=switch/elan. They will not work properly together.
-
-* Changes in SLURM 0.6.0-pre8
-=============================
- -- Remove debugging xassert in switch/federation that were accidentally
-    committed
- -- Make slurmd step manager retry slurm_container_destroy() indefinitely
-    instead of giving up after 30 seconds.  If something prevents a job
-    step's processes from being killed, the job will be stuck in the
-    completing until the container destroy succeeds.
-
-* Changes in SLURM 0.6.0-pre7
-=============================
- -- Disable localtime_r() calls from forked processes (semaphore set 
-    in another pthread can deadlock calls to localtime_r made from 
-    the forked process, this will be properly fixed in the next 
-    major release of SLURM).
- -- Added SLURM_LOCALID environment variable for spawned tasks
-    (Dan Palermo, HP).
- -- Modify switch logic to restore state based exclusively upon
-    recovered job steps (not state save file).
- -- Gracefully refuse job if there are too many job steps in slurmd.
- -- Fix race condition in job completion that can leave nodes in 
-    COMPLETING state after job is COMPLETED.
- -- Added frees for BGL BridgeAPI strdups that were to this point unknown.
- -- smap scrolls correctly for BGL systems.
- -- slurm_pid2jobid() API call will now return the jobid for a step
-    manager slurmd process.
-
-* Changes in SLURM 0.6.0-pre6
-=============================
- -- Added logic to return scheduled nodes to Maui scheduler (David
-    Jackson, Cluster Resources)
- -- Fix bug in handling job request with maximum node count.
- -- Fix node selection scheduling bug with heterogeneous nodes and
-    srun --cpus-per-task option
- -- Generate error file to note prolog failures.
-
-* Changes in SLURM 0.6.0-pre5
-=============================
- -- Modify sfree (BGL command) so that --all option no longer requires
-    an argument.
- -- Modify smap so it shows all nodes and partitions by default (even 
-    nodes that the user can't access, otherwise there are holes in 
-    its maps).
- -- Added module to parse time string (src/common/parse_time.c) for 
-    future use.
- -- Fix BlueGene hostlist processing for non-rectangular prisms and
-    add string length checking.
- -- Modify orphan batch job time calculation for BGL to account for 
-    slowness when booting many bglblocks at the same time.
-
-* Changes in SLURM 0.6.0-pre4
-=============================
- -- Added etc/slurm.epilog.clean to kill processes initiated outside of 
-    slurm when a user's last job on a node terminates.
- -- Added config.xml and configurator.html files for use by OSCAR.
- -- Increased maximum job step count from 64 to 130 for BGL systems only.
-
-* Changes in SLURM 0.6.0-pre3
-=============================
- -- Add code so job request for shared nodes gets explicitly requested 
-    nodes, but lightly loaded nodes otherwise.
- -- Add job step name field.
- -- Add job step network specification field.
- -- Add proctrack/rms plugin
- -- Change the proctrack API to send a slurmd_job_t pointer to both
-    slurm_container_create() and slurm_container_add().  One of those
-    functions MUST set job->cont_id.
- -- Remove vestigial node_use (virtual or coprocessor) field from job
-    request RPC.
- -- Fix mpich-gm bugs, thanks to Takao Hatazaki (HP).
- -- Fix code for clean build with gcc 2.96, Takao Hatazaki (HP).
- -- Add node update state of "RESUME" to return DRAINED, DRAINING, or 
-    DOWN node to service (IDLE or ALLOCATED state).
- -- smap keeps trying to connect to slurmctld in iterative mode rather 
-    than just aborting on failure.
- -- Add squeue option --node to filter by node name.
- -- Modify squeue --user option to accept not only user names, but also
-    user IDs.
-
-* Changes in SLURM 0.6.0-pre2
-=============================
- -- Removed "make rpm" target.
-
-* Changes in SLURM 0.6.0-pre1
-=============================
- -- Added bgl/partition_allocator/smap changes from 0.5.7.
- -- Added configurable resource limit propagation  (Daniel Christians, HP).
- -- Added mpi plugin specify at start of srun.
- -- Changed SlurmUser ID from 16-bit to 32-bit.
- -- Added MpiDefault slurm.conf parameter.
- -- Remove KillTree configuration parameter (replace with
-    "ProctrackType=proctrack/linuxproc")
- -- Remove MpichGmDirectSupport configuration parameter (replace with
-    "MpiDefault=mpich-gm")
- -- Make default plugin be "none" for mpi.
- -- Added mpi/none plugin and made it the default.
- -- Replace extern program_invocation_short_name with program_invocation_name
-    due to short name being truncated to 16 bytes on some systems.
- -- Added support for Elan clusters with different CPU counts on nodes
-    (Chris Holmes, HP).
- -- Added Consumable Resources web page (Susanne Balle, HP).
- -- "Session manager" slurmd process has been eliminated.
- -- switch/federation fixes migrated from 0.5.*
- -- srun pthreads really set detached, fixes scaling problem
- -- srun spawns message handler process so it can now be stopped (via 
-    Ctrl-Z or TotalView) without inducing failures.
-
-* Changes in SLURM 0.5.7
-========================
- -- added infrastructure for (eventual) support of AIX checkpointing
-    of slurm batch and interactive poe jobs
- -- added wiring for BGL to do wiring for physical location first and then
-    logical.
- -- only one thread used to query database before polling thread is there.
-
-* Changes in SLURM 0.5.6
-========================
- -- fix for BGL hostnames and full system partition finding
-
-* Changes in SLURM 0.5.5
-========================
- -- Increase SLURM_MESSAGE_TIMEOUT_MSEC_STATIC to 15000
- -- Fix for premature timeout in _slurm_send_timeout
- -- Fix for federation overlapping calls to non-thread-safe _get_adapters
-
-* Changes in SLURM 0.5.4
-========================
- -- Added support for no reboot for VN to CO on BGL
- -- Fix for if a job starts after it finishes on BGL
-
-* Changes in SLURM 0.5.3
-========================
- -- federation patch so the slurm controller has sane window status at
-    start-up regardless of the window status reported in the slurmd
-    registration.
- -- federation driver exits with fatal() if the federation driver can not
-    find all of the adapters listed in the federation.conf
-
-* Changes in SLURM 0.5.2
-========================
- -- Extra federation driver sanity checks
-
-* Changes in SLURM 0.5.1
-========================
- -- Fix federation driver bad free(), other minor fed fixes
- -- Allow slurm to parse very long lines in the slurm.conf
-
-* Changes in SLURM 0.5.0
-========================
- -- Fix race condition in job accounting plugin, could hang slurmd
- -- Report SlurmUser id over 16 bits as an error (fix on v0.6)
-
-* Changes in SLURM 0.5.0-pre19
-==============================
- -- Fix memory management bug in federation driver
-
-* Changes in SLURM 0.5.0-pre18
-==============================
- -- elan switch plugin memory leak plugged
- -- added g_slurmctld_jobacct_fini() to release all memory (useful 
-    to confirm no memory leaks)
- -- Fix slurmd bug introduced in pre17
-
-* Changes in SLURM 0.5.0-pre17
-==============================
- -- slurmd calls the proctrack destroy function at job step completion
- -- federation driver tries harder to clean up switch windows
- -- BGL wiring changes
-
-* Changes in SLURM 0.5.0-pre16
-==============================
- -- Check slurm.conf values for under/overflows (some are 16 bit values).
- -- Federation driver clears windows at job step completion
- -- Modify code for clean build with gcc v4.0
- -- New SLURM_NETWORK environment variable used by slurm_ll_api
-
-* Changes in SLURM 0.5.0-pre15
-==============================
- -- Added "network" field to "scontrol show job" output. 
- -- Federation fix for unfreed windows when multiple adapters on
-    one node use the same LID
-
-* Changes in SLURM 0.5.0-pre14
-==============================
- -- RDMA works on fed plugin.
-
-* Changes in SLURM 0.5.0-pre13
-==============================
- -- Major mods to support checkpoint on AIX.
- -- Job accounting documentation expanded, added tuning options, minor bug fixes
- -- BGL wiring will now work on <= 4 node X-dim partitions and also 8 node 
-    X-dim partitions.
- -- ENV variables set for spawning jobs. 
- -- jobacct patch from HP to not erroneously lock a mutex in the 
-    jobacct_log plugin.
- -- switch/federation supports multiple adapters per task.  sn_all behaviour
-    is now correct, and it also supports sn_single.
-
-* Changes in SLURM 0.5.0-pre12
-==============================
- -- Minor build changes to support RPM creation on AIX
-
-* Changes in SLURM 0.5.0-pre11
-==============================
- -- Slurmd tests for initialized session manager (user's) slurmd pid before 
-    killing it to avoid killing system daemon (race condition).
- -- srun --output or --error file names of "none" mapped to /dev/null for 
-    batch jobs rather than a file actually named "none".
- -- BGL: don't try to read bglblock state until they are all created to 
-    avoid having BGL Bridge API seg fault.
-
-* Changes in SLURM 0.5.0-pre10
-==============================
- -- Fix bug that was resetting BGL job geometry on unrelated field update.
- -- squeue and sinfo print timestamp in iterate mode by default.
- -- added scrolling windows in smap
- -- introduced new variable to start polling thread in the bluegene plugin.
- -- Latest accounting patches from Riebs/HP, retry communications.
- -- Added srun option --kill-on-bad-exit from Holmes/HP.
- -- Support large (64-bit address) log files where possible.
- -- Fix problem of signals being delivered twice to tasks.  Note that as
-    part of the fix the slurmd session manager no longer calls setsid to
-    create a new session.
-
-* Changes in SLURM 0.5.0-pre9
-=============================
- -- If a job and node are in COMPLETING state and slurmd stops responding for
-    SlurmdTimeout, then set the node DOWN and the job COMPLETED.
- -- Add logic to switch/elan to track contexts allocated to active job steps 
-    rather than just using a cyclic counter and hoping to avoid collisions. 
- -- Plug memory leak in freeing job info retrieved using API.
- -- Bluegene Plugin handles long deallocating states from driver 202.
- -- Fix bug in bitfmt2int() which can go off allocated memory.
-
-* Changes in SLURM 0.5.0-pre8
-=============================
- -- BlueGene srun --geometry was not getting propagated properly.
- -- Fix race condition with multiple simultaneous epilogs.
- -- Modify slurmd to resend job completion RPC to slurmctld in the 
-    case where slurmctld is not responding.
- -- Updated sacct: handle cancelled jobs correctly, add user/group
-    output, add ntasks as synonym for nprocs, display error field 
-    by default, display ncpus instead of nprocs
- -- Parallelization of queuing jobs up to 32 at once.  Variable 
-    MAX_AGENT_COUNT used in bgl_job_run.c to specify.
- -- bgl_job_run.c fixed threading issue with uid_to_string use.
-
-* Changes in SLURM 0.5.0-pre7
-=============================
- -- Preserve next_job_id across restarts.
- -- Add support for really long job names (256 bytes).
- -- Add configuration parameter SchedulerRootFilter to control what 
-    entity manages prioritization of jobs in RootOnly partition 
-    (internal scheduler plugin or external entity).
- -- Added support for job accounting.
- -- Added support for consumable resource based node scheduling.
- -- Permit batch job to be launched to pre-existing allocation.
-
-* Changes in SLURM 0.5.0-pre6
-=============================
- -- Load bluegene.conf and federation.conf based upon SLURM_CONF env 
-    var (if set).
- -- Fix slurmd shutdown signal synchronization bug (not consistently 
-    terminating).
- -- Add doc/html/ibm.html document. Update bluegene.html.
- -- Add sfree to bluegene plugin. 
- -- Remove geometry[SYSTEM_DIMENSIONS] from opaque node_select data 
-    type if SYSTEM_DIMENSIONS==0 (not ANSI-C compliant).
- -- Modify smap to test for valid libdb2.so before issuing any BGL 
-    Bridge API calls.
- -- Modify spec file for optional inclusion of select_bluegene and 
-    sched_wiki plugin libraries.
- -- Initialize job->network in data structure, could cause job 
-    submit/update to fail depending upon what is left on stack.
-
-* Changes in SLURM 0.5.0-pre5
-=============================
- -- Expand buffer to hold node_select info in job termination log.
- -- Modify slurmctld node hashing function to reduce collisions.
- -- Treat bglblock vanishing as fatal error for job, prolog and epilog 
-    exit immediately.
- -- bug fix for following multiple X-dim partitions
-
-* Changes in SLURM 0.5.0-pre4
-=============================
- -- Fix bug in slurmd that could double KillWait time on job timeout.
- -- Fix bug in srun's error code reporting to slurmctld, could DOWN 
-    a node if job run as root has non-zero error code.
- -- Remove a node's partition info when removed from existing partition.
- -- Use proctrack plugin to call all processes in a job step before 
-    calling interconnect_postfini() to insure no processes escape from 
-    job and prevent switch windows from being released.
- -- Added mail.html web page telling how to get on slurm mailing lists.
- -- Added another directory to search for DB2 files on BGL system.
- -- Added overview man page slurm.1.
- -- Added new configure option "--with-db2-dir=PATH" for BGL.
-
-* Changes in SLURM 0.5.0-pre3
-=============================
- -- Merge of SLURM v0.4-branch into v0.5/HEAD.
-
-* Changes in SLURM 0.5.0-pre2
-=============================
- -- Fix bug in srun to clean-up upon failure of an allocated node
-    (srun -A would generate a segmentation fault, Chris Holmes, HP).
- -- If slurmd's node name is mapped to NULL (due to bad configuration)
-    terminate slurmd with a fatal error and don't crash slurmctld.
- -- Add SLURMD_DEBUG env var for use with AIX/POE in spawn_task RPC.
- -- Always pack job's "features" for access by prolog/epilog
-
-* Changes in SLURM 0.5.0-pre1
-=============================
- -- Add network option to srun and job creation API for specification 
-    of communication protocol over IBM Federation switch.
- -- Add new slurm.conf parameter ProctrackType (process tracking) and 
-    associated plugin in the slurmd module.
- -- Send node's switch state with job epilog completion RPC and 
-    node registration (only when slurmd starts, not on periodic 
-    registrations).
- -- Add federation switch plugin.
- -- Add new configuration keyword, SchedulerRootFilter, to control 
-    external scheduler control of RootOnly partition (Chris Holmes, HP).
- -- Modify logic to set process group ID for spawned processes (last 
-    patch from slurm v0.3.11).
- -- "srun -A" modified to return exit code of last command executed
-    (Chris Holmes, HP).
- -- Add support for different slurm.conf files controlled via SLURM_CONF
-    env var (Brian O'Sullivan, pathscale)
- -- Fix bug if srun given --uid without --gid option (Chris Holmes, HP).
-
-* Changes in SLURM 0.4.24
-=========================
- -- DRAIN nodes with switches on base partitions are in ERROR, MISSING, 
-    or DOWN states.
- 
-* Changes in SLURM 0.4.23
-========================= 
- -- Modified bluegene plugin to only sync bglblocks to jobs on initial 
-    startup, not on reconfig. Fixes race condition.
- -- Modified bluegene plugin to work with 141 driver. Enabling it to 
-    only have to reboot when switching from coproc -> virtual and back.
- -- added support for a full system partition to make sure every other 
-    partition is free and vice versa.
- -- smap resizing issue fixed.
- -- change prolog not to add time when a partition is in deallocating 
-    state.
- -- NOTE: This version of SLURM requires BGL driver 141/2005.
-
-* Changes in SLURM 0.4.22
-=========================
- -- Modified bluegene plugin to not do anything if the bluegene.conf file 
-    is altered.
- -- added checking for lists before trying to create iterator on the list.
-
-* Changes in SLURM 0.4.21
-=========================
- -- Fix in race condition with time in Status Thread of BGL
- -- Fix no leading zeros in smap output.
-
-* Changes in SLURM 0.4.20
-=========================
- -- Smap output is more user friendly with -c option
-
-* Changes in SLURM 0.4.19
-=========================
- -- Added new RPCs for getting bglblock state info remotely and cache data 
-    within the plugin (permits removal of DB2 access from BGL FEN and 
-    dramatically increases smap responsiveness, also changed prolog/epilog
-    operation)
- -- Move smap executable to main slurm RPM (from separate RPM).
- -- smap uses RPC instead of DB2 to get info about bgl partitions.
- -- Status function added to bluegene_agent thread.  Keeps current state
-    of BGL partitions updating every second.  will handle multiple attempts 
-    at booting if booting a partition fails. 
-
-* Changes in SLURM 0.4.18
-=========================
- -- Added error checking of rm_remove_partition calls.
- -- job_term() was terminating a job in real time rather than 
-    queueing the request. This would result in slurmctld hanging 
-    for many seconds when a job termination was required.
-
-* Changes in SLURM 0.4.17
-========================
- -- Bug fixes from testing .16.
-
-* Changes in SLURM 0.4.16
-========================
- -- Added error checking to a bunch of Bridge API calls and more 
-    gracefully handle failure modes.
- -- Made smap more robust for more jobs.
-
-* Changes in SLURM 0.4.15
-========================
- -- Added error checking to a bunch of Bridge API calls and more 
-    gracefully handle failure modes.
-
-* Changes in SLURM 0.4.14
-========================
- -- job state is kept on warm start of slurm
-
-* Changes in SLURM 0.4.13
-========================
- -- epilog fix for bgl plugin
-
-* Changes in SLURM 0.4.12
-========================
- -- bug shot for new api calls.
- -- added BridgeAPILogFile as an option for bluegene.conf file
- 
-* Changes in SLURM 0.4.11
-========================
- -- changed as many rm_get_partition() to rm_get_partitions_info as we could 
-    for time saving.
- 
-* Changes in SLURM 0.4.10
-========================
- -- redesign for BGL external wiring.
- -- smap display bug fix for smaller systems.
-
-* Changes in SLURM 0.4.9
-========================
- -- setpnum works now, have to include this in bluegene.conf
-
-* Changes in SLURM 0.4.8
-========================
- -- Changed the prolog and the epilog to use the env var MPIRUN_PARTITION
-    instead of BGL_PARTITION_ID
-
-* Changes in SLURM 0.4.7
-========================
- -- Remove some BGL specific headers that IBM now distributes, NOTE
-    BGL driver 080 or greater required.
- -- Change autogen.sh to deal with problems running autoconf on one
-    system and configure on another with different software versions.
-
-* Changes in SLURM 0.4.6
-========================
- -- smap now works on non-BGL systems.
- -- took tv.h out of partition_allocator so it would work with driver 080 
-    from IBM.
- -- updated slurmd signal handling to prevent possible user killing of daemon.
-
-* Changes in SLURM 0.4.5
-========================
- -- Change sinfo default time limit field to have 10 bytes (up from 9).
- -- Fix bug in bluegene partition selection (sorting bug).
- -- Don't display any completed jobs in smap.
- -- Add NodeCnt to filetxt job completion plugin.
- -- Minor restructuring of how MMCS is polled for DOWN nodes and switches.
- -- Fix squeue output format for "%s" (node select data).
- -- Queue job requesting more resources than exist in a partition if 
-    that partition's state is DOWN (rather than just abort it).
- -- Add prolog/epilog for bluegene to code base (moved from mpirun in CVS)
- -- Add prolog, epilog and bluegene.conf.example to bluegene RPM
- -- In smap, Admin can get the Rack/midplane id from an XYZ input and vice versa.
- -- Add smap line-oriented output capability.
-
-* Changes in SLURM 0.4.4
-========================
- -- Fix race condition in slurmd setting pgid of spawned tasks for 
-    process tracking.
- -- Fix scontrol reconfig does nothing to running jobs nor crash the system
- -- Fix sort of bgl_list only happens once in select_bluegene.c instead of every
-    time a new job is inserted.
-
-* Changes in SLURM 0.4.3
-========================
- -- Turn off some RPM build checks (bug in RPM, see slurm.spec.in)
- -- starting slurmctld will destroy all RMP*** partitions every time.  
- 
-* Changes in SLURM 0.4.2
-========================
- -- Fix memory leak in BlueGene plugin.
- -- Srun's --test-only option takes precedence over --batch option.
- -- Add sleep(1) after setting bglblock owner due to apparent race condition 
-    in the BGL API.
- -- Slurm was timing out and killing batch jobs if the node registered when 
-    a job prolog was still running.
-
-* Changes in SLURM 0.4.1
-========================
- -- BlueGene plugin kills jobs running in defunct bglblock on restart.
- -- Smap displays pending jobs now, in addition to running and completing jobs.
- -- Remove node "use=" from bluegene.conf file, create both coprocessor and 
-    virtual bglblocks for now (later create just one and use API to change 
-    it when such an API is available).
- -- Add "ChangeNumpsets" parameter to bluegene.conf to use script to 
-    update the numpsets parameter for newly created bglblocks (to be 
-    removed once the API functions).
- -- Add all patches from slurm v0.3.11 (through 2/7/2005)
-   - Added srun option --disable-status,-X to disable srun status feature
-     and instead forward SIGINT immediately to job upon receipt of Ctrl-C.
-   - Fix for bogus slurmd error message "Unable to put task N into pgrp..."
-   - Fix case where slurmd may erroneously detect shared memory entry
-     as "stale" and delete entry for unkillable or slow-to-exit job.
-   - (qsnet) Fix for running slurmd on node without an elan3 adapter.
-   - Fix for reported problem: slurm/538: user tasks block writing to stdio
-
-* Changes in SLURM 0.4.0
-========================
- -- Minor tweak to init.d/slurm for BlueGene systems.
- -- Added smap RPM package (to install binary built on BlueGene 
-    service node on front-end nodes).
- -- Added wait between bglblock destroy and creation of new blocks
-    so that MMCS can complete the operation.
- -- Fix bug in synchronizing bglblock owners on slurmctld restart.
-
-* Changes in SLURM 0.4.0-pre11
-==============================
- -- Add new srun option "--test-only" for testing slurm_job_will_run API.
- -- Fix bugs in slurm_job_will_run() processing.
- -- Change slurm_job_will_run() to not return a message, just an error code.
- -- Sync partition owners with running jobs on slurmctld restart.
-
-* Changes in SLURM 0.4.0-pre10
-==============================
- -- Specify number of I/O nodes associated with BlueGene partition.
- -- Do not launch a job's tasks if the job is cancelled while its
-    prolog is running (which can be slow on BlueGene).
- -- Add new error code, ESLURM_BATCH_ONLY for attempts to launch 
-    job steps on front-end system (e.g. Blue Gene).
- -- Updates to html documents.
- -- Assorted fixes in smap, partition creation mode.
- -- Add proper support for "srun -n" option on BGL recognizing 
-    processor count in both virtual and coprocessor modes.
- -- Make default node_use on Blue Gene be coprocessor, as documented.
- -- Add SIGKILL to BlueGene jobs as part of cleanup.
-
-* Changes in SLURM 0.4.0-pre9
-=============================
- -- Change in /etc/init.d/slurm for RedHat and SuSE compatibility
-
-* Changes in SLURM 0.4.0-pre8
-=============================
- -- Add logic to create and destroy Bluegene Blocks automatically as needed.
- -- Update smap man page to include Bluegene configuration commands.
-
-* Changes in SLURM 0.4.0-pre7
-=============================
- -- Port all patches from slurm v0.3 up through v0.3.10:
-   - Remove calls in auth/munge plugin deprecated by munge-0.4.
-   - Allow single task id to be selected with --input, --output, and --error.
-   - Create shared memory segment for Elan statistics when using the
-     switch/elan plugin.
-   - More fixes necessary for TotalView.
-
-* Changes in SLURM 0.4.0-pre6
-=============================
- -- Add new job reason value "JobHeld" for jobs with priority==0
- -- Move startup script from "/etc/rc.d/init.d/slurm" to "/etc/init.d/slurm"
- -- Modify prolog/epilog logic in slurmd to accommodate very long run times, 
-    on BGL these scripts wait for events that can take a very long time 
-    (tens of seconds).
- -- This code base was used for BGLb acceptance test with pre-defined 
-    BGL blocks.
-
-* Changes in SLURM 0.4.0-pre5
-=============================
- -- select/bluegene plugin confirms db.properties file in $sysconfdir
-    and copies it to StateSaveLocation (slurmctld's working directory)
- -- select/bluegene plugin confirms environment variable required for 
-    DB2 interaction are set (execute "db2profile" script before slurmctld)
- -- slurmd to always give jobs KillWait time between SIGTERM and SIGKILL
-    at termination
- -- set job's start_time and end_time = now rather than leaving zero if 
-    they fail to execute
- -- modify srun to forward SIGTERM
- -- enable select/bluegene testing for DOWN nodes and switches
- -- select/bluegene plugin to delete orphan jobs, free BGLblocks and 
-    set owner as jobs terminate/start
-
-* Changes in SLURM 0.4.0-pre4
-=============================
- -- Fixes for reported problems:
-   - slurm/512: Let job steps run on DRAINING nodes
-   - slurm/513: Gracefully deal with UIDs missing from passwd file
- -- Add support for MPICH-GM (from takao.hatazaki@hp.com)
- -- Add support for NodeHostname in node configuration
- -- Make "scontrol show daemons" function properly on front-end system 
-    (e.g. Blue Gene)
- -- Fix srun bug when --input, --output and --error are all "none"
- -- Don't schedule jobs for user root if partition is DOWN
- -- Modify select/bluegene to honor job's required node list
- -- Modify user name logic to explicitly set UID=0 to "root", 
-    Suse Linux was not handling multiple users with UID=0 well.
-
-* Changes in SLURM 0.4.0-pre3
-=============================
- -- Send SIGTERM to batch script before SIGKILL for mpirun cleanup on 
-    Blue Gene/L
- -- Create new allocation as needed for debugger in case old allocation 
-    has been purged
- -- Add Blue Gene User Guide to html documents
- -- Fix srun bug that could cause seg fault with --no-shell option if not 
-    running under a debugger
- -- Propagate job's task count (if set) for batch job via SLURM_NPROCS.
- -- Add new job parameters for Blue Gene: geometry, rotate, mode (virtual
-    or co-processor), communications type (mesh or torus), and partition ID.
- -- Exercise a bunch of new switch plugin functions for Federation 
-    switch support.
- -- Fix bug in scheduling jobs when a processor count is specified
-    and FastSchedule=0 and the cluster is heterogeneous.
-
-* Changes in SLURM 0.4.0-pre2
-=============================
- -- NOTE: "startclean" when transitioning from version 0.4.0-pre1, JOBS ARE LOST
- -- Fixes for reported problems:
-   - slurm/477: Signal of batch job script (scancel -b) fixed
-   - slurm/481: Permit clearing of AllowGroups field for a partition
-   - slurm/482: Adjust Elan base context number to match RMS range
-   - slurm/489: Job completion logger was writing NULL to text file
- -- Preserve job's requested processor count info after job is initiated 
-    (for viewing by squeue and scontrol)
- -- srun cancels created job if job step creation fails
- -- Added lots of Blue Gene/L support logic: slurmd executes on a single 
-    node to front-end the 512-CPU base-partitions (Blue Gene/L's nodes)
- -- Add node selection plugin infrastructure, relocate existing logic 
-    to select/linear, add configuration parameter SelectType
- -- Modify node hashing algorithm for better performance on Blue Gene/L
- -- Add ability to specify node ranges for 3-D rectangular prism
-
-* Changes in SLURM 0.4.0-pre1
-=============================
- -- NOTE: "startclean" when transitioning from version 0.3, JOBS ARE LOST
- -- Added support for job account information (arbitrary string)
- -- Added support for job dependencies (start job X after job Y completes)
- -- Added support for configuration parameter CheckpointType
- -- Added new job state "CANCELLED"
- -- Don't strip binaries, breaks parallel debuggers
- -- Fix bug in Munge authentication retry logic
- -- Change srun handling of interrupts to work properly with TotalView
- -- Added "reason" field to job info showing why a job is waiting to run
-
-* Changes in SLURM 0.3.7
-========================
- -- Fixes required for TotalView operability under RHEL3.0
-    (Reported by Dong Ahn <dahn@llnl.gov>)
-   - Do not create detached threads when running under parallel debugger.
-   - Handle EINTR from sigwait().
-
-* Changes in SLURM 0.3.6
-========================
- -- Fixes for reported problems:
-   - slurm/459: Properly support partition's "Shared=force" configuration.
- -- Resync node state to DRAINED or DRAINING on restart in case job 
-    and node state recovered are out of sync.
- -- Added jobcomp/script plugin (execute script on job completion, 
-    from Nathan Huff, North Dakota State University).
- -- Added new error code ESLURM_FRAGMENTED for immediate resource 
-    allocation requests which are refused due to completing job (formerly 
-    returned ESLURM_NOT_TOP_PRIORITY)
- -- Modified job completion logging plugin calling sequence.
- -- Added much of the infrastructure required for system checkpoint
-    (APIs, RPCs, and NULL plugin)
-
-* Changes in SLURM 0.3.5
-========================
- -- Fix "SLURM_RLIMIT_* not found in environment" error message when
-    distributing large rlimit to jobs.
- -- Add support for slurm_spawn() and associated APIs (needed for IBM 
-    SP systems).
- -- Fix bug in update of node state to DRAINING/DRAINED when update 
-    request occurs prior to initial node registration.
- -- Fix bug in purging of batch jobs (active batch jobs were being 
-    improperly purged starting in version 0.3.0).
- -- When updating a node state to DRAINING/DRAINED a Reason must be 
-    provided. The user name and a timestamp will automatically be 
-    appended to that Reason.
-
-* Changes in SLURM 0.3.4
-========================
- -- Fixes for reported problems:
-   - slurm/404: Explicitly set pthread stack size to 1MB for srun
- -- Allow srun to respond to ctrl-c and kill queued job while waiting
-    for allocation from controller.
-
-* Changes in SLURM 0.3.3 
-========================
- -- Fix slurmctld handling of heterogeneous processor count on elan 
-    switch (was setting DRAINED nodes in state DRAINING).
- -- Fix sinfo -R, --list-reasons to list all relevant node states.
- -- Fix slurmctld to honor srun's node configuration specifications 
-    with FastSchedule==0 configuration.
- -- Added srun option --debugger-test to confirm that slurm's debugger 
-    infrastructure is operational.
- -- Removed debugging hacks for srun.wrapper.c. Temporarily use 
-    RPM's debugedit utility if available for similar effect.
-
-* Changes in SLURM 0.3.2
-========================
- -- The srun command wakes immediately upon resource allocation (via new RPC)
-    rather than polling.
- -- SLURM daemons log current version number at startup.
- -- If slurmd can't respond to ping (e.g. paging is keeping it from 
-    responding in a timely fashion) then send a registration RPC
-    to slurmctld.
- -- Fix slurmd -M option to call mlockall() after daemonizing.
- -- Add "slurm_" prefix to slurm's hostlist_ function man pages.
- -- More AIX support added.
- -- Change get info calls from using show_all to more general show_flags
-    with #define for SHOW_ALL flag. 
-
-* Changes in SLURM 0.3.1
-========================
- -- Set SLURM_TASKS_PER_NODE env var for batch jobs (and LAM/MPI).
- -- Fix for slurmd spinning when stdin buffers full (gnats:434)
- -- Change some slurmctld malloc sizes to reduce demand for realloc calls, 
-    improves performance and eliminates realloc failure on RH EL3 under 
-    extremely heavy workload apparently due to memory fragmentation.
- -- Fix scheduling logic for heterogeneous processor count.
- -- Modify security_2_2 test to function with release 0.3
- -- Fix broken rpm build when libslurm not already installed.
- -- New slurmd option -M to mlock() slurmd process into memory.
- -- New srun option --no-shell causes srun to exit instead of spawning 
-    shell when using --allocate, -A.
- -- Modify  srun --uid=user and --gid=group options to maintain invoking 
-    user's credentials until after nodes have been allocated to requested 
-    user/group (allows root to run jobs and allocate nodes for other users 
-    in a RootOnly partition).
- -- Fix node processing if state change requested via scontrol prior to 
-    initial node registration.
- 
-* Changes in SLURM 0.3.0
-========================
- -- Support for AIX added (a few bugs do remain).
- -- Fix memory leak in slurmctld, slurm_cred_create().
- -- On ELF systems, export BNR_* functions from SLURM API. 
- -- Add support for "hidden" partitions (applies to their 
-    nodes, jobs, and job steps as well). APIs and commands 
-    modified to optionally display hidden partitions.
- -- Modify partition's group_allow test to be based upon the user 
-    of the allocation rather than the user making the allocation 
-    request (user root for LCRM batch jobs).
- -- Restructure plugin directory structure.
- -- New --core=type option in srun for lightweight corefile support.
-    (requires liblwcf).
- -- Let user root and SlurmUser exceed any partition limits.
- -- Srun treats "--time=0" as a request for an infinite time limit.
- 
-* Changes in SLURM 0.3.0.0-pre10
-================================
- -- Fix bugs in support of slurmctld "-f" option (specify different 
-    slurm.conf pathname).
- -- Remove slurmd "-f" option.
- -- Several documentation changes for slurm administrators.
- -- On ELF systems, export only slurm_* functions from slurm API and 
-    ensure plugins use only slurm_ prefixed functions (created aliases
-    where necessary).
- -- New srun option -Q, --quiet to suppress informational messages.
- -- Fix bug in slurmctld's building of nodelist for job (failed if 
-    more than one numeric field in node name).
- -- Change "scontrol completing" and "sinfo" to use job's node bitmap
-    to identify nodes associated with that particular job that are 
-    still processing job completion. This will work properly for 
-    shared nodes.
- -- Set SLURM_DISTRIBUTION environment variable for user tasks.
- -- Fix for file descriptor leak in slurmd.
- -- Propagate stacksize limit to jobs along with other resource limits
-    that were previously ignored.
-
-* Changes in SLURM 0.3.0.0-pre9
-===============================
- -- Restructure how slurmctld state saves are performed for better 
-    scalability.
- -- New sinfo option "--list-reason" or "-R". Displays down or drained 
-    nodes along with their REASON field.
-
-* Changes in SLURM 0.3.0.0-pre8
-===============================
- -- Queue outgoing message traffic rather than immediately spawning 
-    pthreads (under heavy load this resulted in hundreds of pthreads 
-    using more memory than was available).
- -- Restructure slurmctld message agent for higher throughput.
- -- Add new sinfo options --responding and --dead (i.e. non-responding)
-    for filtering node states.
- -- Fix bug in sinfo to properly process specified state filter including
-    "*" suffix for non-responding nodes.
- -- Create StateSaveLocation directory if changes via slurmctld reconfig
-
-* Changes in SLURM 0.3.0.0-pre7
-===============================
- -- Fixes for reported problems:
-   - slurm/381: Hold jobs requesting more resources than partition limit.
-   - slurm/387: Jobs lost and nodes DOWN on slurmctld restart.
- -- Add support for getting node's real memory size on AIX.
- -- Sinfo sort partitions in slurm.conf order, new sort option ("#P").
- -- Document how to gracefully change plugin values.
- -- Slurmctld does not attempt to recover jobs when the switch plugin
-    value changes (decision reached when any job's switch state recovery
-    fails).
- -- Node does not transition from COMPLETING to DOWN state due to
-    not responding. Wait for tasks to complete or admin to set DOWN.
- -- Always chmod SlurmdSpoolDir to 755 (a umask of 007 was resulting 
-    in batch jobs failing).
- -- Return errors when trying to change configuration parameters
-    AuthType, SchedulerType, and SwitchType via "scontrol reconfig"
-    or SIGHUP. Document how to safely change these parameters.
- -- Plugin-specific error number definitions and descriptive strings 
-    moved from common into plugin modules.
- -- Documentation for writing scheduler, switch, and job completion 
-    logging plugins added.
- -- Added job and node state descriptions to the squeue and sinfo man pages.
- -- Backup slurmctld to generate core file on SIGABRT.
- -- Backup slurmctld to re-read slurm.conf on SIGHUP.
- -- Added -q,--quit-on-interrupt option to srun.
- -- Elan switch plugin now starts neterr resolver thread on all Elan3
-    systems (QsNet and QsNetII).
- -- Added some missing read locks for references for slurmctld's 
-    configuration data structure
- -- Modify processing of queued slurmctld message traffic to get better
-    throughput (resulted in job inactivity limit being reached improperly 
-    when hundreds of jobs running simultaneously)
-
-* Changes in SLURM 0.3.0.0-pre6
-===============================
- -- Fixes for reported problems:
-   - slurm/372: job state descriptions added to squeue man page
- -- Switch plugin added. Add "SwitchType=switch/elan" to slurm.conf for 
-    systems with Quadrics Elan3 or Elan4 switches.
- -- Don't treat DOWN nodes with too few CPUs as a fatal error on Elan
- -- Major re-write of html documents
- -- Updates to node pinging for large numbers of unresponsive nodes 
- -- Explicitly set default action for SIGTERM (action on Thunder was 
-    to ignore SIGTERM)
- -- Sinfo "--exact" option only applies to fields actually displayed
- -- Partition processor count not correctly computed for heterogeneous 
-    clusters with FastSchedule=0 configuration
- -- Only return DOWN nodes to service if the reason for them being in 
-    that state is non-responsiveness and "ReturnToService=1" configuration
- -- Partition processor count now correctly computed for heterogeneous 
-    clusters with FastSchedule configured off
- -- New macros and function to export SLURM version number
-
-* Changes in SLURM 0.3.0.0-pre5
-===============================
- -- Fixes for reported problems:
-   - slurm/346: Support multiple colon-separated PluginDir values
- -- Fix node state transition: DOWN to DRAINED (instead of DRAINING)
- -- Fix a couple of minor slurmctld memory leaks
-
-* Changes in SLURM 0.3.0.0-pre4
-===============================
- -- Fix bug where early launch failures (such as invalid UID/GID) resulted
-    in jobs not terminating properly.
- -- Initial support for BNR committed (not yet functional).
- -- QsNet: SLURM now uses /etc/elanhosts exclusively for converting 
-    hostnames to ElanIDs.
-
-* Changes in SLURM 0.3.0.0-pre3
-===============================
- -- Fixes for reported problems:
-   - slurm/328: Slurmd was restarting with a new shared memory segment and 
-     losing track of jobs
-   - slurm/329: Job processing may be left running when one task dies
-   - slurm/333: Slurmd fails to launch a job and deletes a step, due to 
-     a race condition in shared memory management
-   - slurm/334: Slurmd was getting a segv due to a race condition in shared 
-     memory management
-   - slurm/342: Properly handle nodes being removed from configuration 
-     even when there are partitions, nodes, or job steps still associated 
-     with them
- -- Srun properly terminates jobs/steps upon node failure (used to hang 
-    waiting for I/O completion)
- -- Job time limits enforced even if InactiveLimit configured as zero
- -- Support the sending of an arbitrary signal to a batch script (but not 
-    the processes in its job steps) 
- -- Re-read slurm configuration file whenever changed, needed by users 
-    of SLURM APIs
- -- Scancel was generating an assert failure
- -- Slurmctld sends a launch response message upon scheduling of a queued
-    job (for immediate srun response)
- -- Maui scheduler plugin added
- -- Backfill scheduler plugin added
- -- Batch scripts can now have arguments that are propagated
- -- MPICH support added (via patch, not in SLURM CVS)
- -- New SLURM environment variables added SLURM_CPUS_ON_NODE and 
-    SLURM_LAUNCH_NODE_IPADDR, these provide support needed for LAM/MPI
-    (version 7.0.4+)
- -- The TMPDIR directory is created as needed before job launch
- -- Do not create duplicate SLURM environment variables with the same name
- -- Ensure proper enforcement of node sharing by job
- -- Treat lack of SpoolDir or StateSaveDir as a fatal error
- -- Quickstart.html guide expanded
- -- Increase maximum jobs steps per node from 16 to 64
- -- Delete correct shared memory segment on slurmd -c (clean start)
-
-* Changes in SLURM 0.3.0.0-pre2
-===============================
- -- Fixes for reported problems:
-   - slurm/326: Properly clean-up jobs terminating on non-responding nodes
- -- Move all configuration data structure into common/read_config, scontrol
-    now always shows default values if not specified in slurm.conf file
- -- Remove the unused "Prioritize" configuration parameter
-
-* Changes in SLURM 0.3.0.0-pre1
-===============================
- -- Fixes for reported problems:
-   - slurm/252: "jobs left orphaned when using TotalView:" SLURM controller 
-     now pings srun and kills defunct jobs.
-   - slurm/253: "srun fails to accept new IO connection." 
-   - slurm/317: "Lack of default partition in config file causes errors." 
-   - slurm/319: Socket errors on multiple simultaneous job launches fixed
-   - slurm/321: slurmd shared memory synchronization error.
- -- Removed slurm_tv_clean daemon which has been obsoleted by slurm/252 fix.
- -- New scontrol command ``delete'' and RPC added to delete a partition
- -- Squeue can now print and sort by group id/name
- -- Scancel has new option -q,--quiet to not report an error if a job 
-    is already complete 
- -- Add the excluded node list to job information reported.
- -- RPC version mis-match now properly handled
- -- New job completion plugin interface added for logging completed jobs.
- -- Fixed lost digit in scontrol job priority specification.
- -- Remove restriction in the number of consecutive node sets (no longer
-    needed after DPCS upgrade)
- -- Incomplete state save write now properly handled.
- -- Modified slurmd setrlimit error for greater clarity.
- -- Slurmctld performs load-leveling across shared nodes.
- -- New user function added slurm_get_end_time for user jobs.
- -- Always compile srun with stabs debug section when TotalView support 
-    is requested.
-
-* Changes in SLURM 0.2.21
-=========================
- -- Fixes for reported problems:
-   - slurm/253: Try using different port if connect() fails (was rarely 
-     failing when an existing defunct connection was in TIME_WAIT state)
-   - slurm/300: Possibly killing wrong job on slurmd restart
-   - slurm/312: Freeing non-allocated memory and killing slurmd
- -- Assorted changes to support RedHat Enterprise Linux 3.0 and IA64
- -- Initial Elan4 and libelanctrl support (--with-elan).
- -- Slurmctld was sometimes inappropriately setting a job's priority 
-    to 1 when a node was down (even if up nodes could be used for the 
-    job when a running job completes)
- -- Convert all user commands from use of popt library to getopt_long()
- -- If TotalView support is requested, srun exports "totalview_jobid"
-    variable for `%J' expansion in TV bulk launch string.
- -- Fix several locking bugs in slurmd IO layer.
- -- Throttle back repetitious error messages in slurmd to avoid filling
-    log files.
- 
-
-* Changes in SLURM 0.2.20
-=========================
- -- Fixes for reported problems:
-   - slurm/298: Elan initialization error (Invalid vp 2147483674).
-   - slurm/299: srun fails to exit with multiple ^C's.
- -- Temporarily prevent DPCS from allocating jobs with more than eight 
-    sets of consecutive nodes. This was likely causing user applications 
-    to fail with libelan errors. This will be removed after DPCS is updated.
- -- Fix bug in popt use, was failing in some versions of Linux.
- -- Resend KILL_JOB messages as needed to clear COMPLETING jobs.
- -- Install dummy SIGCHLD handler in slurmd to fix problem on NPTL systems
-    where slurmd was not notified of terminated tasks.
-
-* Changes in SLURM 0.2.19
-=========================
- -- Memory corruption bug fixed, it was causing slurmctld to seg-fault
-
-* Changes in SLURM 0.2.18
-=========================
- -- Fixes for reported problems:
-   - slurm/287: slurm protocol timeouts when using TotalView.
-   - slurm/291: srun fails using ``-n 1'' under multi-node allocation.
-   - slurm/294: srun IO buffer reports ENOSPC.
- -- Memory corruption bug fixed, it was causing slurmctld to seg-fault
- -- Non-responding nodes now go from DRAINING to DRAINED state when 
-    jobs complete
- -- Do not schedule pending jobs while any job is actively COMPLETING 
-    unless the submitted job specifically identifies its nodes (like DPCS)
- -- Reset priority of jobs with priority==1 when a non-responding node 
-    starts to respond again
- -- Ignore jobs with priority==1 when establishing new baseline upon 
-    slurmctld restart
- -- Make slurmctld/message retry be timer based rather than queue based 
-    for better scalability
- -- Slurmctld logging is more concise, using hostlists more
- -- srun --no-allocate used special job_id range to avoid conflicts 
-    or premature job termination (purging by slurmctld)
- -- New --jobid=id option in srun to initiate job step under an existing 
-    allocation.
- -- Support in srun for TotalView bulk launch.
-
-* Changes in SLURM 0.2.17
-=========================
- -- Fixes for reported problems:
-   - slurm/279: Hold jobs that can't execute due to DOWN or DRAINED 
-     nodes and release when nodes are returned to service.
-   - slurm/285: "srun killed due to SIGPIPE"
- -- Support for running job steps on nodes relative to current 
-    allocation via srun -r, --relative=n option.
- -- SIGKILL no longer broadcasted to job via srun on task failure unless
-    --no-allocate option is used.
- -- Re-enabled "chkconfig --add" in default RPMs.
- -- Backup controller setting proper PID into slurmctld.pid file.
- -- Backup controller restores QSW state each time it assumes control
- -- Backup controller purges old job records before assuming control
-    to avoid resurrecting defunct jobs.
- -- Kill jobs on non-responding DRAINING nodes and make their state
-    DRAINED.
- -- Save state upon completion of a job's last EPILOG_COMPLETION to 
-    reduce possibility of inconsistent job and node records when the 
-    controller is transitioning between primary and backup. 
- -- Change logging level of detailed communication errors to not print 
-    them unless detailed debugging is requested.
- -- Increase number of concurrent controller server threads from 20 
-    to 50 and restructure code to handle backlogs more efficiently.
- -- Partition state at controller startup is based upon slurm.conf 
-    rather than previously saved state. Additional improvements to 
-    avoid inconsistent job/node/partition states at restart. Job state 
-    information is used to arbitrate conflicts.
- -- Orphaned file descriptors eliminated.
-
-* Changes in SLURM 0.2.16
-=========================
- -- Fixes for reported problems:
-   - slurm/265: Early termination of srun could cause job to remain in queue.
-   - slurm/268: Slurmctld could deadlock if there was a delay in the 
-     termination of a large node-count job. An EPILOG_COMPLETE RPC was 
-     added so that slurmd could notify slurmctld whenever the job 
-     termination was completed.
-   - slurm/270: Segfault in sinfo if a configured node lacked a partition.
-   - slurm/278: Exit code in scontrol did not indicate failure.
- -- Fixed bug in slurmd that caused the daemon to occasionally kill itself.
- -- Fixed bug in srun when running with --no-allocate and >1 process per node.
- -- Small fixes and updates for srun manual.
-
-* Changes in SLURM 0.2.15
-=========================
- -- Fixes for reported problems:
-   - slurm/265: Job was orphaned when allocation response message could 
-     not be sent. Job is now killed on allocation response message transmit 
-     failure and socket error details are logged.
-   - Fix for slurm/267: "Job epilog may run multiple times."
- -- Squeue job TIMELIMIT format changed from "h:mm" to "d:h:mm:ss".
- -- DPCS initiated jobs have steps execute properly without explicit 
-    specification of node count.
-
-* Changes in SLURM 0.2.14
-=========================
- -- Fixes for reported problems:
-   - slurm/194: "srun doesn't handle most options when run under an allocation."
-   - slurm/244: "REQ: squeue shows requested size of pending jobs."
- -- SLURM_NODELIST environment variable now exported to all jobs, not
-    only batch jobs.
- -- Nodelist displayed in squeue for completing jobs is now restricted to 
-    completing nodes.
- -- Node "reason" field properly displayed in sinfo even with filtering. 
- -- ``slurm_tv_clean'' daemon now supports a log file.
- -- Batch jobs are now re-queued on launch failure.
- -- Controller confirms job scripts for batch jobs are still running on 
-    node zero at node registration.
- -- Default RPMs no longer stop/start SLURM daemons on upgrade or install.
-
-* Changes in SLURM 0.2.13
-=========================
- -- Fixes for reported problems:
-   - Fixed bug in slurmctld where "drained" nodes would go back into
-     the "idle" state under some conditions (slurm/228).
-   - Added possible fix for slurm/229: "slurmd occasionally fails
-     to reap all children."
- -- Fixed memory leak in auth_munge plugin.
- -- Added fix to slurmctld to allow arbitrarily large job specifications
-    to be saved and recovered in the state file.
- -- Allow "updates" in the configuration file of previously defined
-    node state and reason. 
- -- On "forceful termination" of a running job step, srun now exits
-    unconditionally, instead of waiting for all I/O.
- -- Slurmctld now uses pidfile to kill old daemon when a new one is started.
- -- Addition of new daemon "slurm_tv_clean" used to clean up jobs orphaned
-    due to use of the TotalView parallel debugger.
-
-* Changes in SLURM 0.2.12
-=========================
- -- Fixes for reported problems:
-   - Fix for "waitpid: No child processes" when using TotalView (slurm/217).
-   - Implemented temporary workaround for slurm/223: "Munge decode failed: 
-     Munged communication error." 
-   - Temporary fix for slurm/222: "elan3_create(0): Invalid argument."
- -- Fixed memory leaks in slurmctld (mostly due to reconfigure).
- -- More squeue/sinfo interface changes (see squeue(1), sinfo(1)).
- -- Sinfo now accepts list of node states to -t,--state option.
- -- Node "reason" field now available via sinfo command (see sinfo(1)).
- -- Wrapper source for srun (srun.wrapper.c) now installed and available
-    for TotalView support.
- -- Improved retry logic in user commands for periods when slurmctld
-    primary is down and backup has not yet taken over.
-
-* Changes in SLURM 0.2.11
-=========================
- -- Changes in srun:
-   - Fixed bug in signal handling that occasionally resulted in orphaned 
-     jobs when using Ctrl-C.
-   - Return non-zero exit code when remote tasks are killed by a signal.
-   - SIGALRM is now blocked by default.
- -- Added ``reason'' string for down, drained, or draining nodes. 
- -- Added -V,--version option to squeue and sinfo.
- -- Improved some error messages from user utilities.
-
-* Changes in SLURM 0.2.10
-=========================
- -- New slurm.conf configuration parameters:
-   - WaitTime:    Default for srun -w,--wait parameter.
-   - MaxJobCount: Maximum number of jobs SLURM can handle at one time.
-   - MinJobAge:   Minimum time since completing before job is purged from 
-                  slurmctld memory.
- -- Block user defined signals USR1 and USR2 in slurmd session manager.
- -- More squeue cleanup.
- -- Support for passing options to sinfo via environment variables.
- -- Added option to scontrol to find intersection of completing jobs and nodes.
- -- Added fix in auth_munge to prevent "Munged communication error" message.
-
-* Changes in SLURM 0.2.9
-========================
- -- Fixes for reported problems:
-   - Argument to srun `-n' option was taken as octal if preceded with a `0'.
- -- New format for Elan hosts config file (/etc/elanhosts. See README)
- -- Various fixes for managing COMPLETING jobs.
- -- Support for passing options to squeue via environment variables 
-    (see squeue(1))
-
-* Changes in SLURM 0.2.8
-=========================
- -- Fix for bug in slurmd that could make debug messages appear in job output.
- -- Fix for bug in slurmctld retry count computation.
- -- Srun now times out slow launch threads.
- -- "Time Used" output in squeue now includes seconds.
-
-* Changes in SLURM 0.2.7
-=========================
- -- Fix for bug in Elan module that results in slurmd hang.
- -- Added completing job state to default list of states to print with squeue.
-
-* Changes in SLURM 0.2.6
-=========================
- -- More fixes for handling cleanup of slow terminating jobs.
- -- Fixed bug in srun that might leave nodes allocated after a Ctrl-C.
-
-* Changes in SLURM 0.2.5
-=========================
- -- Various fixes for cleanup of slow terminating or unkillable jobs.
- -- Fixed some small memory leaks in communications code.
- -- Added hack for synchronized exit of jobs on large node count.
- -- Long lists of nodes are no longer truncated in sinfo.
- -- Print more descriptive error message when tasks exit with nonzero status.
- -- Fixed bug in srun where unsuccessful launch attempts weren't detected.
- -- Elan network error resolver thread now runs from elan module in slurmd.
- -- Slurmctld uses consecutive Elan context and program description numbers
-    instead of choosing them randomly.
-
-* Changes in SLURM 0.2.4
-==========================
- -- Fix for file descriptor leak in slurmctld.
- -- auth_munge plugin now prints credential info on decode failure.
- -- Minor changes to scancel interface.
- -- Filename format option "%J" now works again for srun --output and --error.
- 
-* Changes in SLURM 0.2.3
-==========================
- -- Fix bug in srun when using per-task files for stderr.
- -- Better error reporting on failure to open per-task input/output files.
- -- Update auth_munge plugin for munge 0.1.
- -- Minor changes to squeue interface.
- -- New srun option `--hold' to submit job in "held" state.
-
-* Changes in SLURM 0.2.2
-==========================
- -- Fixes for reported problems:
-   - Execution of script allocate mode fails in some cases. (gnats:161)
-   - Errors using per-task input files with Elan support. (gnats:162)
-   - srun doesn't handle all environment variables properly. (gnats:164)
- -- Parallel job is now terminated if a task is killed by a signal.
- -- Exit status of srun is set based on exit codes of tasks.
- -- Redesign of sinfo interface and options.
- -- Shutdown of slurmctld no longer propagates shutdown to all nodes.
-
-* Changes in SLURM 0.2.1
-===========================
- -- Fix bug where reconfigure request to slurmctld killed the daemon.
-
-* Changes in SLURM 0.2.0
-============================
-
- -- SlurmdTimeout of 0 means never set a non-responding node to DOWN.
- -- New srun option, -u,--unbuffered, for unbuffered stdout.
- -- Enhancements for sinfo
-   - Non-responding nodes show "*" character appended instead of "NoResp+".
-   - Node states show abbreviated variant by default
- -- Enhancements for scontrol.
-   - Added "ping" command to show current state of SLURM controllers.
-   - Job dump in scontrol shows user name as well as UID. 
-   - Node state of DRAIN is appropriately mapped to DRAINING or DRAINED.
- -- Fix for bug where request for task count greater than partition limit
-    was queued anyway.
- -- Fix for bugs in job end time handling.
- -- Modifications for error free builds on 64 bit architectures.
- -- Job cancel immediately deallocates nodes instead of waiting on srun.
- -- Attempt to create slurmd spool if it does not exist.
- -- Fixed signal handling bug in srun allocate mode.
- -- Earlier error detection in slurmd startup.
- -- "fatal: _shm_unlock: Numerical result out of range" bug fixed in slurmd.
- -- Config file parsing is now case insensitive.
- -- SLURM_NODELIST environment variable now set in allocate mode.
- 
-* Changes in SLURM 0.2.0-pre2
-=============================
-  
- -- Fix for reconfigure when public/private key path is changed.
- -- Shared memory fixes in slurmd. 
-   - fix for infinite semaphore incrementation bug.
- -- Semaphore fixes in slurmctld.
- -- Slurmctld now remembers which nodes have registered after recover.
- -- Fixed reattach bug when tasks have exited.
- -- Change directory to /tmp in slurmd if daemonizing.
- -- Logfiles are reopened on reconfigure.
- 
-$Id$
diff --git a/README b/README.rst
similarity index 96%
rename from README
rename to README.rst
index 8c9528ac6..eedb043ff 100644
--- a/README
+++ b/README.rst
@@ -1,3 +1,6 @@
+SLURM:  the Simple Linux Utility for Resource Management
+--------------------------------------------------------
+
 This is SLURM, the Simple Linux Utility for Resource Management. SLURM
 is an open-source cluster resource management and job scheduling system
 that strives to be simple, scalable, portable, fault-tolerant, and
@@ -72,5 +75,3 @@ PROBLEMS
 
 If you experience problems compiling, installing, or running SLURM
 please send e-mail to either slurm-dev@lists.llnl.gov.
-
-$Id$
diff --git a/RELEASE_NOTES b/RELEASE_NOTES
index d7a6c2951..e3e4cfaaf 100644
--- a/RELEASE_NOTES
+++ b/RELEASE_NOTES
@@ -1,10 +1,10 @@
-RELEASE NOTES FOR SLURM VERSION 2.2
-10 January 2011
+RELEASE NOTES FOR SLURM VERSION 2.3
+28 July 2011
 
 
 IMPORTANT NOTE:
 If using the slurmdbd (SLURM DataBase Daemon) you must update this first.
-The 2.2 slurmdbd will work with SLURM daemons of version 2.1.3 and above.
+The 2.3 slurmdbd will work with SLURM daemons of version 2.1.3 and above.
 You will not need to update all clusters at the same time, but it is very
 important to update slurmdbd first and having it running before updating
 any other clusters making use of it.  No real harm will come from updating
@@ -18,351 +18,203 @@ innodb_buffer_pool_size=64M
 under the [mysqld] reference in the my.cnf file and restarting the mysqld.
 This is needed when converting large tables over to the new database schema.
 
-SLURM can be upgraded from version 2.1 to version 2.2 without loss of jobs or
+SLURM can be upgraded from version 2.2 to version 2.3 without loss of jobs or
 other state information.
 
 
 HIGHLIGHTS
 ==========
-* Slurmctld restart/reconfiguration operations have been altered.
-  NOTE: There will be no change in behavior unless partition configuration
-  or node Features/Weight are altered using the scontrol command to differ
-  from the contents of the slurm.conf configuration file.
-
-  Preserve current partition state information plus node Feature and Weight
-  state information after slurmctld receives a SIGHUP signal or is restarted
-  with the -R option. Recreate partition plus node information (except node
-  State and Reason) from slurm.conf file after executing "scontrol reconfig"
-  or restarting slurmctld *without* the -R option.
-
-     OPERATION            ACTION
-     slurmctld -R         Recover all job, node and partition state
-     slurmctld            Recover job state plus state and reason for DOWN
-                          and DRAINED nodes only, recreate all other node state
-                          plus all partition state
-     slurmctld -c         Recover no jobs, recreate node and partition state
-     SIGHUP to slurmctld  Preserve all job, node and partition state
-     scontrol reconfig    Preserve job state, recreate node and partition state
-
-  Old logic preserved node Feature plus partition state after "slurmctld" or
-  "scontrol reconfig" rather than recreating it from slurm.conf. Node Weight
-  was formerly always recreated from slurm.conf.
-
-* SLURM commands (squeue, sinfo, sview, etc...) can now operate between
-  clusters. Jobs can also be submitted with sbatch to other cluster(s) with the
-  job routed to the one cluster expected to initiated the job first.
-
-* Accounting through the SlurmDBD with the MySQL plugin can now support
-  a default account and wckey per cluster.
+* Support has been added for Cray XT and XE computers
+* Support has been added for BlueGene/Q computers.
+* For architectures where the slurmd daemon executes on front end nodes (Cray
+  and BlueGene systems) more than one slurmd daemon may be executed using more
+  than one front end node for improved fault-tolerance and performance.
+  NOTE: The slurmctld daemon will report the lack of a front_end_state file
+  as an error when first started in this configuration.
+* The ability to expand running jobs was added
+* The ability to control how many leaf switches a job is allocated and the
+  maximum delay to get that leaf switch count can be controlled.
 
 CONFIGURATION FILE CHANGES (see "man slurm.conf" for details)
 =============================================================
-* A hash of the slurm.conf running on each node in the cluster is sent when
-  registering with the slurmctld so it can verify the slurm.conf is the same
-  as the one it is running.  If not an error message is displayed.  To
-  silence this message add NO_CONF_HASH to DebugFlags in your slurm.conf.
-
-* Added VSizeFactor to enforce virtual memory limits for jobs and job steps as
-  a percentage of their real memory allocation.
-
-* Added new option for SelectTypeParameters of CR_ONE_TASK_PER_CORE. This
-  option will allocate one task per core by default. Without this option,
-  by default one task will be allocated per thread on nodes with more than
-  one ThreadsPerCore configured (i.e. no change in behavior without this
-  option).
-
-* Add new configuration parameters GroupUpdateForce and GroupUpdateTime. These
-  control when slurmctld updates its information of which users are in the
-  groups allowed to use partitions. NOTE: There is no change in the default
-  behavior.
-
-* Added new configuration parameters SlurmSchedLogFile and SlurmSchedLogLevel
-  to support writing scheduling events to a separate log file.
-
-* Added new configuration parameter JobSubmitPlugins which provides a mechanism
-  to set default job parameters or perform other site-configurable actions at
-  job submit time. Site-specific job submission plugins may be written either C
-  or LUA.
-
-* MaxJobCount changed from 16-bit to 32-bit field. The default MaxJobCount was
-  changed from 5,000 to 10,000.
-
-* Added support for a PropagatePrioProcess configuration parameter value of 2
-  to restrict spawned task nice values to that of the slurmd daemon plus 1.
-  This insures that the slurmd daemon always have a higher scheduling priority
-  than spawned tasks. Also added support in slurmctld, slurmd and slurmdbd for
-  option of "-n <value>" to reset the daemon's nice value.
-
-* Support has been added for the allocation of generic resources (GRES). A
-  new configuration parameter, GresPlugins, has been added along with a node-
-  specific parameter, Gres. There is also a gres.conf file to be configured on
-  each node. For more information, see the web page
-  https://computing.llnl.gov/linux/slurm/gang_scheduling.html
-  Support for enforcement of these allocations using Linux CGroup will be
-  provided in a later release.
-
-* Added support for new partition states of DRAIN (run queued jobs, but accept
-  no new jobs) and INACTIVE (do not accept or run any more jobs) and new
-  partition option of "Alternate" (alternate partition to use for jobs
-  submitted to partitions that are currently in a state of DRAIN or INACTIVE).
-
-* Added the ability to configure PreemptMode on a per-partition or per-QOS
-  basis.
-
-* Modified the meaning of InactiveLimit slightly. It will now cancel the job
-  allocation created using the salloc or srun command if those commands cease
-  responding for the InactiveLimit regardless of any running job steps. This
-  parameter will no longer effect jobs spawned using sbatch.
-
-* Added SchedulerParameters option of bf_window to control how far into the
-  future that the backfill scheduler will look when considering jobs to start.
-  The default value is one day.
-
-* Added the ability to specify a range of ports in the SlurmctldPort parameter
-  for better handling of high bursts of RPCs (e.g. "SlurmctldPort=1234-1237").
+* In order to support more than one front end node, new parameters have been
+  added to support a new data structure: FrontendName, FrontendAddr, Port,
+  State and Reason.
+* Added DebugFlags option of Frontend
+* Added new configuration parameter MaxJobId. Use with FirstJobId to limit
+  range of job ID values.
+* Added new configuration parameter MaxStepCount to limit the effect of
+  bad batch scripts. The default value is 40,000 steps per job.
+* Changed node configuration parameter from "Procs" to "CPUs". Both parameters
+  will be supported for now.
+* Added GraceTime to Partition and QOS data structures. Preempted jobs will be
+  given this time interval before termination.
+* Added AccountingStoreJobComment to control storing job's comment field in
+  the accounting database.
+* More than one TaskPlugin can be configured in a comma separated list.
+* DefMemPerCPU, DefMemPerNode, MaxMemPerCPU and MaxMemPerNode configuration
+  options added on a per-partition basis.
+* SchedulerParameters can now control the maximum delay that a job can set in
+  order to be allocated some desired leaf switch count by specifying a value
+  for max_switch_wait.
 
 COMMAND CHANGES (see man pages for details)
 ===========================================
-* sinfo -R now has the user and timestamp in separate fields from the reason.
-
-* Job submission commands (salloc, sbatch and srun) have a new option,
-  --time-min, that permits the job's time limit to be reduced to the extent
-  required to start early through backfill scheduling with the minimum value
-  as specified.
-
-* scontrol now has the ability to change a job step's time limit.
-
-* scontrol now has the ability to shrink a job's size. Use a command of
-  "scontrol update JobId=# NumNodes=#" or
-  "scontrol update JobId=# NodeList=<names>". This command generates a script
-  to be executed in order to reset SLURM environment variables for proper
-  execution of subsequent job steps.
-
-* We have given Operators, Administrators, and bank account Coordinators (as
-  defined in the SLURM database) the ability to invoke commands that view/modify
-  user jobs and reservations.  Previously, one had to be root to invoke
-  "scontrol update JobId" for example.  In addition, Administrators have the
-  ability to view/modify node and partition info without having to become root.
-  For moredetails, see AUTHORIZATION section of the man pages for the
-  following commands: scontrol, scancel and sbcast.
-
-* Users can hold and release their own jobs. Submit in held state using srun
-  or sbatch --hold or -H options. Hold after submission using the command
-  "scontrol hold <jobid>". Release with "scontrol release <jobid>". Users can
-  not release jobs held by a system administrator unless the adminstrator uses
-  the command "scontrol uhold <jobid>" ("uhold" for "user hold").
-
-* Add support for slurmctld and slurmd option of "-n <value>" to reset the
-  daemon's nice value.
-
-* srun's --core option has been removed. Use the SPANK "Core" plugin from
-  http://code.google.com/p/slurm-spank-plugins/ for continued support.
-
-* Added salloc and sbatch option --wait-all-nodes. If set non-zero, job
-  initiation will be delayed until all allocated nodes have booted. Salloc
-  will log the delay with the messages "Waiting for nodes to boot" and "Nodes
-  are ready for job".
-
-* Added scontrol "wait_job <job_id>" option to wait for nodes to boot as needed.
-  Useful for batch jobs (in Prolog, PrologSlurmctld or the script) if powering
-  down idle nodes.
-
-* Modified sview to display database configuration and add/remove visible tabs.
-
-* Modified sview to save default configuration in .slurm/sviewrc file.
-  Default setting can be set by using the menus Options->Set Default Settings
-  or typing Ctrl-S.
-
-* Modified select/cons_res plugin so that if MaxMemPerCPU is configured and a
-  job specifies it's memory requirement, then more CPUs than requested will
-  automatically be allocated to a job to honor the MaxMemPerCPU parameter.
-
-* Add new scontrol option of "show aliases" to report every NodeName that is
+* Added scontrol ability to get and set front end node state.
+* Added scontrol ability to set slurmctld's DebugFlags.
+* Added scontrol ability to increment or decrement a job or step time limit.
+* Added new scontrol option of "show aliases" to report every NodeName that is
   associated with a given NodeHostName when running multiple slurmd daemons
   per compute node (typically used for testing purposes).
+* Added new squeue option of -R/--reservation as a job filter.
+* A reservation flag of "License_Only" has been added for use by the sview and
+  scontrol commands. If set, then jobs using the reservation may use the
+  licenses associated with it plus any compute nodes. Otherwise the job is
+  limited to the compute nodes associated with the reservation.
+* The dependency option of "expand" has been added. This option identifies a
+  job whose resource allocation is intended to be used to expand the allocation
+  of another job. See http://www.schedmd.com/slurmdocs//faq.html#job_size
+  for a description of its use.
+* Added --switches option to salloc, sbatch and srun commands to control the
+  desired number of switches allocated to a job and the maximum delay before
+  starting the job with more leaf switches.
+* Added scontrol ability to modify a job's desired switch count or delay.
 
 BLUEGENE SPECIFIC CHANGES
 =========================
+* Bluegene/Q support added.
+* The select/bluegene plugin has been substantially re-written.
 
 OTHER CHANGES
 =============
-* Added support for a default account and wckey per cluster within accounting.
-
-* Added support for several new trigger types: SlurmDBD failure/restart,
-  Database failure/restart, Slurmctld failure/restart.
-
-* Support has been added for TotalView to attach to a subset of launched tasks
-  instead of requiring that all tasks be attached to. This is the default
-  behavior unless an option of "--enable-partial-attach=no" be passed to the
-  configure (build) script.
-
-* A web application (chart_stats.cgi) has been added that invokes sreport to
-  retrieve from the accounting storage db a user's request for job usage or
-  machine utilization statistics and charts the results to a browser.
-
-* Much functionality has been added to account_storage/pgsql.  The plugin
-  is still in a very beta state.
-
-* SLURM's PMI library (for MPICH2) has been modified to properly execute an
-  executable program stand-alone (single MPI task launched without srun).
-
-* The PMI was also modified to use more socket connections for better
-  scalability and to clear state between job step invocations.
-
-* Added support for spank_get_item() to get S_STEP_ALLOC_CORES and
-  S_STEP_ALLOC_MEM. Support will remain for S_JOB_ALLOC_CORES and
-  S_JOB_ALLOC_MEM.
-
-* Changed error message from "Requested time limit exceeds partition limit"
-  to "Requested time limit is invalid (exceeds some limit)". The error can be
-  triggered by a time limit exceeding the user/bank limit or the time-min
-  exceeding the job or partition's time limit.
-
-* Added proctrack/cgroup plugin which uses Linux control groups (aka cgroup) to
-  track processes on Linux systems with this feature (kernel >= 2.6.24).
-
-* Added the derived_ec (exit code) member to job_info_t.  exit_code captures
-  the exit code of the job script (or salloc) while derived_ec contains the
-  highest exit code of all the job steps.
-
-* Added the derived exit code and derived exit string fields to the database's
-  job record.  Both can be modified by the user after the job completes.  See
-  job_exit_code.html
-
+* Improved accuracy of estimated job start time for pending jobs. This should
+  substantially improve scheduling of jobs eligible to execute on more than one
+  cluster.
+* Job dependency information will only show the currently active dependencies
+  rather than the original dependencies.
+* Added a reservation flag of "License_Only". If set, then jobs using the
+  reservation may use the licenses associated with it plus any compute nodes.
+* Added proctrack/cgroup and task/cgroup plugins to support Linux cgroups.
 
 API CHANGES
 ===========
 
 Changed members of the following structs
 ========================================
-job_info_t
-	num_procs -> num_cpus
-	job_min_cpus -> pn_min_cpus
-	job_min_memory -> pn_min_memory
-	job_min_tmp_disk -> pn_min_tmp_disk
-	min_sockets -> sockets_per_node
-	min_cores -> cores_per_socket
-	min_threads -> threads_per_core
+block_info_t
+	Added	     job_list
+	Added        used_mp_inx
+	Added        used_mp_str
+	bp_inx    -> mp_inx
+	conn_type -> conn_type[DIMENSIONS]
+	ionodes   -> ionode_str
+	nodes     -> mp_str
+	node_cnt  -> cnode_cnt
 
 job_desc_msg_t
-	num_procs -> min_cpus
-	job_min_cpus -> pn_min_cpus
-	job_min_memory -> pn_min_memory
-	job_min_tmp_disk -> pn_min_tmp_disk
-	min_sockets -> sockets_per_node
-	min_cores -> cores_per_socket
-	min_threads -> threads_per_core
-
-partition_info_t
-	state_up (new states added PARTITION_DRAIN and PARTITION_INACTIVE)
-	default_part -> flags (as PART_FLAG_DEFAULT flag)
-	disable_root_jobs -> flags (as PART_FLAG_NO_ROOT flag)
-	hidden -> flags (as PART_FLAG_HIDDEN flag)
-	root_only -> flags (as PART_FLAG_ROOT_ONLY flag)
-
-slurm_step_ctx_params_t
-	node_count -> min_nodes
+	conn_type -> conn_type[DIMENSIONS]
 
-slurm_ctl_conf_t
-	cache_groups -> group_info (as GROUP_CACHE flag)
+job_step_info_t
+	Added	    select_jobinfo
 
+partition_info_t
+	Added       def_mem_per_cpu and max_mem_per_cpu
 
 Added the following struct definitions
 ======================================
-block_info_t (BlueGene-specific information)
-	reason
+block_job_info_t		entirely new structure
+
+front_end_info_msg_t		entirely new structure
 
+front_end_info_t		entirely new structure
+	
 job_info_t
-	derived_ec
-	gres
-	max_cpus
-	resize_time
-	show_flags
-	time_min
+	batch_host		name of the host running the batch script
+	batch_script		contents of batch script
+	preempt_time		time that a job become preempted
+	req_switches		maximum number of leaf switches
+	wait4switches		maximum delay to get desired leaf switch count
 
-job_desc_msg_t
-	gres
-	max_cpus
-	time_min
-	wait_all_nodes
+job_step_create_response_msg_t
+	select_jobinfo		data needed from the select plugin for a step
 
 job_step_info_t
-	gres
+	select_jobinfo		data needed from the select plugin for a step
 
 node_info_t
-	boot_time
-	gres
-	reason_time
-	reason_uid
-	slurmd_start_time
+	node_addr		communication name (optional)
+	node_hostname		node's hostname (optional)
 
 partition_info_t
-	alternate
-	flags
-	preempt_mode
-
-slurm_ctl_conf_t
-	gres_plugins
-	group_info
-	hash_val
-	job_submit_plugins
-	sched_logfile
-	sched_log_level
-	slurmctld_port_count
-	vsize_factor
-
-slurm_step_ctx_params_t
-	features
-	gres
-	max_nodes
-
-update_node_msg_t
-	gres
-	preempt_mode
-	reason_uid
-
-
-Changed the following enums
-===========================
+	grace_time		preempted job's grace time in seconds
+
+slurm_ctl_conf
+	acctng_store_job_comment  if set, store job's comment field in
+				accounting database
+	max_job_id		maximum supported job id before starting over
+				with first_job_id
+	max_step_count		maximum number of job steps permitted per job
+
+slurm_step_layout
+	front_end		name of front end host running the step
+
+slurmdb_qos_rec_t
+	grace_time		preempted job's grace time in seconds
+
+update_front_end_msg_t		entirely new structure
+
+
+Changed the following enums and #defines
+========================================
 job_state_reason
 	FAIL_BANK_ACCOUNT -> FAIL_ACCOUNT
 	FAIL_QOS        	/* invalid QOS */
 	WAIT_QOS_THRES        	/* required QOS threshold has been breached */
 
-select_jobdata_type
-	SELECT_JOBDATA_PTR	/* data-> select_jobinfo_t *jobinfo */
+select_jobdata_type (Size of many data structures increased)
+	SELECT_JOBDATA_BLOCK_NODE_CNT /* data-> uint32_t block_cnode_cnt */
+	SELECT_JOBDATA_BLOCK_PTR /* data-> bg_record_t *bg_record */
+	SELECT_JOBDATA_DIM_CNT   /* data-> uint16_t dim_cnt */
+	SELECT_JOBDATA_NODE_CNT  /* data-> uint32_t cnode_cnt */
+	SELECT_JOBDATA_PAGG_ID   /* data-> uint64_t job container ID */
+	SELECT_JOBDATA_PTR	 /* data-> select_jobinfo_t *jobinfo */
+	SELECT_JOBDATA_START_LOC /* data-> uint16_t
+				  * start_loc[SYSTEM_DIMENSIONS] */
+select_jobdata_type (Added)
+	SELECT_PRINT_START_LOC   /* Print just the start location */
+select_jobdata_type (Names changed)
+	SELECT_GET_BP_CPU_CNT --> SELECT_GET_MP_CPU_CNT
+	SELECT_SET_BP_CNT ------> SELECT_SET_MP_CNT
 
 select_nodedata_type
 	SELECT_NODEDATA_PTR     /* data-> select_nodeinfo_t *nodeinfo */
 
-select_type_plugin_info is no longer and it's contents are now mostly #defines
+select_print_mode
+	SELECT_PRINT_START_LOC	/* Print just the start location */
+
+select_type_plugin_info no longer exists. It's contents are now mostly #defines
+
+DEBUG_FLAG_FRONT_END		added DebugFlags of Frontend
+
+JOB_PREEMPTED			added new job termination state to indicate
+				job termination was due to preemption
+
+RESERVE_FLAG_LIC_ONLY		reserve licenses only, use any nodes
+
+TRIGGER_RES_TYPE_FRONT_END	added trigger for frontend state changes
+
 
 Added the following API's
 =========================
-slurm_checkpoint_requeue()
-slurm_init_update_step_msg()
-slurm_job_step_get_pids()
-slurm_job_step_pids_free()
-slurm_job_step_pids_response_msg_free()
-slurm_job_step_stat()
-slurm_job_step_stat_free()
-slurm_job_step_stat_response_msg_free()
-slurm_list_append()
-slurm_list_count()
-slurm_list_create()
-slurm_list_destroy()
-slurm_list_find()
-slurm_list_is_empty()
-slurm_list_iterator_create()
-slurm_list_iterator_reset()
-slurm_list_iterator_destroy()
-slurm_list_next()
-slurm_list_sort()
-slurm_set_schedlog_level()
-slurm_step_launch_fwd_wake()
-slurm_update_step()
+slurm_free_front_end_info_msg	free front end state information
+slurm_init_update_front_end_msg	initialize data structure for front end update
+slurm_load_front_end		load front end state information
+slurm_print_front_end_info_msg	print all front end state information
+slurm_print_front_end_table	print state information for one front end node
+slurm_set_debugflags		set new DebugFlags in slurmctld daemon
+slurm_sprint_front_end_table	output state information for one front end node
+slurm_update_front_end		update state of front end node
 
 
 Changed the following API's
 ===========================
-slurm_load_block_info(): Added show_flag parameter
diff --git a/RELEASE_NOTES_LLNL b/RELEASE_NOTES_LLNL
index 207d17148..de8ffaf3d 100644
--- a/RELEASE_NOTES_LLNL
+++ b/RELEASE_NOTES_LLNL
@@ -1,55 +1,10 @@
-LLNL CHAOS-SPECIFIC RELEASE NOTES FOR SLURM VERSION 2.2
-1 December 2010
+LLNL CHAOS-SPECIFIC RELEASE NOTES FOR SLURM VERSION 2.3
+3 January 2011
 
-This lists only the most significant changes from SLURM v2.1 to v2.2
+This lists only the most significant changes from SLURM v2.2 to v2.3
 with respect to Chaos systems. See the file RELEASE_NOTES for a more
 complete description of changes.
 
 Mostly for system administrators:
 
-* SLURM version 2.2 is able to read version 2.1 state files and preserve all
-  running and pending state. SLURM version 2.1 is *not* able to use state save
-  files generated by version 2.2, so this is a non-reversible transition.
-
-* Added new configuration parameter JobSubmitPlugins which provides a mechanism
-  to set default job parameters or perform other site-configurable actions at
-  job submit time. Site-specific job submission plugins may be written either C
-  or LUA.
-
-* We have given Operators, Administrators, and bank account Coordinators (as
-  defined in the SLURM database) the ability to invoke commands that view/modify
-  user jobs and reservations.  Previously, one had to be root to invoke
-  "scontrol update JobId" for example.  In addition, Administrators have the
-  ability to view/modify node and partition info without having to become root.
-  For more details, see AUTHORIZATION section of the man pages for the
-  following commands: scontrol, scancel and sbcast.
-
 Mostly for users:
-
-* Job submission commands (salloc, sbatch and srun) have a new option,
-  --time-min, that permits the job's time limit to be reduced to the extent
-  required to start early through backfill scheduling with the minimum value
-  as specified.
-
-* Support has been added for TotalView to attach to a subset of launched tasks
-  instead of requiring that all tasks be attached to.
-
-* scontrol now has the ability to shrink a job's size. Use a command of
-  "scontrol update JobId=# NumNodes=#" or
-  "scontrol update JobId=# NodeList=<names>". This command generates a script
-  to be executed in order to reset SLURM environment variables for proper
-  execution of subsequent job steps.
-
-* Users can hold and release their own jobs. Submit in held state using srun
-  or sbatch --hold or -H options. Hold after submission using the command
-  "scontrol hold <jobid>". Release with "scontrol release <jobid>". Users can
-  not release jobs held by system administrator.
-
-* Added support for a default account and wckey per cluster within accounting.
-
-* SLURM commands (squeue, sinfo, sbatch, etc...) can now operate between
-  clusters. Jobs can also be submitted with sbatch to other cluster(s) with the
-  job routed to the one cluster expected to initiated the job first. This
-  functionality relies upon the SlurmDBD (SLURM DataBase Daemon) to provide
-  communication information (address and port) for a command to locate the
-  SLURM control daemon (slurmctld) on other clusters.
diff --git a/aclocal.m4 b/aclocal.m4
index 3bbc5710e..97b874641 100644
--- a/aclocal.m4
+++ b/aclocal.m4
@@ -353,7 +353,7 @@ $$1_PKG_ERRORS
 Consider adjusting the PKG_CONFIG_PATH environment variable if you
 installed software in a non-standard prefix.
 
-_PKG_TEXT])dnl
+_PKG_TEXT])[]dnl
         ])
 elif test $pkg_failed = untried; then
      	AC_MSG_RESULT([no])
@@ -364,7 +364,7 @@ path to pkg-config.
 
 _PKG_TEXT
 
-To get pkg-config, see <http://pkg-config.freedesktop.org/>.])dnl
+To get pkg-config, see <http://pkg-config.freedesktop.org/>.])[]dnl
         ])
 else
 	$1[]_CFLAGS=$pkg_cv_[]$1[]_CFLAGS
@@ -1367,6 +1367,7 @@ m4_include([auxdir/x_ac_gpl_licensed.m4])
 m4_include([auxdir/x_ac_hwloc.m4])
 m4_include([auxdir/x_ac_iso.m4])
 m4_include([auxdir/x_ac_lua.m4])
+m4_include([auxdir/x_ac_man2html.m4])
 m4_include([auxdir/x_ac_munge.m4])
 m4_include([auxdir/x_ac_ncurses.m4])
 m4_include([auxdir/x_ac_pam.m4])
@@ -1377,5 +1378,6 @@ m4_include([auxdir/x_ac_setpgrp.m4])
 m4_include([auxdir/x_ac_setproctitle.m4])
 m4_include([auxdir/x_ac_sgi_job.m4])
 m4_include([auxdir/x_ac_slurm_ssl.m4])
+m4_include([auxdir/x_ac_srun.m4])
 m4_include([auxdir/x_ac_sun_const.m4])
 m4_include([auxdir/x_ac_xcpu.m4])
diff --git a/auxdir/Makefile.am b/auxdir/Makefile.am
index 1364b9ecd..6042614c5 100644
--- a/auxdir/Makefile.am
+++ b/auxdir/Makefile.am
@@ -15,6 +15,7 @@ EXTRA_DIST = \
     x_ac_bluegene.m4 \
     x_ac_cflags.m4 \
     x_ac_cray.m4 \
+    x_ac_databases.m4 \
     x_ac_debug.m4 \
     x_ac_elan.m4 \
     x_ac_env.m4 \
@@ -22,12 +23,17 @@ EXTRA_DIST = \
     x_ac_gpl_licensed.m4 \
     x_ac_hwloc.m4 \
     x_ac_iso.m4 \
-    x_ac_pam.m4 \
+    x_ac_lua.m4 \
+    x_ac_man2html.m4 \
     x_ac_munge.m4 \
     x_ac_ncurses.m4 \
     x_ac_pam.m4 \
+    x_ac_printf_null.m4 \
     x_ac_ptrace.m4 \
     x_ac_readline.m4 \
     x_ac_setproctitle.m4 \
+    x_ac_sgi_job.m4 \
     x_ac_slurm_ssl.m4 \
-    x_ac_sun_const.m4
+    x_ac_srun.m4 \
+    x_ac_sun_const.m4 \
+    x_ac_xcpu.m4
diff --git a/auxdir/Makefile.in b/auxdir/Makefile.in
index bbf8904dc..d717819e6 100644
--- a/auxdir/Makefile.in
+++ b/auxdir/Makefile.in
@@ -61,6 +61,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -71,6 +72,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -92,7 +94,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -129,6 +134,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -186,6 +192,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -221,6 +228,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
@@ -284,6 +292,7 @@ EXTRA_DIST = \
     x_ac_bluegene.m4 \
     x_ac_cflags.m4 \
     x_ac_cray.m4 \
+    x_ac_databases.m4 \
     x_ac_debug.m4 \
     x_ac_elan.m4 \
     x_ac_env.m4 \
@@ -291,15 +300,20 @@ EXTRA_DIST = \
     x_ac_gpl_licensed.m4 \
     x_ac_hwloc.m4 \
     x_ac_iso.m4 \
-    x_ac_pam.m4 \
+    x_ac_lua.m4 \
+    x_ac_man2html.m4 \
     x_ac_munge.m4 \
     x_ac_ncurses.m4 \
     x_ac_pam.m4 \
+    x_ac_printf_null.m4 \
     x_ac_ptrace.m4 \
     x_ac_readline.m4 \
     x_ac_setproctitle.m4 \
+    x_ac_sgi_job.m4 \
     x_ac_slurm_ssl.m4 \
-    x_ac_sun_const.m4
+    x_ac_srun.m4 \
+    x_ac_sun_const.m4 \
+    x_ac_xcpu.m4
 
 all: all-am
 
diff --git a/auxdir/ltmain.sh b/auxdir/ltmain.sh
index 7ed280bc9..fa4b1e1f8 100755
--- a/auxdir/ltmain.sh
+++ b/auxdir/ltmain.sh
@@ -65,7 +65,7 @@
 #       compiler:		$LTCC
 #       compiler flags:		$LTCFLAGS
 #       linker:		$LD (gnu? $with_gnu_ld)
-#       $progname:		(GNU libtool) 2.2.6b Debian-2.2.6b-2ubuntu1
+#       $progname:		(GNU libtool) 2.2.6b Debian-2.2.6b-2ubuntu3
 #       automake:		$automake_version
 #       autoconf:		$autoconf_version
 #
@@ -73,7 +73,7 @@
 
 PROGRAM=ltmain.sh
 PACKAGE=libtool
-VERSION="2.2.6b Debian-2.2.6b-2ubuntu1"
+VERSION="2.2.6b Debian-2.2.6b-2ubuntu3"
 TIMESTAMP=""
 package_revision=1.3017
 
diff --git a/auxdir/slurm.m4 b/auxdir/slurm.m4
index c730cdee1..31911083b 100644
--- a/auxdir/slurm.m4
+++ b/auxdir/slurm.m4
@@ -204,7 +204,7 @@ if echo "$RELEASE" | grep -e "UNSTABLE"; then
    SLURM_RELEASE="unstable svn build $DATE" 
    SLURM_VERSION_STRING="$SLURM_MAJOR.$SLURM_MINOR ($SLURM_RELEASE)"
 else
-   SLURM_RELEASE="`echo $RELEASE | sed 's/^.*\.//'`"
+   SLURM_RELEASE="`echo $RELEASE | sed 's/^0\.//'`"
    SLURM_VERSION_STRING="$SLURM_MAJOR.$SLURM_MINOR.$SLURM_MICRO"
    test $RELEASE = "1" || SLURM_VERSION_STRING="$SLURM_VERSION_STRING-$SLURM_RELEASE"
 fi
@@ -225,4 +225,25 @@ AC_SUBST(SLURM_VERSION_STRING)
 
 ]) dnl AC_SLURM_VERSION
  
+dnl
+dnl Test if we want to include rpath in the executables (default=yes)
+dnl Doing so is generally discouraged due to problems this causes in upgrading
+dnl software and general incompatibility issues
+dnl
+AC_DEFUN([X_AC_RPATH], [
+  ac_with_rpath=yes
 
+  AC_MSG_CHECKING([whether to include rpath in build])
+  AC_ARG_WITH(
+    [rpath],
+    AS_HELP_STRING(--without-rpath, Do not include rpath in build),
+      [ case "$withval" in
+        yes) ac_with_rpath=yes ;;
+        no)  ac_with_rpath=no ;;
+        *)   AC_MSG_RESULT([doh!])
+             AC_MSG_ERROR([bad value "$withval" for --without-rpath]) ;;
+        esac
+      ]
+  )
+  AC_MSG_RESULT([$ac_with_rpath])
+])
diff --git a/auxdir/x_ac_bluegene.m4 b/auxdir/x_ac_bluegene.m4
index 007c9261e..717ef1444 100644
--- a/auxdir/x_ac_bluegene.m4
+++ b/auxdir/x_ac_bluegene.m4
@@ -15,6 +15,7 @@
 
 AC_DEFUN([X_AC_BGL],
 [
+	ac_real_bluegene_loaded=no
 	ac_bluegene_loaded=no
 
    	AC_ARG_WITH(db2-dir, AS_HELP_STRING(--with-db2-dir=PATH,Specify path to parent directory of DB2 library), [ trydb2dir=$withval ])
@@ -109,6 +110,7 @@ AC_DEFUN([X_AC_BGL],
      		AC_DEFINE_UNQUOTED(BG_SERIAL, "$bg_serial", [Define the BG_SERIAL value])
  		#define ac_bluegene_loaded so we don't load another bluegene conf
 		ac_bluegene_loaded=yes
+		ac_real_bluegene_loaded=yes
   	fi
 
    	AC_SUBST(BG_INCLUDES)
@@ -193,7 +195,8 @@ AC_DEFUN([X_AC_BGP],
      		AC_DEFINE_UNQUOTED(BG_SERIAL, "$bg_serial", [Define the BG_SERIAL value])
  		#define ac_bluegene_loaded so we don't load another bluegene conf
 		ac_bluegene_loaded=yes
-   	fi
+		ac_real_bluegene_loaded=yes
+	fi
 
    	AC_SUBST(BG_INCLUDES)
 ])
@@ -212,6 +215,7 @@ AC_DEFUN([X_AC_BGQ],
    	if test "x$ac_bluegene_loaded" = "xyes" ; then
 		bg_default_dirs=""
 	elif test "x$bgq_emulation" = "xyes"; then
+      		AC_DEFINE(HAVE_4D, 1, [Define to 1 if 4-dimensional architecture])
   		AC_DEFINE(SYSTEM_DIMENSIONS, 4, [4-dimensional schedulable architecture])
 		AC_DEFINE(HAVE_BG, 1, [Define to 1 if emulating or running on Blue Gene system])
       		AC_DEFINE(HAVE_BGQ, 1, [Define to 1 if emulating or running on Blue Gene/Q system])
@@ -220,11 +224,13 @@ AC_DEFUN([X_AC_BGQ],
 		bg_default_dirs=""
  		#define ac_bluegene_loaded so we don't load another bluegene conf
 		ac_bluegene_loaded=yes
+		ac_bgq_loaded=yes
 	else
 		bg_default_dirs="/bgsys/drivers/ppcfloor"
 	fi
 
 	libname=bgsched
+	loglibname=log4cxx
 
    	for bg_dir in $trydb2dir "" $bg_default_dirs; do
       	# Skip directories that don't exist
@@ -232,49 +238,80 @@ AC_DEFUN([X_AC_BGQ],
 			continue;
       		fi
 
-		soloc=$bg_dir/lib64/lib$libname.so
+		soloc=$bg_dir/hlcs/lib/lib$libname.so
       		# Search for required BG API libraries in the directory
       		if test -z "$have_bg_ar" -a -f "$soloc" ; then
 			have_bgq_ar=yes
-			bg_ldflags="$bg_ldflags -L$bg_dir/lib64 -L/usr/lib64 -Wl,--unresolved-symbols=ignore-in-shared-libs -l$libname"
+			if test "$ac_with_rpath" = "yes"; then
+				bg_ldflags="$bg_ldflags -Wl,-rpath -Wl,$bg_dir/hlcs/lib -L$bg_dir/hlcs/lib -l$libname"
+			else
+				bg_ldflags="$bg_ldflags -L$bg_dir/hlcs/lib -l$libname"
+			fi
+		fi
+
+  		soloc=$bg_dir/extlib/lib/lib$loglibname.so
+    		if test -z "$have_bg_ar" -a -f "$soloc" ; then
+			have_bgq_ar=yes
+			if test "$ac_with_rpath" = "yes"; then
+				bg_ldflags="$bg_ldflags -Wl,-rpath -Wl,$bg_dir/extlib/lib -L$bg_dir/extlib/lib -l$loglibname"
+			else
+				bg_ldflags="$bg_ldflags -L$bg_dir/extlib/lib -l$loglibname"
+			fi
 		fi
 
       		# Search for headers in the directory
-      		if test -z "$have_bg_hdr" -a -f "$bg_dir/include/rm_api.h" ; then
+      		if test -z "$have_bg_hdr" -a -f "$bg_dir/hlcs/include/bgsched/bgsched.h" ; then
 			have_bgq_hdr=yes
-			bg_includes="-I$bg_dir/include"
+			bg_includes="-I$bg_dir/hlcs/include"
       		fi
+     		if test -z "$have_bg_hdr" -a -f "$bg_dir/extlib/include/log4cxx/logger.h" ; then
+			have_bgq_hdr=yes
+			bg_includes="$bg_includes -I$bg_dir/extlib/include"
+    		fi
    	done
 
    	if test ! -z "$have_bgq_ar" -a ! -z "$have_bgq_hdr" ; then
       		# ac_with_readline="no"
 		# Test to make sure the api is good
 		saved_LDFLAGS="$LDFLAGS"
-      	 	LDFLAGS="$saved_LDFLAGS $bg_ldflags -m64"
-		AC_LINK_IFELSE([AC_LANG_PROGRAM([[ int rm_set_serial(char *); ]], [[ rm_set_serial(""); ]])],[have_bgq_files=yes],[AC_MSG_ERROR(There is a problem linking to the BG/P api.)])
+      	 	LDFLAGS="$saved_LDFLAGS $bg_ldflags -m64 $bg_includes"
+		AC_LANG_PUSH(C++)
+		AC_LINK_IFELSE([AC_LANG_PROGRAM(
+                                [[#include <bgsched/bgsched.h>
+#include <log4cxx/logger.h>]],
+				[[ bgsched::init("");
+ log4cxx::LoggerPtr logger_ptr(log4cxx::Logger::getLogger( "ibm" ));]])],
+			        [have_bgq_files=yes],
+				[AC_MSG_ERROR(There is a problem linking to the BG/Q api.)])
+		AC_LANG_POP(C++)
 		LDFLAGS="$saved_LDFLAGS"
    	fi
 
   	if test ! -z "$have_bgq_files" ; then
+      		BG_LDFLAGS="$bg_ldflags"
       		BG_INCLUDES="$bg_includes"
 		CFLAGS="$CFLAGS -m64"
    		CXXFLAGS="$CXXFLAGS $CFLAGS"
-      		AC_DEFINE(HAVE_3D, 1, [Define to 1 if 3-dimensional architecture])
-  		AC_DEFINE(SYSTEM_DIMENSIONS, 3, [3-dimensional architecture])
+      		AC_DEFINE(HAVE_4D, 1, [Define to 1 if 4-dimensional architecture])
+  		AC_DEFINE(SYSTEM_DIMENSIONS, 4, [4-dimensional architecture])
       		AC_DEFINE(HAVE_BG, 1, [Define to 1 if emulating or running on Blue Gene system])
       		AC_DEFINE(HAVE_BGQ, 1, [Define to 1 if emulating or running on Blue Gene/Q system])
       		AC_DEFINE(HAVE_FRONT_END, 1, [Define to 1 if running slurmd on front-end only])
 		AC_DEFINE(HAVE_BG_FILES, 1, [Define to 1 if have Blue Gene files])
-		AC_DEFINE_UNQUOTED(BG_BRIDGE_SO, "$soloc", [Define the BG_BRIDGE_SO value])
-
-		AC_MSG_CHECKING(for BG serial value)
-		bg_serial="BGQ"
-    		AC_ARG_WITH(bg-serial,, [bg_serial="$withval"])
-     		AC_MSG_RESULT($bg_serial)
-     		AC_DEFINE_UNQUOTED(BG_SERIAL, "$bg_serial", [Define the BG_SERIAL value])
+		#AC_DEFINE_UNQUOTED(BG_BRIDGE_SO, "$soloc", [Define the BG_BRIDGE_SO value])
+
+    		AC_MSG_NOTICE([Running on a legitimate BG/Q system])
+		# AC_MSG_CHECKING(for BG serial value)
+		# bg_serial="BGQ"
+    		# AC_ARG_WITH(bg-serial,, [bg_serial="$withval"])
+     		# AC_MSG_RESULT($bg_serial)
+     		# AC_DEFINE_UNQUOTED(BG_SERIAL, "$bg_serial", [Define the BG_SERIAL value])
  		#define ac_bluegene_loaded so we don't load another bluegene conf
 		ac_bluegene_loaded=yes
-   	fi
+		ac_real_bluegene_loaded=yes
+		ac_bgq_loaded=yes
+	fi
 
    	AC_SUBST(BG_INCLUDES)
+   	AC_SUBST(BG_LDFLAGS)
 ])
diff --git a/auxdir/x_ac_cray.m4 b/auxdir/x_ac_cray.m4
index 426ef06d1..c01d3aaba 100644
--- a/auxdir/x_ac_cray.m4
+++ b/auxdir/x_ac_cray.m4
@@ -6,45 +6,84 @@
 #    X_AC_CRAY
 #
 #  DESCRIPTION:
-#    Test for Cray systems including XT with 3-D interconect
-#    Also test for the apbasil client (Cray's Batch Application Scheduler 
-#    Interface Layer interface)
-##*****************************************************************************
+#    Test for Cray XT and XE systems with 2-D/3-D interconnects.
+#    Tests for required libraries (native Cray systems only):
+#    * mySQL (relies on testing for mySQL presence earlier);
+#    * libexpat, needed for XML-RPC calls to Cray's BASIL
+#      (Batch Application Scheduler Interface Layer) interface.
+#*****************************************************************************
+
+AC_DEFUN([X_AC_CRAY],
+[
+  ac_have_cray="no"
+  ac_have_real_cray="no"
+  ac_have_alps_emulation="no"
+  ac_have_cray_emulation="no"
+
+  AC_ARG_WITH(
+    [alps-emulation],
+    AS_HELP_STRING(--with-alps-emulation,Run SLURM against an emulated Alps system - requires option cray.conf @<:@default=no@:>@),
+    [test "$withval" = no || ac_have_alps_emulation=yes],
+    [ac_have_alps_emulation=no])
 
-AC_DEFUN([X_AC_CRAY], [
-  AC_MSG_CHECKING([for Cray XT])
   AC_ARG_ENABLE(
-    [cray-xt],
-    AS_HELP_STRING(--enable-cray-xt,enable Cray XT system support),
-    [ case "$enableval" in
-        yes) x_ac_cray_xt=yes ;;
-         no) x_ac_cray_xt=no ;;
-          *) AC_MSG_RESULT([doh!])
-             AC_MSG_ERROR([bad value "$enableval" for --enable-cray-xt]) ;;
-      esac
-    ],
-    [x_ac_cray_xt=no]
+    [cray-emulation],
+    AS_HELP_STRING(--enable-cray-emulation,Run SLURM in an emulated Cray mode),
+      [ case "$enableval" in
+        yes) ac_have_cray_emulation="yes" ;;
+         no) ac_have_cray_emulation="no"  ;;
+          *) AC_MSG_ERROR([bad value "$enableval" for --enable-cray-emulation])  ;;
+      esac ]
   )
 
-  if test "$x_ac_cray_xt" = yes; then
-    AC_MSG_RESULT([yes])
-    AC_DEFINE(HAVE_3D, 1, [Define to 1 if 3-dimensional architecture])
-    AC_DEFINE(SYSTEM_DIMENSIONS, 3, [3-dimensional architecture])
-    AC_DEFINE(HAVE_CRAY,1,[Define if Cray system])
-    AC_DEFINE(HAVE_FRONT_END, 1, [Define to 1 if running slurmd on front-end only])
+  if test "$ac_have_alps_emulation" = "yes"; then
+    ac_have_cray="yes"
+    AC_MSG_NOTICE([Running A Cray system against an Alps emulation])
+    AC_DEFINE(HAVE_ALPS_EMULATION, 1, [Define to 1 if running against an Alps emulation])
+  elif test "$ac_have_cray_emulation" = "yes"; then
+    ac_have_cray="yes"
+    AC_MSG_NOTICE([Running in Cray emulation mode])
+    AC_DEFINE(HAVE_CRAY_EMULATION, 1, [Define to 1 for emulating a Cray XT/XE system])
   else
-    AC_MSG_RESULT([no])
+    # Check for a Cray-specific file:
+    #  * older XT systems use an /etc/xtrelease file
+    #  * newer XT/XE systems use an /etc/opt/cray/release/xtrelease file
+    #  * both have an /etc/xthostname
+    AC_MSG_CHECKING([whether this is a native Cray XT or XE system or have ALPS simulator])
+
+    if test -f /etc/xtrelease || test -d /etc/opt/cray/release; then
+      ac_have_cray="yes"
+      ac_have_real_cray="yes"
+      AC_DEFINE(HAVE_REAL_CRAY, 1, [Define to 1 for running on a real Cray XT/XE system])
+    fi
+    AC_MSG_RESULT([$ac_have_cray])
   fi
 
-  AC_ARG_WITH(apbasil, AS_HELP_STRING(--with-apbasil=PATH,Specify path to apbasil command), [ try_apbasil=$withval ])
-  apbasil_default_locs="/usr/bin/apbasil"
-  for apbasil_loc in $try_apbasil "" $apbasil_default_locs; do
-    if test -z "$have_apbasil" -a -x "$apbasil_loc" ; then
-      have_apbasil=$apbasil_loc
+  if test "$ac_have_cray" = "yes"; then
+    # libexpat is always required for the XML-RPC interface
+    AC_CHECK_HEADER(expat.h, [],
+                    AC_MSG_ERROR([Cray BASIL requires expat headers/rpm]))
+    AC_CHECK_LIB(expat, XML_ParserCreate, [],
+                 AC_MSG_ERROR([Cray BASIL requires libexpat.so (i.e. libexpat1-dev)]))
+
+    if test "$ac_have_real_cray" = "yes"; then
+      AC_CHECK_LIB([job], [job_getjid], [],
+              AC_MSG_ERROR([Need cray-job (usually in /opt/cray/job/default)]))
+    fi
+
+    if test -z "$MYSQL_CFLAGS" || test -z "$MYSQL_LIBS"; then
+      AC_MSG_ERROR([Cray BASIL requires the cray-MySQL-devel-enterprise rpm])
     fi
-  done
-  if test ! -z "$have_apbasil" ; then
-    AC_DEFINE_UNQUOTED(APBASIL_LOC, "$have_apbasil", [Define the apbasil command location])
+
+    AC_DEFINE(HAVE_3D,           1, [Define to 1 if 3-dimensional architecture])
+    AC_DEFINE(SYSTEM_DIMENSIONS, 3, [3-dimensional architecture])
+    AC_DEFINE(HAVE_FRONT_END,    1, [Define to 1 if running slurmd on front-end only])
+    AC_DEFINE(HAVE_CRAY,         1, [Define to 1 for Cray XT/XE systems])
+    AC_DEFINE(SALLOC_KILL_CMD,   1, [Define to 1 for salloc to kill child processes at job termination])
+    AC_DEFINE(SALLOC_RUN_FOREGROUND, 1, [Define to 1 to require salloc execution in the foreground.])
   fi
+  AM_CONDITIONAL(HAVE_CRAY, test "$ac_have_cray" = "yes")
+  AM_CONDITIONAL(HAVE_REAL_CRAY, test "$ac_have_real_cray" = "yes")
+  AM_CONDITIONAL(HAVE_ALPS_EMULATION, test "$ac_have_alps_emulation" = "yes")
+  AM_CONDITIONAL(HAVE_CRAY_EMULATION, test "$ac_have_cray_emulation" = "yes")
 ])
-
diff --git a/auxdir/x_ac_debug.m4 b/auxdir/x_ac_debug.m4
index 077bd2eed..37658f01c 100644
--- a/auxdir/x_ac_debug.m4
+++ b/auxdir/x_ac_debug.m4
@@ -33,6 +33,7 @@ AC_DEFUN([X_AC_DEBUG], [
   )
   if test "$x_ac_debug" = yes; then
     test "$GCC" = yes && CFLAGS="$CFLAGS -Wall -fno-strict-aliasing"
+    test "$GXX" = yes && CXXFLAGS="$CXXFLAGS -Wall -fno-strict-aliasing"
   else
     AC_DEFINE([NDEBUG], [1],
       [Define to 1 if you are building a production release.]
@@ -91,6 +92,24 @@ AC_DEFUN([X_AC_DEBUG], [
   fi
   AC_MSG_RESULT([${x_ac_partial_attach=no}])
 
+  AC_MSG_CHECKING([whether salloc should kill child processes at job termination])
+  AC_ARG_ENABLE(
+    [salloc-kill-cmd],
+    AS_HELP_STRING(--enable-salloc-kill-cmd,salloc should kill child processes at job termination),
+    [ case "$enableval" in
+        yes) x_ac_salloc_kill_cmd=yes ;;
+         no) x_ac_salloc_kill_cmd=no ;;
+          *) AC_MSG_RESULT([doh!])
+             AC_MSG_ERROR([bad value "$enableval" for --enable-salloc-kill-cmd]) ;;
+      esac
+    ]
+  )
+  if test "$x_ac_salloc_kill_cmd" = yes; then
+    AC_DEFINE(SALLOC_KILL_CMD, 1, [Define to 1 for salloc to kill child processes at job termination])
+    AC_MSG_RESULT([yes])
+  else
+    AC_MSG_RESULT([no])
+  fi
 
   AC_MSG_CHECKING([whether to disable salloc execution in the background])
   AC_ARG_ENABLE(
diff --git a/auxdir/x_ac_lua.m4 b/auxdir/x_ac_lua.m4
index a4efdbf85..d5a7a2b7f 100644
--- a/auxdir/x_ac_lua.m4
+++ b/auxdir/x_ac_lua.m4
@@ -22,9 +22,9 @@ AC_DEFUN([X_AC_LUA],
 
 	if test "x$x_ac_have_lua" = "xyes"; then
 	  saved_CFLAGS="$CFLAGS"
-	  saved_LDFLAGS="$LDFLAGS"
+	  saved_LIBS="$LIBS"
 	  CFLAGS="$CFLAGS $lua_CFLAGS"
-	  LDFLAGS="$LDFLAGS $lua_LIBS"
+	  LIBS="$LIBS $lua_LIBS"
 	  AC_MSG_CHECKING([for whether we can link to liblua])
 	  AC_TRY_LINK(
 		[#include <lua.h>
@@ -37,7 +37,7 @@ AC_DEFUN([X_AC_LUA],
 
 	  AC_MSG_RESULT([$x_ac_have_lua])
 	  CFLAGS="$saved_CFLAGS"
-	  LDFLAGS="$saved_LDFLAGS"
+	  LIBS="$saved_LIBS"
 	fi
 
 	AM_CONDITIONAL(HAVE_LUA, test "x$x_ac_have_lua" = "xyes")
diff --git a/auxdir/x_ac_man2html.m4 b/auxdir/x_ac_man2html.m4
new file mode 100644
index 000000000..d85656636
--- /dev/null
+++ b/auxdir/x_ac_man2html.m4
@@ -0,0 +1,23 @@
+##*****************************************************************************
+#  AUTHOR:
+#    Don Lipari <lipari1@llnl.gov>
+#
+#  SYNOPSIS:
+#    X_AC_MAN2HTML
+#
+#  DESCRIPTION:
+#    Test for the presence of the man2html command.
+#
+##*****************************************************************************
+
+AC_DEFUN([X_AC_MAN2HTML],
+[
+   AC_MSG_CHECKING([whether man2html is available])
+   AC_CHECK_PROG(ac_have_man2html, man2html, [yes], [no], [$bindir:/usr/bin:/usr/local/bin])
+
+   AM_CONDITIONAL(HAVE_MAN2HTML, test "x$ac_have_man2html" = "xyes")
+
+   if test "x$ac_have_man2html" != "xyes" ; then
+      AC_MSG_NOTICE([Unable to build man page html files without man2html])
+   fi
+])
diff --git a/auxdir/x_ac_munge.m4 b/auxdir/x_ac_munge.m4
index 96e414fa4..9d39f8a2d 100644
--- a/auxdir/x_ac_munge.m4
+++ b/auxdir/x_ac_munge.m4
@@ -36,7 +36,7 @@ AC_DEFUN([X_AC_MUNGE], [
         test -f "$d/include/munge.h" || continue
 	for bit in $_x_ac_munge_libs; do
           test -d "$d/$bit" || continue
-        
+
  	  _x_ac_munge_libs_save="$LIBS"
           LIBS="-L$d/$bit -lmunge $LIBS"
           AC_LINK_IFELSE(
@@ -54,7 +54,11 @@ AC_DEFUN([X_AC_MUNGE], [
   else
     MUNGE_LIBS="-lmunge"
     MUNGE_CPPFLAGS="-I$x_ac_cv_munge_dir/include"
-    MUNGE_LDFLAGS="-L$x_ac_cv_munge_dir/$bit"
+    if test "$ac_with_rpath" = "yes"; then
+      MUNGE_LDFLAGS="-Wl,-rpath -Wl,$x_ac_cv_munge_dir/$bit -L$x_ac_cv_munge_dir/$bit"
+    else
+      MUNGE_LDFLAGS="-L$x_ac_cv_munge_dir/$bit"
+    fi
   fi
 
   AC_SUBST(MUNGE_LIBS)
diff --git a/auxdir/x_ac_srun.m4 b/auxdir/x_ac_srun.m4
new file mode 100644
index 000000000..d94a24a61
--- /dev/null
+++ b/auxdir/x_ac_srun.m4
@@ -0,0 +1,32 @@
+##*****************************************************************************
+## $Id: x_ac_srun.m4 17616 2009-05-27 21:24:58Z jette $
+##*****************************************************************************
+#  AUTHOR:
+#    Morris Jette <jette@schedmd.com>
+#
+#  SYNOPSIS:
+#    AC_SRUN
+#
+#  DESCRIPTION:
+#    Adds support for --with-srun2aprun. If set then build srun-aprun wrapper
+#    rather than native SLURM srun.
+##*****************************************************************************
+
+AC_DEFUN([X_AC_SRUN2APRUN],
+[
+  ac_with_srun2aprun="no"
+
+  AC_MSG_CHECKING([for whether to include srun-aprun wrapper rather than native SLURM srun])
+  AC_ARG_WITH([srun2aprun],
+    AS_HELP_STRING(--with-srun2aprun,use aprun wrapper instead of native SLURM srun command),
+      [ case "$withval" in
+        yes) ac_with_srun2aprun=yes ;;
+        no)  ac_with_srun2aprun=no ;;
+        *)   AC_MSG_RESULT([doh!])
+             AC_MSG_ERROR([bad value "$withval" for --with-srun2aprun]) ;;
+      esac
+    ]
+  )
+  AC_MSG_RESULT($ac_with_srun2aprun)
+  AM_CONDITIONAL(BUILD_SRUN2APRUN, test "x$ac_with_srun2aprun" = "xyes")
+])
diff --git a/auxdir/x_ac_sun_const.m4 b/auxdir/x_ac_sun_const.m4
index f0881dbb3..747fca173 100644
--- a/auxdir/x_ac_sun_const.m4
+++ b/auxdir/x_ac_sun_const.m4
@@ -6,7 +6,7 @@
 #    X_AC_SUN_CONST
 #
 #  DESCRIPTION:
-#    Test for Sun Constellation system with 3-D interconect
+#    Test for Sun Constellation system with 3-D interconnect
 ##*****************************************************************************
 
 AC_DEFUN([X_AC_SUN_CONST], [
diff --git a/config.h.in b/config.h.in
index 6fe5b3f51..e7c7e0a37 100644
--- a/config.h.in
+++ b/config.h.in
@@ -3,9 +3,6 @@
 /* Define if building universal (internal helper macro) */
 #undef AC_APPLE_UNIVERSAL_BUILD
 
-/* Define the apbasil command location */
-#undef APBASIL_LOC
-
 /* Define the BG_BRIDGE_SO value */
 #undef BG_BRIDGE_SO
 
@@ -39,9 +36,15 @@
 /* Define to 1 if 3-dimensional architecture */
 #undef HAVE_3D
 
+/* Define to 1 if 4-dimensional architecture */
+#undef HAVE_4D
+
 /* Define to 1 for AIX operating system */
 #undef HAVE_AIX
 
+/* Define to 1 if running against an Alps emulation */
+#undef HAVE_ALPS_EMULATION
+
 /* Define to 1 if emulating or running on Blue Gene system */
 #undef HAVE_BG
 
@@ -63,9 +66,12 @@
 /* Define to 1 if you have the `cfmakeraw' function. */
 #undef HAVE_CFMAKERAW
 
-/* Define if Cray system */
+/* Define to 1 for Cray XT/XE systems */
 #undef HAVE_CRAY
 
+/* Define to 1 for emulating a Cray XT/XE system */
+#undef HAVE_CRAY_EMULATION
+
 /* Define to 1 if you have the <curses.h> header file. */
 #undef HAVE_CURSES_H
 
@@ -136,6 +142,12 @@
 /* define if you have libelanhosts. */
 #undef HAVE_LIBELANHOSTS
 
+/* Define to 1 if you have the `expat' library (-lexpat). */
+#undef HAVE_LIBEXPAT
+
+/* Define to 1 if you have the `job' library (-ljob). */
+#undef HAVE_LIBJOB
+
 /* define if you have libntbl. */
 #undef HAVE_LIBNTBL
 
@@ -209,6 +221,9 @@
 /* Define if you are compiling with readline. */
 #undef HAVE_READLINE
 
+/* Define to 1 for running on a real Cray XT/XE system */
+#undef HAVE_REAL_CRAY
+
 /* Define to 1 if you have the `sched_setaffinity' function. */
 #undef HAVE_SCHED_SETAFFINITY
 
@@ -389,6 +404,9 @@
 /* Define the project's release. */
 #undef RELEASE
 
+/* Define to 1 for salloc to kill child processes at job termination */
+#undef SALLOC_KILL_CMD
+
 /* Define to 1 to require salloc execution in the foreground. */
 #undef SALLOC_RUN_FOREGROUND
 
@@ -461,7 +479,7 @@
 /* Define to 1 if strerror_r returns char *. */
 #undef STRERROR_R_CHAR_P
 
-/* Define system dimension count */
+/* 3-dimensional architecture */
 #undef SYSTEM_DIMENSIONS
 
 /* Define to 1 if you can safely include both <sys/time.h> and <time.h>. */
diff --git a/configure b/configure
index e0ab48582..f2d184a9b 100755
--- a/configure
+++ b/configure
@@ -743,6 +743,8 @@ ac_includes_default="\
 ac_subst_vars='am__EXEEXT_FALSE
 am__EXEEXT_TRUE
 LTLIBOBJS
+BUILD_SRUN2APRUN_FALSE
+BUILD_SRUN2APRUN_TRUE
 WITH_BLCR_FALSE
 WITH_BLCR_TRUE
 BLCR_LDFLAGS
@@ -766,6 +768,10 @@ SSL_CPPFLAGS
 SSL_LIBS
 SSL_LDFLAGS
 READLINE_LIBS
+HAVE_MAN2HTML
+HAVE_MAN2HTML_FALSE
+HAVE_MAN2HTML_TRUE
+ac_have_man2html
 HAVE_LUA_FALSE
 HAVE_LUA_TRUE
 lua_LIBS
@@ -787,6 +793,14 @@ SLURMD_PORT
 SLURMCTLD_PORT
 DEBUG_MODULES_FALSE
 DEBUG_MODULES_TRUE
+HAVE_CRAY_EMULATION_FALSE
+HAVE_CRAY_EMULATION_TRUE
+HAVE_ALPS_EMULATION_FALSE
+HAVE_ALPS_EMULATION_TRUE
+HAVE_REAL_CRAY_FALSE
+HAVE_REAL_CRAY_TRUE
+HAVE_CRAY_FALSE
+HAVE_CRAY_TRUE
 WITH_PGSQL_FALSE
 WITH_PGSQL_TRUE
 PGSQL_CFLAGS
@@ -827,6 +841,8 @@ HAVE_NUMA_TRUE
 NUMA_LIBS
 WITH_GNU_LD_FALSE
 WITH_GNU_LD_TRUE
+WITH_CXX_FALSE
+WITH_CXX_TRUE
 PKG_CONFIG_LIBDIR
 PKG_CONFIG_PATH
 PKG_CONFIG
@@ -848,12 +864,8 @@ LD
 FGREP
 SED
 LIBTOOL
-am__fastdepCXX_FALSE
-am__fastdepCXX_TRUE
-CXXDEPMODE
-ac_ct_CXX
-CXXFLAGS
-CXX
+WITH_CYGWIN_FALSE
+WITH_CYGWIN_TRUE
 HAVE_AIX_PROCTRACK_FALSE
 HAVE_AIX_PROCTRACK_TRUE
 EGREP
@@ -869,6 +881,22 @@ CMD_LDFLAGS
 BLUEGENE_LOADED
 BLUEGENE_LOADED_FALSE
 BLUEGENE_LOADED_TRUE
+BGQ_LOADED
+BGQ_LOADED_FALSE
+BGQ_LOADED_TRUE
+BG_LDFLAGS
+am__fastdepCXX_FALSE
+am__fastdepCXX_TRUE
+CXXDEPMODE
+ac_ct_CXX
+CXXFLAGS
+CXX
+REAL_BG_L_P_LOADED
+REAL_BG_L_P_LOADED_FALSE
+REAL_BG_L_P_LOADED_TRUE
+BG_L_P_LOADED
+BG_L_P_LOADED_FALSE
+BG_L_P_LOADED_TRUE
 BGL_LOADED
 BGL_LOADED_FALSE
 BGL_LOADED_TRUE
@@ -981,6 +1009,7 @@ ac_subst_files=''
 ac_user_opts='
 enable_option_checking
 enable_maintainer_mode
+with_rpath
 with_db2_dir
 enable_bluegene_emulation
 enable_bgl_emulation
@@ -1001,8 +1030,6 @@ enable_pam
 with_pam_dir
 enable_iso8601
 enable_load_env_no_login
-enable_cray_xt
-with_apbasil
 enable_sun_const
 with_dimensions
 with_hwloc
@@ -1010,10 +1037,13 @@ with_xcpu
 enable_gtktest
 with_mysql_config
 with_pg_config
+with_alps_emulation
+enable_cray_emulation
 enable_debug
 enable_memory_leak_debug
 enable_front_end
 enable_partial_attach
+enable_salloc_kill_cmd
 enable_salloc_background
 with_slurmctld_port
 with_slurmd_port
@@ -1024,6 +1054,7 @@ with_ssl
 with_munge
 enable_multiple_slurmd
 with_blcr
+with_srun2aprun
 '
       ac_precious_vars='build_alias
 host_alias
@@ -1033,10 +1064,10 @@ CFLAGS
 LDFLAGS
 LIBS
 CPPFLAGS
-CPP
 CXX
 CXXFLAGS
 CCC
+CPP
 CXXCPP
 PKG_CONFIG
 PKG_CONFIG_PATH
@@ -1683,15 +1714,18 @@ Optional Features:
   --enable-load-env-no-login
                           enable --get-user-env option to load user
                           environment without .login
-  --enable-cray-xt        enable Cray XT system support
   --enable-sun-const      enable Sun Constellation system support
   --disable-gtktest       do not try to compile and run a test GTK+ program
+  --enable-cray-emulation Run SLURM in an emulated Cray mode
   --enable-debug          enable debugging code for development
   --enable-memory-leak-debug
                           enable memory leak debugging code for development
   --enable-front-end      enable slurmd operation on a front-end
   --disable-partial-attach
                           disable debugger partial task attach support
+  --enable-salloc-kill-cmd
+                          salloc should kill child processes at job
+                          termination
   --disable-salloc-background
                           disable salloc execution in the background
   --enable-multiple-slurmd
@@ -1700,6 +1734,7 @@ Optional Features:
 Optional Packages:
   --with-PACKAGE[=ARG]    use PACKAGE [ARG=yes]
   --without-PACKAGE       do not use PACKAGE (same as --with-PACKAGE=no)
+  --without-rpath         Do not include rpath in build
   --with-db2-dir=PATH     Specify path to parent directory of DB2 library
   --with-bg-serial=NAME   set BG_SERIAL value
 
@@ -1710,7 +1745,6 @@ Optional Packages:
   --with-cpusetdir=PATH   specify path to cpuset directory default is
                           /dev/cpuset
   --with-pam_dir=PATH     Specify path to PAM module installation
-  --with-apbasil=PATH     Specify path to apbasil command
   --with-dimensions=N     set system dimension count for generic computer
                           system
   --with-hwloc=PATH       Specify path to hwloc installation
@@ -1718,6 +1752,8 @@ Optional Packages:
   --with-mysql_config=PATH
                           Specify path to mysql_config binary
   --with-pg_config=PATH   Specify path to pg_config binary
+  --with-alps-emulation   Run SLURM against an emulated Alps system - requires
+                          option cray.conf [default=no]
   --with-slurmctld-port=N set slurmctld default port [6817]
   --with-slurmd-port=N    set slurmd default port [6818]
   --with-slurmdbd-port=N  set slurmdbd default port [6819]
@@ -1727,6 +1763,8 @@ Optional Packages:
   --with-ssl=PATH         Specify path to OpenSSL installation
   --with-munge=PATH       Specify path to munge installation
   --with-blcr=PATH        Specify path to BLCR installation
+  --with-srun2aprun       use aprun wrapper instead of native SLURM srun
+                          command
 
 Some influential environment variables:
   CC          C compiler command
@@ -1736,9 +1774,9 @@ Some influential environment variables:
   LIBS        libraries to pass to the linker, e.g. -l<library>
   CPPFLAGS    (Objective) C/C++ preprocessor flags, e.g. -I<include dir> if
               you have headers in a nonstandard directory <include dir>
-  CPP         C preprocessor
   CXX         C++ compiler command
   CXXFLAGS    C++ compiler flags
+  CPP         C preprocessor
   CXXCPP      C++ preprocessor
   PKG_CONFIG  path to pkg-config utility
   PKG_CONFIG_PATH
@@ -1912,6 +1950,90 @@ fi
 
 } # ac_fn_c_try_link
 
+# ac_fn_cxx_try_compile LINENO
+# ----------------------------
+# Try to compile conftest.$ac_ext, and return whether this succeeded.
+ac_fn_cxx_try_compile ()
+{
+  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+  rm -f conftest.$ac_objext
+  if { { ac_try="$ac_compile"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_compile") 2>conftest.err
+  ac_status=$?
+  if test -s conftest.err; then
+    grep -v '^ *+' conftest.err >conftest.er1
+    cat conftest.er1 >&5
+    mv -f conftest.er1 conftest.err
+  fi
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; } && {
+	 test -z "$ac_cxx_werror_flag" ||
+	 test ! -s conftest.err
+       } && test -s conftest.$ac_objext; then :
+  ac_retval=0
+else
+  $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+	ac_retval=1
+fi
+  eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
+  as_fn_set_status $ac_retval
+
+} # ac_fn_cxx_try_compile
+
+# ac_fn_cxx_try_link LINENO
+# -------------------------
+# Try to link conftest.$ac_ext, and return whether this succeeded.
+ac_fn_cxx_try_link ()
+{
+  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+  rm -f conftest.$ac_objext conftest$ac_exeext
+  if { { ac_try="$ac_link"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_link") 2>conftest.err
+  ac_status=$?
+  if test -s conftest.err; then
+    grep -v '^ *+' conftest.err >conftest.er1
+    cat conftest.er1 >&5
+    mv -f conftest.er1 conftest.err
+  fi
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; } && {
+	 test -z "$ac_cxx_werror_flag" ||
+	 test ! -s conftest.err
+       } && test -s conftest$ac_exeext && {
+	 test "$cross_compiling" = yes ||
+	 $as_test_x conftest$ac_exeext
+       }; then :
+  ac_retval=0
+else
+  $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+	ac_retval=1
+fi
+  # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information
+  # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would
+  # interfere with the next link command; also delete a directory that is
+  # left behind by Apple's compiler.  We do this before executing the actions.
+  rm -rf conftest.dSYM conftest_ipa8_conftest.oo
+  eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
+  as_fn_set_status $ac_retval
+
+} # ac_fn_cxx_try_link
+
 # ac_fn_c_try_cpp LINENO
 # ----------------------
 # Try to preprocess conftest.$ac_ext, and return whether this succeeded.
@@ -2109,44 +2231,6 @@ $as_echo "$ac_res" >&6; }
 
 } # ac_fn_c_check_header_compile
 
-# ac_fn_cxx_try_compile LINENO
-# ----------------------------
-# Try to compile conftest.$ac_ext, and return whether this succeeded.
-ac_fn_cxx_try_compile ()
-{
-  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
-  rm -f conftest.$ac_objext
-  if { { ac_try="$ac_compile"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
-$as_echo "$ac_try_echo"; } >&5
-  (eval "$ac_compile") 2>conftest.err
-  ac_status=$?
-  if test -s conftest.err; then
-    grep -v '^ *+' conftest.err >conftest.er1
-    cat conftest.er1 >&5
-    mv -f conftest.er1 conftest.err
-  fi
-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
-  test $ac_status = 0; } && {
-	 test -z "$ac_cxx_werror_flag" ||
-	 test ! -s conftest.err
-       } && test -s conftest.$ac_objext; then :
-  ac_retval=0
-else
-  $as_echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
-	ac_retval=1
-fi
-  eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
-  as_fn_set_status $ac_retval
-
-} # ac_fn_cxx_try_compile
-
 # ac_fn_c_check_func LINENO FUNC VAR
 # ----------------------------------
 # Tests whether FUNC exists, setting the cache variable VAR accordingly
@@ -2251,52 +2335,6 @@ fi
 
 } # ac_fn_cxx_try_cpp
 
-# ac_fn_cxx_try_link LINENO
-# -------------------------
-# Try to link conftest.$ac_ext, and return whether this succeeded.
-ac_fn_cxx_try_link ()
-{
-  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
-  rm -f conftest.$ac_objext conftest$ac_exeext
-  if { { ac_try="$ac_link"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
-$as_echo "$ac_try_echo"; } >&5
-  (eval "$ac_link") 2>conftest.err
-  ac_status=$?
-  if test -s conftest.err; then
-    grep -v '^ *+' conftest.err >conftest.er1
-    cat conftest.er1 >&5
-    mv -f conftest.er1 conftest.err
-  fi
-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
-  test $ac_status = 0; } && {
-	 test -z "$ac_cxx_werror_flag" ||
-	 test ! -s conftest.err
-       } && test -s conftest$ac_exeext && {
-	 test "$cross_compiling" = yes ||
-	 $as_test_x conftest$ac_exeext
-       }; then :
-  ac_retval=0
-else
-  $as_echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
-	ac_retval=1
-fi
-  # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information
-  # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would
-  # interfere with the next link command; also delete a directory that is
-  # left behind by Apple's compiler.  We do this before executing the actions.
-  rm -rf conftest.dSYM conftest_ipa8_conftest.oo
-  eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
-  as_fn_set_status $ac_retval
-
-} # ac_fn_cxx_try_link
-
 # ac_fn_c_check_decl LINENO SYMBOL VAR INCLUDES
 # ---------------------------------------------
 # Tests whether SYMBOL is declared in INCLUDES, setting cache variable VAR
@@ -2938,7 +2976,7 @@ if echo "$RELEASE" | grep -e "UNSTABLE"; then
    SLURM_RELEASE="unstable svn build $DATE"
    SLURM_VERSION_STRING="$SLURM_MAJOR.$SLURM_MINOR ($SLURM_RELEASE)"
 else
-   SLURM_RELEASE="`echo $RELEASE | sed 's/^.*\.//'`"
+   SLURM_RELEASE="`echo $RELEASE | sed 's/^0\.//'`"
    SLURM_VERSION_STRING="$SLURM_MAJOR.$SLURM_MINOR.$SLURM_MICRO"
    test $RELEASE = "1" || SLURM_VERSION_STRING="$SLURM_VERSION_STRING-$SLURM_RELEASE"
 fi
@@ -3482,6 +3520,28 @@ ac_config_headers="$ac_config_headers config.h"
 ac_config_headers="$ac_config_headers slurm/slurm.h"
 
 
+
+  ac_with_rpath=yes
+
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to include rpath in build" >&5
+$as_echo_n "checking whether to include rpath in build... " >&6; }
+
+# Check whether --with-rpath was given.
+if test "${with_rpath+set}" = set; then :
+  withval=$with_rpath;  case "$withval" in
+        yes) ac_with_rpath=yes ;;
+        no)  ac_with_rpath=no ;;
+        *)   { $as_echo "$as_me:${as_lineno-$LINENO}: result: doh!" >&5
+$as_echo "doh!" >&6; }
+             as_fn_error $? "bad value \"$withval\" for --without-rpath" "$LINENO" 5  ;;
+        esac
+
+
+fi
+
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_with_rpath" >&5
+$as_echo "$ac_with_rpath" >&6; }
+
 DEPDIR="${am__leading_dot}deps"
 
 ac_config_commands="$ac_config_commands depfiles"
@@ -4463,6 +4523,7 @@ fi
 
 
 
+	ac_real_bluegene_loaded=no
 	ac_bluegene_loaded=no
 
 
@@ -4630,6 +4691,7 @@ _ACEOF
 
  		#define ac_bluegene_loaded so we don't load another bluegene conf
 		ac_bluegene_loaded=yes
+		ac_real_bluegene_loaded=yes
   	fi
 
 
@@ -4785,48 +4847,460 @@ _ACEOF
 
  		#define ac_bluegene_loaded so we don't load another bluegene conf
 		ac_bluegene_loaded=yes
-   	fi
+		ac_real_bluegene_loaded=yes
+	fi
 
 
 
 
-	# test for bluegene emulation mode
-   	# Check whether --enable-bgq-emulation was given.
-if test "${enable_bgq_emulation+set}" = set; then :
-  enableval=$enable_bgq_emulation;  case "$enableval" in
-	  yes) bgq_emulation=yes ;;
-	  no)  bgq_emulation=no ;;
-	  *)   as_fn_error $? "bad value \"$enableval\" for --enable-bgq-emulation" "$LINENO" 5   ;;
-    	esac
+ if test "x$ac_bluegene_loaded" = "xyes"; then
+  BG_L_P_LOADED_TRUE=
+  BG_L_P_LOADED_FALSE='#'
+else
+  BG_L_P_LOADED_TRUE='#'
+  BG_L_P_LOADED_FALSE=
 fi
 
 
-	# Skip if already set
-   	if test "x$ac_bluegene_loaded" = "xyes" ; then
-		bg_default_dirs=""
-	elif test "x$bgq_emulation" = "xyes"; then
-
-$as_echo "#define SYSTEM_DIMENSIONS 4" >>confdefs.h
-
-
-$as_echo "#define HAVE_BG 1" >>confdefs.h
 
+ if test "x$ac_real_bluegene_loaded" = "xyes"; then
+  REAL_BG_L_P_LOADED_TRUE=
+  REAL_BG_L_P_LOADED_FALSE='#'
+else
+  REAL_BG_L_P_LOADED_TRUE='#'
+  REAL_BG_L_P_LOADED_FALSE=
+fi
 
-$as_echo "#define HAVE_BGQ 1" >>confdefs.h
 
 
-$as_echo "#define HAVE_FRONT_END 1" >>confdefs.h
+ac_ext=cpp
+ac_cpp='$CXXCPP $CPPFLAGS'
+ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
+if test -z "$CXX"; then
+  if test -n "$CCC"; then
+    CXX=$CCC
+  else
+    if test -n "$ac_tool_prefix"; then
+  for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC
+  do
+    # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args.
+set dummy $ac_tool_prefix$ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_CXX+set}" = set; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$CXX"; then
+  ac_cv_prog_CXX="$CXX" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+    ac_cv_prog_CXX="$ac_tool_prefix$ac_prog"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+CXX=$ac_cv_prog_CXX
+if test -n "$CXX"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CXX" >&5
+$as_echo "$CXX" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+    test -n "$CXX" && break
+  done
+fi
+if test -z "$CXX"; then
+  ac_ct_CXX=$CXX
+  for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC
+do
+  # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_ac_ct_CXX+set}" = set; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$ac_ct_CXX"; then
+  ac_cv_prog_ac_ct_CXX="$ac_ct_CXX" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+    ac_cv_prog_ac_ct_CXX="$ac_prog"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_CXX=$ac_cv_prog_ac_ct_CXX
+if test -n "$ac_ct_CXX"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CXX" >&5
+$as_echo "$ac_ct_CXX" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+  test -n "$ac_ct_CXX" && break
+done
+
+  if test "x$ac_ct_CXX" = x; then
+    CXX="g++"
+  else
+    case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+    CXX=$ac_ct_CXX
+  fi
+fi
+
+  fi
+fi
+# Provide some information about the compiler.
+$as_echo "$as_me:${as_lineno-$LINENO}: checking for C++ compiler version" >&5
+set X $ac_compile
+ac_compiler=$2
+for ac_option in --version -v -V -qversion; do
+  { { ac_try="$ac_compiler $ac_option >&5"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_compiler $ac_option >&5") 2>conftest.err
+  ac_status=$?
+  if test -s conftest.err; then
+    sed '10a\
+... rest of stderr output deleted ...
+         10q' conftest.err >conftest.er1
+    cat conftest.er1 >&5
+  fi
+  rm -f conftest.er1 conftest.err
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }
+done
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C++ compiler" >&5
+$as_echo_n "checking whether we are using the GNU C++ compiler... " >&6; }
+if test "${ac_cv_cxx_compiler_gnu+set}" = set; then :
+  $as_echo_n "(cached) " >&6
+else
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main ()
+{
+#ifndef __GNUC__
+       choke me
+#endif
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+  ac_compiler_gnu=yes
+else
+  ac_compiler_gnu=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ac_cv_cxx_compiler_gnu=$ac_compiler_gnu
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_cxx_compiler_gnu" >&5
+$as_echo "$ac_cv_cxx_compiler_gnu" >&6; }
+if test $ac_compiler_gnu = yes; then
+  GXX=yes
+else
+  GXX=
+fi
+ac_test_CXXFLAGS=${CXXFLAGS+set}
+ac_save_CXXFLAGS=$CXXFLAGS
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CXX accepts -g" >&5
+$as_echo_n "checking whether $CXX accepts -g... " >&6; }
+if test "${ac_cv_prog_cxx_g+set}" = set; then :
+  $as_echo_n "(cached) " >&6
+else
+  ac_save_cxx_werror_flag=$ac_cxx_werror_flag
+   ac_cxx_werror_flag=yes
+   ac_cv_prog_cxx_g=no
+   CXXFLAGS="-g"
+   cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main ()
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+  ac_cv_prog_cxx_g=yes
+else
+  CXXFLAGS=""
+      cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main ()
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+
+else
+  ac_cxx_werror_flag=$ac_save_cxx_werror_flag
+	 CXXFLAGS="-g"
+	 cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main ()
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+  ac_cv_prog_cxx_g=yes
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+   ac_cxx_werror_flag=$ac_save_cxx_werror_flag
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cxx_g" >&5
+$as_echo "$ac_cv_prog_cxx_g" >&6; }
+if test "$ac_test_CXXFLAGS" = set; then
+  CXXFLAGS=$ac_save_CXXFLAGS
+elif test $ac_cv_prog_cxx_g = yes; then
+  if test "$GXX" = yes; then
+    CXXFLAGS="-g -O2"
+  else
+    CXXFLAGS="-g"
+  fi
+else
+  if test "$GXX" = yes; then
+    CXXFLAGS="-O2"
+  else
+    CXXFLAGS=
+  fi
+fi
+ac_ext=cpp
+ac_cpp='$CXXCPP $CPPFLAGS'
+ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
+
+depcc="$CXX"  am_compiler_list=
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5
+$as_echo_n "checking dependency style of $depcc... " >&6; }
+if test "${am_cv_CXX_dependencies_compiler_type+set}" = set; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then
+  # We make a subdir and do the tests there.  Otherwise we can end up
+  # making bogus files that we don't know about and never remove.  For
+  # instance it was reported that on HP-UX the gcc test will end up
+  # making a dummy file named `D' -- because `-MD' means `put the output
+  # in D'.
+  mkdir conftest.dir
+  # Copy depcomp to subdir because otherwise we won't find it if we're
+  # using a relative directory.
+  cp "$am_depcomp" conftest.dir
+  cd conftest.dir
+  # We will build objects and dependencies in a subdirectory because
+  # it helps to detect inapplicable dependency modes.  For instance
+  # both Tru64's cc and ICC support -MD to output dependencies as a
+  # side effect of compilation, but ICC will put the dependencies in
+  # the current directory while Tru64 will put them in the object
+  # directory.
+  mkdir sub
+
+  am_cv_CXX_dependencies_compiler_type=none
+  if test "$am_compiler_list" = ""; then
+     am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp`
+  fi
+  am__universal=false
+  case " $depcc " in #(
+     *\ -arch\ *\ -arch\ *) am__universal=true ;;
+     esac
+
+  for depmode in $am_compiler_list; do
+    # Setup a source with many dependencies, because some compilers
+    # like to wrap large dependency lists on column 80 (with \), and
+    # we should not choose a depcomp mode which is confused by this.
+    #
+    # We need to recreate these files for each test, as the compiler may
+    # overwrite some of them when testing with obscure command lines.
+    # This happens at least with the AIX C compiler.
+    : > sub/conftest.c
+    for i in 1 2 3 4 5 6; do
+      echo '#include "conftst'$i'.h"' >> sub/conftest.c
+      # Using `: > sub/conftst$i.h' creates only sub/conftst1.h with
+      # Solaris 8's {/usr,}/bin/sh.
+      touch sub/conftst$i.h
+    done
+    echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf
+
+    # We check with `-c' and `-o' for the sake of the "dashmstdout"
+    # mode.  It turns out that the SunPro C++ compiler does not properly
+    # handle `-M -o', and we need to detect this.  Also, some Intel
+    # versions had trouble with output in subdirs
+    am__obj=sub/conftest.${OBJEXT-o}
+    am__minus_obj="-o $am__obj"
+    case $depmode in
+    gcc)
+      # This depmode causes a compiler race in universal mode.
+      test "$am__universal" = false || continue
+      ;;
+    nosideeffect)
+      # after this tag, mechanisms are not by side-effect, so they'll
+      # only be used when explicitly requested
+      if test "x$enable_dependency_tracking" = xyes; then
+	continue
+      else
+	break
+      fi
+      ;;
+    msvisualcpp | msvcmsys)
+      # This compiler won't grok `-c -o', but also, the minuso test has
+      # not run yet.  These depmodes are late enough in the game, and
+      # so weak that their functioning should not be impacted.
+      am__obj=conftest.${OBJEXT-o}
+      am__minus_obj=
+      ;;
+    none) break ;;
+    esac
+    if depmode=$depmode \
+       source=sub/conftest.c object=$am__obj \
+       depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \
+       $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \
+         >/dev/null 2>conftest.err &&
+       grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 &&
+       grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 &&
+       grep $am__obj sub/conftest.Po > /dev/null 2>&1 &&
+       ${MAKE-make} -s -f confmf > /dev/null 2>&1; then
+      # icc doesn't choke on unknown options, it will just issue warnings
+      # or remarks (even with -Werror).  So we grep stderr for any message
+      # that says an option was ignored or not supported.
+      # When given -MP, icc 7.0 and 7.1 complain thusly:
+      #   icc: Command line warning: ignoring option '-M'; no argument required
+      # The diagnosis changed in icc 8.0:
+      #   icc: Command line remark: option '-MP' not supported
+      if (grep 'ignoring option' conftest.err ||
+          grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else
+        am_cv_CXX_dependencies_compiler_type=$depmode
+        break
+      fi
+    fi
+  done
+
+  cd ..
+  rm -rf conftest.dir
+else
+  am_cv_CXX_dependencies_compiler_type=none
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_CXX_dependencies_compiler_type" >&5
+$as_echo "$am_cv_CXX_dependencies_compiler_type" >&6; }
+CXXDEPMODE=depmode=$am_cv_CXX_dependencies_compiler_type
+
+ if
+  test "x$enable_dependency_tracking" != xno \
+  && test "$am_cv_CXX_dependencies_compiler_type" = gcc3; then
+  am__fastdepCXX_TRUE=
+  am__fastdepCXX_FALSE='#'
+else
+  am__fastdepCXX_TRUE='#'
+  am__fastdepCXX_FALSE=
+fi
+
+
+
+
+	# test for bluegene emulation mode
+   	# Check whether --enable-bgq-emulation was given.
+if test "${enable_bgq_emulation+set}" = set; then :
+  enableval=$enable_bgq_emulation;  case "$enableval" in
+	  yes) bgq_emulation=yes ;;
+	  no)  bgq_emulation=no ;;
+	  *)   as_fn_error $? "bad value \"$enableval\" for --enable-bgq-emulation" "$LINENO" 5   ;;
+    	esac
+fi
+
+
+	# Skip if already set
+   	if test "x$ac_bluegene_loaded" = "xyes" ; then
+		bg_default_dirs=""
+	elif test "x$bgq_emulation" = "xyes"; then
+
+$as_echo "#define HAVE_4D 1" >>confdefs.h
+
+
+$as_echo "#define SYSTEM_DIMENSIONS 4" >>confdefs.h
+
+
+$as_echo "#define HAVE_BG 1" >>confdefs.h
+
+
+$as_echo "#define HAVE_BGQ 1" >>confdefs.h
+
+
+$as_echo "#define HAVE_FRONT_END 1" >>confdefs.h
 
     		{ $as_echo "$as_me:${as_lineno-$LINENO}: Running in BG/Q emulation mode" >&5
 $as_echo "$as_me: Running in BG/Q emulation mode" >&6;}
 		bg_default_dirs=""
  		#define ac_bluegene_loaded so we don't load another bluegene conf
 		ac_bluegene_loaded=yes
+		ac_bgq_loaded=yes
 	else
 		bg_default_dirs="/bgsys/drivers/ppcfloor"
 	fi
 
 	libname=bgsched
+	loglibname=log4cxx
 
    	for bg_dir in $trydb2dir "" $bg_default_dirs; do
       	# Skip directories that don't exist
@@ -4834,55 +5308,88 @@ $as_echo "$as_me: Running in BG/Q emulation mode" >&6;}
 			continue;
       		fi
 
-		soloc=$bg_dir/lib64/lib$libname.so
+		soloc=$bg_dir/hlcs/lib/lib$libname.so
       		# Search for required BG API libraries in the directory
       		if test -z "$have_bg_ar" -a -f "$soloc" ; then
 			have_bgq_ar=yes
-			bg_ldflags="$bg_ldflags -L$bg_dir/lib64 -L/usr/lib64 -Wl,--unresolved-symbols=ignore-in-shared-libs -l$libname"
+			if test "$ac_with_rpath" = "yes"; then
+				bg_ldflags="$bg_ldflags -Wl,-rpath -Wl,$bg_dir/hlcs/lib -L$bg_dir/hlcs/lib -l$libname"
+			else
+				bg_ldflags="$bg_ldflags -L$bg_dir/hlcs/lib -l$libname"
+			fi
+		fi
+
+  		soloc=$bg_dir/extlib/lib/lib$loglibname.so
+    		if test -z "$have_bg_ar" -a -f "$soloc" ; then
+			have_bgq_ar=yes
+			if test "$ac_with_rpath" = "yes"; then
+				bg_ldflags="$bg_ldflags -Wl,-rpath -Wl,$bg_dir/extlib/lib -L$bg_dir/extlib/lib -l$loglibname"
+			else
+				bg_ldflags="$bg_ldflags -L$bg_dir/extlib/lib -l$loglibname"
+			fi
 		fi
 
       		# Search for headers in the directory
-      		if test -z "$have_bg_hdr" -a -f "$bg_dir/include/rm_api.h" ; then
+      		if test -z "$have_bg_hdr" -a -f "$bg_dir/hlcs/include/bgsched/bgsched.h" ; then
 			have_bgq_hdr=yes
-			bg_includes="-I$bg_dir/include"
+			bg_includes="-I$bg_dir/hlcs/include"
       		fi
+     		if test -z "$have_bg_hdr" -a -f "$bg_dir/extlib/include/log4cxx/logger.h" ; then
+			have_bgq_hdr=yes
+			bg_includes="$bg_includes -I$bg_dir/extlib/include"
+    		fi
    	done
 
    	if test ! -z "$have_bgq_ar" -a ! -z "$have_bgq_hdr" ; then
       		# ac_with_readline="no"
 		# Test to make sure the api is good
 		saved_LDFLAGS="$LDFLAGS"
-      	 	LDFLAGS="$saved_LDFLAGS $bg_ldflags -m64"
+      	 	LDFLAGS="$saved_LDFLAGS $bg_ldflags -m64 $bg_includes"
+		ac_ext=cpp
+ac_cpp='$CXXCPP $CPPFLAGS'
+ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
+
 		cat confdefs.h - <<_ACEOF >conftest.$ac_ext
 /* end confdefs.h.  */
- int rm_set_serial(char *);
+#include <bgsched/bgsched.h>
+#include <log4cxx/logger.h>
 int
 main ()
 {
- rm_set_serial("");
+ bgsched::init("");
+ log4cxx::LoggerPtr logger_ptr(log4cxx::Logger::getLogger( "ibm" ));
   ;
   return 0;
 }
 _ACEOF
-if ac_fn_c_try_link "$LINENO"; then :
+if ac_fn_cxx_try_link "$LINENO"; then :
   have_bgq_files=yes
 else
-  as_fn_error $? "There is a problem linking to the BG/P api." "$LINENO" 5
+  as_fn_error $? "There is a problem linking to the BG/Q api." "$LINENO" 5
 fi
 rm -f core conftest.err conftest.$ac_objext \
     conftest$ac_exeext conftest.$ac_ext
+		ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
 		LDFLAGS="$saved_LDFLAGS"
    	fi
 
   	if test ! -z "$have_bgq_files" ; then
+      		BG_LDFLAGS="$bg_ldflags"
       		BG_INCLUDES="$bg_includes"
 		CFLAGS="$CFLAGS -m64"
    		CXXFLAGS="$CXXFLAGS $CFLAGS"
 
-$as_echo "#define HAVE_3D 1" >>confdefs.h
+$as_echo "#define HAVE_4D 1" >>confdefs.h
 
 
-$as_echo "#define SYSTEM_DIMENSIONS 3" >>confdefs.h
+$as_echo "#define SYSTEM_DIMENSIONS 4" >>confdefs.h
 
 
 $as_echo "#define HAVE_BG 1" >>confdefs.h
@@ -4896,32 +5403,32 @@ $as_echo "#define HAVE_FRONT_END 1" >>confdefs.h
 
 $as_echo "#define HAVE_BG_FILES 1" >>confdefs.h
 
+		#AC_DEFINE_UNQUOTED(BG_BRIDGE_SO, "$soloc", [Define the BG_BRIDGE_SO value])
 
-cat >>confdefs.h <<_ACEOF
-#define BG_BRIDGE_SO "$soloc"
-_ACEOF
+    		{ $as_echo "$as_me:${as_lineno-$LINENO}: Running on a legitimate BG/Q system" >&5
+$as_echo "$as_me: Running on a legitimate BG/Q system" >&6;}
+		# AC_MSG_CHECKING(for BG serial value)
+		# bg_serial="BGQ"
+    		# AC_ARG_WITH(bg-serial,, [bg_serial="$withval"])
+     		# AC_MSG_RESULT($bg_serial)
+     		# AC_DEFINE_UNQUOTED(BG_SERIAL, "$bg_serial", [Define the BG_SERIAL value])
+ 		#define ac_bluegene_loaded so we don't load another bluegene conf
+		ac_bluegene_loaded=yes
+		ac_real_bluegene_loaded=yes
+		ac_bgq_loaded=yes
+	fi
 
 
-		{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for BG serial value" >&5
-$as_echo_n "checking for BG serial value... " >&6; }
-		bg_serial="BGQ"
 
-# Check whether --with-bg-serial was given.
-if test "${with_bg_serial+set}" = set; then :
-  withval=$with_bg_serial; bg_serial="$withval"
-fi
-
-     		{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $bg_serial" >&5
-$as_echo "$bg_serial" >&6; }
 
-cat >>confdefs.h <<_ACEOF
-#define BG_SERIAL "$bg_serial"
-_ACEOF
-
- 		#define ac_bluegene_loaded so we don't load another bluegene conf
-		ac_bluegene_loaded=yes
-   	fi
 
+ if test "x$ac_bgq_loaded" = "xyes"; then
+  BGQ_LOADED_TRUE=
+  BGQ_LOADED_FALSE='#'
+else
+  BGQ_LOADED_TRUE='#'
+  BGQ_LOADED_FALSE=
+fi
 
 
 
@@ -4935,6 +5442,7 @@ fi
 
 
 
+
 ac_ext=c
 ac_cpp='$CPP $CPPFLAGS'
 ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
@@ -5638,11 +6146,26 @@ $as_echo "#define USE_ALIAS 1" >>confdefs.h
  ;;
 esac
 
+ac_have_cygwin=no
 case "$host" in
+     	*cygwin)   LDFLAGS="$LDFLAGS -no-undefined"
+		   SO_LDFLAGS="$SO_LDFLAGS \$(top_builddir)/src/api/libslurmhelper.la"
+
+		   ac_have_cygwin=yes
+		   ;;
 	*solaris*) CC="/usr/sfw/bin/gcc"
 		   CFLAGS="$CFLAGS -D_POSIX_PTHREAD_SEMANTICS -I/usr/sfw/include"
 		   LDFLAGS="$LDFLAGS -L/usr/sfw/lib"
+		   ;;
 esac
+ if test x"$ac_have_cygwin" == x"yes"; then
+  WITH_CYGWIN_TRUE=
+  WITH_CYGWIN_FALSE='#'
+else
+  WITH_CYGWIN_TRUE='#'
+  WITH_CYGWIN_FALSE=
+fi
+
 
 ac_ext=c
 ac_cpp='$CPP $CPPFLAGS'
@@ -7205,13 +7728,13 @@ if test "${lt_cv_nm_interface+set}" = set; then :
 else
   lt_cv_nm_interface="BSD nm"
   echo "int some_variable = 0;" > conftest.$ac_ext
-  (eval echo "\"\$as_me:7208: $ac_compile\"" >&5)
+  (eval echo "\"\$as_me:7731: $ac_compile\"" >&5)
   (eval "$ac_compile" 2>conftest.err)
   cat conftest.err >&5
-  (eval echo "\"\$as_me:7211: $NM \\\"conftest.$ac_objext\\\"\"" >&5)
+  (eval echo "\"\$as_me:7734: $NM \\\"conftest.$ac_objext\\\"\"" >&5)
   (eval "$NM \"conftest.$ac_objext\"" 2>conftest.err > conftest.out)
   cat conftest.err >&5
-  (eval echo "\"\$as_me:7214: output\"" >&5)
+  (eval echo "\"\$as_me:7737: output\"" >&5)
   cat conftest.out >&5
   if $GREP 'External.*some_variable' conftest.out > /dev/null; then
     lt_cv_nm_interface="MS dumpbin"
@@ -8416,7 +8939,7 @@ ia64-*-hpux*)
   ;;
 *-*-irix6*)
   # Find out which ABI we are using.
-  echo '#line 8419 "configure"' > conftest.$ac_ext
+  echo '#line 8942 "configure"' > conftest.$ac_ext
   if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
   (eval $ac_compile) 2>&5
   ac_status=$?
@@ -9704,7 +10227,6 @@ fi
 
 
 
-
 # Set options
 
 
@@ -10205,11 +10727,11 @@ else
    -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
    -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
-   (eval echo "\"\$as_me:10208: $lt_compile\"" >&5)
+   (eval echo "\"\$as_me:10730: $lt_compile\"" >&5)
    (eval "$lt_compile" 2>conftest.err)
    ac_status=$?
    cat conftest.err >&5
-   echo "$as_me:10212: \$? = $ac_status" >&5
+   echo "$as_me:10734: \$? = $ac_status" >&5
    if (exit $ac_status) && test -s "$ac_outfile"; then
      # The compiler can only warn and ignore the option if not recognized
      # So say no if there are warnings other than the usual output.
@@ -10544,11 +11066,11 @@ else
    -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
    -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
-   (eval echo "\"\$as_me:10547: $lt_compile\"" >&5)
+   (eval echo "\"\$as_me:11069: $lt_compile\"" >&5)
    (eval "$lt_compile" 2>conftest.err)
    ac_status=$?
    cat conftest.err >&5
-   echo "$as_me:10551: \$? = $ac_status" >&5
+   echo "$as_me:11073: \$? = $ac_status" >&5
    if (exit $ac_status) && test -s "$ac_outfile"; then
      # The compiler can only warn and ignore the option if not recognized
      # So say no if there are warnings other than the usual output.
@@ -10649,11 +11171,11 @@ else
    -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
    -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
-   (eval echo "\"\$as_me:10652: $lt_compile\"" >&5)
+   (eval echo "\"\$as_me:11174: $lt_compile\"" >&5)
    (eval "$lt_compile" 2>out/conftest.err)
    ac_status=$?
    cat out/conftest.err >&5
-   echo "$as_me:10656: \$? = $ac_status" >&5
+   echo "$as_me:11178: \$? = $ac_status" >&5
    if (exit $ac_status) && test -s out/conftest2.$ac_objext
    then
      # The compiler can only warn and ignore the option if not recognized
@@ -10704,11 +11226,11 @@ else
    -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
    -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
-   (eval echo "\"\$as_me:10707: $lt_compile\"" >&5)
+   (eval echo "\"\$as_me:11229: $lt_compile\"" >&5)
    (eval "$lt_compile" 2>out/conftest.err)
    ac_status=$?
    cat out/conftest.err >&5
-   echo "$as_me:10711: \$? = $ac_status" >&5
+   echo "$as_me:11233: \$? = $ac_status" >&5
    if (exit $ac_status) && test -s out/conftest2.$ac_objext
    then
      # The compiler can only warn and ignore the option if not recognized
@@ -13088,7 +13610,7 @@ else
   lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
   lt_status=$lt_dlunknown
   cat > conftest.$ac_ext <<_LT_EOF
-#line 13091 "configure"
+#line 13613 "configure"
 #include "confdefs.h"
 
 #if HAVE_DLFCN_H
@@ -13184,7 +13706,7 @@ else
   lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
   lt_status=$lt_dlunknown
   cat > conftest.$ac_ext <<_LT_EOF
-#line 13187 "configure"
+#line 13709 "configure"
 #include "confdefs.h"
 
 #if HAVE_DLFCN_H
@@ -15140,11 +15662,11 @@ else
    -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
    -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
-   (eval echo "\"\$as_me:15143: $lt_compile\"" >&5)
+   (eval echo "\"\$as_me:15665: $lt_compile\"" >&5)
    (eval "$lt_compile" 2>conftest.err)
    ac_status=$?
    cat conftest.err >&5
-   echo "$as_me:15147: \$? = $ac_status" >&5
+   echo "$as_me:15669: \$? = $ac_status" >&5
    if (exit $ac_status) && test -s "$ac_outfile"; then
      # The compiler can only warn and ignore the option if not recognized
      # So say no if there are warnings other than the usual output.
@@ -15239,11 +15761,11 @@ else
    -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
    -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
-   (eval echo "\"\$as_me:15242: $lt_compile\"" >&5)
+   (eval echo "\"\$as_me:15764: $lt_compile\"" >&5)
    (eval "$lt_compile" 2>out/conftest.err)
    ac_status=$?
    cat out/conftest.err >&5
-   echo "$as_me:15246: \$? = $ac_status" >&5
+   echo "$as_me:15768: \$? = $ac_status" >&5
    if (exit $ac_status) && test -s out/conftest2.$ac_objext
    then
      # The compiler can only warn and ignore the option if not recognized
@@ -15291,11 +15813,11 @@ else
    -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
    -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
-   (eval echo "\"\$as_me:15294: $lt_compile\"" >&5)
+   (eval echo "\"\$as_me:15816: $lt_compile\"" >&5)
    (eval "$lt_compile" 2>out/conftest.err)
    ac_status=$?
    cat out/conftest.err >&5
-   echo "$as_me:15298: \$? = $ac_status" >&5
+   echo "$as_me:15820: \$? = $ac_status" >&5
    if (exit $ac_status) && test -s out/conftest2.$ac_objext
    then
      # The compiler can only warn and ignore the option if not recognized
@@ -16378,6 +16900,14 @@ $as_echo "no" >&6; }
 	fi
 fi
 
+ if test -n "$ac_ct_CXX"; then
+  WITH_CXX_TRUE=
+  WITH_CXX_FALSE='#'
+else
+  WITH_CXX_TRUE='#'
+  WITH_CXX_FALSE=
+fi
+
  if test "$with_gnu_ld" = "yes"; then
   WITH_GNU_LD_TRUE=
   WITH_GNU_LD_FALSE='#'
@@ -18110,90 +18640,31 @@ fi
 
 
 
-# Finally, execute ACTION-IF-FOUND/ACTION-IF-NOT-FOUND:
-if test x"$acx_pthread_ok" = xyes; then
-
-$as_echo "#define HAVE_PTHREAD 1" >>confdefs.h
-
-        :
-else
-        acx_pthread_ok=no
-        as_fn_error $? "Error: Cannot figure out how to use pthreads!" "$LINENO" 5
-fi
-ac_ext=c
-ac_cpp='$CPP $CPPFLAGS'
-ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
-ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
-ac_compiler_gnu=$ac_cv_c_compiler_gnu
-
-
-
-# Always define WITH_PTHREADS if we make it this far
-
-$as_echo "#define WITH_PTHREADS 1" >>confdefs.h
-
-LDFLAGS="$LDFLAGS "
-CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
-LIBS="$PTHREAD_LIBS $LIBS"
-
-
-  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Cray XT" >&5
-$as_echo_n "checking for Cray XT... " >&6; }
-  # Check whether --enable-cray-xt was given.
-if test "${enable_cray_xt+set}" = set; then :
-  enableval=$enable_cray_xt;  case "$enableval" in
-        yes) x_ac_cray_xt=yes ;;
-         no) x_ac_cray_xt=no ;;
-          *) { $as_echo "$as_me:${as_lineno-$LINENO}: result: doh!" >&5
-$as_echo "doh!" >&6; }
-             as_fn_error $? "bad value \"$enableval\" for --enable-cray-xt" "$LINENO" 5  ;;
-      esac
-
-else
-  x_ac_cray_xt=no
-
-fi
-
-
-  if test "$x_ac_cray_xt" = yes; then
-    { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
-$as_echo "yes" >&6; }
-
-$as_echo "#define HAVE_3D 1" >>confdefs.h
-
-
-$as_echo "#define SYSTEM_DIMENSIONS 3" >>confdefs.h
-
-
-$as_echo "#define HAVE_CRAY 1" >>confdefs.h
-
-
-$as_echo "#define HAVE_FRONT_END 1" >>confdefs.h
-
-  else
-    { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
-$as_echo "no" >&6; }
-  fi
+# Finally, execute ACTION-IF-FOUND/ACTION-IF-NOT-FOUND:
+if test x"$acx_pthread_ok" = xyes; then
 
+$as_echo "#define HAVE_PTHREAD 1" >>confdefs.h
 
-# Check whether --with-apbasil was given.
-if test "${with_apbasil+set}" = set; then :
-  withval=$with_apbasil;  try_apbasil=$withval
+        :
+else
+        acx_pthread_ok=no
+        as_fn_error $? "Error: Cannot figure out how to use pthreads!" "$LINENO" 5
 fi
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
 
-  apbasil_default_locs="/usr/bin/apbasil"
-  for apbasil_loc in $try_apbasil "" $apbasil_default_locs; do
-    if test -z "$have_apbasil" -a -x "$apbasil_loc" ; then
-      have_apbasil=$apbasil_loc
-    fi
-  done
-  if test ! -z "$have_apbasil" ; then
 
-cat >>confdefs.h <<_ACEOF
-#define APBASIL_LOC "$have_apbasil"
-_ACEOF
 
-  fi
+# Always define WITH_PTHREADS if we make it this far
+
+$as_echo "#define WITH_PTHREADS 1" >>confdefs.h
+
+LDFLAGS="$LDFLAGS "
+CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
+LIBS="$PTHREAD_LIBS $LIBS"
 
 
   { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Sun Constellation system" >&5
@@ -19275,6 +19746,230 @@ fi
 
 
 
+  ac_have_cray="no"
+  ac_have_real_cray="no"
+  ac_have_alps_emulation="no"
+  ac_have_cray_emulation="no"
+
+
+# Check whether --with-alps-emulation was given.
+if test "${with_alps_emulation+set}" = set; then :
+  withval=$with_alps_emulation; test "$withval" = no || ac_have_alps_emulation=yes
+else
+  ac_have_alps_emulation=no
+fi
+
+
+  # Check whether --enable-cray-emulation was given.
+if test "${enable_cray_emulation+set}" = set; then :
+  enableval=$enable_cray_emulation;  case "$enableval" in
+        yes) ac_have_cray_emulation="yes" ;;
+         no) ac_have_cray_emulation="no"  ;;
+          *) as_fn_error $? "bad value \"$enableval\" for --enable-cray-emulation" "$LINENO" 5   ;;
+      esac
+
+fi
+
+
+  if test "$ac_have_alps_emulation" = "yes"; then
+    ac_have_cray="yes"
+    { $as_echo "$as_me:${as_lineno-$LINENO}: Running A Cray system against an Alps emulation" >&5
+$as_echo "$as_me: Running A Cray system against an Alps emulation" >&6;}
+
+$as_echo "#define HAVE_ALPS_EMULATION 1" >>confdefs.h
+
+  elif test "$ac_have_cray_emulation" = "yes"; then
+    ac_have_cray="yes"
+    { $as_echo "$as_me:${as_lineno-$LINENO}: Running in Cray emulation mode" >&5
+$as_echo "$as_me: Running in Cray emulation mode" >&6;}
+
+$as_echo "#define HAVE_CRAY_EMULATION 1" >>confdefs.h
+
+  else
+    # Check for a Cray-specific file:
+    #  * older XT systems use an /etc/xtrelease file
+    #  * newer XT/XE systems use an /etc/opt/cray/release/xtrelease file
+    #  * both have an /etc/xthostname
+    { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether this is a native Cray XT or XE system or have ALPS simulator" >&5
+$as_echo_n "checking whether this is a native Cray XT or XE system or have ALPS simulator... " >&6; }
+
+    if test -f /etc/xtrelease || test -d /etc/opt/cray/release; then
+      ac_have_cray="yes"
+      ac_have_real_cray="yes"
+
+$as_echo "#define HAVE_REAL_CRAY 1" >>confdefs.h
+
+    fi
+    { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_have_cray" >&5
+$as_echo "$ac_have_cray" >&6; }
+  fi
+
+  if test "$ac_have_cray" = "yes"; then
+    # libexpat is always required for the XML-RPC interface
+    ac_fn_c_check_header_mongrel "$LINENO" "expat.h" "ac_cv_header_expat_h" "$ac_includes_default"
+if test "x$ac_cv_header_expat_h" = x""yes; then :
+
+else
+  as_fn_error $? "Cray BASIL requires expat headers/rpm" "$LINENO" 5
+fi
+
+
+    { $as_echo "$as_me:${as_lineno-$LINENO}: checking for XML_ParserCreate in -lexpat" >&5
+$as_echo_n "checking for XML_ParserCreate in -lexpat... " >&6; }
+if test "${ac_cv_lib_expat_XML_ParserCreate+set}" = set; then :
+  $as_echo_n "(cached) " >&6
+else
+  ac_check_lib_save_LIBS=$LIBS
+LIBS="-lexpat  $LIBS"
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+/* Override any GCC internal prototype to avoid an error.
+   Use char because int might match the return type of a GCC
+   builtin and then its argument prototype would still apply.  */
+#ifdef __cplusplus
+extern "C"
+#endif
+char XML_ParserCreate ();
+int
+main ()
+{
+return XML_ParserCreate ();
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+  ac_cv_lib_expat_XML_ParserCreate=yes
+else
+  ac_cv_lib_expat_XML_ParserCreate=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+    conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_expat_XML_ParserCreate" >&5
+$as_echo "$ac_cv_lib_expat_XML_ParserCreate" >&6; }
+if test "x$ac_cv_lib_expat_XML_ParserCreate" = x""yes; then :
+  cat >>confdefs.h <<_ACEOF
+#define HAVE_LIBEXPAT 1
+_ACEOF
+
+  LIBS="-lexpat $LIBS"
+
+else
+  as_fn_error $? "Cray BASIL requires libexpat.so (i.e. libexpat1-dev)" "$LINENO" 5
+fi
+
+
+    if test "$ac_have_real_cray" = "yes"; then
+      { $as_echo "$as_me:${as_lineno-$LINENO}: checking for job_getjid in -ljob" >&5
+$as_echo_n "checking for job_getjid in -ljob... " >&6; }
+if test "${ac_cv_lib_job_job_getjid+set}" = set; then :
+  $as_echo_n "(cached) " >&6
+else
+  ac_check_lib_save_LIBS=$LIBS
+LIBS="-ljob  $LIBS"
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+/* Override any GCC internal prototype to avoid an error.
+   Use char because int might match the return type of a GCC
+   builtin and then its argument prototype would still apply.  */
+#ifdef __cplusplus
+extern "C"
+#endif
+char job_getjid ();
+int
+main ()
+{
+return job_getjid ();
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+  ac_cv_lib_job_job_getjid=yes
+else
+  ac_cv_lib_job_job_getjid=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+    conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_job_job_getjid" >&5
+$as_echo "$ac_cv_lib_job_job_getjid" >&6; }
+if test "x$ac_cv_lib_job_job_getjid" = x""yes; then :
+  cat >>confdefs.h <<_ACEOF
+#define HAVE_LIBJOB 1
+_ACEOF
+
+  LIBS="-ljob $LIBS"
+
+else
+  as_fn_error $? "Need cray-job (usually in /opt/cray/job/default)" "$LINENO" 5
+fi
+
+    fi
+
+    if test -z "$MYSQL_CFLAGS" || test -z "$MYSQL_LIBS"; then
+      as_fn_error $? "Cray BASIL requires the cray-MySQL-devel-enterprise rpm" "$LINENO" 5
+    fi
+
+
+$as_echo "#define HAVE_3D 1" >>confdefs.h
+
+
+$as_echo "#define SYSTEM_DIMENSIONS 3" >>confdefs.h
+
+
+$as_echo "#define HAVE_FRONT_END 1" >>confdefs.h
+
+
+$as_echo "#define HAVE_CRAY 1" >>confdefs.h
+
+
+$as_echo "#define SALLOC_KILL_CMD 1" >>confdefs.h
+
+
+$as_echo "#define SALLOC_RUN_FOREGROUND 1" >>confdefs.h
+
+  fi
+   if test "$ac_have_cray" = "yes"; then
+  HAVE_CRAY_TRUE=
+  HAVE_CRAY_FALSE='#'
+else
+  HAVE_CRAY_TRUE='#'
+  HAVE_CRAY_FALSE=
+fi
+
+   if test "$ac_have_real_cray" = "yes"; then
+  HAVE_REAL_CRAY_TRUE=
+  HAVE_REAL_CRAY_FALSE='#'
+else
+  HAVE_REAL_CRAY_TRUE='#'
+  HAVE_REAL_CRAY_FALSE=
+fi
+
+   if test "$ac_have_alps_emulation" = "yes"; then
+  HAVE_ALPS_EMULATION_TRUE=
+  HAVE_ALPS_EMULATION_FALSE='#'
+else
+  HAVE_ALPS_EMULATION_TRUE='#'
+  HAVE_ALPS_EMULATION_FALSE=
+fi
+
+   if test "$ac_have_cray_emulation" = "yes"; then
+  HAVE_CRAY_EMULATION_TRUE=
+  HAVE_CRAY_EMULATION_FALSE='#'
+else
+  HAVE_CRAY_EMULATION_TRUE='#'
+  HAVE_CRAY_EMULATION_FALSE=
+fi
+
+
+
+
 
 
 
@@ -19353,6 +20048,7 @@ fi
 
   if test "$x_ac_debug" = yes; then
     test "$GCC" = yes && CFLAGS="$CFLAGS -Wall -fno-strict-aliasing"
+    test "$GXX" = yes && CXXFLAGS="$CXXFLAGS -Wall -fno-strict-aliasing"
   else
 
 $as_echo "#define NDEBUG 1" >>confdefs.h
@@ -19430,6 +20126,31 @@ $as_echo "#define DEBUGGER_PARTIAL_ATTACH 1" >>confdefs.h
   { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${x_ac_partial_attach=no}" >&5
 $as_echo "${x_ac_partial_attach=no}" >&6; }
 
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether salloc should kill child processes at job termination" >&5
+$as_echo_n "checking whether salloc should kill child processes at job termination... " >&6; }
+  # Check whether --enable-salloc-kill-cmd was given.
+if test "${enable_salloc_kill_cmd+set}" = set; then :
+  enableval=$enable_salloc_kill_cmd;  case "$enableval" in
+        yes) x_ac_salloc_kill_cmd=yes ;;
+         no) x_ac_salloc_kill_cmd=no ;;
+          *) { $as_echo "$as_me:${as_lineno-$LINENO}: result: doh!" >&5
+$as_echo "doh!" >&6; }
+             as_fn_error $? "bad value \"$enableval\" for --enable-salloc-kill-cmd" "$LINENO" 5  ;;
+      esac
+
+
+fi
+
+  if test "$x_ac_salloc_kill_cmd" = yes; then
+
+$as_echo "#define SALLOC_KILL_CMD 1" >>confdefs.h
+
+    { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+  else
+    { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+  fi
 
   { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to disable salloc execution in the background" >&5
 $as_echo_n "checking whether to disable salloc execution in the background... " >&6; }
@@ -19976,9 +20697,9 @@ fi
 
 	if test "x$x_ac_have_lua" = "xyes"; then
 	  saved_CFLAGS="$CFLAGS"
-	  saved_LDFLAGS="$LDFLAGS"
+	  saved_LIBS="$LIBS"
 	  CFLAGS="$CFLAGS $lua_CFLAGS"
-	  LDFLAGS="$LDFLAGS $lua_LIBS"
+	  LIBS="$LIBS $lua_LIBS"
 	  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for whether we can link to liblua" >&5
 $as_echo_n "checking for whether we can link to liblua... " >&6; }
 	  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
@@ -20007,7 +20728,7 @@ rm -f core conftest.err conftest.$ac_objext \
 	  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $x_ac_have_lua" >&5
 $as_echo "$x_ac_have_lua" >&6; }
 	  CFLAGS="$saved_CFLAGS"
-	  LDFLAGS="$saved_LDFLAGS"
+	  LIBS="$saved_LIBS"
 	fi
 
 	 if test "x$x_ac_have_lua" = "xyes"; then
@@ -20021,6 +20742,73 @@ fi
 
 
 
+   { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether man2html is available" >&5
+$as_echo_n "checking whether man2html is available... " >&6; }
+   # Extract the first word of "man2html", so it can be a program name with args.
+set dummy man2html; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_ac_have_man2html+set}" = set; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$ac_have_man2html"; then
+  ac_cv_prog_ac_have_man2html="$ac_have_man2html" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+as_dummy="$bindir:/usr/bin:/usr/local/bin"
+for as_dir in $as_dummy
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+    ac_cv_prog_ac_have_man2html="yes"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+  test -z "$ac_cv_prog_ac_have_man2html" && ac_cv_prog_ac_have_man2html="no"
+fi
+fi
+ac_have_man2html=$ac_cv_prog_ac_have_man2html
+if test -n "$ac_have_man2html"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_have_man2html" >&5
+$as_echo "$ac_have_man2html" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+
+    if test "x$ac_have_man2html" == "xyes"; then
+  HAVE_MAN2HTML_TRUE=
+  HAVE_MAN2HTML_FALSE='#'
+else
+  HAVE_MAN2HTML_TRUE='#'
+  HAVE_MAN2HTML_FALSE=
+fi
+
+
+   if test "x$ac_have_man2html" != "xyes" ; then
+      { $as_echo "$as_me:${as_lineno-$LINENO}: Unable to build man page html files without man2html" >&5
+$as_echo "$as_me: Unable to build man page html files without man2html" >&6;}
+   fi
+
+ if test "x$ac_have_man2html" = "xyes"; then
+  HAVE_MAN2HTML_TRUE=
+  HAVE_MAN2HTML_FALSE='#'
+else
+  HAVE_MAN2HTML_TRUE='#'
+  HAVE_MAN2HTML_FALSE=
+fi
+
+
+
+
   { $as_echo "$as_me:${as_lineno-$LINENO}: checking for support of printf(\"%s\", NULL)" >&5
 $as_echo_n "checking for support of printf(\"%s\", NULL)... " >&6; }
   if test "$cross_compiling" = yes; then :
@@ -20374,7 +21162,11 @@ $as_echo "$as_me: WARNING: unable to locate munge installation" >&2;}
   else
     MUNGE_LIBS="-lmunge"
     MUNGE_CPPFLAGS="-I$x_ac_cv_munge_dir/include"
-    MUNGE_LDFLAGS="-L$x_ac_cv_munge_dir/$bit"
+    if test "$ac_with_rpath" = "yes"; then
+      MUNGE_LDFLAGS="-Wl,-rpath -Wl,$x_ac_cv_munge_dir/$bit -L$x_ac_cv_munge_dir/$bit"
+    else
+      MUNGE_LDFLAGS="-L$x_ac_cv_munge_dir/$bit"
+    fi
   fi
 
 
@@ -20619,8 +21411,38 @@ fi
 
 
 
+  ac_with_srun2aprun="no"
+
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for whether to include srun-aprun wrapper rather than native SLURM srun" >&5
+$as_echo_n "checking for whether to include srun-aprun wrapper rather than native SLURM srun... " >&6; }
+
+# Check whether --with-srun2aprun was given.
+if test "${with_srun2aprun+set}" = set; then :
+  withval=$with_srun2aprun;  case "$withval" in
+        yes) ac_with_srun2aprun=yes ;;
+        no)  ac_with_srun2aprun=no ;;
+        *)   { $as_echo "$as_me:${as_lineno-$LINENO}: result: doh!" >&5
+$as_echo "doh!" >&6; }
+             as_fn_error $? "bad value \"$withval\" for --with-srun2aprun" "$LINENO" 5  ;;
+      esac
+
+
+fi
+
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_with_srun2aprun" >&5
+$as_echo "$ac_with_srun2aprun" >&6; }
+   if test "x$ac_with_srun2aprun" = "xyes"; then
+  BUILD_SRUN2APRUN_TRUE=
+  BUILD_SRUN2APRUN_FALSE='#'
+else
+  BUILD_SRUN2APRUN_TRUE='#'
+  BUILD_SRUN2APRUN_FALSE=
+fi
+
+
+
 
-ac_config_files="$ac_config_files Makefile config.xml auxdir/Makefile contribs/Makefile contribs/pam/Makefile contribs/perlapi/Makefile contribs/perlapi/libslurm/Makefile contribs/perlapi/libslurm/perl/Makefile.PL contribs/perlapi/libslurmdb/Makefile contribs/perlapi/libslurmdb/perl/Makefile.PL contribs/torque/Makefile contribs/phpext/Makefile contribs/phpext/slurm_php/config.m4 contribs/sjobexit/Makefile contribs/slurmdb-direct/Makefile src/Makefile src/api/Makefile src/common/Makefile src/db_api/Makefile src/database/Makefile src/sacct/Makefile src/sacctmgr/Makefile src/sreport/Makefile src/sstat/Makefile src/sshare/Makefile src/salloc/Makefile src/sbatch/Makefile src/sattach/Makefile src/sprio/Makefile src/srun/Makefile src/srun_cr/Makefile src/slurmd/Makefile src/slurmd/slurmd/Makefile src/slurmd/slurmstepd/Makefile src/slurmdbd/Makefile src/slurmctld/Makefile src/sbcast/Makefile src/scontrol/Makefile src/scancel/Makefile src/squeue/Makefile src/sinfo/Makefile src/smap/Makefile src/strigger/Makefile src/sview/Makefile src/plugins/Makefile src/plugins/accounting_storage/Makefile src/plugins/accounting_storage/common/Makefile src/plugins/accounting_storage/filetxt/Makefile src/plugins/accounting_storage/mysql/Makefile src/plugins/accounting_storage/pgsql/Makefile src/plugins/accounting_storage/none/Makefile src/plugins/accounting_storage/slurmdbd/Makefile src/plugins/auth/Makefile src/plugins/auth/authd/Makefile src/plugins/auth/munge/Makefile src/plugins/auth/none/Makefile src/plugins/checkpoint/Makefile src/plugins/checkpoint/aix/Makefile src/plugins/checkpoint/none/Makefile src/plugins/checkpoint/ompi/Makefile src/plugins/checkpoint/xlch/Makefile src/plugins/checkpoint/blcr/Makefile src/plugins/checkpoint/blcr/cr_checkpoint.sh src/plugins/checkpoint/blcr/cr_restart.sh src/plugins/crypto/Makefile src/plugins/crypto/munge/Makefile src/plugins/crypto/openssl/Makefile src/plugins/gres/Makefile src/plugins/gres/gpu/Makefile src/plugins/gres/nic/Makefile 
src/plugins/jobacct_gather/Makefile src/plugins/jobacct_gather/linux/Makefile src/plugins/jobacct_gather/aix/Makefile src/plugins/jobacct_gather/none/Makefile src/plugins/jobcomp/Makefile src/plugins/jobcomp/filetxt/Makefile src/plugins/jobcomp/none/Makefile src/plugins/jobcomp/script/Makefile src/plugins/jobcomp/mysql/Makefile src/plugins/jobcomp/pgsql/Makefile src/plugins/job_submit/Makefile src/plugins/job_submit/cnode/Makefile src/plugins/job_submit/defaults/Makefile src/plugins/job_submit/logging/Makefile src/plugins/job_submit/lua/Makefile src/plugins/job_submit/partition/Makefile src/plugins/preempt/Makefile src/plugins/preempt/none/Makefile src/plugins/preempt/partition_prio/Makefile src/plugins/preempt/qos/Makefile src/plugins/priority/Makefile src/plugins/priority/basic/Makefile src/plugins/priority/multifactor/Makefile src/plugins/proctrack/Makefile src/plugins/proctrack/aix/Makefile src/plugins/proctrack/pgid/Makefile src/plugins/proctrack/linuxproc/Makefile src/plugins/proctrack/rms/Makefile src/plugins/proctrack/sgi_job/Makefile src/plugins/proctrack/cgroup/Makefile src/plugins/proctrack/lua/Makefile src/plugins/sched/Makefile src/plugins/sched/backfill/Makefile src/plugins/sched/builtin/Makefile src/plugins/sched/hold/Makefile src/plugins/sched/wiki/Makefile src/plugins/sched/wiki2/Makefile src/plugins/select/Makefile src/plugins/select/bgq/Makefile src/plugins/select/bluegene/Makefile src/plugins/select/bluegene/block_allocator/Makefile src/plugins/select/bluegene/plugin/Makefile src/plugins/select/cons_res/Makefile src/plugins/select/cray/Makefile src/plugins/select/linear/Makefile src/plugins/switch/Makefile src/plugins/switch/elan/Makefile src/plugins/switch/none/Makefile src/plugins/switch/federation/Makefile src/plugins/mpi/Makefile src/plugins/mpi/mpich1_p4/Makefile src/plugins/mpi/mpich1_shmem/Makefile src/plugins/mpi/mpichgm/Makefile src/plugins/mpi/mpichmx/Makefile src/plugins/mpi/mvapich/Makefile src/plugins/mpi/lam/Makefile 
src/plugins/mpi/none/Makefile src/plugins/mpi/openmpi/Makefile src/plugins/task/Makefile src/plugins/task/affinity/Makefile src/plugins/task/none/Makefile src/plugins/topology/Makefile src/plugins/topology/3d_torus/Makefile src/plugins/topology/node_rank/Makefile src/plugins/topology/none/Makefile src/plugins/topology/tree/Makefile doc/Makefile doc/man/Makefile doc/html/Makefile doc/html/configurator.html testsuite/Makefile testsuite/expect/Makefile testsuite/slurm_unit/Makefile testsuite/slurm_unit/api/Makefile testsuite/slurm_unit/api/manual/Makefile testsuite/slurm_unit/common/Makefile"
+ac_config_files="$ac_config_files Makefile config.xml auxdir/Makefile contribs/Makefile contribs/arrayrun/Makefile contribs/cray/Makefile contribs/lua/Makefile contribs/pam/Makefile contribs/perlapi/Makefile contribs/perlapi/libslurm/Makefile contribs/perlapi/libslurm/perl/Makefile.PL contribs/perlapi/libslurmdb/Makefile contribs/perlapi/libslurmdb/perl/Makefile.PL contribs/torque/Makefile contribs/phpext/Makefile contribs/phpext/slurm_php/config.m4 contribs/sjobexit/Makefile contribs/slurmdb-direct/Makefile src/Makefile src/api/Makefile src/common/Makefile src/db_api/Makefile src/database/Makefile src/sacct/Makefile src/sacctmgr/Makefile src/sreport/Makefile src/sstat/Makefile src/sshare/Makefile src/salloc/Makefile src/sbatch/Makefile src/sattach/Makefile src/sprio/Makefile src/srun/Makefile src/srun_cr/Makefile src/slurmd/Makefile src/slurmd/common/Makefile src/slurmd/slurmd/Makefile src/slurmd/slurmstepd/Makefile src/slurmdbd/Makefile src/slurmctld/Makefile src/sbcast/Makefile src/scontrol/Makefile src/scancel/Makefile src/squeue/Makefile src/sinfo/Makefile src/smap/Makefile src/strigger/Makefile src/sview/Makefile src/plugins/Makefile src/plugins/accounting_storage/Makefile src/plugins/accounting_storage/common/Makefile src/plugins/accounting_storage/filetxt/Makefile src/plugins/accounting_storage/mysql/Makefile src/plugins/accounting_storage/pgsql/Makefile src/plugins/accounting_storage/none/Makefile src/plugins/accounting_storage/slurmdbd/Makefile src/plugins/auth/Makefile src/plugins/auth/authd/Makefile src/plugins/auth/munge/Makefile src/plugins/auth/none/Makefile src/plugins/checkpoint/Makefile src/plugins/checkpoint/aix/Makefile src/plugins/checkpoint/none/Makefile src/plugins/checkpoint/ompi/Makefile src/plugins/checkpoint/blcr/Makefile src/plugins/checkpoint/blcr/cr_checkpoint.sh src/plugins/checkpoint/blcr/cr_restart.sh src/plugins/crypto/Makefile src/plugins/crypto/munge/Makefile src/plugins/crypto/openssl/Makefile src/plugins/gres/Makefile 
src/plugins/gres/gpu/Makefile src/plugins/gres/nic/Makefile src/plugins/jobacct_gather/Makefile src/plugins/jobacct_gather/linux/Makefile src/plugins/jobacct_gather/aix/Makefile src/plugins/jobacct_gather/none/Makefile src/plugins/jobcomp/Makefile src/plugins/jobcomp/filetxt/Makefile src/plugins/jobcomp/none/Makefile src/plugins/jobcomp/script/Makefile src/plugins/jobcomp/mysql/Makefile src/plugins/jobcomp/pgsql/Makefile src/plugins/job_submit/Makefile src/plugins/job_submit/cnode/Makefile src/plugins/job_submit/defaults/Makefile src/plugins/job_submit/logging/Makefile src/plugins/job_submit/lua/Makefile src/plugins/job_submit/partition/Makefile src/plugins/preempt/Makefile src/plugins/preempt/none/Makefile src/plugins/preempt/partition_prio/Makefile src/plugins/preempt/qos/Makefile src/plugins/priority/Makefile src/plugins/priority/basic/Makefile src/plugins/priority/multifactor/Makefile src/plugins/proctrack/Makefile src/plugins/proctrack/aix/Makefile src/plugins/proctrack/cgroup/Makefile src/plugins/proctrack/pgid/Makefile src/plugins/proctrack/linuxproc/Makefile src/plugins/proctrack/rms/Makefile src/plugins/proctrack/sgi_job/Makefile src/plugins/proctrack/lua/Makefile src/plugins/sched/Makefile src/plugins/sched/backfill/Makefile src/plugins/sched/builtin/Makefile src/plugins/sched/hold/Makefile src/plugins/sched/wiki/Makefile src/plugins/sched/wiki2/Makefile src/plugins/select/Makefile src/plugins/select/bluegene/Makefile src/plugins/select/bluegene/ba/Makefile src/plugins/select/bluegene/ba_bgq/Makefile src/plugins/select/bluegene/bl/Makefile src/plugins/select/bluegene/bl_bgq/Makefile src/plugins/select/bluegene/sfree/Makefile src/plugins/select/cons_res/Makefile src/plugins/select/cray/Makefile src/plugins/select/cray/libalps/Makefile src/plugins/select/cray/libemulate/Makefile src/plugins/select/linear/Makefile src/plugins/switch/Makefile src/plugins/switch/elan/Makefile src/plugins/switch/none/Makefile src/plugins/switch/federation/Makefile 
src/plugins/mpi/Makefile src/plugins/mpi/mpich1_p4/Makefile src/plugins/mpi/mpich1_shmem/Makefile src/plugins/mpi/mpichgm/Makefile src/plugins/mpi/mpichmx/Makefile src/plugins/mpi/mvapich/Makefile src/plugins/mpi/lam/Makefile src/plugins/mpi/none/Makefile src/plugins/mpi/openmpi/Makefile src/plugins/task/Makefile src/plugins/task/affinity/Makefile src/plugins/task/cgroup/Makefile src/plugins/task/none/Makefile src/plugins/topology/Makefile src/plugins/topology/3d_torus/Makefile src/plugins/topology/node_rank/Makefile src/plugins/topology/none/Makefile src/plugins/topology/tree/Makefile doc/Makefile doc/man/Makefile doc/html/Makefile doc/html/configurator.html testsuite/Makefile testsuite/expect/Makefile testsuite/slurm_unit/Makefile testsuite/slurm_unit/api/Makefile testsuite/slurm_unit/api/manual/Makefile testsuite/slurm_unit/common/Makefile"
 
 
 cat >confcache <<\_ACEOF
@@ -20745,6 +21567,22 @@ if test -z "${BGL_LOADED_TRUE}" && test -z "${BGL_LOADED_FALSE}"; then
   as_fn_error $? "conditional \"BGL_LOADED\" was never defined.
 Usually this means the macro was only invoked conditionally." "$LINENO" 5
 fi
+if test -z "${BG_L_P_LOADED_TRUE}" && test -z "${BG_L_P_LOADED_FALSE}"; then
+  as_fn_error $? "conditional \"BG_L_P_LOADED\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${REAL_BG_L_P_LOADED_TRUE}" && test -z "${REAL_BG_L_P_LOADED_FALSE}"; then
+  as_fn_error $? "conditional \"REAL_BG_L_P_LOADED\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${am__fastdepCXX_TRUE}" && test -z "${am__fastdepCXX_FALSE}"; then
+  as_fn_error $? "conditional \"am__fastdepCXX\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${BGQ_LOADED_TRUE}" && test -z "${BGQ_LOADED_FALSE}"; then
+  as_fn_error $? "conditional \"BGQ_LOADED\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
 if test -z "${BLUEGENE_LOADED_TRUE}" && test -z "${BLUEGENE_LOADED_FALSE}"; then
   as_fn_error $? "conditional \"BLUEGENE_LOADED\" was never defined.
 Usually this means the macro was only invoked conditionally." "$LINENO" 5
@@ -20757,6 +21595,10 @@ if test -z "${HAVE_AIX_PROCTRACK_TRUE}" && test -z "${HAVE_AIX_PROCTRACK_FALSE}"
   as_fn_error $? "conditional \"HAVE_AIX_PROCTRACK\" was never defined.
 Usually this means the macro was only invoked conditionally." "$LINENO" 5
 fi
+if test -z "${WITH_CYGWIN_TRUE}" && test -z "${WITH_CYGWIN_FALSE}"; then
+  as_fn_error $? "conditional \"WITH_CYGWIN\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
 if test -z "${am__fastdepCC_TRUE}" && test -z "${am__fastdepCC_FALSE}"; then
   as_fn_error $? "conditional \"am__fastdepCC\" was never defined.
 Usually this means the macro was only invoked conditionally." "$LINENO" 5
@@ -20769,6 +21611,10 @@ if test -z "${am__fastdepCXX_TRUE}" && test -z "${am__fastdepCXX_FALSE}"; then
   as_fn_error $? "conditional \"am__fastdepCXX\" was never defined.
 Usually this means the macro was only invoked conditionally." "$LINENO" 5
 fi
+if test -z "${WITH_CXX_TRUE}" && test -z "${WITH_CXX_FALSE}"; then
+  as_fn_error $? "conditional \"WITH_CXX\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
 if test -z "${WITH_GNU_LD_TRUE}" && test -z "${WITH_GNU_LD_FALSE}"; then
   as_fn_error $? "conditional \"WITH_GNU_LD\" was never defined.
 Usually this means the macro was only invoked conditionally." "$LINENO" 5
@@ -20806,6 +21652,22 @@ if test -z "${WITH_PGSQL_TRUE}" && test -z "${WITH_PGSQL_FALSE}"; then
   as_fn_error $? "conditional \"WITH_PGSQL\" was never defined.
 Usually this means the macro was only invoked conditionally." "$LINENO" 5
 fi
+if test -z "${HAVE_CRAY_TRUE}" && test -z "${HAVE_CRAY_FALSE}"; then
+  as_fn_error $? "conditional \"HAVE_CRAY\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${HAVE_REAL_CRAY_TRUE}" && test -z "${HAVE_REAL_CRAY_FALSE}"; then
+  as_fn_error $? "conditional \"HAVE_REAL_CRAY\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${HAVE_ALPS_EMULATION_TRUE}" && test -z "${HAVE_ALPS_EMULATION_FALSE}"; then
+  as_fn_error $? "conditional \"HAVE_ALPS_EMULATION\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${HAVE_CRAY_EMULATION_TRUE}" && test -z "${HAVE_CRAY_EMULATION_FALSE}"; then
+  as_fn_error $? "conditional \"HAVE_CRAY_EMULATION\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
 if test -z "${DEBUG_MODULES_TRUE}" && test -z "${DEBUG_MODULES_FALSE}"; then
   as_fn_error $? "conditional \"DEBUG_MODULES\" was never defined.
 Usually this means the macro was only invoked conditionally." "$LINENO" 5
@@ -20826,6 +21688,14 @@ if test -z "${HAVE_LUA_TRUE}" && test -z "${HAVE_LUA_FALSE}"; then
   as_fn_error $? "conditional \"HAVE_LUA\" was never defined.
 Usually this means the macro was only invoked conditionally." "$LINENO" 5
 fi
+if test -z "${HAVE_MAN2HTML_TRUE}" && test -z "${HAVE_MAN2HTML_FALSE}"; then
+  as_fn_error $? "conditional \"HAVE_MAN2HTML\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${HAVE_MAN2HTML_TRUE}" && test -z "${HAVE_MAN2HTML_FALSE}"; then
+  as_fn_error $? "conditional \"HAVE_MAN2HTML\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
 if test -z "${HAVE_OPENSSL_TRUE}" && test -z "${HAVE_OPENSSL_FALSE}"; then
   as_fn_error $? "conditional \"HAVE_OPENSSL\" was never defined.
 Usually this means the macro was only invoked conditionally." "$LINENO" 5
@@ -20842,6 +21712,10 @@ if test -z "${WITH_BLCR_TRUE}" && test -z "${WITH_BLCR_FALSE}"; then
   as_fn_error $? "conditional \"WITH_BLCR\" was never defined.
 Usually this means the macro was only invoked conditionally." "$LINENO" 5
 fi
+if test -z "${BUILD_SRUN2APRUN_TRUE}" && test -z "${BUILD_SRUN2APRUN_FALSE}"; then
+  as_fn_error $? "conditional \"BUILD_SRUN2APRUN\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
 
 : ${CONFIG_STATUS=./config.status}
 ac_write_fail=0
@@ -21804,6 +22678,9 @@ do
     "config.xml") CONFIG_FILES="$CONFIG_FILES config.xml" ;;
     "auxdir/Makefile") CONFIG_FILES="$CONFIG_FILES auxdir/Makefile" ;;
     "contribs/Makefile") CONFIG_FILES="$CONFIG_FILES contribs/Makefile" ;;
+    "contribs/arrayrun/Makefile") CONFIG_FILES="$CONFIG_FILES contribs/arrayrun/Makefile" ;;
+    "contribs/cray/Makefile") CONFIG_FILES="$CONFIG_FILES contribs/cray/Makefile" ;;
+    "contribs/lua/Makefile") CONFIG_FILES="$CONFIG_FILES contribs/lua/Makefile" ;;
     "contribs/pam/Makefile") CONFIG_FILES="$CONFIG_FILES contribs/pam/Makefile" ;;
     "contribs/perlapi/Makefile") CONFIG_FILES="$CONFIG_FILES contribs/perlapi/Makefile" ;;
     "contribs/perlapi/libslurm/Makefile") CONFIG_FILES="$CONFIG_FILES contribs/perlapi/libslurm/Makefile" ;;
@@ -21832,6 +22709,7 @@ do
     "src/srun/Makefile") CONFIG_FILES="$CONFIG_FILES src/srun/Makefile" ;;
     "src/srun_cr/Makefile") CONFIG_FILES="$CONFIG_FILES src/srun_cr/Makefile" ;;
     "src/slurmd/Makefile") CONFIG_FILES="$CONFIG_FILES src/slurmd/Makefile" ;;
+    "src/slurmd/common/Makefile") CONFIG_FILES="$CONFIG_FILES src/slurmd/common/Makefile" ;;
     "src/slurmd/slurmd/Makefile") CONFIG_FILES="$CONFIG_FILES src/slurmd/slurmd/Makefile" ;;
     "src/slurmd/slurmstepd/Makefile") CONFIG_FILES="$CONFIG_FILES src/slurmd/slurmstepd/Makefile" ;;
     "src/slurmdbd/Makefile") CONFIG_FILES="$CONFIG_FILES src/slurmdbd/Makefile" ;;
@@ -21860,7 +22738,6 @@ do
     "src/plugins/checkpoint/aix/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/checkpoint/aix/Makefile" ;;
     "src/plugins/checkpoint/none/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/checkpoint/none/Makefile" ;;
     "src/plugins/checkpoint/ompi/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/checkpoint/ompi/Makefile" ;;
-    "src/plugins/checkpoint/xlch/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/checkpoint/xlch/Makefile" ;;
     "src/plugins/checkpoint/blcr/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/checkpoint/blcr/Makefile" ;;
     "src/plugins/checkpoint/blcr/cr_checkpoint.sh") CONFIG_FILES="$CONFIG_FILES src/plugins/checkpoint/blcr/cr_checkpoint.sh" ;;
     "src/plugins/checkpoint/blcr/cr_restart.sh") CONFIG_FILES="$CONFIG_FILES src/plugins/checkpoint/blcr/cr_restart.sh" ;;
@@ -21895,11 +22772,11 @@ do
     "src/plugins/priority/multifactor/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/priority/multifactor/Makefile" ;;
     "src/plugins/proctrack/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/proctrack/Makefile" ;;
     "src/plugins/proctrack/aix/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/proctrack/aix/Makefile" ;;
+    "src/plugins/proctrack/cgroup/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/proctrack/cgroup/Makefile" ;;
     "src/plugins/proctrack/pgid/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/proctrack/pgid/Makefile" ;;
     "src/plugins/proctrack/linuxproc/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/proctrack/linuxproc/Makefile" ;;
     "src/plugins/proctrack/rms/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/proctrack/rms/Makefile" ;;
     "src/plugins/proctrack/sgi_job/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/proctrack/sgi_job/Makefile" ;;
-    "src/plugins/proctrack/cgroup/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/proctrack/cgroup/Makefile" ;;
     "src/plugins/proctrack/lua/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/proctrack/lua/Makefile" ;;
     "src/plugins/sched/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/sched/Makefile" ;;
     "src/plugins/sched/backfill/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/sched/backfill/Makefile" ;;
@@ -21908,12 +22785,16 @@ do
     "src/plugins/sched/wiki/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/sched/wiki/Makefile" ;;
     "src/plugins/sched/wiki2/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/sched/wiki2/Makefile" ;;
     "src/plugins/select/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/select/Makefile" ;;
-    "src/plugins/select/bgq/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/select/bgq/Makefile" ;;
     "src/plugins/select/bluegene/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/select/bluegene/Makefile" ;;
-    "src/plugins/select/bluegene/block_allocator/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/select/bluegene/block_allocator/Makefile" ;;
-    "src/plugins/select/bluegene/plugin/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/select/bluegene/plugin/Makefile" ;;
+    "src/plugins/select/bluegene/ba/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/select/bluegene/ba/Makefile" ;;
+    "src/plugins/select/bluegene/ba_bgq/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/select/bluegene/ba_bgq/Makefile" ;;
+    "src/plugins/select/bluegene/bl/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/select/bluegene/bl/Makefile" ;;
+    "src/plugins/select/bluegene/bl_bgq/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/select/bluegene/bl_bgq/Makefile" ;;
+    "src/plugins/select/bluegene/sfree/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/select/bluegene/sfree/Makefile" ;;
     "src/plugins/select/cons_res/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/select/cons_res/Makefile" ;;
     "src/plugins/select/cray/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/select/cray/Makefile" ;;
+    "src/plugins/select/cray/libalps/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/select/cray/libalps/Makefile" ;;
+    "src/plugins/select/cray/libemulate/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/select/cray/libemulate/Makefile" ;;
     "src/plugins/select/linear/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/select/linear/Makefile" ;;
     "src/plugins/switch/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/switch/Makefile" ;;
     "src/plugins/switch/elan/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/switch/elan/Makefile" ;;
@@ -21930,6 +22811,7 @@ do
     "src/plugins/mpi/openmpi/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/mpi/openmpi/Makefile" ;;
     "src/plugins/task/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/task/Makefile" ;;
     "src/plugins/task/affinity/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/task/affinity/Makefile" ;;
+    "src/plugins/task/cgroup/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/task/cgroup/Makefile" ;;
     "src/plugins/task/none/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/task/none/Makefile" ;;
     "src/plugins/topology/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/topology/Makefile" ;;
     "src/plugins/topology/3d_torus/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/topology/3d_torus/Makefile" ;;
diff --git a/configure.ac b/configure.ac
index ae7817dae..18352d60a 100644
--- a/configure.ac
+++ b/configure.ac
@@ -25,6 +25,7 @@ AC_CONFIG_HEADERS([config.h])
 AC_CONFIG_HEADERS([slurm/slurm.h])
 
 dnl This needs to be close to the front to set CFLAGS=-m64
+X_AC_RPATH
 X_AC_BGL
 
 dnl we need to know if this is a bgl in the Makefile.am to do
@@ -33,12 +34,28 @@ AM_CONDITIONAL(BGL_LOADED, test "x$ac_bluegene_loaded" = "xyes")
 AC_SUBST(BGL_LOADED)
 
 X_AC_BGP
+
+dnl ok now check if We have an L or P system, Q is handled differently
+dnl so handle it later.
+AM_CONDITIONAL(BG_L_P_LOADED, test "x$ac_bluegene_loaded" = "xyes")
+AC_SUBST(BG_L_P_LOADED)
+
+dnl ok now check if We are on a real L or P system, (test if to build srun
+dnl or not.  If we are emulating things we should build it.
+AM_CONDITIONAL(REAL_BG_L_P_LOADED, test "x$ac_real_bluegene_loaded" = "xyes")
+AC_SUBST(REAL_BG_L_P_LOADED)
+
 X_AC_BGQ
 
-dnl ok now check if bluegene was loaded at all
+dnl We need to know if this is a Q system
+AM_CONDITIONAL(BGQ_LOADED, test "x$ac_bgq_loaded" = "xyes")
+AC_SUBST(BGQ_LOADED)
+
+dnl ok now check if any bluegene was loaded.
 AM_CONDITIONAL(BLUEGENE_LOADED, test "x$ac_bluegene_loaded" = "xyes")
 AC_SUBST(BLUEGENE_LOADED)
 
+
 X_AC_AIX
 
 dnl
@@ -54,14 +71,22 @@ case "$host" in
 			[Define slurm_ prefix function aliases for plugins]) ;;
 esac
 
+ac_have_cygwin=no
 dnl
-dnl add some flags for Solaris
+dnl add some flags for Solaris and cygwin
 dnl
 case "$host" in
+     	*cygwin)   LDFLAGS="$LDFLAGS -no-undefined"
+		   SO_LDFLAGS="$SO_LDFLAGS \$(top_builddir)/src/api/libslurmhelper.la"
+		   AC_SUBST(SO_LDFLAGS)
+		   ac_have_cygwin=yes
+		   ;;
 	*solaris*) CC="/usr/sfw/bin/gcc"
 		   CFLAGS="$CFLAGS -D_POSIX_PTHREAD_SEMANTICS -I/usr/sfw/include"
 		   LDFLAGS="$LDFLAGS -L/usr/sfw/lib"
+		   ;;
 esac
+AM_CONDITIONAL(WITH_CYGWIN, test x"$ac_have_cygwin" == x"yes")
 
 dnl Checks for programs.
 dnl
@@ -71,6 +96,7 @@ AC_PROG_MAKE_SET
 AC_PROG_LIBTOOL
 PKG_PROG_PKG_CONFIG([0.9.0])
 
+AM_CONDITIONAL(WITH_CXX, test -n "$ac_ct_CXX")
 AM_CONDITIONAL(WITH_GNU_LD, test "$with_gnu_ld" = "yes")
 
 
@@ -172,7 +198,6 @@ LDFLAGS="$LDFLAGS "
 CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
 LIBS="$PTHREAD_LIBS $LIBS"
 
-X_AC_CRAY
 X_AC_SUN_CONST
 X_AC_DIMENSIONS
 
@@ -216,6 +241,9 @@ fi
 
 X_AC_DATABASES
 
+dnl Cray ALPS/Basil support depends on mySQL
+X_AC_CRAY
+
 dnl checks for system services.
 dnl
 
@@ -268,6 +296,12 @@ dnl check for lua library
 dnl
 X_AC_LUA
 
+dnl check for presence of the man2html command
+dnl
+X_AC_MAN2HTML
+AM_CONDITIONAL(HAVE_MAN2HTML, test "x$ac_have_man2html" = "xyes")
+AC_SUBST(HAVE_MAN2HTML)
+
 dnl check if we can use standard printf functions
 dnl
 X_AC_PRINTF_NULL
@@ -335,6 +369,10 @@ dnl Check for compilation of SLURM with BLCR support:
 dnl
 X_AC_BLCR
 
+dnl
+dnl Check to build native SLURM srun command or an aprun (Cray ALPS) wrapper.
+dnl
+X_AC_SRUN2APRUN
 
 dnl All slurm Makefiles:
 
@@ -342,6 +380,9 @@ AC_CONFIG_FILES([Makefile
 		 config.xml
 		 auxdir/Makefile
 		 contribs/Makefile
+		 contribs/arrayrun/Makefile
+		 contribs/cray/Makefile
+		 contribs/lua/Makefile
 		 contribs/pam/Makefile
 		 contribs/perlapi/Makefile
 		 contribs/perlapi/libslurm/Makefile
@@ -370,6 +411,7 @@ AC_CONFIG_FILES([Makefile
 		 src/srun/Makefile
 		 src/srun_cr/Makefile
 		 src/slurmd/Makefile
+		 src/slurmd/common/Makefile
 		 src/slurmd/slurmd/Makefile
 		 src/slurmd/slurmstepd/Makefile
 		 src/slurmdbd/Makefile
@@ -398,7 +440,6 @@ AC_CONFIG_FILES([Makefile
 		 src/plugins/checkpoint/aix/Makefile
 		 src/plugins/checkpoint/none/Makefile
 		 src/plugins/checkpoint/ompi/Makefile
-		 src/plugins/checkpoint/xlch/Makefile
 		 src/plugins/checkpoint/blcr/Makefile
 		 src/plugins/checkpoint/blcr/cr_checkpoint.sh
 		 src/plugins/checkpoint/blcr/cr_restart.sh
@@ -433,11 +474,11 @@ AC_CONFIG_FILES([Makefile
 		 src/plugins/priority/multifactor/Makefile
 		 src/plugins/proctrack/Makefile
 		 src/plugins/proctrack/aix/Makefile
+		 src/plugins/proctrack/cgroup/Makefile
 		 src/plugins/proctrack/pgid/Makefile
 		 src/plugins/proctrack/linuxproc/Makefile
 		 src/plugins/proctrack/rms/Makefile
 		 src/plugins/proctrack/sgi_job/Makefile
-		 src/plugins/proctrack/cgroup/Makefile
 		 src/plugins/proctrack/lua/Makefile
 		 src/plugins/sched/Makefile
 		 src/plugins/sched/backfill/Makefile
@@ -446,12 +487,16 @@ AC_CONFIG_FILES([Makefile
 		 src/plugins/sched/wiki/Makefile
 		 src/plugins/sched/wiki2/Makefile
 		 src/plugins/select/Makefile
-		 src/plugins/select/bgq/Makefile
 		 src/plugins/select/bluegene/Makefile
-		 src/plugins/select/bluegene/block_allocator/Makefile
-		 src/plugins/select/bluegene/plugin/Makefile
+		 src/plugins/select/bluegene/ba/Makefile
+		 src/plugins/select/bluegene/ba_bgq/Makefile
+		 src/plugins/select/bluegene/bl/Makefile
+		 src/plugins/select/bluegene/bl_bgq/Makefile
+		 src/plugins/select/bluegene/sfree/Makefile
 		 src/plugins/select/cons_res/Makefile
 		 src/plugins/select/cray/Makefile
+		 src/plugins/select/cray/libalps/Makefile
+		 src/plugins/select/cray/libemulate/Makefile
 		 src/plugins/select/linear/Makefile
 		 src/plugins/switch/Makefile
 		 src/plugins/switch/elan/Makefile
@@ -468,6 +513,7 @@ AC_CONFIG_FILES([Makefile
 		 src/plugins/mpi/openmpi/Makefile
 		 src/plugins/task/Makefile
 		 src/plugins/task/affinity/Makefile
+		 src/plugins/task/cgroup/Makefile
 		 src/plugins/task/none/Makefile
 		 src/plugins/topology/Makefile
 		 src/plugins/topology/3d_torus/Makefile
diff --git a/contribs/Makefile.am b/contribs/Makefile.am
index 38f6a1efd..45c13a119 100644
--- a/contribs/Makefile.am
+++ b/contribs/Makefile.am
@@ -1,4 +1,4 @@
-SUBDIRS = pam perlapi torque sjobexit slurmdb-direct
+SUBDIRS = arrayrun cray lua pam perlapi torque sjobexit slurmdb-direct
 
 EXTRA_DIST = \
 	env_cache_builder.c	\
diff --git a/contribs/Makefile.in b/contribs/Makefile.in
index 8d50851ae..dad2f7db3 100644
--- a/contribs/Makefile.in
+++ b/contribs/Makefile.in
@@ -60,6 +60,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -70,6 +71,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -131,7 +133,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -168,6 +173,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -225,6 +231,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -260,6 +267,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
@@ -312,7 +320,7 @@ target_vendor = @target_vendor@
 top_build_prefix = @top_build_prefix@
 top_builddir = @top_builddir@
 top_srcdir = @top_srcdir@
-SUBDIRS = pam perlapi torque sjobexit slurmdb-direct
+SUBDIRS = arrayrun cray lua pam perlapi torque sjobexit slurmdb-direct
 EXTRA_DIST = \
 	env_cache_builder.c	\
 	make.slurm.patch	\
diff --git a/contribs/README b/contribs/README
index 7041eb26e..57bd818d4 100644
--- a/contribs/README
+++ b/contribs/README
@@ -7,6 +7,47 @@ Subdirectories contain the source-code for the various contributations for
 SLURM as their documentation. A quick description of the subdirectories
 of the SLURM contribs distribution follows:
 
+  arrayrun            [Adds support for array jobs]
+     README                - Description of the arrayrun tool and its use
+     arrayrun              - Command used to submit job arrays
+     arrayrun_worker       - Back-end to the arrayrun command responsible for
+                             spawning the jobs in the array
+
+  cray                [Tools for use on Cray systems]
+     etc_init_d_munge      - /etc/init.d/munge script for use with Munge
+     etc_sysconfig_slurm   - /etc/sysconfig/slurm for Cray XT/XE systems
+     libalps_test_programs.tar.gz - set of tools to verify ALPS/BASIL support
+                             logic. Note that this currently requires:
+                             * hardcoding in libsdb/basil_mysql_routines.c:
+                               mysql_real_connect(handle, "localhost", NULL, NULL, "XT5istanbul"
+                             * suitable /etc/my.cnf, containing at least the lines
+                               [client]
+                               user=basic
+                               password=basic
+                             * setting the APBASIL in the libalps/Makefile, e.g.
+                               APBASIL := slurm/alps_simulator/apbasil.sh
+                             To use, extract the files then:
+                             > cd libasil/
+                             > make -C alps_tests all   # runs basil parser tests
+                             > make -C sdb_tests  all   # checks if database routines work
+                             A tool named tuxadmin is also included. When
+                             executed with the -s or --slurm.conf option, this
+                             contacts the SDB to generate system-specific information
+                             needed in slurm.conf (e.g. "NodeName=nid..." and
+                             "PartitionName= Nodes=nid... MaxNodes=...").
+     munge_build_script.sh - script to build Munge from sources for Cray system
+     opt_modulefiles_slurm - enables use of Munge as soon as built
+     slurm-build-script.sh - script to build SLURM from sources for Cray system.
+                             set LIBROOT and SLURM_SRC environment variables
+                             before use, for example:
+                             LIBROOT=/ufs/slurm/build
+                             SLURM_SRC=${SLURM_SRC:-${LIBROOT}/slurm-2.3.0-0.pre4}
+     srun.pl               - A perl wrapper for the aprun command. Use of this
+                             wrapper requires that SLURM's perlapi be installed.
+                             Execute configure with the --with-srun2aprun option
+                             to build and install this instead of SLURM's normal
+                             srun command.
+
   env_cache_builder.c [C program]
      This program will build an environment variable cache file for specific 
      users or all users on the system. This can be used to prevent the aborting 
@@ -18,6 +59,8 @@ of the SLURM contribs distribution follows:
      Example LUA scripts that can serve as SLURM plugins.
      job_submit.lua - job_submit plugin that can set a job's default partition
                       using a very simple algorithm
+     job_submit_license.lua - job_submit plugin that can set a job's use of
+                      system licenses
      proctrack.lua  - proctrack (process tracking) plugin that implements a
                       very simple job step container using CPUSETs
 
diff --git a/contribs/arrayrun/Makefile.am b/contribs/arrayrun/Makefile.am
new file mode 100644
index 000000000..2b280966d
--- /dev/null
+++ b/contribs/arrayrun/Makefile.am
@@ -0,0 +1,4 @@
+EXTRA_DIST = \
+	arrayrun		\
+	arrayrun_worker		\
+	README
diff --git a/contribs/arrayrun/Makefile.in b/contribs/arrayrun/Makefile.in
new file mode 100644
index 000000000..019c6d0b1
--- /dev/null
+++ b/contribs/arrayrun/Makefile.in
@@ -0,0 +1,475 @@
+# Makefile.in generated by automake 1.11.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+# 2003, 2004, 2005, 2006, 2007, 2008, 2009  Free Software Foundation,
+# Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+VPATH = @srcdir@
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+target_triplet = @target@
+subdir = contribs/arrayrun
+DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
+	$(top_srcdir)/auxdir/libtool.m4 \
+	$(top_srcdir)/auxdir/ltoptions.m4 \
+	$(top_srcdir)/auxdir/ltsugar.m4 \
+	$(top_srcdir)/auxdir/ltversion.m4 \
+	$(top_srcdir)/auxdir/lt~obsolete.m4 \
+	$(top_srcdir)/auxdir/slurm.m4 \
+	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
+	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
+	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
+	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
+	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_databases.m4 \
+	$(top_srcdir)/auxdir/x_ac_debug.m4 \
+	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
+	$(top_srcdir)/auxdir/x_ac_federation.m4 \
+	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
+	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
+	$(top_srcdir)/auxdir/x_ac_munge.m4 \
+	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_pam.m4 \
+	$(top_srcdir)/auxdir/x_ac_printf_null.m4 \
+	$(top_srcdir)/auxdir/x_ac_ptrace.m4 \
+	$(top_srcdir)/auxdir/x_ac_readline.m4 \
+	$(top_srcdir)/auxdir/x_ac_setpgrp.m4 \
+	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
+	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
+	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
+	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
+	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+	$(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+SOURCES =
+DIST_SOURCES =
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AR = @AR@
+AUTHD_CFLAGS = @AUTHD_CFLAGS@
+AUTHD_LIBS = @AUTHD_LIBS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
+BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
+BLUEGENE_LOADED = @BLUEGENE_LOADED@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CMD_LDFLAGS = @CMD_LDFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DSYMUTIL = @DSYMUTIL@
+DUMPBIN = @DUMPBIN@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+ELAN_LIBS = @ELAN_LIBS@
+EXEEXT = @EXEEXT@
+FEDERATION_LDFLAGS = @FEDERATION_LDFLAGS@
+FGREP = @FGREP@
+GREP = @GREP@
+GTK_CFLAGS = @GTK_CFLAGS@
+GTK_LIBS = @GTK_LIBS@
+HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@
+HAVEPGCONFIG = @HAVEPGCONFIG@
+HAVE_AIX = @HAVE_AIX@
+HAVE_ELAN = @HAVE_ELAN@
+HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
+HAVE_OPENSSL = @HAVE_OPENSSL@
+HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
+HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
+HWLOC_LDFLAGS = @HWLOC_LDFLAGS@
+HWLOC_LIBS = @HWLOC_LIBS@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+LD = @LD@
+LDFLAGS = @LDFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIB_LDFLAGS = @LIB_LDFLAGS@
+LIPO = @LIPO@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MKDIR_P = @MKDIR_P@
+MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@
+MUNGE_LDFLAGS = @MUNGE_LDFLAGS@
+MUNGE_LIBS = @MUNGE_LIBS@
+MYSQL_CFLAGS = @MYSQL_CFLAGS@
+MYSQL_LIBS = @MYSQL_LIBS@
+NCURSES = @NCURSES@
+NM = @NM@
+NMEDIT = @NMEDIT@
+NUMA_LIBS = @NUMA_LIBS@
+OBJDUMP = @OBJDUMP@
+OBJEXT = @OBJEXT@
+OTOOL = @OTOOL@
+OTOOL64 = @OTOOL64@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PAM_DIR = @PAM_DIR@
+PAM_LIBS = @PAM_LIBS@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PGSQL_CFLAGS = @PGSQL_CFLAGS@
+PGSQL_LIBS = @PGSQL_LIBS@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROCTRACKDIR = @PROCTRACKDIR@
+PROJECT = @PROJECT@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+RANLIB = @RANLIB@
+READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
+RELEASE = @RELEASE@
+SED = @SED@
+SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
+SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SLURMCTLD_PORT = @SLURMCTLD_PORT@
+SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
+SLURMDBD_PORT = @SLURMDBD_PORT@
+SLURMD_PORT = @SLURMD_PORT@
+SLURM_API_AGE = @SLURM_API_AGE@
+SLURM_API_CURRENT = @SLURM_API_CURRENT@
+SLURM_API_MAJOR = @SLURM_API_MAJOR@
+SLURM_API_REVISION = @SLURM_API_REVISION@
+SLURM_API_VERSION = @SLURM_API_VERSION@
+SLURM_MAJOR = @SLURM_MAJOR@
+SLURM_MICRO = @SLURM_MICRO@
+SLURM_MINOR = @SLURM_MINOR@
+SLURM_PREFIX = @SLURM_PREFIX@
+SLURM_VERSION_NUMBER = @SLURM_VERSION_NUMBER@
+SLURM_VERSION_STRING = @SLURM_VERSION_STRING@
+SO_LDFLAGS = @SO_LDFLAGS@
+SSL_CPPFLAGS = @SSL_CPPFLAGS@
+SSL_LDFLAGS = @SSL_LDFLAGS@
+SSL_LIBS = @SSL_LIBS@
+STRIP = @STRIP@
+UTIL_LIBS = @UTIL_LIBS@
+VERSION = @VERSION@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+lt_ECHO = @lt_ECHO@
+lua_CFLAGS = @lua_CFLAGS@
+lua_LIBS = @lua_LIBS@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target = @target@
+target_alias = @target_alias@
+target_cpu = @target_cpu@
+target_os = @target_os@
+target_vendor = @target_vendor@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+EXTRA_DIST = \
+	arrayrun		\
+	arrayrun_worker		\
+	README
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am  $(am__configure_deps)
+	@for dep in $?; do \
+	  case '$(am__configure_deps)' in \
+	    *$$dep*) \
+	      ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+	        && { if test -f $@; then exit 0; else break; fi; }; \
+	      exit 1;; \
+	  esac; \
+	done; \
+	echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu contribs/arrayrun/Makefile'; \
+	$(am__cd) $(top_srcdir) && \
+	  $(AUTOMAKE) --gnu contribs/arrayrun/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+	@case '$?' in \
+	  *config.status*) \
+	    cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+	  *) \
+	    echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+	    cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+	esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+mostlyclean-libtool:
+	-rm -f *.lo
+
+clean-libtool:
+	-rm -rf .libs _libs
+tags: TAGS
+TAGS:
+
+ctags: CTAGS
+CTAGS:
+
+
+distdir: $(DISTFILES)
+	@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	list='$(DISTFILES)'; \
+	  dist_files=`for file in $$list; do echo $$file; done | \
+	  sed -e "s|^$$srcdirstrip/||;t" \
+	      -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+	case $$dist_files in \
+	  */*) $(MKDIR_P) `echo "$$dist_files" | \
+			   sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+			   sort -u` ;; \
+	esac; \
+	for file in $$dist_files; do \
+	  if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+	  if test -d $$d/$$file; then \
+	    dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+	    if test -d "$(distdir)/$$file"; then \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+	      cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+	  else \
+	    test -f "$(distdir)/$$file" \
+	    || cp -p $$d/$$file "$(distdir)/$$file" \
+	    || exit 1; \
+	  fi; \
+	done
+check-am: all-am
+check: check-am
+all-am: Makefile
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+	$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	  install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	  `test -z '$(STRIP)' || \
+	    echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+	-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+	@echo "This command is intended for maintainers to use"
+	@echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-generic clean-libtool mostlyclean-am
+
+distclean: distclean-am
+	-rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+	-rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic clean-libtool \
+	distclean distclean-generic distclean-libtool distdir dvi \
+	dvi-am html html-am info info-am install install-am \
+	install-data install-data-am install-dvi install-dvi-am \
+	install-exec install-exec-am install-html install-html-am \
+	install-info install-info-am install-man install-pdf \
+	install-pdf-am install-ps install-ps-am install-strip \
+	installcheck installcheck-am installdirs maintainer-clean \
+	maintainer-clean-generic mostlyclean mostlyclean-generic \
+	mostlyclean-libtool pdf pdf-am ps ps-am uninstall uninstall-am
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/contribs/arrayrun/README b/contribs/arrayrun/README
new file mode 100644
index 000000000..3615d053e
--- /dev/null
+++ b/contribs/arrayrun/README
@@ -0,0 +1,132 @@
+-*- text -*- $Id: README.arrayrun,v 1.2 2011/06/28 11:21:27 bhm Exp $
+
+Overview
+========
+
+Arrayrun is an attempt to simulate array jobs as found in SGE and PBS.  It
+works very similarly to mpirun:
+
+   arrayrun [-r] taskids [sbatch arguments] YourCommand [arguments]
+
+In principle, arrayrun does 
+
+   TASK_ID=id sbatch [sbatch arguments] YourCommand [arguments]
+
+for each id in the 'taskids' specification.  'taskids' is a comma separated
+list of integers, ranges of integers (first-last) or ranges with step size
+(first-last:step).  If -r is specified, arrayrun will restart a job that has
+failed.  To avoid endless loops, a job is only restarted once, and a maximum
+of 10 (configurable) jobs will be restarted.
+
+The idea is to submit a master job that calls arrayrun to start the jobs,
+for instance
+
+   $ cat workerScript
+   #!/bin/sh
+   #SBATCH --account=YourProject
+   #SBATCH --time=1:0:0
+   #SBATCH --mem-per-cpu=1G
+
+   DATASET=dataset.$TASK_ID
+   OUTFILE=result.$TASK_ID
+   cd $SCRATCH
+   YourProgram $DATASET > $OUTFILE
+   # end of workerScript
+
+   $ cat submitScript
+   #!/bin/sh
+   #SBATCH --account=YourProject
+   #SBATCH --time=50:0:0
+   #SBATCH --mem-per-cpu=100M
+
+   arrayrun 1-200 workerScript
+   # end of submitScript
+
+   $ sbatch submitScript
+
+The --time specification in the master script must be long enough for all
+jobs to finish.
+
+Alternatively, arrayrun can be run on the command line of a login or master
+node.
+
+If the master job is cancelled, or the arrayrun process is killed, it tries
+to scancel all running or pending jobs before it exits.
+
+Arrayrun tries not to flood the queue with jobs.  It works by submitting a
+limited number of jobs, sleeping a while, checking the status of its jobs,
+and iterating, until all jobs have finished.  All limits and times are
+configurable (see below).  It also tries to handle all errors in a graceful
+manner.
+
+
+Installation and configuration
+==============================
+
+There are two files, arrayrun (to be called by users) and arrayrun_worker
+(exec'ed or srun'ed by arrayrun, to make scancel work).
+
+arrayrun should be placed somewhere on the $PATH.  arrayrun_worker can be
+placed anywhere.  Both files should be accessible from all nodes.
+
+There are quite a few configuration variables, so arrayrun can be tuned to
+work under different policies and work loads.
+
+Configuration variables in arrayrun:
+
+- WORKER: the location of arrayrun_worker
+
+Configuration variables in arrayrun_worker:
+
+- $maxJobs:          The maximal number of jobs arrayrun will allow in the
+		     queue at any time
+- $maxIdleJobs:	     The maximal number of _pending_ jobs arrayrun will allow
+		     in the queue at any time
+- $maxBurst:	     The maximal number of jobs submitted at a time
+- $pollSeconds:	     How many seconds to sleep between each iteration
+- $maxFails:	     The maximal number of errors to accept when submitting a
+		     job
+- $retrySleep:	     The number of seconds to sleep between each retry when
+		     submitting a job
+- $doubleCheckSleep: The number of seconds to sleep after a failed sbatch
+		     before runnung squeue to double check whether the job
+		     was submitted or not.
+- $maxRestarts:	     The maximal number of restarts all in all
+- $sbatch:	     The full path of the sbatch command to use
+
+
+Notes and caveats
+=================
+
+Arrayrun is an attempt to simulate array jobs.  As such, it is not
+perfect or foolproof.  Here are a couple of caveats.
+
+- Sometimes, arrayrun fails to scancel all jobs when it is itself cancelled
+
+- When arrayrun is run as a master job, it consumes one CPU for the whole
+  duration of the job.  Also, the --time limit must be long enough.  This can
+  be avoided by running arrayrun interactively on a master/login node (in
+  which case running it under screen is probably a good idea).
+
+- Arrayrun does (currently) not checkpoint, so if an arrayrun is restarted,
+  it starts from scratch with the first taskid.
+
+We welcome any suggestions for improvements or additional functionality!
+
+
+Copyright
+=========
+
+Copyright 2009,2010,2011 Bjørn-Helge Mevik <b.h.mevik@usit.uio.no>
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License version 2 as
+published by the Free Software Foundation.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License version 2 for more details.
+
+A copy of the GPL v. 2 text is available here:
+http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
diff --git a/contribs/arrayrun/arrayrun b/contribs/arrayrun/arrayrun
new file mode 100644
index 000000000..944e5f398
--- /dev/null
+++ b/contribs/arrayrun/arrayrun
@@ -0,0 +1,69 @@
+#!/bin/bash
+### Simulate an array job
+### $Id: arrayrun,v 1.6 2011/02/10 11:57:53 root Exp $
+
+### Copyright 2009,2010 Bjørn-Helge Mevik <b.h.mevik@usit.uio.no>
+###
+### This program is free software; you can redistribute it and/or modify
+### it under the terms of the GNU General Public License version 2 as
+### published by the Free Software Foundation.
+###
+### This program is distributed in the hope that it will be useful,
+### but WITHOUT ANY WARRANTY; without even the implied warranty of
+### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+### GNU General Public License version 2 for more details.
+###
+### A copy of the GPL v. 2 text is available here:
+### http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
+
+
+## Debugging
+#set -x
+
+### Configuration:
+## The work horse:
+WORKER=/site/lib/arrayrun_worker
+
+## Documentation:
+function usage () {
+    echo "Run many instances of the same job or command in the queue system.
+The instances are submitted via sbatch, and each get their own value
+of the environment variable TASK_ID.  This can be used to select which
+input or output file to use, etc.
+
+Usage:
+ arrayrun [-r] taskids [sbatch arguments] command [arguments]
+ arrayrun [-h | --help]
+
+Arguments:
+ '-r':         Restart a job if it fails.  For security reasons, each job is
+               restarted only once, and no more than 5 jobs will be restarted.
+ 'taskids':    Run 'command' with TASK_ID set to the values specified in
+               'taskids'.  'taskids' is a comma separated list of integers,
+               ranges of integers (first-last) or ranges with step size
+               (first-last:step).  For instance
+                 1-5 means 1, 2, 3, 4, 5
+                 1,4,6 means 1, 4, 6
+                 10-20:5 means 10, 15, 20
+                 1-5,15,100-150:25 means 1, 2, 3, 4, 5, 15, 100, 125, 150
+               Note: spaces, negative numbers or decimal numbers are not allowed.
+ 'sbatch arguments': Any command line arguments for the implied sbatch.  This
+               is most useful when 'command' is not a job script.
+ 'command':    The command or job script to run.  If it is a job script, it can
+               contain #SBATCH lines in addition to or instead of the 'sbatch
+               arguments'.
+ 'arguments':  Any arguments for 'command'.
+ '-h', '--help' (or no arguments): Display this help."
+}
+
+if [ $# == 0 -o "$1" == '--help' -o "$1" == '-h' ]; then
+    usage
+    exit 0
+fi
+
+if [ -n "$SLURM_JOB_ID" ]; then
+    ## Started in a job script.  Run with srun to make "scancel" work
+    exec srun --ntasks=1 $WORKER "$@"
+else
+    exec $WORKER "$@"
+fi
diff --git a/contribs/arrayrun/arrayrun_worker b/contribs/arrayrun/arrayrun_worker
new file mode 100644
index 000000000..8107c72d4
--- /dev/null
+++ b/contribs/arrayrun/arrayrun_worker
@@ -0,0 +1,255 @@
+#!/usr/bin/perl
+### Simulate an array job -- work horse script
+### $Id: arrayrun_worker,v 1.30 2011/04/27 08:58:25 root Exp $
+
+### Copyright 2009,2010,2011 Bjørn-Helge Mevik <b.h.mevik@usit.uio.no>
+###
+### This program is free software; you can redistribute it and/or modify
+### it under the terms of the GNU General Public License version 2 as
+### published by the Free Software Foundation.
+###
+### This program is distributed in the hope that it will be useful,
+### but WITHOUT ANY WARRANTY; without even the implied warranty of
+### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+### GNU General Public License version 2 for more details.
+###
+### A copy of the GPL v. 2 text is available here:
+### http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
+
+
+### Note: This script is meant to be run by 'arrayrun'; do not
+### run this script directly.
+
+use strict;
+use List::Util qw/min/;
+use Time::HiRes qw/sleep/;
+
+## Debug:
+use warnings;
+use constant DEBUG => 1;
+$| = 1 if DEBUG;
+
+## Configuration:
+my $maxJobs = 100;		# Max total number of jobs in queue
+my $maxIdleJobs = 10;		# Max number of pending jobs in queue
+my $maxBurst = 10;		# Max number of jobs to submit at a time
+my $pollSeconds = 180;		# How many seconds to sleep between each poll
+my $maxFails = 300;		# Max errors to accept when submitting a job
+my $retrySleep = 300;		# Seconds to sleep between each retry
+my $doubleCheckSleep = 30;	# Seconds to sleep before double checking
+my $maxRestarts = 10;		# Max number of restarts all in all
+my $sbatch = "/site/bin/sbatch";# Which sbatch command to use
+
+## Parse command line
+my $restart = 0;
+if (@ARGV && $ARGV[0] eq '-r') {
+    $restart = 1;
+    shift @ARGV;
+}
+my $jobSpec = shift @ARGV or die "Too few arguments\n";
+my @commandLine = @ARGV or die "Too few arguments\n";
+my @jobArray;
+foreach (split /,/, $jobSpec) {
+    if (/^(\d+)$/) {
+	push @jobArray, $1;
+    } elsif (/^(\d+)[-:](\d+)$/) {
+	push @jobArray, $1 .. $2;
+    } elsif (/^(\d+)[-:](\d+):(\d+)$/) {
+	for (my $i = $1; $i <= $2; $i += $3) {
+	    push @jobArray, $i;
+	}
+    } else {
+	die "Unknown TASK_ID specification: '$_'\n";
+    }
+}
+die "No TASK_IDs specified\n" unless (@jobArray);
+
+print "TASK_IDs to submit: ", join(",", @jobArray), "
+Command line: @commandLine\n" if DEBUG;
+print "Will restart failed jobs\n" if DEBUG && $restart;
+
+## Setup
+my $mainid = $ENV{'SLURM_JOB_ID'} || $ENV{'SLURM_JOBID'} || 'null';
+my $runids = [];		# List of IDs of running jobs
+my $pendids = [];		# List of IDs of pending jobs
+my $testids = [];		# List of IDs to test
+my %taskid;			# TASK_ID for all submitted jobs
+my @restartedTasks;		# TASK_ID of all restarted jobs
+my @tmp = (localtime())[5,4,3];
+my $starttime = sprintf "%d-%02d-%02d", $tmp[0] + 1900, $tmp[1] + 1, $tmp[2];
+
+print "Main job id: $mainid\nStart time: $starttime\n" if DEBUG;
+
+## Trap signals such that any running sub jobs are cancelled if the
+## main job is cancelled or times out.
+sub clean_up {
+    print "Caught signal.  Cleaning up...\n" if DEBUG;
+    ## Cancel any subjobs:
+    if (@{$runids} || @{$pendids} || @{$testids}) {
+	print "Cancelling @{$runids} @{$pendids} @{$testids}\n" if DEBUG;
+	system("echo scancel @{$runids} @{$pendids} @{$testids}");
+	system("scancel @{$runids} @{$pendids} @{$testids}");
+	print "Cancelled @{$runids} @{$pendids} @{$testids}\n" if DEBUG;
+    }
+    exit 0;
+}
+$SIG{'TERM'} = 'clean_up';	# scancel/timeout
+$SIG{'INT'} = 'clean_up';	# ^C in interactive use
+
+
+## Submit a job with fail resilience:
+sub submit_job {
+    my $jobName = shift;
+    (my $commandLine = shift) || die "Job script not specified\n";
+    my $id;
+    my $nFails = 0;
+    my $success = 0;
+    until ($success) {
+	my $fail = 0;
+	$id = `$sbatch --job-name=$jobName $commandLine 2>&1`;
+	if ($? == 0) {
+	    chomp($id);
+	    print "  Result from submit: $id" if DEBUG;
+	    if ($id =~ s/.*Submitted batch job //) {
+		$success = 1;
+	    }
+	} else {
+	    warn "  sbatch failed with error code '$?' (output: '",
+	        $id || '', "'): $!\n";
+	    $nFails++;
+	}
+	until ($success || $fail || $nFails > $maxFails) {
+	    ## Double check that the job did not start
+	    warn "  Problem with submitting/checking job.  Checking with squeue in a while.\n";
+	    sleep $doubleCheckSleep - 5 + int(rand(11));
+	    $id = `squeue -h -o '%i %j' -u $ENV{USER}`;
+	    if ($? == 0) {
+		chomp($id);
+		print "  Result from squeue: $id" if DEBUG;
+		if ($id =~ s/ $jobName//) {
+		    warn "Job '$jobName' seems to have been started as jobid '$id'.  Using that id.\n";
+		    $success = 1;
+		} else {
+		    warn "Job '$jobName' did not start.\n";
+		    $fail = 1;
+		}
+	    } else {
+		$nFails++;
+	    }
+	}
+	unless ($success) {
+	    if ($nFails <= $maxFails) {
+		warn "  Could not submit job.  Trying again in a while.\n";
+		sleep $retrySleep - 5 + int(rand(11));
+	    } else {
+		die "  Cannot submit job.  Giving up after $nFails errors.\n";
+	    }
+	}
+    }
+    print " => job ID $id\n" if DEBUG;
+    $id;
+}
+
+
+## Check the given jobs, and return lists of the ones still running/waiting:
+sub check_queue {
+    print scalar localtime, ": Checking queue...\n" if DEBUG;
+    my $queueids = `squeue -h -o '%i %t' 2>&1`;
+    if ($? != 0) {
+	print "squeue failed with error code '$?',\nmessage: $queueids\nI will assume all jobs are still running/waiting\n";
+	return;
+    }
+    my $testids = [ @{$runids}, @{$pendids} ];
+    print "Number of jobs to check: ", scalar @{$testids}, "\n" if DEBUG;
+    sleep 10 + rand;		# Sleep to allow requeued jobs to get back
+                                # in queue.
+    $runids = [];
+    $pendids = [];
+    foreach my $id (@{$testids}) {
+	if ($queueids =~ /$id (\w+)/) {
+	    if ($1 eq "PD") {
+		print " Job $id is still waiting\n" if DEBUG;
+		push @{$pendids}, $id;
+	    } else {
+		print " Job $id is still running\n" if DEBUG;
+		push @{$runids}, $id;
+	    }
+	} else {
+	    print " Job $id has finished:\n" if DEBUG;
+	    my @sacctres = `sacct -o jobid,start,end,maxvmsize,maxrss,state,exitcode -S $starttime -j $id 2>&1`;
+	    if ($? != 0) {
+		print "  sacct failed with error code '$?',\n  message: ",
+		  @sacctres, "  I will assume job $id finished successfully\n";
+	    } else {
+		print join("  ", @sacctres);
+		if (grep /^[ ]*$id[ ]+.*RUNNING/, @sacctres) {
+		    print "  Job seems to be still running, after all.\n" if DEBUG;
+		    push @{$runids}, $id;
+		} elsif ($restart && !grep /^[ ]*$id[ ]+.*COMPLETED[ ]+0:0/, @sacctres) {
+		    print "  Job failed. ";
+		    if (@restartedTasks >= $maxRestarts) {
+			print "Too many jobs have been restarted.  Will not restart TASK_ID $taskid{$id}\n";
+		    } elsif (grep /^$taskid{$id}$/, @restartedTasks) {
+			print "TASK_ID $taskid{$id} has already been restarted once.  Will not restart it again\n";
+		    } else {
+			print "Restarting TASK_ID $taskid{$id}\n";
+			$ENV{'TASK_ID'} = $taskid{$id};
+			my $newid = submit_job "$mainid.$taskid{$id}", "@commandLine";
+			push @{$runids}, $newid;
+			$taskid{$newid} = $taskid{$id};
+			push @restartedTasks, $taskid{$newid};
+			sleep 1.5 + rand;	# Sleep between 1.5 and 2.5 secs
+		    }
+		}
+	    }
+	}
+    }
+}
+
+
+## Make sure sub jobs do not inherit the main job TMPDIR or jobname:
+delete $ENV{'TMPDIR'};
+delete $ENV{'SLURM_JOB_NAME'};
+
+while (@jobArray) {
+    ## There is more to submit
+    print scalar localtime, ": Submitting jobs...\n" if DEBUG;
+    print scalar @jobArray, " more job(s) to submit\n" if DEBUG;
+    ## Submit as many as possible:
+    my $nToSubmit = min(scalar @jobArray,
+			$maxJobs - @{$runids} - @{$pendids},
+			$maxIdleJobs - @{$pendids},
+			$maxBurst);
+    print scalar(@{$runids}), " job(s) are running, and ",
+        scalar(@{$pendids}), " are waiting\n" if DEBUG;
+    print "Submitting $nToSubmit job(s):\n" if DEBUG;
+    for (my $i = 1; $i <= $nToSubmit; $i++) {
+	my $currJob = shift @jobArray;
+	print " TASK_ID $currJob:\n" if DEBUG;
+	## Set $TASK_ID for the job:
+	$ENV{'TASK_ID'} = $currJob;
+	my $id = submit_job "$mainid.$currJob", "@commandLine";
+	push @{$pendids}, $id;
+	$taskid{$id} = $currJob;
+	sleep 1.5 + rand;	# Sleep between 1.5 and 2.5 secs
+    }
+    ## Wait a while:
+    print "Sleeping...\n" if DEBUG;
+    sleep $pollSeconds - 5 + int(rand(11));
+    ## Find which are still running or waiting:
+    check_queue();
+}
+print "All jobs have been submitted\n" if DEBUG;
+
+while (@{$runids} || @{$pendids}) {
+    ## Some jobs are still running or pending
+    print scalar(@{$runids}), " job(s) are still running, and ",
+        scalar(@{$pendids}), " are waiting\n" if DEBUG;
+    ## Wait a while
+    print "Sleeping...\n" if DEBUG;
+    sleep $pollSeconds - 5 + int(rand(11));
+    ## Find which are still running or waiting:
+    check_queue();
+}
+
+print "Done.\n" if DEBUG;
diff --git a/contribs/cray/Makefile.am b/contribs/cray/Makefile.am
new file mode 100644
index 000000000..9176ff935
--- /dev/null
+++ b/contribs/cray/Makefile.am
@@ -0,0 +1,40 @@
+#
+# Makefile for cray scripts
+#
+
+AUTOMAKE_OPTIONS = foreign
+
+EXTRA_DIST = \
+	etc_init_d_munge		\
+	etc_sysconfig_slurm		\
+	libalps_test_programs.tar.gz	\
+	munge_build_script.sh		\
+	opt_modulefiles_slurm		\
+	pam_job.c			\
+	slurm-build-script.sh
+
+if BUILD_SRUN2APRUN
+  bin_SCRIPTS = srun
+endif
+
+srun:
+_perldir=$(exec_prefix)`perl -e 'use Config; $$T=$$Config{installsitearch}; $$P=$$Config{installprefix}; $$P1="$$P/local"; $$T =~ s/$$P1//; $$T =~ s/$$P//; print $$T;'`
+
+install-binSCRIPTS: $(bin_SCRIPTS)
+	@$(NORMAL_INSTALL)
+	test -z "$(DESTDIR)$(bindir)" || $(mkdir_p) "$(DESTDIR)$(bindir)"
+	@list='$(bin_SCRIPTS)'; for p in $$list; do \
+	   echo "sed 's%use lib .*%use lib qw(${_perldir});%' $(top_srcdir)/contribs/cray/$$p.pl | sed 's%BINDIR%@bindir@%' > $(DESTDIR)$(bindir)/$$p"; \
+	         sed "s%use lib .*%use lib qw(${_perldir});%" $(top_srcdir)/contribs/cray/$$p.pl | sed "s%BINDIR%@bindir@%" > $(DESTDIR)$(bindir)/$$p; \
+	   chmod 755 $(DESTDIR)$(bindir)/$$p;\
+	done
+
+uninstall-binSCRIPTS:
+	@$(NORMAL_UNINSTALL)
+	@list='$(bin_SCRIPTS)'; for p in $$list; do \
+	  echo " rm -f '$(DESTDIR)$(bindir)/$$p'"; \
+	  rm -f "$(DESTDIR)$(bindir)/$$p"; \
+	done
+
+clean:
+
diff --git a/contribs/cray/Makefile.in b/contribs/cray/Makefile.in
new file mode 100644
index 000000000..73102d3a9
--- /dev/null
+++ b/contribs/cray/Makefile.in
@@ -0,0 +1,532 @@
+# Makefile.in generated by automake 1.11.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+# 2003, 2004, 2005, 2006, 2007, 2008, 2009  Free Software Foundation,
+# Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+#
+# Makefile for cray scripts
+#
+
+VPATH = @srcdir@
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+target_triplet = @target@
+subdir = contribs/cray
+DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
+	$(top_srcdir)/auxdir/libtool.m4 \
+	$(top_srcdir)/auxdir/ltoptions.m4 \
+	$(top_srcdir)/auxdir/ltsugar.m4 \
+	$(top_srcdir)/auxdir/ltversion.m4 \
+	$(top_srcdir)/auxdir/lt~obsolete.m4 \
+	$(top_srcdir)/auxdir/slurm.m4 \
+	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
+	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
+	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
+	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
+	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_databases.m4 \
+	$(top_srcdir)/auxdir/x_ac_debug.m4 \
+	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
+	$(top_srcdir)/auxdir/x_ac_federation.m4 \
+	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
+	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
+	$(top_srcdir)/auxdir/x_ac_munge.m4 \
+	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_pam.m4 \
+	$(top_srcdir)/auxdir/x_ac_printf_null.m4 \
+	$(top_srcdir)/auxdir/x_ac_ptrace.m4 \
+	$(top_srcdir)/auxdir/x_ac_readline.m4 \
+	$(top_srcdir)/auxdir/x_ac_setpgrp.m4 \
+	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
+	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
+	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
+	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
+	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+	$(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+    $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+    *) f=$$p;; \
+  esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+  srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+  for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+  for p in $$list; do echo "$$p $$p"; done | \
+  sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+  $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+    if (++n[$$2] == $(am__install_max)) \
+      { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+    END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+  sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+  sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+am__installdirs = "$(DESTDIR)$(bindir)"
+SCRIPTS = $(bin_SCRIPTS)
+SOURCES =
+DIST_SOURCES =
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AR = @AR@
+AUTHD_CFLAGS = @AUTHD_CFLAGS@
+AUTHD_LIBS = @AUTHD_LIBS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
+BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
+BLUEGENE_LOADED = @BLUEGENE_LOADED@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CMD_LDFLAGS = @CMD_LDFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DSYMUTIL = @DSYMUTIL@
+DUMPBIN = @DUMPBIN@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+ELAN_LIBS = @ELAN_LIBS@
+EXEEXT = @EXEEXT@
+FEDERATION_LDFLAGS = @FEDERATION_LDFLAGS@
+FGREP = @FGREP@
+GREP = @GREP@
+GTK_CFLAGS = @GTK_CFLAGS@
+GTK_LIBS = @GTK_LIBS@
+HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@
+HAVEPGCONFIG = @HAVEPGCONFIG@
+HAVE_AIX = @HAVE_AIX@
+HAVE_ELAN = @HAVE_ELAN@
+HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
+HAVE_OPENSSL = @HAVE_OPENSSL@
+HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
+HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
+HWLOC_LDFLAGS = @HWLOC_LDFLAGS@
+HWLOC_LIBS = @HWLOC_LIBS@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+LD = @LD@
+LDFLAGS = @LDFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIB_LDFLAGS = @LIB_LDFLAGS@
+LIPO = @LIPO@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MKDIR_P = @MKDIR_P@
+MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@
+MUNGE_LDFLAGS = @MUNGE_LDFLAGS@
+MUNGE_LIBS = @MUNGE_LIBS@
+MYSQL_CFLAGS = @MYSQL_CFLAGS@
+MYSQL_LIBS = @MYSQL_LIBS@
+NCURSES = @NCURSES@
+NM = @NM@
+NMEDIT = @NMEDIT@
+NUMA_LIBS = @NUMA_LIBS@
+OBJDUMP = @OBJDUMP@
+OBJEXT = @OBJEXT@
+OTOOL = @OTOOL@
+OTOOL64 = @OTOOL64@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PAM_DIR = @PAM_DIR@
+PAM_LIBS = @PAM_LIBS@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PGSQL_CFLAGS = @PGSQL_CFLAGS@
+PGSQL_LIBS = @PGSQL_LIBS@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROCTRACKDIR = @PROCTRACKDIR@
+PROJECT = @PROJECT@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+RANLIB = @RANLIB@
+READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
+RELEASE = @RELEASE@
+SED = @SED@
+SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
+SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SLURMCTLD_PORT = @SLURMCTLD_PORT@
+SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
+SLURMDBD_PORT = @SLURMDBD_PORT@
+SLURMD_PORT = @SLURMD_PORT@
+SLURM_API_AGE = @SLURM_API_AGE@
+SLURM_API_CURRENT = @SLURM_API_CURRENT@
+SLURM_API_MAJOR = @SLURM_API_MAJOR@
+SLURM_API_REVISION = @SLURM_API_REVISION@
+SLURM_API_VERSION = @SLURM_API_VERSION@
+SLURM_MAJOR = @SLURM_MAJOR@
+SLURM_MICRO = @SLURM_MICRO@
+SLURM_MINOR = @SLURM_MINOR@
+SLURM_PREFIX = @SLURM_PREFIX@
+SLURM_VERSION_NUMBER = @SLURM_VERSION_NUMBER@
+SLURM_VERSION_STRING = @SLURM_VERSION_STRING@
+SO_LDFLAGS = @SO_LDFLAGS@
+SSL_CPPFLAGS = @SSL_CPPFLAGS@
+SSL_LDFLAGS = @SSL_LDFLAGS@
+SSL_LIBS = @SSL_LIBS@
+STRIP = @STRIP@
+UTIL_LIBS = @UTIL_LIBS@
+VERSION = @VERSION@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+lt_ECHO = @lt_ECHO@
+lua_CFLAGS = @lua_CFLAGS@
+lua_LIBS = @lua_LIBS@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target = @target@
+target_alias = @target_alias@
+target_cpu = @target_cpu@
+target_os = @target_os@
+target_vendor = @target_vendor@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+AUTOMAKE_OPTIONS = foreign
+EXTRA_DIST = \
+	etc_init_d_munge		\
+	etc_sysconfig_slurm		\
+	libalps_test_programs.tar.gz	\
+	munge_build_script.sh		\
+	opt_modulefiles_slurm		\
+	pam_job.c			\
+	slurm-build-script.sh
+
+@BUILD_SRUN2APRUN_TRUE@bin_SCRIPTS = srun
+_perldir = $(exec_prefix)`perl -e 'use Config; $$T=$$Config{installsitearch}; $$P=$$Config{installprefix}; $$P1="$$P/local"; $$T =~ s/$$P1//; $$T =~ s/$$P//; print $$T;'`
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am  $(am__configure_deps)
+	@for dep in $?; do \
+	  case '$(am__configure_deps)' in \
+	    *$$dep*) \
+	      ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+	        && { if test -f $@; then exit 0; else break; fi; }; \
+	      exit 1;; \
+	  esac; \
+	done; \
+	echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign contribs/cray/Makefile'; \
+	$(am__cd) $(top_srcdir) && \
+	  $(AUTOMAKE) --foreign contribs/cray/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+	@case '$?' in \
+	  *config.status*) \
+	    cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+	  *) \
+	    echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+	    cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+	esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+mostlyclean-libtool:
+	-rm -f *.lo
+
+clean-libtool:
+	-rm -rf .libs _libs
+tags: TAGS
+TAGS:
+
+ctags: CTAGS
+CTAGS:
+
+
+distdir: $(DISTFILES)
+	@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	list='$(DISTFILES)'; \
+	  dist_files=`for file in $$list; do echo $$file; done | \
+	  sed -e "s|^$$srcdirstrip/||;t" \
+	      -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+	case $$dist_files in \
+	  */*) $(MKDIR_P) `echo "$$dist_files" | \
+			   sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+			   sort -u` ;; \
+	esac; \
+	for file in $$dist_files; do \
+	  if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+	  if test -d $$d/$$file; then \
+	    dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+	    if test -d "$(distdir)/$$file"; then \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+	      cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+	  else \
+	    test -f "$(distdir)/$$file" \
+	    || cp -p $$d/$$file "$(distdir)/$$file" \
+	    || exit 1; \
+	  fi; \
+	done
+check-am: all-am
+check: check-am
+all-am: Makefile $(SCRIPTS)
+installdirs:
+	for dir in "$(DESTDIR)$(bindir)"; do \
+	  test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+	done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+	$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	  install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	  `test -z '$(STRIP)' || \
+	    echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+	-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+	@echo "This command is intended for maintainers to use"
+	@echo "it deletes files that may require special tools to rebuild."
+clean-am: clean-generic clean-libtool mostlyclean-am
+
+distclean: distclean-am
+	-rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am: install-binSCRIPTS
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+	-rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-binSCRIPTS
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic clean-libtool \
+	distclean distclean-generic distclean-libtool distdir dvi \
+	dvi-am html html-am info info-am install install-am \
+	install-binSCRIPTS install-data install-data-am install-dvi \
+	install-dvi-am install-exec install-exec-am install-html \
+	install-html-am install-info install-info-am install-man \
+	install-pdf install-pdf-am install-ps install-ps-am \
+	install-strip installcheck installcheck-am installdirs \
+	maintainer-clean maintainer-clean-generic mostlyclean \
+	mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \
+	uninstall uninstall-am uninstall-binSCRIPTS
+
+
+srun:
+
+install-binSCRIPTS: $(bin_SCRIPTS)
+	@$(NORMAL_INSTALL)
+	test -z "$(DESTDIR)$(bindir)" || $(mkdir_p) "$(DESTDIR)$(bindir)"
+	@list='$(bin_SCRIPTS)'; for p in $$list; do \
+	   echo "sed 's%use lib .*%use lib qw(${_perldir});%' $(top_srcdir)/contribs/cray/$$p.pl | sed 's%BINDIR%@bindir@%' > $(DESTDIR)$(bindir)/$$p"; \
+	         sed "s%use lib .*%use lib qw(${_perldir});%" $(top_srcdir)/contribs/cray/$$p.pl | sed "s%BINDIR%@bindir@%" > $(DESTDIR)$(bindir)/$$p; \
+	   chmod 755 $(DESTDIR)$(bindir)/$$p;\
+	done
+
+uninstall-binSCRIPTS:
+	@$(NORMAL_UNINSTALL)
+	@list='$(bin_SCRIPTS)'; for p in $$list; do \
+	  echo " rm -f '$(DESTDIR)$(bindir)/$$p'"; \
+	  rm -f "$(DESTDIR)$(bindir)/$$p"; \
+	done
+
+clean:
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/contribs/cray/etc_init_d_munge b/contribs/cray/etc_init_d_munge
new file mode 100644
index 000000000..0bc5e3393
--- /dev/null
+++ b/contribs/cray/etc_init_d_munge
@@ -0,0 +1,559 @@
+#!/bin/sh
+#
+# /etc/init.d/munge - Start/stop script configured for Cray XT/XE
+#
+###############################################################################
+# Written by Chris Dunlap <cdunlap@llnl.gov>.
+# Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
+# Copyright (C) 2002-2007 The Regents of the University of California.
+# UCRL-CODE-155910.
+###############################################################################
+# chkconfig:          - 40 60
+# Description:        Start/Stop the MUNGE authentication service.
+###############################################################################
+### BEGIN INIT INFO
+# Provides:           munge
+# Required-Start:     $remote_fs
+# Required-Stop:      $remote_fs
+# Default-Start:      2 3 5
+# Default-Stop:
+# Short-Description:  Start/Stop the MUNGE authentication service.
+# Description:        MUNGE (MUNGE Uid 'N' Gid Emporium) is a highly scalable
+#                     authentication service for creating and validating
+#                     credentials.
+### END INIT INFO
+
+unset DESC DAEMON CONFIG DAEMON_ARGS PIDFILE NICE USER SIGHUP_RELOAD
+
+prefix="/opt/slurm/munge"
+exec_prefix="${prefix}"
+sbindir="${exec_prefix}/sbin"
+sysconfdir="${prefix}/etc"
+localstatedir="/var"
+
+DESC="MUNGE"
+DAEMON="$sbindir/munged"
+#CONFIG=#_NOT_SUPPORTED_#
+DAEMON_ARGS="--key-file ${prefix}/etc/munge.key"
+PIDFILE="$localstatedir/run/munge/munged.pid"
+#NICE=
+#USER="daemon"
+#SIGHUP_RELOAD=#_NOT_SUPPORTED_#
+
+###############################################################################
+
+service_init ()
+{
+# Determine the system type and initialize the environment.
+#
+# Note that the shell positional parameters must be preserved when calling
+#   this function in order for SuSE to initialize its environment properly.
+##
+  PATH=/sbin:/usr/sbin:/bin:/usr/bin
+  DAEMON_NAME="`basename \"$DAEMON\"`"
+  SCRIPT_NAME="`basename \"$0\" .init | sed 's/^[SK][0-9][0-9]*//'`"
+  SIGTERM_TIMEOUT="3"
+  STATUS=0
+
+  # Read configuration defaults to override variables:
+  #   $CONFIG, $DAEMON_ARGS, $PIDFILE, $USER, $NICE, $SIGHUP_RELOAD
+  ##
+  for dir in "$sysconfdir/default" "$sysconfdir/sysconfig"; do
+    [ -r "$dir/$SCRIPT_NAME" ] && . "$dir/$SCRIPT_NAME"
+  done
+  [ -z "$DAEMON_ARGS" -a -n "$OPTIONS" ] && DAEMON_ARGS="$OPTIONS"
+  [ "`id | sed 's/^uid=\([0-9]*\).*/\1/'`" -ne 0 ] && unset USER
+  expr -- "$NICE" : '[0-9]*$' >/dev/null 2>&1 && NICE="+$NICE"
+  [ -n "$SIGHUP_RELOAD" -a "$SIGHUP_RELOAD" != 0 ] \
+    && RELOAD=1 || unset RELOAD
+
+  if [ -f /etc/debian_version -a -x /sbin/start-stop-daemon ]; then
+    SYSTEM="DEBIAN"
+    [ -x "$DAEMON" ] || exit 0                  # pkg removed but not purged
+    [ -r /etc/default/rcS ] && . /etc/default/rcS
+    [ -r /lib/init/vars.sh ] && . /lib/init/vars.sh
+    [ -r /lib/lsb/init-functions ] && . /lib/lsb/init-functions
+  elif [ -f /etc/redhat-release -a -r /etc/init.d/functions ]; then
+    SYSTEM="REDHAT"
+    . /etc/init.d/functions
+    RH_SUBSYS="/var/lock/subsys/$DAEMON_NAME"
+  elif [ -f /etc/SuSE-release -a -r /etc/rc.status ]; then
+    SYSTEM="SUSE"
+    . /etc/rc.status
+    rc_reset
+  elif [ -r /lib/lsb/init-functions ]; then
+    SYSTEM="LSB"
+    . /lib/lsb/init-functions
+  else
+    SYSTEM="OTHER"
+  fi
+
+  # Exit if the package has been removed.
+  ##
+  [ -x "$DAEMON" ] || exit 5                    # LSB: program not installed
+
+  # Exit if the configuration has been removed.
+  ##
+  [ -z "$CONFIG" -o -r "$CONFIG" ] || exit 6    # LSB: program not configured
+}
+
+service_fini ()
+{
+# Return the exit status.
+##
+  case $SYSTEM in
+    SUSE)
+      rc_exit
+      ;;
+    DEBIAN|REDHAT|LSB|*)
+      exit $STATUS
+      ;;
+  esac
+}
+
+service_start ()
+{
+# Start the service.
+#
+# Required by LSB, where running "start" on a service already running should be
+#   considered successful.
+##
+  log_init "Starting $DESC" "$DAEMON_NAME"
+
+  VARRUNDIR="$localstatedir/run/munge"
+  if [ ! -d "$VARRUNDIR" ]; then
+    mkdir -m 755 -p "$VARRUNDIR"
+    [ -n "$USER" ] && chown "$USER" "$VARRUNDIR"
+  fi
+
+  case $SYSTEM in
+    DEBIAN)
+      if $0 status >/dev/null 2>&1; then
+        STATUS=0
+      else
+        ERRMSG=`start-stop-daemon --start --quiet \
+          ${NICE:+"--nicelevel"} ${NICE:+"$NICE"} \
+          ${USER:+"--chuid"} ${USER:+"$USER"} \
+          ${PIDFILE:+"--pidfile"} ${PIDFILE:+"$PIDFILE"} \
+          --exec "$DAEMON" -- $DAEMON_ARGS 2>&1`
+        STATUS=$?
+      fi
+      ;;
+    REDHAT)
+      if $0 status >/dev/null 2>&1; then
+        STATUS=0
+      else
+        daemon ${NICE:+"$NICE"} ${USER:+"--user"} ${USER:+"$USER"} \
+          "$DAEMON" $DAEMON_ARGS
+        STATUS=$?
+      fi
+      [ $STATUS -eq 0 ] && touch "$RH_SUBSYS" >/dev/null 2>&1
+      ;;
+    SUSE)
+      ERRMSG=`startproc ${NICE:+"-n"} ${NICE:+"$NICE"} \
+        ${USER:+"-u"} ${USER:+"$USER"} \
+        ${PIDFILE:+"-p"} ${PIDFILE:+"$PIDFILE"} \
+        "$DAEMON" $DAEMON_ARGS 2>&1`
+      rc_status -v
+      STATUS=$?
+      ;;
+    LSB)
+      if [ -n "$USER" ]; then
+        ERRMSG=`su "$USER" -c "/sbin/start_daemon \
+          ${NICE:+\"-n\"} ${NICE:+\"$NICE\"} \
+          ${PIDFILE:+\"-p\"} ${PIDFILE:+\"$PIDFILE\"} \
+          \"$DAEMON\" $DAEMON_ARGS" 2>&1`
+      else
+        ERRMSG=`start_daemon ${NICE:+"-n"} ${NICE:+"$NICE"} \
+          ${PIDFILE:+"-p"} ${PIDFILE:+"$PIDFILE"} "$DAEMON" $DAEMON_ARGS 2>&1`
+      fi
+      STATUS=$?
+      ;;
+    *)
+      if $0 status >/dev/null 2>&1; then
+        STATUS=0
+      else
+        [ -n "$NICE" ] && nice="nice -n $NICE"
+        if [ -n "$USER" ]; then
+          ERRMSG=`su "$USER" -c "$nice \"$DAEMON\" $DAEMON_ARGS" 2>&1`
+        else
+          ERRMSG=`$nice "$DAEMON" $DAEMON_ARGS 2>&1`
+        fi
+        STATUS=$?
+      fi
+      ;;
+  esac
+  log_fini "$STATUS" "$ERRMSG"
+}
+
+service_stop ()
+{
+# Stop the service.
+#
+# Required by LSB, where running "stop" on a service already stopped or not
+#   running should be considered successful.
+##
+  log_init "Stopping $DESC" "$DAEMON_NAME"
+  case $SYSTEM in
+    DEBIAN)
+      if ! $0 status >/dev/null 2>&1; then
+        STATUS=0
+      else
+        start-stop-daemon --stop --quiet \
+          ${PIDFILE:+"--pidfile"} ${PIDFILE:+"$PIDFILE"} \
+          --name "$DAEMON_NAME" ${SIGTERM_TIMEOUT:+"--retry"} \
+          ${SIGTERM_TIMEOUT:+"$SIGTERM_TIMEOUT"} >/dev/null 2>&1
+        STATUS=$?
+      fi
+      ;;
+    REDHAT)
+      if ! $0 status >/dev/null 2>&1; then
+        STATUS=0
+      else
+        killproc "$DAEMON"
+        STATUS=$?
+      fi
+      [ $STATUS -eq 0 ] && rm -f "$RH_SUBSYS" >/dev/null 2>&1
+      ;;
+    SUSE)
+      killproc ${PIDFILE:+"-p"} ${PIDFILE:+"$PIDFILE"} \
+        ${SIGTERM_TIMEOUT:+"-t"} ${SIGTERM_TIMEOUT:+"$SIGTERM_TIMEOUT"} \
+        "$DAEMON"
+      rc_status -v
+      ;;
+    LSB)
+      killproc ${PIDFILE:+"-p"} ${PIDFILE:+"$PIDFILE"} "$DAEMON"
+      STATUS=$?
+      ;;
+    *)
+      signal_process "$DAEMON"
+      rc=$?
+      [ $rc -eq 0 -o $rc -eq 2 ] && STATUS=0 || STATUS=1
+      ;;
+  esac
+  log_fini "$STATUS"
+  [ -f "$PIDFILE" ] && rm -f "$PIDFILE"
+}
+
+service_restart ()
+{
+# Stop and restart the service if it is already running;
+#   otherwise, start the service.
+#
+# Required by LSB, where running "restart" on a service already stopped or not
+#   running should be considered successful.
+##
+  if $0 status >/dev/null 2>&1; then
+    $0 stop && $0 start
+  else
+    $0 start
+  fi
+
+  case $SYSTEM in
+    SUSE)
+      rc_status
+      ;;
+    DEBIAN|REDHAT|LSB|*)
+      STATUS=$?
+      ;;
+  esac
+}
+
+service_try_restart ()
+{
+# Restart the service if it is already running.
+#
+# Optional for LSB, where running "try-restart" on a service already stopped or
+#   not running should be considered successful.
+# Also known as "condrestart" by RedHat.
+##
+  case $SYSTEM in
+    REDHAT)
+      [ -f "$RH_SUBSYS" ] && $0 restart || :
+      STATUS=$?
+      ;;
+    SUSE)
+      $0 status >/dev/null 2>&1 && $0 restart || rc_reset
+      rc_status
+      ;;
+    DEBIAN|LSB|*)
+      $0 status >/dev/null 2>&1 && $0 restart || :
+      STATUS=$?
+      ;;
+  esac
+}
+
+service_reload ()
+{
+# Reload the configuration without stopping and restarting the service.
+#
+# Optional for LSB.
+##
+  [ -z "$RELOAD" ] && STATUS=3          # LSB: unimplemented feature
+
+  log_init "Reloading $DESC" "$DAEMON_NAME"
+  case $SYSTEM in
+    DEBIAN)
+      if [ -n "$RELOAD" ]; then
+        start-stop-daemon --stop --quiet --signal HUP \
+          ${PIDFILE:+"--pidfile"} ${PIDFILE:+"$PIDFILE"} \
+          --name "$DAEMON_NAME" >/dev/null 2>&1
+        STATUS=$?
+      fi
+      ;;
+    REDHAT)
+      if [ -n "$RELOAD" ]; then
+        killproc "$DAEMON" -HUP
+        STATUS=$?
+      else
+        echo_failure
+      fi
+      ;;
+    SUSE)
+      if [ -n "$RELOAD" ]; then
+        killproc -HUP ${PIDFILE:+"-p"} ${PIDFILE:+"$PIDFILE"} "$DAEMON"
+      else
+        rc_failed $STATUS
+      fi
+      rc_status -v
+      ;;
+    LSB)
+      if [ -n "$RELOAD" ]; then
+        killproc ${PIDFILE:+"-p"} ${PIDFILE:+"$PIDFILE"} "$DAEMON" -HUP
+        STATUS=$?
+      fi
+      ;;
+    *)
+      if [ -n "$RELOAD" ]; then
+        signal_process "$DAEMON" "HUP"
+        STATUS=$?
+      fi
+      ;;
+  esac
+  log_fini "$STATUS"
+}
+
+service_force_reload ()
+{
+# Reload the configuration if the service supports this;
+#   otherwise, restart the service if it is already running.
+#
+# Required by LSB, where running "force-reload" on a service already stopped or
+#   not running should be considered successful.
+##
+  if [ -n "$RELOAD" ]; then
+    $0 reload
+  else
+    $0 try-restart
+  fi
+
+  case $SYSTEM in
+    SUSE)
+      rc_status
+      ;;
+    DEBIAN|REDHAT|LSB|*)
+      STATUS=$?
+      ;;
+  esac
+}
+
+service_status ()
+{
+# Print the current status of the service.
+#
+# Required by LSB.
+##
+  case $SYSTEM in
+    REDHAT)
+      status "$DAEMON"
+      STATUS=$?
+      ;;
+    SUSE)
+      printf "Checking for service $DESC: "
+      checkproc ${PIDFILE:+"-p"} ${PIDFILE:+"$PIDFILE"} "$DAEMON"
+      rc_status -v
+      ;;
+    LSB)
+      printf "Checking status of $DESC: "
+      pids=`pidofproc ${PIDFILE:+"-p"} ${PIDFILE:+"$PIDFILE"} \
+        "$DAEMON" 2>/dev/null`
+      STATUS=$?
+      if [ $STATUS -eq 0 -a -n "$pids" ]; then
+        echo "running."
+      elif [ $STATUS -ne 0 -a -s "$PIDFILE" ]; then
+        echo "dead."
+      else
+        echo "stopped."
+      fi
+      ;;
+    DEBIAN|*)
+      printf "Checking status of $DESC: "
+      pids=`query_pids "$DAEMON" "$PIDFILE"`
+      rc=$?
+      if [ $rc -eq 0 -a -n "$pids" ]; then
+        echo "running."
+        STATUS=0                        # LSB: program is running
+      elif [ $rc -ne 0 -a -s "$PIDFILE" ]; then
+        echo "dead."
+        STATUS=1                        # LSB: program is dead & pidfile exists
+      elif [ $rc -ne 0 ]; then
+        echo "stopped."
+        STATUS=3                        # LSB: program is not running
+      else
+        echo "unknown."
+        STATUS=4                        # LSB: program status unknown
+      fi
+      ;;
+  esac
+}
+
+query_pids ()
+{
+# Writes the matching PIDs to stdout.
+# Returns 0 on success (ie, pids found).
+##
+  PROCNAME="$1"
+  PIDFILE="$2"
+
+  if type pgrep >/dev/null 2>&1; then
+    pids=`pgrep -d ' ' -x "\`basename \"$PROCNAME\"\`" 2>/dev/null`
+    rc=$?
+  elif type pidof >/dev/null 2>&1; then
+    pids=`pidof -o $$ -x "$PROCNAME" 2>/dev/null`
+    rc=$?
+  else
+    pids=`(ps awx -o pid -o command || ps -e -f -o pid -o args) 2>/dev/null \
+      | tail +2 | egrep "( |/)$PROCNAME( |$)" | grep -v egrep \
+      | sed 's/ *\([0-9]*\).*/\1/' | sort -n | tr '\012' ' '`
+    [ -n "$pids" ] && rc=0 || rc=1
+  fi
+
+  unset pids_running
+  if [ -n "$pids" -a -r "$PIDFILE" ]; then
+    read pid_line < "$PIDFILE"
+    for pid in $pid_line; do
+      expr -- "$pid" : '[0-9]*$' >/dev/null 2>&1 \
+        && expr -- " $pids " : ".* $pid .*" >/dev/null 2>&1 \
+        && pids_running="$pids_running $pid"
+    done
+    [ -n "$pids_running" ] && pids=$pids_running
+  fi
+
+  echo $pids
+  return $rc
+}
+
+signal_process ()
+{
+# Returns 0 on success, 1 if kill failed, 2 if PROCNAME is not running.
+##
+  PROCNAME="$1"
+  SIGNUM="$2"
+
+  pids=`query_pids "$DAEMON" "$PIDFILE"`
+  [ $? -ne 0 -o -z "$pids" ] && return 2
+
+  kill ${SIGNUM:+"-$SIGNUM"} $pids >/dev/null 2>&1
+  [ $? -ne 0 ] && return 1
+  [ -n "$SIGNUM" ] && return 0
+
+  pids=`query_pids "$DAEMON" "$PIDFILE"`
+  [ $? -ne 0 -o -z "$pids" ] && return 0
+  [ -z "$SIGTERM_TIMEOUT" ] && return 1
+
+  sleep "$SIGTERM_TIMEOUT"
+  kill -KILL $pids >/dev/null 2>&1
+  pids=`query_pids "$DAEMON" "$PIDFILE"`
+  [ $? -ne 0 -o -z "$pids" ] && return 0
+  return 1
+}
+
+log_init ()
+{
+# Output informational message at beginning of action.
+##
+  MESSAGE="$1"
+  PROCNAME="$2"
+
+  case $SYSTEM in
+    DEBIAN)
+      if [ "$VERBOSE" != no ]; then
+        if type log_daemon_msg >/dev/null 2>&1; then
+          log_daemon_msg "$MESSAGE" "$PROCNAME"
+        else
+          printf "$MESSAGE: $PROCNAME"
+        fi
+      fi
+      ;;
+    REDHAT|SUSE|LSB|*)
+      printf "$MESSAGE: $PROCNAME"
+      ;;
+  esac
+}
+
+log_fini ()
+{
+# Output informational/error message at end of action.
+##
+  STATUS="$1"
+  ERRMSG="$2"
+
+  case $SYSTEM in
+    DEBIAN)
+      if [ "$VERBOSE" != no ]; then
+        if ( type log_end_msg && type log_failure_msg ) >/dev/null 2>&1; then
+          log_end_msg "$STATUS"
+          [ $STATUS -eq 0 -o -z "$ERRMSG" ] || log_failure_msg "$ERRMSG"
+        else
+          [ $STATUS -eq 0 ] && echo "." || echo " (failed)."
+          [ $STATUS -eq 0 -o -z "$ERRMSG" ] || echo "$ERRMSG" >&2
+        fi
+      fi
+      ;;
+    REDHAT)
+      echo
+      ;;
+    SUSE)
+      [ $STATUS -eq 0 -o -z "$ERRMSG" ] || echo "$ERRMSG" >&2
+      ;;
+    LSB|*)
+      [ $STATUS -eq 0 ] && echo "." || echo " (failed)."
+      [ $STATUS -eq 0 -o -z "$ERRMSG" ] || echo "$ERRMSG" >&2
+      ;;
+  esac
+}
+
+###############################################################################
+
+service_init "$@"
+
+case "$1" in
+  start)
+    service_start
+    ;;
+  stop)
+    service_stop
+    ;;
+  restart)
+    service_restart
+    ;;
+  try-restart|condrestart)
+    service_try_restart
+    ;;
+  reload)
+    service_reload
+    ;;
+  force-reload)
+    service_force_reload
+    ;;
+  status)
+    service_status
+    ;;
+  *)
+    echo "Usage: `basename \"$0\"`" \
+      "(start|stop|restart|try-restart|reload|force-reload|status)" >&2
+    exit 2                              # LSB: invalid or excess argument(s)
+    ;;
+esac
+
+service_fini
diff --git a/contribs/cray/etc_sysconfig_slurm b/contribs/cray/etc_sysconfig_slurm
new file mode 100644
index 000000000..44775c55a
--- /dev/null
+++ b/contribs/cray/etc_sysconfig_slurm
@@ -0,0 +1,24 @@
+#
+# /etc/sysconfig/slurm for Cray XT/XE systems
+#
+# Cray is SuSe-based, which means that ulimits from /etc/security/limits.conf
+# will get picked up any time slurm is restarted e.g. via pdsh/ssh. Since slurm
+# respects configured limits, this can mean that for instance batch jobs get
+# killed as a result of configuring CPU time limits. Set sane start limits here.
+#
+# Values were taken from pam-1.1.2 Debian package
+ulimit -t unlimited	# max amount of CPU time in seconds
+ulimit -d unlimited	# max size of a process's data segment in KB
+ulimit -l 64		# max memory size (KB) that may be locked into memory
+ulimit -m unlimited	# max RSS size in KB
+ulimit -u unlimited	# max number of processes
+ulimit -f unlimited	# max size of files written by process and children
+ulimit -x unlimited	# max number of file locks
+ulimit -i 16382		# max number of pending signals
+ulimit -q 819200	# max number of bytes in POSIX message queues
+ulimit -Sc 0		# max size of core files (soft limit)
+ulimit -Hc unlimited	# max size of core files (hard limit)
+ulimit -Ss 8192		# max stack size (soft limit)
+ulimit -Hs unlimited	# max stack size (hard limit)
+ulimit -n 1024		# max number of open file descriptors
+ulimit -v unlimited	# max size of virtual memory (address space) in KB
diff --git a/contribs/cray/libalps_test_programs.tar.gz b/contribs/cray/libalps_test_programs.tar.gz
new file mode 100644
index 0000000000000000000000000000000000000000..436bade59953e071beccba2f8b00b70a58cd795e
GIT binary patch
literal 345034
zcmV(&K;ge1iwFRNdiYHM1ME2qcoS8+1wrX}xvUQmWYw`0XdBYWd)m?hc{XYDYTgM#
zcQTnI(@bWPnMu+F5ZrZT6<mBE`#@Q*SL>~dD9B1h1bpxkT|^KB%H!(t6j1>cMdY4I
z3c{7WfEVj`ul>J#oipcup7Wpoai$a*#5j^dihe|p06>*W@Kj3Wa#VPiqDo1Az6*+^
zQl%1As3a-@FO|trX%VdaF^xI{DUQcj7%qyi1RW#Te@cgQfG`~H4wJeQs4o9wRDx^f
z36AHk{86|>|D!4is*s{G;D3cuq3ZE}cfbYwk7G1WP|YmC5o{XcNrwJk9*N8Gzfz?t
zlE@?qnY_pU-2s>8e+w2P!X!ofFdc$}RSLyF^S@M~k_-NqDkLf;AO!xGNdOv_T-l{w
z7=Gsd9|migM3yBZQ64VC%VC*BQZ1HAq*7Q*&^*h)&NP|J5nvD^VJ$}C3<c(J5+KPi
zi8FAS)~R)tL&IP*i4!zOgy0k%B3PJ@5^%lUoR(DroK<bnJ0X|aQLlHwbugBQM_6&;
zoIw_~$qK_@Hja^WB@Q_qT4(;fv<w_Mo?yjHiU%(OUHPcevJ6RuaEuMX6_t2-Ib_o`
zoFNxDYbx=&N~RnFO6?9?y+dtrLRu|AM{pc+Sipdd!{T%}PoQ=gosh|@)tMXsQCW$A
zA4%g>DnvkLljhuMAx=Pu2y6zkx*iD4KLB2ADlIF>D2K&fjH19_f@K*N9wtr@Aq;F3
z4yny{Cy*#uSrA-)Mq(ufnRQ?;FNz{s6BsWj;Zor+*$T5Mir}E(l?+@153j`Gv#OAM
z*f!@SM4{r+GOe~8U<+0OLjY+lEN0-+aS#M-ssXsd_JgfCJFGdgt>7u_qwwsqwEX)r
zOD77}=WQ+T5?u^^#~=th>znVEDR@bcWl=Zk0h<<}LL_^RB4ALZ-DtD=YhaurFd7Et
zLJ*Hdxbq}d<X3-=)N}TtqB06}-~x|e<(T}c^TKMN;&G*A4of-UXW)uE;ju8zump@%
z3WqAs01)8#g9l5roTWZzw?ZVx7i1OegMR9ey`pew`M(gFKUBxX^1nnS%lH4F?5mWh
z9OQqgQX=ii|J?yd1q4^Xrg(xP;vlEPBe8fQ!or0!FcJ=@Xk0h}7)A0~K#4%Zx)LN@
za&87Hfgloa3+JRv7+`D!9|hx4u(cHx;3ZuSO@fNaFkBWPcp^oH%0S!~7lS<6S|%L@
zm+(=NgA*(hVX?TN9A;=L3kM010BJawg;|E-OUg$>1sQ^LxULSCo+pDQK=EN{0tTr!
zE(E!dVc>I)F>KOt1tFOzXlCFtu~?8)4g=GQ1*-vVGZ_*J6wi=l6(UjIa)BYQwhLWb
z2TKY(#TksHTgysFIt?}`bRmWC$l=_`l2LF)m#E_6DKJ5CM1eoA6f6ZU>?MbV2@rQ6
z%Wy=Owo$OvZ8jHdC>VjmwLrh{dPY6Yq^J-~Gr|J$yyw|4Y<LLN4z6TWo(0as8QEQG
zc{X3Lfb8;uiIDJP2|mTru-<2KH9Or}t={Pz4Ndv~mHvMAaDn_E2F-qRGDWZ?ab-RR
zT-yClA(u;45`{##|D)2L??2rG7xO<Rbl^0|Bx#}<q&ZL`7&cqUMK4Fk#rzLyKk&a?
zE>lXSGAZ!COaZ!qp8DS%Fzj|DNYcV>A_`GC9ODc9IgP~$Fk%TTM3Hn17Kh<dNqIRE
z0R^U+h$r~$Ij5mDPE}I5IM!1d{_nxX;$L_XV~ISGINv2)X7CqXJpPq(#hLgQz5s&w
zm&+t*PyBZWbj3f2{$bECxSpU17URMF7#184cR3zD8|#2d$Pu-*dRw#J;ecw#g5ZbK
z1j~U|v#vy1DJdB{2CCHvTn>UuFf`~!L0``^I71ciW1!;NGbkO#V=&lPKE>6Q7}O@S
z+o3Ok;{+dNLUkn$y;JY-fQkIMYsM*z17vo+!=kq8t*$fk9Ft;kqOL@v);8*`y1W7l
z!EsoGfH?x#2Cgo3CBRynqhw6JMbGoqfG!^v+GRd}X~3Cy9HT=cIXDG+XV?l_cM9D6
zVQ??uq72K|B3&}U@{n`w7v9fX?-;0SbAD>yUJ6`R{GUArVu|!J(!fRHU#3zh|6BZn
zUQpE&|J?zXAOFMhpLW9xMKKwYj({iu(UZYAIMT%&g3}}h$WkeS#+eWSi{mg7BGL%Q
zhlH<gwJ^vTXAfAuqxgO=0Yj-&h{&@G$;JS~li_@S0tJZ(NsED9gh~Oc6pC|YL%t@p
zI-=HQ(h>C;AaqqJqdwGP3ncXRNGsZi__&<Zh$Q&xbW75wmo!FwK3kL)K`o-7E(2~M
zG;bzam03}UuZml+aBDU0pmQ!1v!@tGHtA|18o5L)X!R3Bl@xK4RAVrhhEQ(`(HOI7
z6-v2tO-L1GG+JYFA4vxy;Y2QiXI*wjPMcCFT{+&WvQSNuq$eITdg6h23uJ6arL#0i
zdbB2wHxSYrLNd<oi)X6sLAKGNQ4*ZjYeuT{2|3S1JxNuP@tLcXey@*6Iv~atR@stt
zTw5>Kctkosr^k~bO}$p)Y4O#&J?0o`^(m90pj;D_C>7zb+njY6LrB78mAWCdA=GM)
zX&mZ+yV{Kh-43rSp494^(k4RVmxRd#!>d9ziCq!ag;OY6okU$;4$;Ii?z9FHIU3Ri
zM8?>OYELlAM0qV8Yw}`QL=;zwB8`qL9*8DeEq*N;us7CIsTRy>PibSM%q~G7R%Btx
zDxHdL&`JXiqYF{SB2m`i&XJZTwNdK{r>l9c)ryB@W>n-PGrX7L3{ejqwHWn~-9nSN
z0<Wj^GHX~NO(Qyo(XMO_=nN#SbT_6PPDOpIMN&^h>}qF=T}8%yUJ8xU1ZtH*9yt|l
z%BGVkn@9%y5<t_4gs>(VByO&kwmO{E2E>)NDJ+?UTjleZCHg>yO=p5ZQW9k$ucV5x
z2NOz)mbWMjdIm9C)dok#S?$ot>6WU-Mjc7D7>x-hDT*L5EN+czP_M7r63S3GM8=pH
zmC|~m_AsVQg|Vt!m5Fv%D^iTv9Wk_ottzcu+Qg{Ck|xxx@FkIe$eSa*h^g9>gZx@~
zE6zw#iHzQ8tx5+?5{a%Qrz8U&zblh8(b1$igqj;nt|o6BtqP@_^-T?UHcDg>#0hCU
zs46SbHKHyRX{gfW8vH6xizd}Xm|1D7t4XDxNw-g?k4Sx*ByJBl=_ZZRh%``WB1=Ms
zh!x26#$&E%$P$REm1?41?yE-C7^e5R{8YFlZP4nY30uhPj$4CjyThV%nVU>HQj>w4
zA=0UqVf7IkE@jfuoLOXa;z7(WH#sz^KukpjqE<>qiZrQ~xFp<!N_ZEI>*;W7kk3IH
zQN+<$tqdp-IY&z}=2VM2<M#Mc!3M^Sr`_fzBETvYG$mIwpc+5xVbcLNQKgkekgx$l
zNU4mrcnH1Tq|H`q!-gDBp=zzntB{EreFm$kJ`7H~&SYjHev>HO%1Z)-Mx#y!t0Oi@
zV|S)7OH`W?sT_(XQ_|qzk+`Oj%$SwgDB|Zlj+oIBQtL^p*NFve21;WIsDfHGQEhfY
zVONVgnKOsY;ZQWtMB!wsEN<t5HY}{MHzcYQL1&H$%eAhA4T}Ucns7?3(s}FwJ`t|A
zLA1q6OQR-pOs%ei>Hz$hjl>VF|FSDr|Ifew%M|(gFYT%S-2y%L|DQKpEdDRho<(w(
zspBI5|C1?D<=OAQa)m^=|I1NDPyBZW+%}_Mzdd^>bJ11b_jJ+6JN?X#s$X35>6eqC
zUw<<GmMvH>_P#q-4Lb0}+PjX78S;!qVW8GMF#GY3pMH)S-d^`For6E^xcTtur=BOK
zV{hCR3CbRAdv3+sZHIQbK7VqQVf&=Lhqg|?=3vreYun#8?+^O;=4BOgWysSzKbn3`
z#gxeh6)~9`^>f}?$5*|#Zr!$3zxBvBb<W;8?9{<Shrd~|>ZJSY7dueB)3f`g4X&@3
zFH_aKA4%C>-v3PW^bp(j=Z2mL4BYnCO#aBXd-qNJc+dim|B>D1n;%9q?vrn>_(Q<G
zckvW{GM_!VWa1Z3Oi6#<l!-rc9Cz<)Td=-m@91YIdWckA`Jyk@EDD@f&0O8|xO<HF
z*pAN*?^|O$`QS%mRIh9~v3QDY%i<+Acbn?HQ%65Lx_|J9h3R?ApF4JZ_}rx*oO*D<
z{5$-w_z#W^Z~6VUuTOonpnc!TWgqXHp6=Mc%`0Cpa_n0tzF2R(X24BHk1mKBYK~9N
zJiBCog+6kKv&`pL#6E2QB#ms!Y`aN5VPf{gqRsxnljiLB;K+i+u~++d4t}Zs)gJx@
zL$A)2aqmX);wOf7+%<GOf9?DO`+hUDBdK4q_C9*g)ifobb`*84ykE3*m509yP2e-=
z>+Yp*9i&-#KeDvHDL*xnUN~^)^S|gVkT2|#G&Hc$azL~8`LUg<^i$29_nh22=;bp4
z?j5-E?#jET%`KuAM)YXnDzuXRXvNZ({)VJ(>9f60^x8WfY7|W5+HGt(5O&*=H{N_o
z<K6kVW!j4?U&v35ue`<g(v85Z8FTjs*4;Q`*`C#N%m){a1+rF$8fKlkcki6z(|4?1
zaD2x2Mf*dC{Oeo?=JoC<-ZSVOXB$}7%yqNp$@&6Y%xz;#ZCAC=h4ze?Hh$ayC0M(3
zgfsUaLx5=MpZ;}zsDk(Q>g<!a_P~%_LH34s@-&-PHnhF(E<#@nH8iTG<+%cj)MZOT
zr$(GI(JKx%{G~&@bm{KU&4G2Ue=O+TaW7B}-Cj`b>DMc-`Z`nFU2je3o4Das@#B5T
zm0-mi-r4kPMV?6U;zR9gyKtY+5^xWfKRdM#y=V0w&1A19i(ZEItX?gCvhM>0lz3X{
zoVi<XqldoyE<*2`2WY3=Xxs_x>(!|V^eWD;=3U4LL{ILV9OzX(d+A?`=-wT_UEKS6
zu$#E-sexUB0AIre`2@SiK??!fWMR}hzk<cRAM(+6PVI;vZdlRr)g&vJaFnGAJ4W^`
zE$o;GzN@f5y~-atUoHRd!^x8N^~;3KH#WX2Y@-1U3@d$Y>CleP^kCFSFktD%C;DGk
zIB#H-UqVT4e)XMSY1ST`zTuPVSF1-a{NQlxkB?2b-=x{_EiE0f?DjpUYf7%Gdv5TG
zZ>~~?i#Lt>x9MNMIQWLQuAk%A3@BNJoc`54{?`<Z2gt|7tNXlgH!<g(8?OFr{nS_2
zjKcmr{P>1XmjK#_Cd@Q``RUdjn~pXAWv1t5|100THI=$PmfAXTXa@i4Z?aX-wr`(1
z8=3S~;;Z9_-`@D%%j?%H|F&)2e;%1@PVfB3qMD5h<{nS)esaZ>hue?+^6t&q@tt#@
zI5?&svRjVK*#2nPl5TyVW{tn@Ph%$5)}AQV+B2h`U;b04?kfTptN$#+Vc%5+v9wZp
zw%xmA9shIvm&;YC@c&;Ks**`ma;a44|G`zDr~Y>bOeiXvR`mCNXyNVF0qdt<_x|6*
z4-WXvt#6*1B;P#s;F295wkH)81Gm-;`F7U6*I%FV`Mh^`t$B2LdiMwkvh&cK`Mxh}
zo?hPnv8``ghKz1YeEi|SeYM-0XGk+ot$h8n{^J(!7_w!}>wjLM?EA(NV)US`L$~M#
z_fBYkx4hqgnx!WLH~e+50e|nv*khwK15zjIr@pkS>ew3#Z<=&HJ+}R^(X)?2^55ON
zttETf{K^2WV^4g_jpWM6fXz$pd!m+)u1$XX#bdvmXx`<O+}~dIczxU6Umad98ZyYT
z^C-RPv~BdMA$v}eA51KL|M_dtxrS$rqi?^b&&s<u>n0pDszf*J>s(~JUDWU9_q5ta
z!uwWj+qj~&<<+C}T(gcBEm*(#ZQ`}xWB9DHTXyx6%{l&G?EM8;l}-034$~zeN+YlV
z>F)0CZUl)<cXxM4cY{bsBOoCHf&xl+hje!d@@=qv@;&GMpL3n-``wq%<-TXuthHv%
z%9*)+_;Jl+$F)n;^3+;lZ3O61WBtW7@~ZZ>R}Hy%)5dr08$8hN0X#c7RWiap1hsYG
zLJXn8DB0l&D3Ls2<5T{8F^($%Q0hxy1K*2%9$bm92Ya-y*A`*cT2J<---vT>YGMCb
zm{&R$;pFI=T*rsxI0U`+8Sv5}#eIIB`-#O&$(07a@AHVkVOrjzDK?`U6+s%s>Mp`1
zti*Z3;|d%F3|?Hh>LtIt1IW&uEFUcyjx0vzhVBi^YV2QgqTbdM<gvSZO#@h~B_<%&
z!<XMDNGLYyID7h}$firig}Ahj6a6y*5c&m9?xFO#e|Wf?x1pI5Y6-OOj?P+y%;M6)
z`trSKElNZI1<v9uz&_j~2XH+NbQ`w`3;QVUHE5{m)g=|o*U}g&Fz}Kj2(3V<kJhML
zA(i(7X-0v~1yNa)@CYA)I&CjBo`awGy^^w}JN-Bq$J;h$Q{Qd_=M+7Y>#x-MIA*Ee
zxBe{=LYbNArpZq6Sqw0Mox{efR~oYKQ~FeAxKdpvSRea39cuM5v~AHw{aGqUyuWQ2
z8-&5N<^WFBcu}jzvo4Rl7<eKiINERmR3yhtfLUn9z(jTU3hN%F9dxLYg2RWpVD{2y
z>`U@}iRL0)f*v$NJI?~ur4^=wG5U3}rl>;s0z1mA<b*G%+d521$jD)Il9&7x=f!uq
z$x1X`XM&E%QVrTPTp7K<SxAlh!{>wa-5_l#*zERt93#_5Vv`3FDkp)n(gHFNnFEF{
zZ#jcY8U%%iIThIJDn69~CH7c~Pj|g62Gm1`;m)_myC>|mKUND-!M{A%m!;1}y;4zk
zB#zpAPnj>P{PE%pnK0*?o7gnq2yQ>>Md3Xji@~!#8ips_>aO+08S6vLA40_5j3$CZ
znp~$ALlPTz))8kv%M(+7p*0AA&4@_EZ44OkE#@t!e!TOpZHskNsTc+lM!1C?e|NEv
z+|7&y__8bAX>XO8@r?r7akTLw)7^V{$xL%gUEsD%&HB`vwBa%Ln)bwcHafu}!qGQF
zx0VM8=M^U5_98dfv?8n?y*|J$&%0XKTU_;;gd8su$gom=<@N|6+G=S9d);$2enQa+
z3+k*&EUkBd_?e8ylReMokva`Qoe^OlNG+Zj+|g4O+SHv<%8^#*BR6)#4CrDezD!t3
zA(Zx`@Xr*+?;TSQbc?!qh-KAy*<XFyzH;~o=QUczbGjtraVBrcZX`)h?go{2(&V(A
zCY(cn(M1rsJsZDy?RPd_v6O?MOc9a&p|_<u1orvsJ5;@XI*p;+3lgpbm5iSoR{CZr
z1G_8$3E&UOT{xX*pB2>cO*zhUC$M!Y#|==b*xF?n!7}>SiFcWgBKTT(-V^ESZlvU0
z=S#`A9;Wty$-uQ4U$)(C^LT`3C^6|{9#!w*V__Zr2<!|gP2Yl+KUk#ZZZ<aF(DSY(
zNGt}Z=)0N6&T!>UJ9f$M2+u8SAJ|yr+Yy2}BqM~n(kJLM`gUlZcUvwxRg#d^`a9r1
zu{O_U{fwiVWgURXs;L7zDn7O=%Q^IN35fa7KLd-2_29YbiR0Vwwo43a2adca=g<*m
zI|?h#^BvvvBn$39#!0YG8PIPA+mO~-Q9=*w!8spBFNiMKB`~EMA{`(fQ170mCXdyF
ziKP;Qo$5*@y2;_AtvwWE`NS?YKzcl88rO+h9S<#(FP9>!CT@oP62&3mI9|xW;mJKe
zY*X3?+)zjOfi=00(i5rS_uB>!0#h**K8YJmk-khJ+zrSk9Bf+D$de#6nQ7P(OGzw7
zTH8N(r&MyLe8_XZ@a`7UNiO!$VPJZT+u|x_AQ=U+QJB7lgLX6N>08$<x87RY4|rsv
z{;foV8pK^h1j5Nqssxf*b+U*f)JL8)f$I*Q7iI!E02m5i?QqIw39GSHZjmT#QL5QG
zqCNSiEXR2JLHUpPx%^&fRYl3W_bHw`8>8uok-KK_b}3uXyj^f&7@;820vC92*5_f<
z{Xio*r*VG1|1NSfIf5VA9h7JDj8Uvz=6ix}oeNaNh?E-g&#CeVhNl9_m06|8<yyy^
zs&dQNK03;Dp};($fl8o=+i)2_*|0d<q_i;r8<i#(P*!=yFb+PM^1*Ii@3~IU*(!g3
zoPbGS%l++xG`1~uH^%@E4N4qDcoO-7>g}xa`;#Rh9rdtQcoC;_r|i8Go35>eYD6Cl
z-@|`+^=LzsQgvQhrSZx%UjO->#2kvuyjmW9UCeH`BA;|<c7WYGuaweF3+~iMM~cdW
zX2n6T5~N($8tcvHLf5&vY?Y{M5)P_L8#riJPz7F>*jX(b(7z{eea^$sKy?7ayV=c@
zz8xEb;3}<Zz8NPzk8<x5KXidJ)7=zu^aty(>yn&9v#h~B?^jA@vZg(3D#Y)&A%A{C
zTX)5X{h@$Oq<>~UVsPH_q3T<SnyQ#Ok~ni6UD3A>1>X5iA~_ldd@R$^9{0(|^$ULS
z0O#o*(LK+#<CAL<s-&lw4Ii0w!RNMIR_Bn!5yEF29U8Zfk3TkgNv3rMKrWKzy&ar3
zUJy%snC&c7@WTJC4y3TORVmYhRPG?RB^epk;yRPC!3Hr`<JaexvJk7HtSsRNy23JA
zY`Fq=g0??+K9HZaR<tt-=Dd#f#lXB4WrMH56-?!LT%gYCGw5tS_+iF-a>&RPePL;p
zOMF2R#b}o3)eP<11%f$q-ND=>;`mO6e5Qv`k!`*d=^GI4>cK5%itj}F@HUe=9ea%G
zO9?+{d9lCtuP=)<ILW~>exxh%YP?A{?#;QYK89pA7YmU4-3#75+``LZA$ti#L9}~f
zp*VQxs_@VSDT$+@<eM3-cv=T^y<3zDo$*jO1-brF&4#bH#2ws^pBOEKNc&CsvOrq7
zlRUfNn#D?sj4LrkA$FJbe*Ki~UK~`{{)fR$x|p0gVd^)cuk>vnxRYQR$@njB_8=Jc
z%o`QK%5xzL!1Y-z7}}-;OgZ{a%U29qXVH8#;g6o#hH&khpK3p`KD4(3Ule56?j;Oz
zW%8q-N9>bSk5GywPajd8deux@jT8*t&>lwp;8noo*nMURF>KT8ENTfjR+Ad}@y$FT
z`-9UU%NePlOjEED7aC*?%mh-%@{Q`teOh6RvD#sYw3iTGt$`N}VXS+Dfaz)4S&S<(
z<Y9IvcIb()!C_vnk5Be47UF^9QK2za_tF$HiS({hbGG+OcO4(}@KfERcdjSZH1~J2
zC0Tr|Rf1Nkd*X^AE>V&t@&=!%jqlF#8WL|-Epe)3>@G}2{YX@Z_9aD9HOy>|!6@a7
zw-YgcM8mjfE?%@IiBY{E=Tk;_?vR&V;S0D{MXB>ajb#;Q$^poH@FZGRihNU-KC~3?
zBgw3TMGSnUf;v$=UiBVD3P<p~&uPDS+;I79zcD-XJ*yrKZxT~pLY{+a8G;vSscoz@
z!InSWdtd_4_O9P5(DhzH;6}iO#mw6zd8KT|j{d4=1$*yTswc`9AYr%T;W(}>i`Bf0
zd)_QvKFoMsOdr=a-?ul%*B}Y5uu9c(hsBb#E|=5S5Jj7uYS0RxsUCqhFdT`1w@85}
z!N0&Pf`zClz(X2dIJKslED95odZ+^Mdu}gI<NRiG;dENxTeOcW>7Bc)462_O;$sF6
z`_DU;)M995SyiyAxJ&Gmu?z#u>(psIoe%F!&XD0YA#t3cW?+wflFZ<S-U18KTPwN@
zgyg@^Z<3YcZG&s;+1yRGP(v%jdRa(b?}=;uIO2j=HKk4;zG*f!>@_p@3tTw-SGds6
z_C?TWL!&=tMmjsNn@57#w}b<<PHXN3oFk3x=<dZHq07@1;rMLnD6pVbkP(=4N{Uhy
z#~m@OJU2w*BA*SLT0mlPFwY~5Q&iGpdJEYXvnbd{`X<r2+)8S1kv4~Qb|<lzPaQei
z<pAm8<*{7PY196w9R&)K90kt<KjIL~+Cx}->j*Dqbh5>J?G978Oh<)vyxZ{ozE2%I
zhR=t^5vV@|RV&(e3XT%b&7Qr?JhiR41Mb+}gKjO4E6^MwUAYh&P<YZrC!d&MV(}JJ
zW{GB`en3E*D(2OzHYtAPBORsp8ZgL*A3g*$gI5oPb?5A&eULd)rn;Zm)RnB>Ny<bZ
zv0uXA8*Z5Og4*eBDz<h6+z_0gf<lmt)Z@>I7auE8`t^NyHR10U=LoVhY7NFoLM`Oz
z_>|`k9IC#>yV|zhoO>s4Iu#EK%~+0nYUvxR)tlV8MCQxpCOhffjnh4@9uoVei3+w4
znsbAy#!+1ax#0M!$gfXHS}{vAXbe$`WsXcZl?3lhc$#Kt!(M70eji?)qg{ZCGol_w
z9PvK2)`M3(SSJc{1ZVD3^%ZT3A6^9l76H8?WVt6bV;`ty3=T4S>~ON*_+zcIWry5Z
zm%Ti!cnpSWcX!$76@J0=2)#R%co|~PO6w{ZghK#4UjOoonx)!yxK4z1j_oSM^4hH`
z5!B#<F-1oImCW3u5*@rpB6zkbEN(-70JhLGjbc^9HKP|SJgtzIr!~UB^a&pm`63~o
z8SecLeG~+}%a5P2Fi0_pUUF!7-*w^*3c$^GLi6>;T4NY_ooyj1?fN=Q?0l}a{*@0$
z%Zb2yO8VF%6rQ$`!WTgtn~@jFkcRC~1TVV19KtX7Yupn3auzfl%-lu0JxoU<N{FA^
z<ubn*PCp%*99rpIaUOHsW8;h2ng~y^Qbqg#6-T5L=PZ&z6W+Tecjw3=Oe=%3vjK5}
zQzEJ3ZdFn>8B)`re1fzl>V?x1R$%)*yZn@3mRC>1toSagmmHaj)FNXcYIMCnX}qFb
z&3y%FPsIv&{2)q0E~>l{Ho!PqDzHVxL!L=WmiZpl_4|Y^Qj+79&R&XsXL~hzCWA^#
z+$()^73wuW@bgg-@U_`XnC&?(g=TX1#)J_|qAe^j6PplapH}>9!<F?diy#9cWgob*
zbZ&E}9+LUF@waz+UmPz@nfQiZ*H#yCMHt6m9#y#KgbX~N#>2uyv0;F`1Ka*S^2r#_
z8ARcf;e3dqXxSjooth}UUF0~&u3j~(2ih8}PD@Rs6!%087~?BjE(-^&5csv4HP&6%
z^@&t*W9|$-Lp?LizQ4I7G(Bp-;yZ<Z1aTG-vs_nWPc0bTArdUqJ`*!fql6hjCaT$~
zpESxu&;ef2(jV0=!dzZ)k(jd;ygu)DU@F2;k5gWD`l=tSH>11q+#|MB=d|M;EjS-s
z4&5Om`>+?+xCce>QhMc_Q2_#MZFf}g-ts4&WC7_2mS_BRbhcm^!(uPN4_hQsEcTEy
zDfRVSho$F(d9Kdhc)f%usJiDY9Q-sY<zNq|J;q?$<UxBA6?(5tke^(G({ijH>newh
z_Ef9?$Si+u{k})1HWKlAhiYd5a(4>$28GvNXSKQj){{U}dX2W3oX-JQ&3C-z182xj
zz!=hDDSIU-P~5tO{I3W<w%46<%c{Yv!6Y`h^IP-KZk5N=AxC!WZ}BxAhcy#N&H)1o
zniTuV?rNVjerSo*@DzOGs=*3`_OjY9U9J8^R#1a-j8<bNcE>%JSzz=d{#j71_G9J^
zJTc_pDQK8r7Ti=NU52)ao%13_pl;Kpa!B`3XUm0O9l48wdH&0^l!)H>q?cF0V@4KC
zA;e3AlT(sNhccVw8~*tp7T-MUfZ7yG!5jxKntdY^EIgmjw`Y<)UrDPKM|Ix~J&^JU
zcXI+jGlltf4Yo$mrtjmK`bTX{lPQ?HMEH%#k|f0tKD6iTXQjE}#Pw8n_ga)%3!duq
zkWg$jR)!X#YiAU;j^VtdeWX(cj}r@KMN7+w`lR99P(Vu^9^2z|#6D82H{}BgeyL6B
zT|TsNwu}3=)ET)nJtC$LROAi^u|C=CYt%c<5pCqBzt`lM>&CE*+@2a=o0G&EFf~&_
ztX9l?B~D?GqD;usZcc3({@GeX@woIe8aN4LxRz;d?ntUYMH)`pP*FHj%ZnV|7;sI7
zUaF5^1(~bQ-W}et?vr_16=mWu-Sgl&FsnD`!TepGzOdtGFWVKi4s<TU5ubWLGCA$2
z(J@B2S{2E0RNiMmP>UBXElRTGMdY!nVex!Lx1;U(RKg2!B9!ESLp`S`v>_GNzd$ZJ
zp@njZ`L#5^S<{xCOT6;k&21@B{3S*-bLCt-sE>2jn-l9yo9!N-_>dK=b40!P*}{S)
zeS05Uf3D0Hd_NLoqx9irOyn?xj5vv*TJ)XJ5X4Ut>|CY3gnhPC5_()+`O5@t1k~0N
zyZ31(Acn{sAp2LrdDfnK#UD<u;7|Bkw=O?@22CyBCtmYpkHh#FqT$K7G9*)$9L|Sy
zbju^H$TNb&W5+0Fw8}+yCZt&MYjo=Nn%=qOSQ74CYyjTJPW41G^q9MuOWnh-84(sg
zOIyEUmw!Q$sCbtpj-_4S!C<yMkd{CXSLOvfP$45+#SbUyj({uqyPhXh=%&Ta%AZ%F
zl1H8-C*fnYn`n;E`8*j}TS1M6fM9~M!EtGy%%`e3;}j1Xp%JZVpT|YLcln7mI$>(U
z`qXn~h=}$9SHc4x_|ooc)UbUh34T(4R!0@hsSBs$`AW-hDL2O~!)l8Nyfn|a`!KR>
zw3AHfJyl_KSDYzA(k-wMQS6pXpA4-6HVmZ7J;_m#p97Rt<e2&7jl>4f&jKe|UR*6>
zb_qhKLkimNjNh-QQc6e78X|jHio6~XFoztC7E3|-s0Zm0RDafI-#*H;Zht<am*_4!
zclixpttR>l9owRqlj5y7qh6oXbjt^v1PF#vUQDO%SjO|#-*MXI*?{fjflYk%(FHMB
zbwcJv3?yLT1Id7$c6!FU`OZONB!d^(uPFD;tT!u8t8)yIoEgXN*Rtcq^0Fi&>ReXD
zy&b;4zaxy4uM||?SDIpCr8J*gDZM^EtgUOmaj9{y_FP)kAi{-0`#zHl(j<kW3R6D0
z0JxYRs=UP-_nZ0-bwn+)9f}3}%WxMQHptbuRFXZyt*Jy`%#V{QQlI>ZTrH8f6DVc3
z-36dqLODtjvnsI;k-U=K?zvq@Z<jpN!ZX(|b*3xuoAH1(dI^yz3y~C5zfTN7zQ1aj
zCCUD1he~(1IKI#JOgMh!^u?@SCMS8nJ+TnylL5RsF_Nq`K#0dZVCyB2#o&F*bnjEF
ztW<4*=BaS`m#WY9@vV6B3hqCjtcD@PW2lP!@H&a^@Q6(CD$Nlx>q1<zhdo5`UZx2O
zvrm@J+VZPM5J6gEHFY{PahR*Y=A3txZO%l6=w4VoJ1Gup&lXtFJf2uHJ`+_FhLmm4
z+sd80?-E5dL7K1`6jj95Fr|=rY|OQtF~Q`;o%@os;`nXc_&{%9>E{L5gZwA>o?Q3X
z*JWUhPAE4?hazfEo%rU^m!7#vO0n^Hae98*gGsmez$3&%Hksv!)tStChw{L*e(>?j
zLufJdxSY07eEQ>jiJCmNFo<j5-kv!8XIx;|7>ZJtI^LLasEMT6+LA<XdCQ7dkqQQk
zRK-jFF*$s;#e+@hSD0w;c4_Z=q-hd*N?qnVGq(f`Yq%!Pj<6vM!e^BoXNnN&>^IxV
zO`wo4ua?fQYVY9;?=(N&zarUKB}?O%$)vb{ic*qPrE5ea9SKR#N4@b@zU;{JnTEIZ
zQIf;CunuCCw(6>AT*v$LNilbvNby$vqz)qDfm}!WvY<otX)e@E^w31oR27(BI+hkg
z^D-tsbK_IZ?w9^GJ@0bbZ1fQYPKtBZ__3?aR<<JFv7m+w#OIN2+t5Yr?;a1SJx!JG
zZFk>r#!AihBs_SRjT%IKMRzT@sqorqG0GL}wZ@y;tHP+7!V=6-IC+X5nDI~Q_u0c!
zo9&xB_?QoH4ALMZqdnRU)wJeMX7{#RkGUUWSp@7-*iF6~0>h#u!FWT6gQpwGo5e*I
z*wn4?*5*m_A-cRjlzMAdk}VgQCFjc~AU$Phayjb`4tR|nZCD+;?i_5UYEYw`|5KO&
zYj8~zbOD8ns)11O;5=_>9ffsi6d|H9diWw<2tQi%3y1D^OYdL1Qp%Ipwku<&yzRb-
z<1>`VP+*`VjDN!F<|(lkcZIyq)X+e^7c{{a7(#v}<L_Ko>e~`)!LXjKYfc<SuNfIl
zCk+lsW&tr}mh;L(Q(t%PF^RI65L~@8=FzGw9+pd6yanf2CT;a(+NEGeMKGKl<q>^a
z>oqe^(;M*^k~=QU(u~9`MZCt_=XZB}oW=d-WVSzEb#(I|*%u#iL$B`~$vNLA;^60h
zjFD$sjfnl=p*(bN!2r}JDiz&|^wIIIiejnFRN@Vj9!-<e6~7SY_<i*Jv5!oIhhSx3
zUK_ASS005BJjx5!S)6ySgQ86-lTW!**$pq_V~g7cDlb|%YrPrlT+>U5L!*TG!EF41
zsJy@h>k+1|G(pKQ=F_W=fkNCf8*yjr`kLp9uas8r-$R4U;?H*Y7+a<(pi9VNH#j&-
zm2l#2@FHs?xor(|men*&i{O;wJUPfAYYK+>lfxL8qs~Ytnly@`-$qX~ijC-}6ET}>
zO^6+^W-2D3Sh@&*15;;VdJbSGI4Rpxf>!O8yJIds;;C0FL2_WoEX{tLg)@1Qefepl
za44E>TD!@j?s_m&*Ag4T=|+v4cT)&74W!l7R`PP%-`=^)Crv5NvK(hP?de}&(*t9;
zUJX8!juHw*96Irt#=wvl5>i%+$9i(ep5*LgU)*bGkypVSZf)!l$q3fS9DT_;_Wld$
zO;HR}{PdSEM;^jb9AX^~B1F^P3ENTg2CEH<)lt!%&tn}*EAa8WtC{7&@9U+86(t2G
zrGR{$$M?M7bWJTfmetWlHzWd)e5_OD!|O-JjB$MmSp!kCvjJ1cmM&v~e6<@0{Aw)%
zB3r#s=v@l1nph4}>6b+xRV$aME3gk4Ya^5=mKT?@Nr&k??uV}-R4BM{nMB=hgbsA^
zLmtM|YU3=Lr3sVtyj+cw59e)?_ULd_JS|=ce$$80f6d4tM`}`;y1bW(?D0fMx*M?K
zGpy95SnM_yMMkgY`X2cO#r3+fOtBP8p!#8B&r6F2s576p+B_8QdFkgd@*EX7{>+|<
zwa#bz4&x__@Xe$96_v3;PS_o7on-m%WEy#`w&T(3)jay&Q3>JUYk=!M%4HiwxH?VU
zwpw_=B|kK=*x-o<12czW<iEn6h2|qX!uFzBWo<UO4bkRp4L|kMWuLne({KTgvF_-5
zWnMOV3d_l1A$LSJ8!MRd)al5g6E9hQnnI?jjV0;}cpCB4d2i5=m)m3T!R$*E5ueM@
zN6+;ZD9qsF47@8H(C*zY-1#8M{63c*(k=L)W9*$jc{63q0Q1$H23Ip172k&rwX1IO
z+7y>KTF&uLRLQ0}9J(!0+^t-Q2cJ=w=$Z-pQMGkpq43_1v+Y`4UV|wj1Ild~(u2B<
z#HLW|0|;jV)&wl!yc`KeRCrzn=MhT13KB)G(?oZG-3y@I!dTCIfi+62`>0c$7mv|b
zU<_F^@Gk0Z#{I{x-9;BTctTOurn1i9<omOjUa|`6>7Fddcs#xkUET4_`!D0J`bFV}
zc}6%etMb#TJ(vPo#Uq}iJCly1M^F#aGP)a6welDFN}0{2s-U_lY1mn6Lu=}6syo8H
zYqhC^n`szv=FPH%O+A3s%Xyzb3wNz8TNV;NpRYu_Yc2UM7-e#pAjzr>euf&m#Xy{Z
z8edp+1{Ijoms1|Hda{5V6jBFM#OU&IE`~FgET_?7?W3<EnVKEyb!S};RsP*i?j|_|
z5RlH&en$Zmb8L+;HHjUa%K1QbM67vhin)l=A$D-R7aY_*jPRJlqd_Hg=P04L(DQjM
zc73f6VWSM4u{RsAi(K{Hbg^dqHSD}KpYUq?2BjzZz~q=JbvxI`-zzwm7>qF5O3!_N
zIL3`cg|mOny_R8+J+2WaseuU*y-W0-@)eS$2SnJTRPDEMFdL{qO7<A6W(mqsR=)$}
zY5gPf=)l=IrM~H7d)pnJH~3NJZ6Uy|ozqd^{4Ccon6L<|_;|ZGhX$1s`$PS5kAMvt
zbY~~FHQ{Dw2N6$gftj8ozA-24-O!b!fxM68>*Z^AL%l5DmZNZkz4J>Pv5@oSx`Pl5
zuv8;we0ERc^Hn;ojB5bZ`v^<~$*cj0QhiDPwq8v1@N!BE@pCn*)9M$vT<RRp#EY0e
z>k$P!N1$Q2$W)<!|8^6f@8z2zBYw7ak_&MdEU$@C&F;95L3Mdg3hAu3c@Un~hV$*0
ztu63E%t@B}NXICapJoG}KW`l{Xjs2S``pUlc*0f4HrraJntL^Hr(cNCblS;Q-{Y0y
z=F>-D#h!{rb~8ojD=WQp8QP&t*{f)Ms#DAP`f7H%ujLRJmU|)|4#vFGs$&nw_lOY4
z)0-GbXqKvhsHv>~Wbk>KRwezR+(hETIU3U$N>g^zjZ)oyQnht-qon8NB>twe9yU{K
z;WWs_3ZW|@g@g-uBC0V~G|%6$8HrF}6xGIWysGU=cYB0DGELEg5gU%dHD@qsnli~K
znKBkk<65cqye{zo$rFww>M45pTPXN?wV*uCq7Kd1!k+RSCN1E=nau56sKvp-<`(Bc
zW^{?2^u$?_MbZAgR^hvusD|MzSAynIk7KNv{Q1~1&q^XxlJ6}W-kTg#5!1&@OUA8_
z@N&Ts0-{ppxSGz|BCCDCZgCh4(ymjz{-BG-c4;Id&<!qM`{2ORq86ecqG1oVc!pc6
zJyyFi;Q^RC!QHFFoVS}ck=2t$Nc4dVv~Lw<>1`5y5SQ}ocAi9`V7Pmf%b3W#)Lj&r
z8JCLp9jWph2d56nTBMi=qpb5?;Hx$k>zC+}=UAxmh(o~g3}US!v3<W8F)UlH3~oUZ
zvs(F}dj--t2Euii9AY%x|7uxsi)Eg>)%t$Qhr&`Lg0bz?hJ%mTVk!vR5DP<XU1$#%
za{}QE!5v$GT(zP|un$Q(?lW#bi-$=q+;%S-5{r}++_T2ZIEo#;16Vv8hXp!`J`<Ym
zvAE<%fZbj7sLPW+yl&s#)}u{coDsN--1Ibu=;(l07suZg#*c8Mg*?AY!?a1<Ex?M~
zNP_FpZbWZfAqnT46~=a(?_!Kyj%E>mTzg6~lo^Kd>_HhvNFp;7fvAf4e7cJfu=@B_
z3m(=BirCI_9Y346z|}%5%HY01iVp5~T(Zdo;=%gRBMlr5=avj?l%|P~;Dd#W@w!tU
zj4`<u#yU3B&(OHuJt=v%JHWc~si)O1cF|T*4H8q7FH=Hdo{}vO2+i5Qg+~}f_8`<N
zj3d7$;XtUheoH!<JS)8+sz5sXP#XGm8Jy;Xc_WETp;`FTx~`L1%&S*k&shUYxIaA#
zfFdy$8a>n(bX|T#i2acpt6B^{Yn<v1TyuCmbaar;+Zd1jIY;fwM1BTqa=W=Wwd8&X
zg4_~$zTW#g_h7{{fFc#?7f%kECuu`4dqdjgviZ^zEKD_*WLYNApWW+@8Z#0~s)`<`
z{#bZ^@%mWmoks=2BL;${M>7{O*B&Wtx#o&H!XKYpbC0qOr;_;vq`s=EP+dTe_{g6`
za<5aDVEZ)Z?OU=ucxf16$KpbVK&pk!;R%b(M43H3A3@sM7{d5-fz&G)8r}q9l(UbG
zkW=%mC`-BGs@{t#(^prI%8Yd}=61~(c7kQvvabZoxh2LUqSR0xLM3ilC8*2b=z;lO
zk^8JBo(n5uc<+_hEDkw(hDOF~_*LAeRWyJ^G}O)Lqzrf|BgA){3S0>KnBLM%SCfuG
zKK*9()BL;q$L%!YPjKkdie1a6M%=Ej%U@$STHroMtboThyb!cR>9}<2_ZsSdDQr;t
z>8?A~fK<Q`Sw#PMn<uUoc_dtvWT>6!I<a)C4;RMgqcz&)hC;q)VE}Z`taBeXhsLs0
zgL{ssQiRfPi_$Lr+QaPaiDi$^vVz`kqj=hIf%%MEM}FcvPxsw=6goAh8GNMo&Z%h=
z=MxEO1mXMRFipQV2MHg9Tu7arpA)h3ZeXtHd9&a|IrAv7K|9naTMVMSNNEeEr!3E}
zVRhLxOX1jb65-H<#sxfi$T>@mS^sFko4Bgy?HCQ4;Y7av<NK)|!PK2Cax`2ma$S9^
z<^~6g1*{!1-CMbvxiLVS942?kw-Mp@#|u~elic{km)@}sbrXdVrRu+|+DY?chDpnJ
zSyHW*d5l@P%b+q$2F~}SyY(>EJJIs->yYZr!DBSe_Gvla>)E-ymnR(Vbt+U(qXsiZ
zeIE?$Kl3E&_z=5(x8L#eb+qUT>?YN8C0=xkq!?f5>qZ3aIH8=W05JpgPwazR5J+RD
zC~pqh&3#Ah!q_HT_V|abCrKknHiw!sBc0@DGLW9M^(Y_Ti#x-_$b9Nvz0&P)X43_i
z=BblHp*r~par<d2lZY-Fq!9w5!{rVH%qmKfBr_rsSCU6y6=1SZO@-^4dt!DL2QTSv
z%Ej{IkE%>W`892{FGqtj^x<$6p6Urkpa)x`pm0rQPT9O(kgwMBjdt~>FA2vWDnRdi
z;Jv%A3AYj(x6Zc(E7(yio%x8ynxLbNhIR}3g1veS)|L~yx9uTIIqhD1SDw16{R@_b
z>A33jUWsvD>)4CDSWxN!96rKnm?$*fi5>0RUYRtAj2~7Rwj*WC*4)rar1pLJU<RnG
zUG-K)cga22evkSbB|l<GrABnFETmY8%d7SC9K0Kh@@q=B=1^MQ4FZm^!Tc17>!;Gs
z0&_HLMwjJLpQUZ~I8{47s`|L6>9_lNg;r{0HUb4;Ky;tUq)cdxOH2NMB-B{Dir#=P
z$tkHWxP0k_y5Hr42ZD(acPx=vnd2E&jjADSye`aC>s!JmTppey>s;@3Oa%yGR#u2O
zOsXcG?8vwfcX?%Fm7rRhbO+1VBJu@uB(goE%%*4<=odFY4_WDK-Mj{~%u_`gBaF5&
zef4I_oy&K>pJWDd&i%!36vkWpQdD4e`CQc5L$7uTi`pQs>AbQ^q36L_wUjK9EvmJd
zCLfRp-X8jk?`-YoyAZ1#qp!2h`r_^j&7M~ZFZ#1}7WqFJiKZQc9)W?-6J7Ut&LUW>
zC#S+k^f(q)o=E7LP|FCZFVEk~`hM^g;@S)ljyWi~*7iM_h0nrr`Gkxb3Bgv6@QC&G
zYD9RizMK9kg?gr*(NHRVdAPI|+t$!ZQ$EY-OJ+i7Sn=m+`%}nImO}t-Hr_hJ>Q{(p
zH2s@B(ZyC6vl|vUPHy5MkPjdOhmlFAXzu6iaFrPo8+F`kiaIDZ72Twva1P<2g<XAI
z8NXgsMHN*Rdys)wT8fK!MSyqiHEg;lcBeKg;In{2vdYmFd~=OlfH&tjzTsMq-|+D^
zQV*?F{d?!&IliJN6!$Z^2zfQQbY*6q1qe07k6fe{`DV&+kUcfGnH(XdEk`$gNM^5!
zCe4dC@7I=^hUQA@8a%P;PU~@EdK`f~hhwZ$PVcuI$5yW%wx))+B!yQtOfB=s*J@-n
zi~vcRN0`L2Lzm;6?k(DOf<g_!QeoOv3e=U~UEJzQj*Sj#vOC(H-1wS_kG4iar(bwD
zb{~bY1XCisn%T@qOL;R572aRQ(~YG#iqYV7kO#YC_!(hf=W3XAuwEdQQ5|kC4X+rU
zNTDROVC)4q`l83nI5wj-rcdYBCe7HP0+w~;9}ADxLPod_P)S=`w|TB9=|)~<4EB&z
z*tPON5Cw6q3pa5<pq_}sOIi<c^~ZXbaTJj?Ha_TlO=nUqqzh4JW5v_hF<X{-HEKIh
zv1`e?RQ4G;=mlYB^wqgr^a$HZp59nvbi}&t3(rr4(|Hkv%|_#@iFB1mcWNtE&#O!v
zwD&|Z&ezlpxPqC(IKviRviS%;*lM`{24b>x3>yDkyGrt&^4Jbf?DK<GRRX_VzR_)i
zIHI_Q{iUH|&byhlf$MfUZC1rEVUOO$AgrMzYpp`pA`Y>Eagt>THPk+nrh1*Ep{hiN
zJAXKfU63*mTH#8$buzn`Sx4tL^GIPpQ-XO9)@>(G0poqH#mKY3<eV2%Tw3Q(htV!r
z0#M8*K55%%sTFQpfS+bRA1aqL+u4><3C7Ycdgl5#51FRRo$?fjW^jIO>kFQ)lx8JT
zzmyvEp7eCkNN1mjbLf4>QU`~U?S7%KF{>dLI>$XK1&x<HyZLRhS~eYp_TgZ6__WAm
z#9jCIUMlIUVOMaT+o2xqCfN&xyh`9)2|AgHj6Kq0jeM1T&*W2D$a!V1-?3(({J2!*
z<;d{47D6YVNVbwe;)`0WQfhv)m%0|sS?0QY&=0ylz=Na2X*#yCW!Nq@1tjm+hPr(K
zf2S_c#rEzHp69Jw!`MBWHVqR(%$Cuo`Ead9AMj8Oi6S@PS6P%390!?wCqe)gXhW}p
z!8#Do#$fc@MNr(W@8$cU3{LVr#}vF;44P9*H6OfQ8)XOyS_~=c2Q)xk_gp!_R&nbX
zHR2s_;*F@FiLY=jyTaO(6P}sd5)9H`^wx=fMB=3ZujAfp9~GRB<9JzV+V5v(abDms
z?n{W9u30M7J|bt{rnR@9hU#0oW#N8D)F3cs6793t`~4S%=pqtMiP4d%QY^*SJ&#6@
zVs>HS%Ew3=E#mz5)Chd1vR-;$4v!JBO{t1QU-Ca^X*;{pI1a;kl4Hz;=Vme`zH!!A
zi3$d<9oKaLKD@JIw=6Xv+W@I$L`iwX7NAHwfKhT(AFL43&%Z!Wgf<?)uaK{M!X3&K
z{m>CQyz665*^0J_dY=fTVw@wJ-U~Gp2%)W~qq$i-+|L%|6q}~RQxJDn#PBw_xUo8w
z9efqwS1hdW`d&?YVm1;#AjKqp$e^h%b&9^UFX$_bJf`lvCnaPS`~g@Xt@k<s-U&Al
z{#yRQaCYqWbBifF(=g336*h9X1&=G4p#q8Wo!VVB=z<D!BNPOQPnCIcC+l002WVuq
z;rMc%FvU%X#(Jl*o6QAaj-2#HH40)#XJGe=eF~ecOIgnpP5hE%4>{r7u@j6bIzLc7
zbZ)MCG21@&`slvzmH?|P&4Z?({q$#K$Tn$_bh#5<kKBDFv{;ZV-p;4*J#E%fh_UYW
z`$)Xyn1+>2%G4vDzG&`nCe5X6)<g8-x`-Tdjz8lKktO(1N%C;^Zk*l*?91G@QJdIj
z_wv{JfqGD=&z>uxZ0aA6rw@h2JgQPDas^~EAwGAMQR&ns1%k1T7|9T9xaf-T@@f;>
z<;x#lg(=5hY;Mj7sZ2eoO+hzV&S2IlAw?9OLq^MlE8<J0uRmYBa3DhQCpUd0FUzf(
zCraK8oy)j(CX?HdDhRo#Hd<nB#~0jLio4$6!PE(p3oSP-trwH`lon<8xXj4zUN>6w
z5y`m!0mu8ItajTli!6NP(#OdqFR>n#vv&&C-Pr}sp?{3?vdYM(z*s>~eTw>~E|fAY
zZ-DgU=jHd43W=0UQ-~C<7iBs7_VKM;aL|6OR_5>n#)h^}3I=w{7p!7OwVFswdbxz*
znRjIDR?Kv(H6VB0lR~NHUQZ_UQe&YdKcwoGCJ!qDxb2f|M2Jb9E5D|wDO)cWHh9LV
zh3XQDA`lsh$}9e&kqI`+%Ql5LJ)W2Ax&<1!^jJH~(OrBv{Or|E%E$-1YnqXg!`+%b
z=DWi0*LF2OFn1rn*8TKp&k7q+3>B%~!3O5x#;Oaq;4??s%?`H)SZ;L3o#9@=>3-|L
z=1{xT`iSYPxCkc&uNO@sPscG7b?)?7cnF_XizdhgnDF<kwXrI4C;&7-%fD$gq$%C$
ztr$8yaVUj{+f9y3f(VrJSF07}%EgL4zFUg1d{z!mJ(Yli?cutd-RJ0PgTU_fb|z4}
zlFBb3;gON7eUvsCkL12Xf{Y>)f;oyyLZsx7X58DB4H#Idrm0@CZEA07q{$6_!<b~;
zY**H^M&a79au*xkuB6$p;JKEvNyZFp8m&hAu#R(bUk?hhN!&O7o>eOoYZ|NKqmgoi
z!;Cqtc3YXB<}zX#aqo?whPlt(!DI=Xr=O46eO_&(*j4>`93O|EAumJpjAWJXJ-VJh
z0r>06S75_cTWpwNnjO9!z;`x^4DRkdAznlN+n-$DLXX2q_ep3LB!D~08SP2nB)?kI
zj$VFZgKJp5jLz7B7U*pyc?Zl9=<{%ba**=9dM9x9^8H3?E`G#X1o0c-gbYI)eG@F$
zGi-jYN`Ju$l6ID>o(cx0!a(<O`aV@qGNfu#`Y!KjSoyg1Sr*YN0p8(2j-2x+w%`}j
zPw_dDSManv4CefxOP_H>Cp&wGEH@{ZDaEkx1VqS?%R36ZJ9bI5>z$Giob`DXqay5W
z98@<<t(@|Dz9cKDIA1qIFKhA*T&Djl6+H<@ioaM^@~g+2B<%)IC9+eO8ghf1bu}km
z09ncbdKk2o^h-O#^gLaCNk086Dloh#r72(#VGJMdn!cQ>zYDeF&R+E(&3*$bdcP=E
z8EX2?vi(`NQ_grYxFOX;<dm{{%^HYM89`|qwc3R26{V2pgA1;Pvk$4Kq-WmZ^yCng
zA2yGoXLA><Taq82Bm2jMq#*gxShY@~C?sbhRyFu!<R*%BF!j98O+{64AB}L>@_4OM
zWN?@-fAA)zN^b&#AUOCm7K&BT4OuRC)*9yNhnkvCq!65Uh{GVLC8C9L-_3RqT#i!!
z;mBlZnF}UjKAVU+vUG*dQ>q$0s}48}HEy!NM$42_PK{Auq6LR3%u!2n%=3;<wNqxL
zL?U=MJEHrD_ig>1{FfE`M={QZi9+cwP=O)Oxny7TamOEoK6h&{qn(n67>*;CDalN(
zy`-{DOvVhx{2(hdPSu147teu?SCWE9=y?9tyS<V-xGbLD_e0*{%O-r!wYNSf$^C7x
z^dzcX1!g1D$LSP(oqn)muoe278VIlv3w8kzKJ^BSm^{(vvg*2Y<_9#No+(9X@7D<?
zp7E$XA7Zp<dBBYlahhprh{qNiLn=G#Edb*c-oh9RR?Zydgn759oi1+~!%ey@y>dDn
z{*#VK1e=D4;W1<>_H_PWSTmE%)g7e>Y0V4wg$9p<xAa6tLBe7}j^P1X&vX*~EXXX4
zW4r<rvz=fqX7THTB39N71cP`7O$deron6lcJxqF8f!j?9N1;$X_}ZSwPs05km!c`u
zvsYjV4SQcPng={9OB{0sP{Xg2hNG9u@^{pcmcc8(cWZ@u7-o}U`ED652P>(;YK&&T
zL}7_8Gz98&SFnxiNF!hEe0~dN(5(Rnz&sBOA-co-AroyXMqgu#sg<LldpRc^iB*CP
zPJz_sZoPf?WLW3SejcWSFeCWh^vu{>#7qqH#X2<3^sxdX*AS(5P^E8=o0m898sM0c
zkSr;C8^gy!QHGWWcmt2Q5EX*%ESta+CSMBaf=x4Aqp~h-TaIVT;^A%Qy7oiM6rMI7
zuBSDj#T{Y=?g>*y_J798rx9Z(Nu(HSv+1dHl#hE`s`{Ysg7=P81UbB}&8*MTLVreX
zdQ;WWJ;6j%lhJj<AWYngu8QhhS7Mg1&&h5Mh3SI3@+t^5ekPo<qo}Q%3>wrmA6&aC
zro^^{bgP`tWpHg9_d;P0qR2k0FCUq)bf&3<6ig*y_t_It0~ukb+&LW(anJ-&))wC%
z@N*`fIn1shNxUrIFw=_+=$+QBMK<4N=$l55keZrvxGPq~9fc?&JON`BecZD&JCVN@
zBl=m>i0WvDLL%0_%EydMWJ8Dy2dF@3olgA0g$^=703)h|V^J0yzx=~`C1zS^3H7P1
zv}>7JSFB;neWz}n@qvvY1)H5b3KnfPVP!;LibYNgFDGo_DjulX;LN!6E?+r^xpvMm
z$~}dAHC#h>zTkPvS1LIBCHZh%h?bL$DSH9AM?yR~C)G}7Gwx5QavIh#P*x+I26TpX
z)8LrEqOM*@@-s#{p|ApmKGDrO6eU_IPBLtpBDr^_CA+^i7{3Q$lE9H}{7~vsL&){9
z%eAV2-O1iG+lb|>p0eVl+>-sF6q-CSu_wft;YfWV@C<<Y4b>;4yLcDRmE|M_2k9G#
z<3hZdGcL<3h40LKs)lv7K^jEU;O{Clee*Iei7i7CxiHg%cNKlfc{sQBdKYm`a5#=?
zAPCBx=)mGCBQ2FSzz96Vcg(7~?{k%^q;vBYL`g%PfHfOTlBF~)B=(2eJEWb{=cGq(
zhm6&BIE1(}811%v?u>cE5R&rK1B7h#ypixa^EI^USiq8rRc)f6lM(OhK#PAyQmxLq
zh&LxaSH4cyYCz7HMg^7LKwgszcwWka^rEUmY4g$Ha)fi|nvP7x=h>C>7Dn%h-`eFU
z;e#Eh#|dcPX=U6-g!ADOmWX0l-)>7`g1x_C&KtUbFnnh}Qd%xieXEM_6`MKw8x2pT
zL9dTpx(a9t<n~K4PRH&2I@&w00jJL!g$fGSt#ggh%O{{q@cW;Oh_OZ|@%c*zf~BCS
zpy4Qk+tg9V)KIiCtgLLRTdBe@mw7U3%EuQDpB}b3TMPE_axopT`;iRW!al2R8~$7{
z<W(B~-g2YlRmRc_NZfg;Tx=xxNB&f7zNl4<8^yIR3o|oTtP=B{Oxd$Db!)SEPV4Bh
zp`gcDzdJvp;v`D_RDYR0+#-RntD)jbryYJdLfltkQvVFc1VM+`I{*_VdO9w(tQne|
zo9v2&F$V2@z}lS|hX3C8fB)I{pa1!{pEu7xeE-Qp&v5hazcaG3u`shRu-^Xrk8G^}
z^Zn=lk>~dN&kP#oFK_?Z_n!;}`^W9ZJ-m~=8>rn+Ke{}ph^bb0-DSBF&!?!}-8O#>
z<x##_uiS%j8E@MY=eU@WOi;l7po?kX1J%oA?rpX_5+TWiRDqF{lsp24cNpGKI~^q~
zr(p{`nrS$qyz)7wJ+y(DPPzB}ElX>`?z$9|Iw_0m$9XXZJ(;&CfBB|x)mgX6=%pYD
z0gSha4VhG3xtt(g4WzyZCbb}dtxLnUL%&n&lXM~a`q{v{<6TQXVD3JTEv!XesGF3?
zs}N*8Y1HU(HrE8|JKkg0<C9Y0)WQ!fpTx-_eqM+-hP9rJI~*OjYDOmZtkN04U1G~K
z&LTuO955n<s&)j2*vJh$>)sM#W%y{-1I{qHn<{#cC`y@lDPLwkPIgvy@%(OC3gd2T
z$~_sB*p@2Q^JxsF4BHmG-W4U*<wwXFxa?#D=njkdL7ZOclN)wKVf^KK@0;dO^qFx-
z$rC(={k3SBV)7o_vpytgFgGYl6i_mMg61_NV{<iWl8ZiMbC<VJTNmMoV;nWD4&9R5
zla1i>g@!$w+n%`o+u~+^kqNAE!I#5!mN=g^c0%1NUYE{ZV$11%wCYc_8hEL$!%P@g
zTa#Du$o|7MQLFnV=Q9N#F+!(CVzF2Y@;AGC;5Jtr)vS&04Y|Zd);Dc0*mWu6@pgC#
zE>RzlfvxR?N!oMv6B`Yo>o<<v-+-tUZ%vn8sxOOs<8r0F@lfJ<ox2<&m31*ty<j4&
z5zt4o2=EfXeY6;ue7sex>E5Aj`MgHRIf0{3ns=hv{mMyb^$2R!E>{1uC{eFJZrbx#
zT}_Rxi|0+-Vp5EJ0`N5kX&&(@NS%CVovcWW{{FKO4>vE2(BL6YtJP;0c%7)pJTWE#
z_dO(JF7UiZxx!!c(;!K8_S=q87?*Y`6H)VQcsCAKbxa8$1i#Az3%Y|6RZ=UwYU~LK
zG5KNi^9Ki;h6$|&`|4LL*M^q55y62G(~3O}R!ok#xQta|(MJXkLTsQ|4IB#AfhFoF
zk6oKOv_|fd$=ixuQKX1(yp@<-!BE}MZOS4<tLGiR10K0+Z%1+6$vpj_AGyc#O*(5k
z9R<Jl@WX5Dc&tVLJ@rq`?^jlwa-k=oBi!MX!6N-t+h6e}x$kK_evulvw$YPDZK2s1
z^X~LBQ<=w}yg$C4R@CMDJ%k=Xvd6p4@b>eEz1ImE5bt}I2JZUik#gR*Gyd3OUUCj-
zMqp@BhYt|52vc3X5G#?~Z`{rA4uDB_X>a2~ee!H3P(yW4l7_A}&Gyj<wNwfgH9~sG
zQz7f&8FH)md4wu~ycS~Iz8+fsMVe}pjWdJOSfpz`tkH`O!CWpO=J@*!8n4K7k(U&~
zYE(7C@pR|8OLp+zgbvaym1}N8Iv6dMH3w$6>cWWG)CZdQ$I$AVw<(vU5f;-~M$+CP
zW-ih#vG<CyUbS7Kepw?v2m}B5slHtjzBoj_jRyeZ^ZLaER3;EpAn!cz1w<Z2N9f$S
zIQTjZ{>ZZvD>`yK`hmG8p12)!Flad=jgdqmm(ysQm(;P)hu3L6W*!SRNx|;TF{}wf
zY;$_}pHk8##bq9JqNwB=mM$b{2>?3Py^lPlRO1~VJb|Y;h4;S4>Ngy^>O)R%ZU?Ub
z&z|$?RbO%}aizlu?`UhC_vqrYLFRpz8e!aqh=pBVK9xkU>bC^K(6XMc4qThp(sDqF
z%tV5Rl*IFsXfw>+*>O^+gb_=@cK7ZsaUzhmyvgn)e46C0jO7<@d2v~=hSd#2A3(YB
z!0c+ChwXAmpGa(enhc)bdc0p^CWlo;$X=#SbdJZG$Iko=X>cH^`5^jYguaeTBdzN3
z#jIy?aNw2f`wVnxMjF~$ooJ_ZJ03PP3ZI#nXcawnl-JAh^?cKju=7#a*4z(e{nnCU
zrb_RC^MswvXzxh)p%72HAEi^P8(6KYlY?JS4jDIAK25O9)LPT#57Z6w-BPl^^Md=d
zN#eQOV<91yGp^(P?g7;c6e)_LyW5Qhegws?{6k}`k!aZPnl1<z{cMll!aS(dil>6+
z!EV^dxvb5Z3=ob;eyb!{06Tm@Vhg@JLkiWrwiKC|@K9nMyri)1d2Agf(-PSnNBC>T
z;5TZ-q)trD4Ke36FV5u^RnR4-&X4_)n0o1zdMXn7;bM?9IPzTUv5ItY3wgx#*Wk5_
zj5Cl!HScYe7;7_ZBk>KwuwiR!*Ve6^(SQk#3xHu~XL2kPH15O6e|mP-Y^}S41F%gp
z*l8#Qj|I%e+IZq$UCV9PtEQyj7Cd=1goDso*xp<dB8LJ=*c#m<j6K&kMHt2G%h@)M
z*GxeF<fY6K%u}w{rf&Vc`-Kg7vzyUr%sU=1EVaRR&_h3>%cWT)#V$qjqs6l1pqH60
zBrNzu>6v4&;L_>^`(;kOu*u65i4jM(GZIx^Bg6M2rg5gpff^Lh6z-!S3JB_807MSW
zM2Jz;Ratt>#Fw7#iq`PIYWVzskldx#fWu7EgAFyw@u7wrN1yVv{37;)7)CJHxS^GS
z9>;D6TDRTq_H2)cS??4cd{r`pmOc!!Y2I=Gd{Zgc5&~Q0LOO+tlzo5updf;8Gg;D-
zvOgOOk+>vdeRdhF2NZo;upZ<9N;cK3gSiv`7=B8pc-i`W4`DFtC{;hcU}y#&|A+6w
z<|<LtFX+?laFTgPP8>N%679+NmIPmjtJz;`ys!nUDIz&Yq)fWc({g{;@sL!Gew#{S
zcL9k2i?E)boI%cjg??O}S?vBys#X@8YL+*o$7Zhe1M{PM%Hx-A6@yQAsiTv#+Qvlq
z<kh<0c#PnqA~Q`ka~pJMqU$yry*9vWZ^Ac+Br43U`_#j!s7LHdKAoo3Af%cZ_V|ct
zQ%d|Ar(!7hil!?n=gAVEO1Zfzyk*E!4*Ee(!e$QC6N|FR53yiv_V>EpY7i8z?wPaW
zK<saQTqzr{;{kdAjc~F>&JY4FR=qVl-_?aBMn6s0!ap&s^%JcyYU%=#5*>!0$)lge
z9J!Z1QK@>Rs8;*>S}t0N{PBp45$7orfX832ZAza<NH)TcAVd8H){qV%IUkzhR`T<t
zR%s7S7w-p{d@@oDf~%_;UL0vs4I}J8O&@pt9zS_H1n`{jzN7cPi-eblUF|uhUCa2L
zG6I}BTTYb|`CuUo(7<Rku5Ne=Uw&`g2z9L(t9LUZnX+(&%!rRGlBXvHmt-uR5bte}
z-5q@628CGoLPzR}!F~0;MY=^JC@R^F>6I+Mi%7Ap`yKw(0-9h?J802og&@omSIc;I
zE!L_IBL&iG*6pf@Y7F;zNvx^@0eL+pZ`uZ4jVb2xnddsvJF!tYHkTXa9tG@1Q0E#Q
z*0pA2$EJ`{4WAdrxI-#ruG6Y-8!U;dC3}Ol3=WF#X4k9tT1iCv69Ae!lVK&KCPaxO
zJX?G41buBuw@A`CNwWi{^hAo?96w|C(vipOij%$xQdwnTPf_V+V6UQATZ4e^eO?J>
zD|4PF#d&2W`vD(vTj3+tk}5xB1;a`2(`5%tr>bTAYlrB;mmkiUC86d@KPKC{=FKOs
zU{k(2p~oSy8Q8CTvDkS;?z4oskFFdp&p272mb3Fts=)?@Mrtb)7==CRvfyZGs71^=
z<?|GXwGF^@Bljx-FYrp6$<87gc<mrYo2adbU*jzVV~_5ZUv;cCdNhTZivjJgw5}2~
z6<BpVLZ!ymPo=Ni!+u7YcYKc`Ij2zS0~qxB9;YZO2dRT;;)_nOm8p8sl7PavQF%v=
zN&2*>*UfJdUzi84EVN0Wim`||R2$h#oEAFCtPfIsfbYn5HNQ96hO7VTBeFmC$!JlZ
ziISP8i+R#m-uM{W{Kj$w*C8j&qoaVHMo!;TaLR-5_bM$A%uCHr=Zib14=U-a5MV1F
zHbZIHleP6FIJG2n=tY{t!HK8clO$o=+)r|qo2L@m2|%CTtvs=td|m3;OMkTYFtfak
z=sbc<uTR|elSk0?(RpN}R$H9V-B--DaPXhWJ~u^)d~TnyA0lg|=!gWY?dluVQ#Dx?
z)hG{7frXc0B(p+{BP`Il>_XZMjt}I_#EcunkSVcAm>wa0BtvP|&z2#j%G6zKc>`JD
z@3HNpZ{z;~%Et2XsEstKqMWyv&mD#JwBsjc=~u|aO%cs)%~et0bB>3_xM=tMY76NC
z0`TFV>&sfAzoh~?J(06qV(W&UJ9iNkmUve(@bO|9MJulU6|I?d!bQyMva{z3E5P1%
zU?NQuYfU4qT?X!paEq)LyHlDD3d3ssOW?5cvKenQWDt@t90`2ETJm8g=$j?By{*%%
zJ?@JsC=V5dMpP_@>*B9+#Sd9Z-5o?riAVu5pYoEC98sqtQ>R1d4LihV&gDoD({wp-
z3)Ew=nUoR`7tS~?mN23?!w<o7TQ}H9+U+c74rVMEE&`;g2L@)`D5y~G#qIhl5bOn+
z^<28InlY*fXdL-rNKF`Y@n#LYoGGf5#$8k3cJ|vos#;Ty4nsc0xUdO$+EHv(Jo{h@
zYw^)NmlJ}83K`MvXU-!!Y!_6F4Q`%mtX9bZFz@!q%?FdXQ_E<`No0rHPpt|B+3$(q
zzAt_@Hjb1eLC}dP@G?r`zW99VcmZUHqAy&`eW-`!9aQR~?x%pk=#}?IGNj$Rb%9K;
zi*4$6PGj#50pMxyWFxFtr~BC+b$?=J8x+%SJn<zH&Ylrb4MRrM>4%k0z{Z^+OO$&E
zW9R_1(}(_?$9CPJqip7|J}cq8fH`*cxU7b`jOZO=^HoGx3rzJIWHhA(hqANJjJUUc
zHp5bKz6(sr9TwgGi(w$5f~hx>L$4t-LeT`zfelz8<nkdmxaXtNRk^@yu!-Ix;o$L6
z(h$Rt9tS3Q@pDSNfB|PQ)uJcuh1^}h^=4<)e5{DqZtpwC?)oAe1=XKX>l)X<_(Yl>
zRK&aNY-R1nkxt4{oUz<xSP+JgMV{2n>!gyCYdO$^bv72W0*pi!O7|C~@n^9rE9T+&
zxe($sDGD3vjlsfw7NwQ;%4d0EafND@M8qRG7_QX!;l-2S(#v%VEFnB93gMs-ocfYx
zq#;6^l4tXT5e;o+Aty!%`kGZ)VLjAntPH6R0;&Xk_G+3Q6%udDm53iBDGYTLzCU>N
z{7GLkaUnq{rR|0gt)3fBfcTqF&3tS?&(DL-hgpd8Z_h{z<`Ti5niTab>|0V^-%+im
z<YS@ZsRXtd<pk(xl&LAB_W6~g2qm?CI>;dp(j#H(CkUISuba_Th?X3XEO=c(ks-l$
z!8Mt-?=MwlP3od%{{d|!MwV~&vY5|CcIOUfe(>9UzpecgU%a#MN2R+aU5I#PXRq8>
zT9{2ZJ*>7PB3k(lCDoSjB+-w{p2MGFElcKQ5!=FO(6rj{x~?q~fOQU=3!`GYh@9zZ
zn~b~ehHy`h-sMkMKhs@yD|1K0&qV1&Tk-G62<mJ_Pu6;SR_{vR-Q(nT{K%_6<mnx9
z3Bz3{(<^5d2L~bl6$ShT3UE$LVOjXY^V4foKs!~YiJ-Gv@q5;&o-{d#?Ikc(#)U`R
zhuU|J3d*+b0!fWfd$^NbJzk=OvsQCUc%;eMOJy<B`4pPX*?CJ0;|AWhv7RQjX_SHO
zkmIs@y3Kvi9!i0F#w4j7IT>`{g|R}x{*)~5D!DO{w<WiE4<?GD%aidA+tpY*%m$>%
zTK%xZHSO>~TNl@J*xgw%0W_jGS(wxWXXQGm$G-Ze+f*0f6ERw2A78bz)+9bZF*6>M
zyz@l9x}J>vIibElB(wCuo{#-Z%0qMf7p_uik8L7@-!3n=#_`di5Gc>LmvhxnFBve^
zo~-xC9Ea_~k(an-!nThTqsXmagykx^y>W&{pzUTHK5;%6c&f@gG4$pUE&t%-%)tU=
z!K^bsCDy?wacd+`qwMG)^^drvi6&DYCut<_#1nDk%hBn?Ccoi(4srOLX%4l-rEvJ!
zL(}0JsO;LgM5QaXQKaVds8``l*B%fqI(b-(#EHgdK^{2LxI>e8EAO4eQfZqBgx7b;
zRvE}Eo^y(Ns((fdA+J5-<^u?iC(KuSl}%0#^$wLq+E$xGVJcF2D7D5*D?lS+dFIx9
zGSPbc9P@F!qxabx+nJ3g+^m_)Y^d%65OR!@3eFES{Yv;71g+n4S}IeJ#4#he6+$W-
z;Ds=_?`&bg=epa&v6UWM9;yvn%n1>yKue&%cyL&JCtk!K-a+9|gxvpbNq<wScxI6)
zM{zIV)Mg0{^0b-E+va!<Zr#L!Nu7q;3?Y>Tw#F(q{KCr4hQqlwA%lnSBi+j7w%Q+N
zrxoJaJm)jX(DGq=D<qS=d1#5%)VSzT3Mf~2GhoPF`{9H;Pvs@LWx>m$p>VZa?-_vW
zn0t>%f*qJQ?=$RoybnlX3J?q#=4~<Faw=|b9&*);lA3jcwQ1WzV?OMGzuVo&7^i8H
z{$?W&j=fzjdRZ}>Ou%tC6U<?#c1DSwODTb$lacbRNU{IWDD}>kPzt@#yS`eNCF{ut
z26hDb&mdvOjx`||agv2#mj|EirVS?aF=W1hqjDIYP8IOVGUF=k!7S4;3Qe1*cvB|~
zp==_VK<8?%o{@|7N?oK%>?4p;#xC?}DE8jUd|g6#-JJ2WMvnMWhHAk(@~|K1j=0uO
zl9k;b3w8POrY#9$l0ymbzdFVfp?Hm&Rs9BkcAxram9?-K2b$z=0yau?47d;*BSh_8
zJWhh}&sFe@8J=TgFEH^(3+vLmui4o0dM6>7d-eRqNjKk)-WBzE1d(&n?NxkV^gMgZ
zR&SS}r862FQ+47L>B2T%-cNK~^SOWn7C}wip*l<~`5n}c%#E64f$8ak#RbIO4->#<
zX<lG8aEjC!kY9@$KEv?Dac(-I8~IF!^n&rT(#4!kT-?Ni!@42UdW#R)yd{KpCxt|M
z0Npzh6Xem>tTnY&6M=i_fsd3EWt?_5{6o%PP6RJ(2x~g+v08>23coh68`Qurk`B?o
zpK#X;nJO#;Y?R+k&{UtMzxB=Zd7q->1I5Lc!uf_Z&<&8VqMT?OLbxhm8*w2cuFJ>n
z6?eK-OqCUCriw_4!763ZOdS$4?)kH?u_4hgvGk^~14Tn5@Yqk!r>cDE&O|vQt<P2S
zJ`d-c@+7mAcHvBFO-~ZN9wTph@Bm%moG#Yd6ue9TyXb?CfW*js<`vZTE)lFQ>m%AW
z3DVM($n!V>MMON6cQkQF5=WY3!e$iB#a%}gkf~b?;Tu%P2`020O6PA29(;5YdL$iP
zjNlopVb4CWs6QY5C<-C$ULP`ko?Df8VfiUW6Ln!4l2oPq(JrUQ`!PqUy{QR)23L;(
zp`9p4Sk%@zft>tmk|4&JREf7ImeNgpf_!jW5(mr#%@v>Ds8kdA5u&_%vQ{GwzMN_R
z<7d=}c}H#^u?#>XVl(G(j&`D~*S5RpCo*l=cJ3}vU2ltz)P}7C=_o)R{Gx+#cy48!
znmzt5j=2_;)8nIiR(lFBpT=}+SwI{4`zN<y5xvw2gW)MDfS}}hvlb`37>|BpKQNaG
zD+eTZV9n;PHBc>MR27y;HQ|rCi_zH{G$?Z~gnCL1esC8$2FI$DM~cuS^3Bz*iK(&3
zz$KA2GP{B<N%Jscv}U*w;g|%mE!)F(sfB}d2$@H7o!~BHXFLtCd`EPBgt5x8XgE)p
zW5x`j!r{--^~MLy`0WZb+6V(u{m)BJnYhKQJ<h8O5zeGrR}`a`1Sw)<eA6Di#}a0b
zekPmlxEl|&1AlOoUf2Mxr9kn~3(@<-ra5c&>9A|jL#viEGr|p}G5GcszM@a&?Fq5t
zXqWN<^@W6EK*>Qvsn$7rgiK#H+q#?+N|}QE;54sALX_sZk7yoLZ*&H{xIc%@Y$^a0
zN~pH|F%)0cGSGLgP-mx>ir#^EN?QQzBc(MRLf{Fiqpteoz3ko<NRB%*Pq_&z<!UxT
zi?GE{xjB|R6>4g^5aDVz2%TBMAg-r!2GJ?C3aKz>k!q=p$LN<z6?H+|8bce4m&UZq
z@(HN7YAO!n;~m6Y?-VD#?;S1QASwHa_EYX%s?${h{^!BNYIoVH=&e~1vUbOu)Uw#(
zjB?<m_48EtfbDPZrTHRzz8*#0jp@v|BiUMn|MEo)qId;+tBO`?S>_zKhc~oLJV(T)
z$}-LaTv4M0wbJv$6;-3hK4%QZ<lKmh+rcxy!sTWj(j;qn5()CxTN@6@@~fqv_j+Oj
zSUk}jeEPvC=%-OSK0a{VibbSyBx@B7XvAK~&yMBQxU(qyu|8L00-Lmn7G>{#6RMEE
z+3fJ03m8H@TW#*$*8yQVb?AoFd&1F_^c6Yq+OG}F`wR9VvtG8g5FUc#x74%`w8a;$
zPfd`CF2?0N_d9$eE>Mv|w7DvoreB#0V=i<0Ib(_M9%fts1J|@ET^A1LzRAVdDRlFD
zIOrySfJIj%&8D?88IDe`y+$6~GEyFTl9$@!QDIVS*e2@~X%7Sw;0VU$8~()z=dzcW
zclym=?DBBpqO)2ESA=05P!7fg7~{R6TMjM@cf!NAEH6D%-OM8N^+9JU4RbroNo=j}
zB&V8P^Hau4Yu`pNFK0I^>+lVfqZHXHOo>Dot`ve%3quqcRe~yCsD<o*ZK$8zg*-MM
z>0OL5wpQ%H`X=7KJA9uD%WDVQDB?V+z#&gEC+bCozKne1!KxVeMe}l*+Z$f!bD3ah
zROf}c7PpCQzQ>B1xX6?qBCCVGL6w9kbBu?K@h8-{<`@&{%yDM&OT3*&#T8Q(y8O=-
z<*O>nfhzcohW^eDLo4HM>xhGpe2TVWVGmG5n9O8YOLI6bDpIhkUNg%*<!!mhW0IJ9
zW*q5!1ewMtX4r{@3^PW_)>lOS6x~7scJR4G*b`w|@8Xmf06)un9-`aP)x{+ycj(oX
zpCU26I*J}10N%-r%(e*m$d}$b*O`NqO2Y`QDV3Jb2fnIN?%cLO_*v}SF&5cJZWC;6
z-M_4RQ$jssreIzS+{r%-Ab|SrF->mxN6$AHHb<?=oN`B-2%k0IQ|StIGn;=7hW_*+
zTo}gBdeG?^rar{R@<zAh8Qa6da<d9#_2g%&4>15{l@Dkj;Inuc!=PAV9X8GA*TtXm
zd%TTUW{eZFTM3Xg&1OB)@!BTmdDxi}XO>G3FZW*Ee#U2xdck?_b3NP1;OmpM4@*Zf
zJWs-gLd}>L<RAh|sv3Ms<6E`|gCAw`Q^Jh#xBJWni8~Lk*`o}&-z}tyo)1q6kG18O
ziC>X`+ZWJ8`P}ZXm%M!U+WFH``ManxnAv${4mxW2ITb3>r<^7mOdb^08IO+-mpYq#
zo{0|g)Kl;YdKpPaJsut3de6*{T<%lzk?#YB>Wmy7_Dd}vDEqwjO4sKH>+UwPSlA)0
zU4$9BV2;q?`YN5XLqsK_e53H6u~+rxly%+-l}h-9Rw>wvk}Z?e`w*#`a}}j^)jsS)
zA7F@m>vn{mVd#lZmDD0(bee^lp#b)pjV0yHN1Op3mq(AFrOuHiSIVS(1{}2Qw<SXj
zGfLH8cQTe*myEd(IztB9HJzQi<4d98u4p(3)_vyN8?{S{8ybSbLYZ<}l<-Tf>l(7>
zA&q%T8`;sKI=5vew&(sB3VrU);RnEgLjUN&M~2-AkB_TpNe`2PPqjD)ulU8l$0;%q
zqlsX{#9PYR9WyHe4kUf6cV>pPvX{)dKB_LDLe6kUxIbI=K&W@j+Ib<xh*qS3Y#PZ?
z%hT9(qM_2mAL$t)ZX^EAyousK6lsoG?#<;XpOq1=9k5zYybR`nw05Iglt8Qldi946
zpPV5`E{cd*3av=u_F?xZW*dg^L&3FIq!$@zd;>iOVHblg#S1NCFVk1r<#rm>1%<<g
zpa|IPu^vp+(^r0+db%scE{jyhp~4B=bKqg;e3eFATU8X=)8OmRa=BKghQrKkfH}Io
zE&Q0b=?+_w5}k7CT*ya9v&K<qjy!UhWA`V#sKCeIYo}M5f*PONI!n$cDT=`hQ^sG~
z_UnuQa*rfbguUL}eb=;t?OlkI){Q94%@tjOH2Ar|e5%*QOU;H{#N;$J1NkUn<M937
zgHQ=q-|>-Yv6>Ez@kFDfQUuo4%FP6xMOW49sVTR(J+%GPi0;_Yl2;3Ak9MZ~tD|h(
zo;+jl<uR=FfJ8mf^f4WsVWTk-o@>0PNm<%#^!QCS=XtVC@Y;Jnp6u43=$WPEd?ZIp
zx#i%1y`zWcFjxJ#jv21B7fhS&4$UW|h}hi~^x~IP#BVr-(eHXIIusz*zY{LmEhl?a
zD>rj)Xm<`~gC;pB6t?d}2h~5b`}r+Ad<NtCn#03SITE%HD(^T(Bd&6B>omDv8u!v&
z^{NrWQ}-d<i#IXt^YsrJPq9_bl_ETOjjxj3-C&mAxwVFzbgKEP_l)(4jP&^%pJV%n
z%y7GGPhiR2hZQ+ng}6*6&UnGLlV0xTB_;N98NHR;HkcVoA)kH+MMumQc|V7T$9-;8
zfm>)W`gC@$!uwI$tFxK-6e|X~$8Fm)@g(MJ8rnET&n(4@R%kg&FwkbAtSg*OpV`b}
z3E`W~bnHh?lI<a~hvqi6jlS<o^zv!#_Ct0Qv}0;n({QnAC0`Skhf%Few=9)xn!j#}
zfxEjMKiQpDP1tIake4=%;d|*$;d6;j@L04N9}*wT#S&MO=%M#9bTaFkQ7@YKd%Tpp
z$Cpj{Ozskd$4I<1lhKsY{d9o@diWz32=>zu4Z~x?Jk6Wq0!Y#FLwqle(g9|(mk<~}
z<Rs@0k}H;*v4_;Xc!0S2t?7*thAf$%tXDp3VDRDZ3GURX%v(eZXOqfOAlcSCUjZ_B
zuZ7iLpiA!s5Mi8gf2z}ff_{Yj;M(;%`%ZTqH46g`M4;k^F&2BLp&V&s4dP^+A~9#^
zqP(443p@h%er9E)N^u+o3uilKlrf7q3c0cS;^!W0WkqZ~tX@W)bLd3E$MJzEE;h29
zk8$1L5ZI|AinW5wMyCf}uko@-CIvlqndVXbz}u?C)M|e3s+mfYoF>O{($3fU7*=Bp
zH@7S$5m~}Uqprs+b)u}7xhJ>&Xj6->O`T{9&+}8gP|u$9;SkZ1|7Rqol0iyg#6vgL
zKr^xxk+Q)-C84?*VV=>CiyO_4T(*3>^q;)zeV*rv^@ssEu~Zx<b=I{jV*)O8>wzFV
zT%wzZk*wWv!{#_u_6gkU%oEE}#{qw#{JaI_3-6~Mf%3fgqD+NK2#|dmEHzuF;uKp$
z8eQB)rV0gRo<W`oo-e^oOt)U2rDmt_?N3EcLB1zKM)EN~m~iQc?$J`YC($|>px-g{
z`mC3#^yz`H!`_%Kb-)<jvvY@8UQ&K$!E`cmj-6>Yw<o6=QpTxn5LV6l+hZTik<*Om
z4D*qB2SrvVXWt@u4S4f#RtN-9w4qSaY$P7l04Yeaqz55hKdwz5(-i0{ua^pX23hdh
zFF#?fHm9qfFceP^L;T5F(K7H{2!(x#_|lC^koihaBEX;|CB1$!K!2I-J?beVQ{{m+
zZ3#D?x1o1&1(C~kTXF{RYQ$9tgC<^~7QjD)*FWcR6)YiAF8A#H3%8cq0(PBQ1QKLq
zJ%)iDg4b0wKJlr0SKT>EM^;_^&t*^#BQ#0y=Q_NnM<$VMlA3WX6)V_Tsy~dv?VbA=
z=bEz-vJ97-khHkIc$~-}pRJdSmHE+j0=iP$;iB4WS+LNwkrlefqfrp*^W>oCIyqCM
zIK<=7@!EDP{gfRux5%V(AG7DuSE{i(E${l8uygQt@xkxvneECHXDz#;CaB~*R-=MM
zan4u5qL%Pk>rYgEnS=d|sfI+^9ODSL_I~;vwbRLV#KVYq(clP6;q!+KkLk9-=6w2I
zi?_Tg;u3zh)GehZ)?<9tc<7l%rH(+7z>4vBXD<TWj8pc07ZohuQ<Jc#jNly=;nm)Q
zV`->sce&o-MVs(jU%yV%xF;=vtb%x2$JeM^&Rq7aybI$k-vEu&kr#!8P%X)MYrgm5
zj)jk!42=L~zbM{({P8^7p=jMCC0L#j)Uqv=i>!jI_nnHqb;-5i;cbTok~Qb`79s5u
zDMTNBPpz_ZHlK@fnSqb5f#xk_i?~7UU<8;b-9S$t;8Z(P=);WDbNLr<Ht>a;i8Cfu
z?+4E}wn)h`#wEH0?kW->YPa<H+%I_bDY9y?(@&9Po;S|~RBbF#eUM0j_OUx3KHF^G
ziJ^^;{NQ?LvTm?Jvv&Py#5uP(&Cs(_yej_^Tlm(r(2#h%yee~~1BaEZP&%hm%ZdI0
z$wyKxEdiwJd=5n+CzhU*IwsTiz_0|Mg7<tw4jm%-G(PmoRk7);Wt72giH1S!X*TIn
zUb8X_GY7gdXEVC%v&M8kp6u)Hc`?>eB`>+k_-^n7skhwQZG?*fQAf;Q#aU*J5pV0$
zN5wV~kvp4=cf}R2E?PndhRLl+n7SX41}TIdRt>D~SkLb+t@GsIDP=98p<+HZ$2VBc
zI13AuVON{=B_`lPp4|$P!B&iq@Y;$ATW}PagVsG!W>Oxi^{5t+!Ac#~jBeiyJ@XBy
zPNASTbyS?1zsFuDnvHaMX@L77RrO&<;ibI@t4n@d+cX2qq~kUA_U=(pc}cn;PoI#|
zraJO=#dOWXl-1ta(V7c$#L28YPD7rP48ye;<^EGdio8iEU|}WAMQtdZGu(DgQr<Xj
z?5k0QD5N<$>7`oxPT5Xxf#_!q+rl2zdTWL+-AMJ{0V&@0a9+Gg^xjCX+Z5u2i<whz
zkQUEOeeE*oVoj!~Ge?vibGct*`N-}QdBE@|C|G1qb{hQw+-x1QhFR_Gei2`+DP=qv
zaBYhbv=sg6JqBvRq*!yo?Z?7-k_D%dR#G$dFDjuY@buq2Bhq=2WaqdJ+<cd-eoRP4
z%om#gMT5|k@EPaAzPJSEKs%{oRY}DR6P-nTL3BFv+(XcD4e#)>l5^L<T^cbuA0xb_
zU6h#Yeo@c;P1mt9wR^`nE-UcX8<>kBUU|zE{)uL1p);cIAC6@7=$}6qgId&hT8g@4
ztQmcxJ>Dj+c#m9A2yr8(#3ayrVU!vEEshP+{A$WbJ2OXo8RaKY4G1dwdu7I0&96KS
z(Ft6oR3P@x@g6@XvSWYR6})50PYX>nx<4$U>0pf7ird_=%0xD3B?GXf7@$V8a)U62
zQ{H5Jcc}LW?7A$9kZB5AYtx%P*so1Bl+!H$S5dm^Zjq`e{fH@POd)}yG+1B~#mR(!
zm={8`3K;KI=!YXlQL*RqsQJBhB8Mk4w$P>oA*yfHUcXn%EoqpP)Mi0W!@uT_P~@0n
zPN*Qpy`Lp!J@vdH>8($p_-C5;wrcv1BRkpY{jd4;G~d1tC^u`AGOhX`o8(Vizv38_
zdWZWHB*UlscyTH@*i3#&Z+V1E<*rjFVwbtyrX6c+$zJkmooT>U8glkD4xT=d>*Vf|
zCrPtI7%=waV{>G0+#TUtjQ8YP*<b7QBmH=$)<;l&t}@^x;<Y6bI|%2}*d!XOa@m2A
zb9M(k-o(50<%M+f$GP)n<~sd%%+H#og!>_DD4JSG$n%pZGua2xvnD9xIVN4M?H^l6
zJ1OdLwe=cHdb7^0FSV)Fbe#0DKjgL1Re?L#&!4+Kaj8BZG7v7df301Y$q+^#pKBj<
zVok`oPZFuMGg4k3qlNWI62l0kFm(N?!y8s)NIR;c1MErydBz8i4O``=UNu7x764Qy
zHPWi)HAeUEkk4)p(H12^wh1NMMqAq4!*s8vWi5gx00{X@UD}_e%~Eql<|)~n$w?Q%
zzn{B*k8`eY1G*^`<@muiUvT>1=X?bpQI{togU7`{!5x?OT>1}ZWusT(B;JXO;wlKA
zonLu<ba|m-06z`O?{Z0zV9O+$hy4`IEc-rDK^C+?L1|E@FV!8WhIX04=q@LW=DtJn
zhZ6T4;{hD}{KqFnJkRdxNn+%vm>|C4711DhT&Z_|Z4okzI9%iqEqut!W7^LR!kIoR
z(WuIIMB4fBIEq#g0!HUOQ8Tc_bBL>#=?Yj3m!*POSlhA-Px@R1>l$K>Q)zq8wMJ`0
zhYBA|yOIif*QNSX(KufZS-#rX2^TyWMuMVfSaUsKlTX0xhwpsHU9&0_q;1jk_`bSr
zSi^7*vzfB=Fcn&JFwn0WvvTEA+#;z&aZCMJ$4U^a2m;gl`(CREv>A0)R6tBe9{aN}
zPkTE#zKNJ8mzCm>vo44qyY|D1Ns1z=l{^MK5-Np71o}eB^SHT?ZL*_!(4zgvvbsZY
z1iDj;!r(1j0%zgZ$d0s2{52NV_Lfb2)ZtWBFOl$Ew;np2im}N_;2A#Q!2-Tlj9R!{
z!KNJK37x+qYcB(V<>M~pSo>MaROQ(tnKf)@0}V8+<_~(}G3@+=82<S_id~VYcQ;_p
z-y+94(Di2}d>Sl|S}@tcQLBE?o8#XNU20rU8<gUh`+721k#YfLUpVR`Q}TR_di|2y
z&YPi5Nlsp4#6Apw>4ou5UX`}`<1{EeJ%qd&+&fg<)f@$nuC9fR_fzpiyXGzLc!jJB
zy%S)o#P4G-T4pDPb)i#wr?yj(oM|CK9pdN@N+j#qpAQ=<(KB=pJmK!QP9WWRr-K?M
zs#M7yH#$>^kFJi8KwtWZS213VeA~q2-CjXeH`d-_uwN`rLB?{l)nI_vTT;U(Cb15y
zPtg5^?>w=c#7SKHoHgq1isktkBf)t5f%GS7g4HsCYqL_4X*~fQ+Ai6Zv8?JQBGLhZ
zx6c-W%A#2gcv-2@Pz)T64>0=C%CdqdG<XFA7NMTjIukFBH3`g_c@m`KzDz22C{Pe3
z&6Y7myvN&0Ov}h=e%0u40Eu<M%TMeUe&7%hl3n;zI?oz2y1_khKdd@mycWW4@)OeK
zlXu5qxZa$tJxh7wPJ0*nsEL$ducUfl?KzY$8GBFX4v<Z4ig$(Jv!EK}g8Asf*9ZwB
zE}z5SHxZ}JF%$JWX`MPwPB}zpDZrQcJozp}g+(;dZPU8+diG=viY{(}sdg|0j>T~%
z?)^+vum@Ghgw*16P*PfAxZ-tLm+TM=r8~4o_g#*YO8UnUi@0M1;_&9e3%u<f;!za$
zL?7F^!^l;w_JkNd&LEO!W^dgYfgs>DLgY-wJhwlAq|tDhxwv>nca||&iis_u?w8+1
z_ux3}ao*4jDz5nO#z$=Ig0_nk;>lt+)m=Q<Y#4``$3*q)$(R-4p-8y_;3UJ8FZvy8
zFa|t%=9|!-GFK29z%=npurCuF^tY(E@l`a_wZ{tMT(NwTma0i|;+7td+^EofZQw0C
zTi3Swxx276=IIJKd4~n<MYLmmR!az1axd19khZ%iYkkX#VPY+VAFsJJMij}J(3Zyh
z8HHq&6#4r{^H{IxWIiT2;^?+L>m;yGX2e7T%RZ$yI4+i@cb~QFj~tS$eCpS(m-s^4
z>3(rDRHNDAdGHjP6D_8)2vof4ntlW_OuFh!U3$G|Gt>3jFbn^5%FyNOikkb5J#E3O
zlcOfJ#^^Zw$DGPb_l9ZU;X7sfGJ?TnU#p*aqH<c18sqHwyOzB>W_ZQQEdpZ@=v-m2
z#oOG~))X{0uS=_O&B%r{+x46ush2JD%zlH|O=-(o29XyXi!B!N4PGGR+z6l3HR*T~
zo0<NmR<<KtNcy_P)?I6cLny!f>w6S%_NCFf>$sfR&kEn1R8wi3*|XYaT}#$GUVHEk
zHn)$B4+gX??&{s6MJ!Dl<>K4{er9=QET6J>VfLa>aB=t(XV6)&EK7C(BR$>2Bl|wx
zjP$;SM=MXH{p*K(TdUUIuZ7l8_`COcX84U-$=fQNiZ29+XQR)7tNOO}slL*>s<1j|
z(Z-j+HbV&3oW5t72ca#dp`Vq{M2se8t)UVqedf>ehNa|v;-SX%!EiAr+z@hxd;nMO
zZsbl2tQHyXyZ+JsdjStH+2)rXG?lXj4JN5eA2?BEc_f<5DTYvV0FN5Z0zx@E6IP)<
zs?4l*l5^NI9-o>^e@q?4?if|H&nIl%h*B!L$QEk7^DZ#HksN)Jhy$mTOI~7|15qiT
zSx0h6?&Wd%lv33wtF9_wMTVzKj+mI-<q>Ar)GP%lS#`HNnfmdy#3DWFy9`9fa8E(?
z%CSNK+B8{!mUM^JHq_k4`ty+`QT!s@T%MIdHYD(2+JL>s&L18pVt!5~BTqCj{RrQy
zovYS1?sIHx_PM-muvveHP|2o*rv@zRlf0#bm^rlll7G8<Y}u~COExlkuG&OgKsaII
zr0oI@hAyc!Z-u0W{bwBma_D&E5n_YY*Kq;XJC4lM<6Ase)PwG^;4=V3FCS!C&51sn
zqIqIl*<way2xz?_NIG852K$GlRB7$$#xfQ6++3BKcBS1dwYV!?3^$)=K|7&ew)e^-
z#OYsXzE|0e4|f|iqx4L>7qpJ4>nZ->B>}2%i|KudHANE*)R-6rlwlncO<Zqw+D!OY
zbT6QUab<Hcm{qYhHvNOPGSIe@F{lZM6)Cc^%6n6@h&$=ho_y#+!NSeN02EqU!H-*b
zUhrN#M)e5xl2yLrn3eT%qn#0gudZ@izz!^edJN3Rh+%rOE@ZpGQ+SxzNzA;w){N^C
z6>fi0I-i%pi>_BYWFZaRDaCV?<CPc#JVH*DR(y<xdCSw~5@Q)7Qu9V^C*0SVuB@4q
zW{a?ba*VIU4*+G&!<P5Q15XZ+`cgNX^#)8JNPDBe;0-NnzzQCQf8s}*&Ke6_Olpk3
zSDDDTX4UbjRz@PaycW!7hx=L+x+*ga>r?9yxyJiTkHo7D?DD+)rMA&T%2cS#Tsb--
zyw=xpD3jngnidUi-NoaaEY}L6#6hq<mqlxV@v<ANRQ+c7JR)P7U<Qfh5jc)&;A&f$
z`6;pC)kpOa*2J+Ax+V7e_=rPo96>hN#gVyP^3@+TJPi~dcuo?cP@E#vs{<vw+wK;k
zX4s7Lq#gwDwR9E@-=}{94YBu-9X!09e*N`i<+X(uBU&zf#x{vYj&uA5#THv8Kid+4
z?f%^ZCkc9|!QS9!z2W(F7+@GlqZmVqY71!WAw{t8gOcL*!(2mZ!+e$G<^@gXr@R%L
z54am4O{!RI=LDijp=Hbt;-!;Nw;RZJ$3{DPE(MFwRKRhNtze=gKG+}~r%5(8^ULSg
zx4nQwv+uysyx<atK-GY0Hp~m$bAD(<6}A4l&nHA-JvqR2bY<`LN#7x&k_30c+deQI
z+b9W$g@c+6pN~vA4+6~M7phQYolrtar%S>@bn5K}@AZe<EFNqv6-5*ta*Pe?d-c0C
zJT8$qIZ$pnP&{vv6=JaRq{?HdArxZ7?qO>?DTC^S7w&}DZgoy3q~gG7Iw^RQL7#9^
zdn}VkKD#0s2t-CSOswG%&S$5X&ji=;HLfS`G_lyO+{QTJSxYN<i78&n2{<vnqZ6})
zM;&eJi&<@*OOB1KX4?OL>mI8hv9Zah1C|ZnsyFlsdY{OtPc&QOhfDuFaGlg$In<zq
zaPRE&7;ygL7a}Z?h7aY(KHwLVKW+|cT1Qsx?V`M#sM`u?u|c7&3o3u$^T?W=(e7j0
z{6mJvZf*9~wZY5*t!9;anKDW{Z2KfmI4P-c@&TBQ&z?HANkreT#irv%+wtQ&P1CpJ
zeJ8x5j=PcH=`e@yHt#l+$GhDCpi5{RI36y(IJMfesvt#^T-#d6_I*GJn<zb7G7080
zW4!geFX54mT`cR|0atGS^A{W1DG^OkyZeRbmY-C>6d0_cKh0Rjd#vB@DJY5a9$$I7
zbv|hR&hlhw8X6A2{Mjc?%jh*PXVbbRBRk<P^qt|_21A;w^d}dV0L!jH)EC0p3x1>e
z$(r!_2PaE5m9L3vqnnRS5ob`tMDW@z&)wk$mJRS1o%F5G-cQT0utJ)`*s@z+k&C}P
z&xY*#OnhCZPgc$ehp7H)B~Fq|`|Su{lM4TPdAS7lQA>#EdpqT)m`j7}iD5D{HU-6}
zmuQh@L3q~pBwKKnJf8;Z#JDQjlxm0QxhFdI=C@?`U&x7PMeB>X#5B-x^K;pm8!6OP
znX!Uz9qY`EGgTN++&S?u)Z{tM+`Wers@p61B0jCckZww{yOdGniSx=~eQ9zTw60-d
z>wBo4)T(y|PhqA7GBB$K2|s(|jBBxnvKhDy8Yf0~?)ezJH4h9>J&FtHc5dQpe3Z5~
zD#;`7Tpc0oo4z?h*J?0-&FrA@6kf{r6XZB9iaD+|#CDBmDo}DR04b6`^v+8NS44sQ
zluH}c&7=YsG8g>kWKlF9$Vxb0<a|ECN+GB5Oa|i6mgDa46I~czVfo~~v4Iwq7?m#A
z;VmJsx#G5X%ZD|bx@q_jYRuL-z;H)Zg{I4j-H0b!@XDkCOqduZDP5tVcQ?kYK{<k$
zA{(7GPzk<Tghgh86yG*LC~zfJJCVi;L8L>{+3X=IuCC1rj3E?L6p8moL@<n)I#-`M
z{XLvsaDGzFd5b7Dnhu{}1YGA!gU(MFLbi#1B+N+<ACH=F(n`X)dxZGiCwzMMIhkxp
z_shbEZz<(trJoh$%6MSzRT7|zw{UQIv4-2O<g$o5E7ZdF;`X317{1*Le=p1PP)D{@
zIP96xof`b$iC6UV<C(@UnS@d9bv#J+CR?>P>)of1Eq_=PtUfiWCl#*5BTo#^A}oza
z+STL8z5>51H5d5=WwExeLB=H|Di<svd5{zyjBvZ_sf5G_8ghtn>Ei)SK>PUz)sX<B
zKwQ7~g&Nt99bBo8ys}+6s0*86?X|sphX+&MNRn7-YRWjznxs}f@8W%S3g(Z8q0-0+
z6<zw#dv&dT`*8Z49WFA7msS0u>Ta`?Tz8A=Ym`nj%FbsEgk$<px~@?ySB!Xc1?PlT
zvxtqFU1q+cMul3VjCnkcL}hGSMa{d1d;8DcOoq}|WYJ}~ghyiq*uu>rhV^i+sd??N
z&Y;@q;F@11JsDo*G<7|4s?uaf!4h^e?F`0&B11CVWh(V}=ZzP-0rA-e_&`4kyDnTK
z>{1<5>CvSLGYN0^DL{v@YGdm4nl|(S6J9j;a!Gqn9@DJ3mDIw-eSd~i9DF@IYpi?b
z+g_?^83ge<^c<MRZL4X0qm&&3Q$uyeI!#0;D>1FiVmkTb`_NEPcV93`c)#O$U<>sw
zYKT@*PHgRXIN+41)+KV))fN2ZG5sVKzvibI!M8r>**=P|n8dL}lwNh!2+t3RRIAOa
zX))#%WMR`}QwZj-#Y#?;vjVCr<b(Ds(11}ed<jt4?}znqV_qZWZxh$REHylcZyiM=
z>Wr6gn@WS^?+zrs-{+o{pBW7iT|#mmMjWBN-I$wa2?<32wRB;l`lz-t_{H-_1rkT5
zmovvM3A1MY_rNM-kt@atauZ7jxI(5^kj#CxP-%U|IUcl!!@eq^S$IzptB>;JU47mC
znUubfq)R=6BO3f;{|BO%q7fnP#3Lyo$AApIv|%(Ys+8R)kPegvj6L8e5cjwEX)E71
z@pKtL@tN})`?J>E$DAi8nWDCDeHrL@H!ip$ch!b0=y^H^j?_>-Tm#D<Qf)>94?`rF
z_}<GfUtp0&eh&NQScAs}<)yikB|b%rtEq;9s+Q(K@?8N1sPdti%qB>F@+S-QiNxAK
zCtP)jwfijDBhMd*wQB-d9FU9RNH1wi67T3we*zXq!+nrke!pQ}4MS^?hTj(MIpM_i
zyz$stx5#l3OJe(KeL9THvw^JRBSNdRPb&6S6W9OEk5K>T`M>u3)A!%E|5tdRrHzB@
zZ(o5MyZvh#fB*eA=r=PH7y|<{6B7d~Jrgti&Hw+Gm7eW?zW@F|@eqW=!OhQGNP|KC
z)k=eHXS|pCDE}bv^fUUxj&bT!SMsOmy7X<_4+!Y1DH(L$K2Fn$RUdxsrTDyXwY$ZF
zJFPl~RK|M9E4}ZNzduH!bGU0om8Dvvk$!Gp&P0Y4`X``TpPXf#gN(61aMMk#{(R+p
zT~(f&&r$sZ*nbT4o|5@6(b3vEFp&a}QTy6S?f&86;@Z*a#YHx|m1^z!Blp+52w}TB
z-8(gw^&Ha=V0-oqR8h)t-;tVh2WBKwUZSZgV6x`Gch@$QxoaKQZSQ?3x4-|wikEwG
zj*IV1AAeV$y@8z}jm3ZQsbj)6=UB5_!B_$0n9hg{hX=LwapSf{A_-~XN?Ah+O&I|x
zX>VJ9#OE5xqu0;T@|>+J&x|nNupOV2v90=j_Tr$vG&qY*S|;Gy+<@h3tWQr~S{Gin
zO7qemBuAcWig|6gU(@vB@Ewy}5_%p>kaK7WUk4RYWk^M6#5*0)COIl1g4CGd_aPxR
zOtCK5p&|jPF(1CXxnX06!?6(8C!wM)7(Mil5T3k>WvW8LMn!cJwWA}Rys~4e`Un&j
zeu+JC!k9ZlmmrSs2I{mDb!v(*Iep(Fz69zl6rTNz1K+5grb`?B5Xv+ZPD_(PTuQi8
z*ooWJ6lXz_>fk8}k+x|h52!0a9L;TX(4(^k!^vMbz>;PQCKPT;Gsc)kW=q>#PfJR8
ze6+1LF{Gjt-M{87&P3N}+vjx9Ti_K3s-BklmWC7)*O=zG2^wl`O~n8?k%c))W({+-
zeQ6+`5(wZ9*E6EFTGfrOU>E2&V8>J-FyP}-VJ5B#&G9!Fs7`sP?&CLbJ73`<FmK?n
z=|FILEW86BdO@L?MdyYUv!)y3GiP7MM&82V(h(UFF*tox>e(_h*JHbdv-%3B{3{#?
z9~<w;2iaN-r;j2xvwa&5n(cU`97}Z^2;Aj{<;HywE1-U+@93~wNu8d-$E2fL=!e5`
z5s!cBVUCJs@_VI=s%H$P^(@8d^-{#+_sTbbvIb={bV=et8KS6q*R~p4%wae9O+T<i
zMVAKNG=fADN!0dvm-wcUF!tw05K$caTcQODH*=893-O?j>3Ng%K3r*P!b#<%##~27
zG2%&Dg5`zY@<Ig`5+==~p@}zDbOl-+Ca<?P&JVlRM|BGiJgHj({=xqL#Qz%u9dwQC
zfIwX<YeP^3_}}+2H%^|7h2^jO|IGvP|14}Er_ap7$_jSV%g6!-VENxC{m;+;pZouR
zUVj--I$&gK0sLR5@elEzk%^J*X8oC2m_R82Gb1SegWmq<-+%c(@(=@rtZiKFOpQ$(
z0OSS~07iOx4jM*!1_po-C<U;y1}Hh1y14;CKMYL)LV6Yk))t^P15;3^sgbFHHGo`5
zNl1wTffyidY5=sd2O0t#tqg&700$EwKuliRiIEo6Co3Q$qJ*F<peQDy4B!F$&@KcS
z0SQ?E0MurwXKF=jfFLI*sdW49Xa%%4(6a&3S_2F$4J}NqK)(<`sN_NQrJ{h05`vHr
z;1<x>zyLu}=F4w8O8||N5rUGqh_p25l}_K(iVmdTCbt6xl*N@0BxHqzB@{t@w6t`f
zKc-d&7LJBM1ZfGupPO&20%Q;f0@CtIpe`Q3L-H>W6yLgN^=@#AN(+dAIJx}`G?+%>
zA^8^q6aX4EJqrr}f{41ZqJZuX2sNOcowXf+#s+Ap2NIzHg0wIQ^KF*}(8Wg20fG1{
zFcqlD_=bqRqmhxR3&76N0wgaIkUTg6B(w(KrD6g=fPY9XBt!vX^2^YhUq90YF=GID
z$PJ*e20Y|PKmhT@2{14L8ki#>-2TN0g8#BM6bPn9RzO<-`9pGh6Cj9xXA@Hc6M&0@
zjh(fDo}s0w6$Lc~0zA!64FDP|z&B`Gx*IB16bL{IdmsY5gQ+DDG}Eumjs^%oD??Kw
z1cdL@ZV2BfzzxGU3xD(XSCLcv$)t2{mf==h6kq28&u{NwXKHghtuG41`IB6}F9zMM
zkJG(fH;^uT*C)Ua=)afeFY@~V_<b-ZK+n$D(Gp}7_O!;v)(G&%Kr4_NwYG5paDztP
zO!?cqZszQ4YGG)gXJ`16T2ay(+)i0WKuQEePv}P%0JL5R2&Psb_j9vwKb8vK-qFw+
zU~gb^!<)XfwF4)eogJNlqaDbTInaS<)7e`%+F8;W0*&+>EgWu*0Re$lUR+LA12l09
zpq>@TsO=rT{JQaiUx^?fIOrMM|H3GUJ<xw9zcT!jfI)pAh74}T-K-Pu7eU=1<^(O$
zLvlqK3eb3K0Hqgz4`5(z2L$NRT01%bly5Zsb`+==<b^<g*#XVI5BWY>00P2yR=@K8
z9q3P%5aIu}Tm66L`RDlmi)Q~X(fFtOpNWa_d;Dj-t^XMr=~@3L{{NqNZ1fDw^^Ac4
zqF+~?2*JkD&c@mv2u~ypvO7>Twl*+uv@r#RcRef6e@=R)7JB*?K!CoU11Lb;jsaL(
z0fg-ITmkCJbm}4idsll0prt)65rPxY&fXN%N%T+==wy2HlM}!|%fJYDNCb*+H2?ZR
zum>7A+#(XS0J#jJ|7a}8AKnhVVbw~{5=eBDF#tdfHg-T8P+$UBnHtj5(^3K)tZ%0S
zkPs#U7=h@3ruBWWqm_ZRC1`*>D9<4RI0A?aZ-5;D-&z57dLX|Kl7tb!!PN!`Ak(AK
zC;NuR$mlyB18YYshc6s}9)lYuOsyOMdS58t_TKPe?*PiA?EYjtD7t|B+&2WE)w4IT
zb_U#p7{He<z&DxStQOG4)ZqsbfQhLgNCf|jl9$)B0}0Oog#CX_{PsW_5C#Wp(5&nM
z03wSU<@(Wva1+k0Ky2PZ>scUtja@uM)I^})4j`?w=OI%11`qfOZUp*ygX!k?ttx_)
z?tenSzk2@J|9=m!|I0M~-v2W*u`)9J<o|E$KSnmD|MCC-CmuRV1OPx3RNe#h938Ad
zX)6$9Ss*X3?+W;l!+vunjI@l5v<#pDKj55A9ZYU(JtJ!i3v1^a3j{esOOV^)`~vqC
z1p??z83bksbOKsf+kg<>ILROV2G&;grr+vDP`Lv_?ErKEc|j9B2Y|6P$ftrzjho!`
zXQZGE(gu`7-t11;-}p;?;0@Fb1{+6x3sVDtq4iBwaKni+5L98B13~TqMCx0?Yha;g
zYIz&COdV(uC_&jV9VI}*=myx){)<FFd>PRK0H8i%QzMW!0*DK!is%Z-$%;yd>579I
zzZ8%+F5Sw=)R@);<QBh{Ip6!ZL4MrSn%0E(*CvqZ{oZ!t&2HL$!uw^aH+X+0_ASu`
zwcbb$G!+w&7E^u~jV>r_v)47XGP2hF8puHv=Kp>u=lW-Ij`la{1c;c*%@0n%uigZp
zB`2>eAt$T!h?e$Wp#zOE09AOm!2^W!-wnBm6F@^<P)%y1^$0Y~9i#xDU`b;_4WOa9
zX$Ac9$lnIv+xG9_lKRV#n}>$x8^#}Fe+;1pd|8zn&A+MmzecHBOkX1H*Jx%>4bZj%
z{LS;{w2d4sEPmzr&uRZo>vwE4syCDWX34)#{txNG?I>-l+eC}=I}WQG9N#jiA2|Lx
zg#rM`zlCsXD~j)U9B=UaoJ@Ve^KY`LpD`L4{Xl7OgYrw}^z-v~iPc}B0^Xqdnn(Tg
z{9}&wr}(t3exyu41ZjUm;(Ol1qok}TAu9#|{F)B^gzqsm*?0c_P5N^K3INa}cyfVK
zDm@2~D|E6pHAFDAG6h*9NTY6T17tHG8z;Z%p+Io|*|Yv+P2Zul=x<EvpAC-w5!XMW
zV)%Ec82$w+#(#&3@n4`~`gf?9{sk)Le}{_sU!Y?7cc@tY1uE8mhl=%Ipkn)XsM!7m
zD)xVeiv3@p;`n!{IR1@_{!4Xd_*eRVtMK$(01zaC=Rb$ApGMv&?jLRVUz_iL$8!HW
zM*H8f*Z+>G{&%eOzhj{P9ozixnB{+Bk$-JQ|A{gFjRE~+i2vSpz8M_IY;LlKo17hJ
z<!A|T7f=+_m6Zcwp$6RiyxH3S{_-UNBY1w>Le>R^SbLz|4O-nBEZ=Pq;l|u<H=ylw
zjZK|E85cPy3OgD&{MiHml;3|+aQz8_o8Esv{+ky7B{T@Xu?cDd+F9vY0LX8%9Xn%C
zHuOb8fG^^@LF7aY_*bO9<PLxKd>#H9@tbS}R7FvK`I{Q>^8)D^8k&CD%F(?&__~?K
zw|RVlK={E72)&`B4S@V-1ojmF`=Y;fj^AHtcyFEO7Yrc90&xA>`-^t|HQ0|V>MudQ
z=7)a`@GX=6Ylxq7<iCcx&1?U<`%Aj|*ZtoU&R;-0|4i=xiN^je_V2U3-HGG@F#O$I
zzrxV}=}iBk^XokSqW8xvL5;t7rf+&jL`_8TmxFFkr+5IeD$>$iUp>*!;<=5=-v|CD
z68h7eoZ{Bq{T}MSwEnKix5eSVi|>E0>;D=N!ygd+;QwDEVf+J<Z}#%9Q84`h#djV1
z*Em@Ifa4cE{MUF`|A6O<uKsHTY=1!TRiFPg2KGN-_@Tc4er>;M$&G^F=9|BJ`<sGV
z_gm@vQ}%h=VFa=w5_<rNy|xt*H9$y4SXx3>L|0xwQAtD*^p}v7fSAZ1+Eqmql_cb3
zDSq-fx2f(IlwZ{9W>eAH>I=Es{hYsN>n3V|?8oTVS6Kk9$p2<%i~_(5pr=6izFS~K
z4l3He&fxEc{TmXuyJ7z+fiHt@(f@!a`d9RR+AjMOsy8(MlIqVxx&D6jZ<Xn%rN2o!
zOuxC@KQ7v>Q*}1E*<}7xrf>Dm^wA%A{R>XNz*A8@`X@fWd!_#(vp-6+|A6D42K@`R
ze=*4aBI~~z`hURwZv+2!3O6z9Kgtk(N$7sgcLBfT9KZJb8g75>{VUntrl+_3k^h@S
z_N&N2_V(@lMq2KGui5dJMjBpgkk7F*H3WWJmY@4>XqoC+n7RSKr8__O-rl{^wFBBZ
zn%V&k{~AKiQs30r(bU29&q#dHg4^8M{s#)rFP`-8CixfQF|Y&bIRJmsxql-n5aC<0
z{~%WiOFat<YXfq7kU*@B{uY?x%liNI3VsFS0^Crx13Ea`S$&ns?@<Kd_VnS)R;jL$
zDbT_YRAJoK7L-4&G3B@Q{VLy^LAR~n1WsxHL;Sbm{e{MFoZ10FnrjdI59|Qp3(4R3
zBqjgTU=C{hBA|ai<R*RnGUn#b-<jR-Q**X6bpYN}m>@R>asuR{64D|7N^3`lo3~%9
zJ<tSyEC1|_zZIfRdKQjAd#y)5M1C`iuktqf>V&>y1$iSL?HiXwbnBFE5#PYyOu!Rx
zbG`6yMt&Re1^RYEe^N6WI$GNNKAHb0ZU63zzs`s7E1KUF>04g(<;>xhK~8{yp4H7s
z#|;nPj%>c1OMuMJ@^5md@9Owf<wevblyyY~B&1aoMZW3g5Apu7;Qhke?@RhacsGzB
z;`E?DfG>{rS4@9W9RCx_Uk=ay4)<>r^*`bLelYfTIDaX@{|Vz);J?H7YsLOg2*2Mo
z`WuXYtnmK{>o1p({s!|;y8{1&^2>dszrpqM7Q#Ov`+7O)Z?OHg;qXrgf7}nE{_ENt
zMC*%tyCDa%%UjD(QV|joQBvai5hF}*9o(O|gBx19_IgghU$&oaT5neHKXMJfI2Eox
z_=>L`H}~MJZ9sYa4;n<yKV|v<3-06w-R*6N+atjnfr0`9=RZ|r-{}8a-vVO%&s^#E
z_4+kaGcvHaxqa}h`^)@(S&TmmpkKo8f09N1MF9RSve8je0{HdqOo2uq9&9Z141hOR
zTW?O7zia|gf4%W;YJYnY4T^(ShPU^h=@9<qcI6jtZeKtq>S*Ec)27kwvVKi8zKY-r
z%vYg&1;0&Ceq>8usy%xO02P4YyET0S{tiw53)*juTwgLc0~^<`BS4Bw@sl}xBmbv1
z@n6|Z{vwdC<bK7^c)L6Xw;JR0liu8V4hHJm+5Wk)-rNlO|7@%KKPV1B9DI}DPwV%W
zoAZA$+ix4^H)`_3{(r3S&xZJ`H~3Sl`PKezp4-a_<iDW#X3#(9&wr~7jQ<^h-^%P;
z&GhB%N0s#D{nxta&m{e~EA*`jxSiRT7uuhSv~NGTe)RrQru}&N3E~f>+OG|Nfc!(r
z_O<tq)!P4#-jM!3p*TOp_V4`puN8!iowc#<ZFKtCkNg_kJ%2w_`g*DTcHGyn3v&Dj
zfbYKF(Z&!|<=$M<`kH;;_T7pT@THErxkPw-%Mpas#`<<CZW8n_w-avxtiNLUY3NNJ
zqWfd~Z{u!H5N>;ar}niB`;#$Nj+XkMH&A@KxwLolWsd>y_CAyeC_?>#bx>01^dI8V
zvj9~bhOPj6An<2Qx7FJ1D7~+rf83@NKamBw10xVh3yYg8l{Y6U&U*F$dmEsEDQHR{
ztNn>9YkPZBeNcAwr3C!S+Lt!K*S0@H{-?;kUWn5D!Qj`HuPlR{6et_A|I+mpnYERL
zD?rb{0BGZ2|MhO^7qUMi{&v!PJMi|$UyAv68|McJ@6Vml@9yAdUHRsc{$}(yn4gAO
zS${EBXA>ZZ(c87Xeb@UT(i<%>wRW`sGRdFDT7tx6Y6Dt`uX6d0<7SqiKYHIG{y1j~
zYb)cMakt_zum)+5y^Xci?ajlRu0Ia2H?g*J_=^#@eczWFwDe>&WH+wM%GwI}%bNUl
zdi-OKMviv3tbb|#swoC~UwNSWb~JYrHojQtjjOrc)w;>)K_>SthEe{o**_!$j#l=j
z##TVX+vr32lciGs*7d{gr~$tt`Dyp#8_nMgLhI45F7Fo={{vHhaY5m`{{CUWkC6IZ
z4S$Qme>UQ)o_&G*O>zFn(@)%gQ<Sfh{jLUoJLnJB1b)h*?Lm=3_x9)4So-rokc>dh
zx03o+Xc>WO+1rTxZIkb(?XlY!`3HozBYxe=`Q21VNq=^(pf~cL79HT>ag%WW>J&gQ
zKdTG{#jgSQ_8jNCi4gMKm;l9h5&e0U_n3x;#GVX5{+NcufQ;gs{d{4;^5&aL2a}tC
z26F3PE56@F{&QTU2K-p5zg{HrKbRojAFYxTKtx3GBWJkz_V-^+<i9q7{{q)v6AK0|
z039X3NYB*55yTG2k=*p%mc6(6&dsX*9I<~}<ePWG?{@V|K}P?raQmS)U;2MmBnm1j
zu0K-w2KRH*55V7?$rs=+X7Rh9xz+PWUu?kH)WN`n{F~GGdZc<2H@{tt;=DyfNq(F4
zQ&QX#rvM=({H^JW0QEsxggMvGIKJGd`hw+#PY@3j-?jgjEo+|d`|%WCjr50!5Ppky
zH<JE&)K9kZb*KL4&M&KY)A2nRe!VOGbx8rgV*AtUAU7QU$aa64<5vgG`D?4^&DAEr
z4PlUju>*cj`Alz8_%F%HE%Cn>y{sJPpRdH9Q~xSi$6EpY2xSz%_?fRV{Lu)S{ZFat
zPyX|EvOjOHe?##T{13^0m;Fzj|3dP=Q~y)RJO0>?^8CIAo<DCM(chAile$6t7hC-Q
z?_7)fUgR2Z!GmTc1cK5taQKp~-{#9VI1Rsg%rD;~eI4+{nSG69e=`vaYXcCq@9$sh
z<Dc_@?>m*hRKS0%$bW799`U|c+`rUbHv;~8vG``|^G6;2`}yg&Q{C@{_#Y2ZzXbK)
zD)B!*P5lA-b1D88=c?bs{tr@Ll>ASRwtudY|G4q^1%~uDX@3C)Ey$OvXSZv0vzFh|
zj@!4JFH(5^^kDZZz)hwAN)-6WZn1qm=x6vXudw=-Sc3=~0$skRHD46<wn+Y}C11nu
zw<vmZ2KD`GsV|TFw*w(~`Y)Rd)BrLQGJq%b&k(<z@B9Fz{|)e`1Dn4F`hEuU6G_$I
zN&Yxj`3cJEcc@>_Nq&NK{2lV^@yAaf2ET)RJJk3I2KYP7Pv;cy4Bx@*e*?R1{zU-a
z{M`3-GPw1Mw+4J$T>Y{EL}a3B<!AuB`IzXJwW9`*{=D!M-?80jw!zJI5Df#x*D!M9
zV?=KDc8u(-Edh_es}dOqqqWU<|8lD{x8dsM-v;_Ihwr~`;}l3GIByXC$!Bz5F#Yh9
ze{-AYySx7rxG!PmPmJovh@W<<zU~YC5WqJ+Re$F5_m8*!6{|nrFZ=(RTdObJ{`I$Y
zzjFM?A757fr}Fn}{Y&x76@g!F<x<>kz5jXtm&Eh;C`d~F+dOH1AMn-feK`*NarORp
zM1Lqze{2#n+z$NtG!-PA-^c$Uq*D9>@c(1)TcD#ju0%&NwxtP<MEnW<#f^~A3`nEV
zhmR$Lg~XVVBZ-NGVjO>l(aZ=<kY=>=5rRkwHs&m|dh9A6Z#dac*u32hyUEM4dtx8L
zI!O2@?DNixl9R-2!pkat&So$;#xahSJYv0jZ&h_y_w>w2fJyQ~!>GG%-?~+G>(;GX
zRky0QMR2Bs+4T)(yi)BexegMn4(75i@$%G0nh&%|jhL($=dKmANSCc<3@g&WwkuLQ
ze$hfQZ}Gf5wW4{Qn^cFq%$~a*bl8`z0>t_~u;hwxKFBzqb4TpuVZY?pZ^D0_YeMb{
z(4$|r8c>}cfHzm93Sz7Z^gD*<&2Pn&;d)QA^l6xEK4uk}-p;jx(qhPUR8mnQZ`hhg
zJy8W4&~=_Az|r@q;`BA=q<U-BCe7Iy8sUh3T3lz0aIbHFKNw0ndtJ8p9&vCl61_!k
zN7K(K5B@1Db8tM?nJgD$=43E*wAUlni!P)#nwUocC5P}US~%8Z?`2K4B|HdYO#NV9
ziv<W<CELGTU7@2A(4ifC%-;|FEsV5{1oTp=z7W$!n1k-bwt69F%&=jHuLJ`|V&?O=
zJZd7mJsjB)u2;(ZIb4ij@NyAzvUvHru}#ReH9}KtY*Zp9Xc5E)F*tCsnvBa;hamHz
zIm#YqtKN^su=XuaKUuAu%k^w>V;(7Hq(5?{kFA>f`A@wUoksTapQ=}{cCI#_|5SVR
z{J$%6kwYB03~GmGD$5n&D1k>d^@92dMm4`eww_C8JSiAMr~@YV6kB(W)W-HMWn;#(
zqyW1&9v`T$s@kz*hhtkf;fO@HRmCDb@g2Tsuu7fEbP(!>O-lQw%}ReH5bD{<7OuJH
zb_l&0Kp*=-KXwPH<3|U`ICE=I8hwd4bgzcYK<-*2Q1QDWN>eZ%4~0XdK$eDj!nki-
zs57^9DFm^o8svRGDvGAjA-~L*l;sI&dcFREzC;Xv*5D9MZ!0CuCD8XVQW#A_F)fW=
zIm<4QG}9c7Y$HbloWI4g7)q{>oqlelRwUC@`Ch+FsS4@HoG>R~dYGh0TO$5IZ;-FW
zn22ml0TE-%H38-|kZJmytw&2U`08F)T!mPEk4VmB4vdAQ-F2dm)vx&*H{xcZJD>BW
z=9LBg7T&oee@V&7n|;j^OFLXC6H(@^AWdT7?50L7%h^=&(Ntzx)NLl#=P?V_Z23%^
z-qGiEPUm0g&X_XGcG~`Beb(ocNVY;~vmr)KXE|ysXRfu~(nd~ap_(n5(yW0#pSjQR
z%9&YdXSP1KxoKx?8*E)tH}ux$+i*CTn#uBta(hN9I3~|Z#;`=q{1|+3ow_t*fqrYb
zo(qN~G8NfsaIun3)6i%60q&?qgwW5BkOUbW4YSib%!%m_^~+Axc-V(IMeV4*?9}w<
zf|%11a+95s;Q$eHQtEkq*=d;^HDXRrcWz&Hn)1OU=EPWw&Q3*leu+65-rBO$F`jc`
zPD+zo12yZ2(x9^~&^XqHux#a)&iO+PHQ<Z!J8h-t0j@EVL$fnX9R^fS0Cizp*R&+R
zPjxY!`}ypYrV4_wpt=woQ&k1dlBoJoT364Mg0vnlxzn=Tqkoi{pMjcIwUm}&aXwRe
zgvO41Mbk!I<rn4Hfwml&ii1$<K(>^$rTy$lp@prQg2EG8xE+hLWan3GnZ>#_eAArB
ztGv*6B>DoP!!ykCP*#0Pw=baj&FYJ96i#*uE<5^FoT{z2wILE8LCo<8*#;whcGl~q
zQYR`p+F1EW3tLAF&+SF}aT<z(a^<J~&OHG-wcwGtrGPqImy9iwy=$LG4z%+h=9mr9
zIn6CAWL@xqz<jc0I4M`#Cb8K#Ued<)n!CK;u&cmqfTfQym~kMc$K!1Zk+oL0gGO>(
z`0;(T-^ZuVfs5T@97mMI939T<1wl>jjQ1kfcF;ZsNVY<0han`rRvTJ864|cMmO%0#
zCuK69?Qqtt?pOMg+_JM-v8la3t_+xWF_lo^py&NK%VPh7sTjlFhl}aWBsx<6G>=}Z
zf2TCAH}2|C`<#pImM)L{DUDj~(gaib21uKozSjdR${??~cI?enU@Hs;$((lWT^1q#
zLz8Rn@@d{+#nh>!O5YbXHHXcy^Qrkc?S|EAyVEx)rrW}C{A!S}3v|s8B_@Jp5^bDz
zuG3s6I|9QYYYV<#p4Se%<y8YD<O4J^R;fN|9_xLq0F#qNE~0ao7bg`Pel5XHli?25
z>iE>Sm=H%k<9s)#*h6~}`?S^=>e0{ItND~0p*Sjl?}O|MYBhmJC(OMDRP6Vs267zc
za1ph^CMAUpO6js=F*A8IvLlol(&K9uW@ghe2YuC?%$SJYew>@(m?Kf0v6wMouGDkT
z;#h);Ak(7uDKXVFrd<NKH_==JY3dpYu$``<sD4vuEL}zGHNDPg$ei{09w4R_Y(1Xj
zKs$~DJ3#_Nekk3}Ig|;jN6Q(N1Njo7eXPWnY$fJxi&{J#>lH8%%uqxv1J+!#70UA5
z$B3ENFY5|v4#}($s#z!(MMVNOUtbK~1>=U|>IU*;sP&@u^a(V3M$I%|A(+qcc53t3
z5cH`6z!RnKHn1w>YCS6#sPNq|1`Ob*aOmhLm8PK_ARNKIo;j9Sqmr@2^Erw|tLkPn
z2V<Jij47KN&gy*2+;Yr5Tq^H*=LZBZ;}9PVGZB;rVY4Mr;BZ<F^$@KLYnzJMM2*yR
zGIcNWJ<-50b*`OhfLq2)7IsH6ujXnm)TL+yU-bHaZvPMVUDZqRG05@#$J*-ZI@SN9
zwifVeoGVwazUu#RC9W!||E6UdXgt*7rO!XHFE_DOH!x@&R_5a%+!^GL>y~8rNR^n7
z;tQ1Q)6TMS#)l{5LfRnENgah_+-MjG#CpAK6vkun)<^jRErP4^zCZ={ndlk7=0)4r
zL4wl;L^Kk@eRqXf+Auyfs2Ey0TJxxY9xxFF9QwupV}%&l@;kgtFK9Rv@b>h_*@i2n
zq%2W_zI987N^l!cp}NGT9v)RlV{RB~2iTS_a=#9eEnQ8$UvF?oVa%bmibC4;qf=YD
zT|~>UXm;Fq?zCGA3vp>O&DX6^9NXrQ)>6^WvXEtvbOw9%P}BO(mR9bS>uufCz7b~=
z3yA{i6A9HOJ)_?aodqhv?Bvf$N7?{^LQu*>wrcco_l0@K2eQ$RLwRYP70*p8EEGrC
zcEzxJrPnUDFvR^6<zyOk#nv1+3r8U1(s#0GE0$MRv*?akD1Fpyt>G(YVroOrL}?cR
z@emJE1L0<17EdEV*%OCbYR;Dl);JM`7YSto8CI>_tCW<W2W6~8ltN8ffAl`?QYUcw
zZWGFwX^WFZp6LO~B6*tzBH<uUlF6*v!YLWOJ2|orm#?AO=spGCpkPB%7#RLmt_#zU
z_ETJp@Wm220*QxqdNJ?<5|ZUWv@vKic*F*hL}eehtD>2W3oMr*kr|4KN3b;jzwhBd
z=!_w_Vg-+3o|_E$39*+W@P}`?=A{&AEzuI;^ac4!Chl3viE$8R1ZxGWr=CfJo_qS!
z#U+UbC1;RD4B1$Ttc4j)VftP7Wmm8HU%Bl+(UvdX2F&&S@6{`{@4r^BLi?|}4sKWN
zzbkO%_(-YvAgKoPqn8+cqZBNVE-o8n+R%Xo^qp3kn}eYi`6w!}L^oP0ksfi>e6?MP
z?F`3#Lk@M!DTEBnE-%HliNk;BYy@EtfwhHa_%PLs{VwjG2b>1Qwj3Yvyly41Vg)E)
zP|94%<H_A?mGXxlE~isOR;-B8c{>j~%eh*WkbLF#LNu*>eL>uI(Bx{R^;A^iltB5z
zzH(B%s>0?GyCEX(tnk68cK}PTkJW7+%g<uLfH08o)5mGW!B*NLw?xHhPNj!Y!H!z#
z2{Kn!DOQhptT2le4(?zJBl<{T^GHz^$?uCsL%8<OdjfsjsCnEti`xq}{;mjAf1eWf
zbsM6_&7%&os9;vbaqi3{;-D6>kZMrucTq)47h!pf^^SBTtkNC{SAwb|D?c=dhtfgm
z!f}Vv?CS$TQ2Iehkl|so^Ro^e+KI4cqnYLGnMO03_~M6$RfC@O$8;v(5^nA?%bFpv
zqfl&oJj5*^e6>WfjZIn&pfKxCbkYdP(m9*x|NemiAUGHdgT=P9>N4mURPw7=uFRzW
z@k0H-dX;l^-O5#~Ebz7tZpzBbkao`Nk6i!ftp7IO_8=~tTtXVT`v1DswEw5N&bex(
zllA}AHO{O3|CP9wD$VNW&~1LZQsZ>qU0LI-u2z~s0HP74b1<}PR}dZoAykL{NFTub
zA$W-=E=Ckvb7ym>-MUn14f%s%oLx<X#dq8{dRhl-9PqAveVeP(x_N!aM%QLgZaN9G
zwykezr*Sr$X|#5BG<Q<C6puy+o1zjrLkJ+Ay`t2%Bh(l0)1llBzuj)#)O5c<$C&jF
z|5``HZiQ4m9h)|GtZ(bIHaFuC-S4+{w7~=Pl9hu!gs^_IyVKgz-n^lu170{BRq&r%
zI@Z>fCK-$k1=<Kk!yV#4ly308vZd6<XxNp?2XL~x@&UF`xwLX17=Vr><hSBv1W?3W
zGQxI2(c!bUZU7jO>I)7H_~MvIEYZ^w8d9Q(zF^E+=7_+^zRclQ)TFB9X*X+Osja!$
z4sThmAR~Z!k5U;?N*k?K$WuMMWBiAnBOdAxDvqksd#qL!S}!;ZcVsEvd8X~@<VzpL
zf>5^YJe5ML76FN=)ufdeD-3D1t^cSCqBX;-Kqx9D1l8v7xHq*wSWn+4LH~e0t2K__
zAXoGiucs=pWx1ml((q#(%;h>_01<;BaAT>hqYW%NVCV8rEBE5Dq(Q~!K>4Bw@B%yJ
z<&J3ZlPb2xIBOsji!)j*e}DKx-kkdX(x?A(>3?V4${NuBb+{0)x~_H=&HvY4-T!$-
zE;j$a87f{~FB6VRZIL#m&EvuS>P!}tj^Ljp05j|SznSl;PNLLBVg1y+G%?Sn<&6aR
zqN$<xu&Hyb8jd`*#b-&Sm(m25MD||YY!)b6{nsyGobbD@srTo~-Tvj!#VLXu`@gFp
zgzdk2wX<f`s#<{8)U2$jx!V4(#KqcwTc|(8yerG0zRPK)9#w++F%()hkA&H(pr@(R
z0|wzhcg))#>1WGg*%t+Iki8)iE{`kS5t`m0pU)_BKByoE*WSdopwbOS4?3eoBQamS
z(jN*dwYARe-FA6DFsLMNQ`dd>xjMY9uJ(<a-G#Q5)iqFvYG+NIU5D1z((dipw5hGI
z5HN63fClpDPX}s;IbzYaLhXc7E8|CVB#d(nIC;gE{#bF?5#}-3BY|lWU=E$6=R-$P
zVb=nk{$RW}5>Wi)w8I?rXqHCjlEA%S_$mD{e{V1VMqd;;iZt$Hi+XF7kF!OkN86F#
zk9GuO!RVk*^?W2pJcomv|2QUoVZz}=Ks#Ti!g~Y#;g~WNcrOJT<Y$g8r-4*(0Sj*4
zP@vuy_4n4}`I5o<XrwP#kE>1f(4+W+u~;M;tM3o?15fJveBHsmm?34f$zy|IB$Gh*
z3X1FV^Mx@87}M71;s-sV0o=bhj5!$ggFJ2b_V?}@=6)@-Gnq$Yd4MQ5g*iNcr<CII
zqj!5Z3s8b)5m~OWw2+x$iVc?FKqN+Pz7;0iQFEwB<g{`w6L)>iOmVBUye%^UF$&~l
zrfxD5JC{^h0_%1=`5uL0Vv!g<!9egr+sI>zSS-HJ5K-km!OrB4t11BA1rZJ1jy@Ub
zjtNZx&4<b{Fn|+ekwg?q?@%6~9lN-vi8jMQj<&S5!ay<_^vC;lrb&DSl0YXX<{P%N
zFX9W3`|7qJHpOn*J?k6XiJT5Av7NDaus^0Y=Xj7c1oHlgBw|p&821t%NW_C&O=)>j
zH$aoWq%ZDM*!K&_>5%0v8l-92{sbsN^&no#miSK84!&h8((S`73*E#=I?6oQ7u-g3
zh~$a8BNE-tywt*x9ZXkpRYUCvJujs17SO7=SL`1{S>n^-1L7EAom!$cs7W097x|fI
z1E<r+KI$$kC44Whu}de~sPE;~SB$=5t(+UkhlJFi;l#MzHK44(RO!GjJ|CG8qbx97
zmKcV=B^8B?DsR#iG+$fDf!MzF5Vf&m3R=dsR%FkXF><*u#;9D!n;4Shd;xU%o1(h9
zfrjhLS~q~;CGudz0&ccy!T09XqQ1^pE0hn&frON`R+i<il|&VCsE|?QToL?dKurtH
zom;sDnd?GiR}OAvXI3^|aaPSoezUS_nEhw$s$9pu!vTCk{}IM6&Mn^GNPIN1(WF{M
zLWJ$}qLb4c8BQHnR+bZnvYc+&*v;@VGo0RMWI4+R<UmHdWVXi`+kxwXEIIdUeCJ@b
zIy4Y=-*e<43S~~IWru}iV<2H=QM&j(BmXE5kh+AFm3s}Mc~jLOmd+kl)}>p~<OL>t
zt=d={#Lpt)HoCIF_HNvDp~Uc;geAyJI*Kigh*HI@@P_QorC$4w$5<EgVlh)v)D(xx
zYM+D1%rwBz=rn-1hpk!nCGx`Q1-DV7!b8_1ezkpNEI@sUSwqcC75jjTA<A1#CbE6q
zeVKFtGc9BjJFA6(+C8g?BZ9IV|7H_$!pb7z_&%eEQyw67329Cu{`)KAv3hg^D`W*S
zbf65Vj&}55dFW$OxjAOmm`khnH9SOtarHxK1$hKWgP;_W5enlrC7KAcAp)56$Qk@>
z8W`41C;~W=RBTNj_f~tIc5&Pq%@TS%L_*C49h<=%HTl^aU`h9aHQpBl$!J^8EZ)xG
zHvFnw%z=@s#B5(?Ct8p*mbm7Q%kZ>xm&0R&PB=;M!h^y;6L(EX%u;<c3)#0H22EsF
ztkp9~7xm0pN`26yU`2KhQeDL!;xr#gYs#d+c{Cv;kYeS!9<KK?=m$fTl1J0E1T~xt
zq)F;HY3|<#uduAfX;6rK)rIem@G_bIC3*D4<Ixa|#e=a+_W+rr|3_U-bxm!}s#^5_
zSY30~|Ko~WbEyAI_5jJX|HoOq%IN=5ySn=7{Qs4>uDt(8r<gU?L=4Y#@OceDqX!4u
zisAQLJKe6<R(P)J4u#PRqPK7<&8(H*W3!iwnOE{Ujfdh1`XuO<%|NMn{f0L3G=Zxg
z9Wi3P!M;8|kv9~_h`s=>In=DH3IqqM@Z+|2>xNAaw0qk(ZE$reX;I}!!wQzYGOU!x
zsvcFAKWux*S$X#(53hLG?pPj(><BB4<)x)n4_8+Wlq<vWD7jcF<-_H6>v~V;=JlH?
z4ezFo4KDOK@eQB}2jn7(M7cW9Q1xgto6oGOC@l>%7}Jtc`F(eA>F8<&nb5m?u#;BU
zQ6e&P8H5BR5(VkP>1O8D!eR&)jZX6Wq3L1Y04X;5uI->N1Y;>YCKxAued6PM<YF6B
z=cBEmo^bF_6kDlH5*K@g-AZZH&oNu{`AZ(dDA9$D<aEPNsZFD25&;l<l*-snrLr=X
z7@+y;N<7Xa60W2(qj1phFqo_N*yMaw`pB~-RzWkQ{k^*?aJFn%0UbA>R1WS?a9jw@
z)vmC(m@5=BJZilXhIm$!f@RFy&RFiumD<vT+-@(_-eU%aB`MD$*26(wiXo*0=xWW=
zZZDzQ_xoUI(x-GMLVfW{U<P_nDLtVmjDO3R6P38n*Td=ZI5_>Dcr$hWf9cM2^_u53
z2m8;gbDozYNDljNWo_L`^#547vbF~PqW?!-ZS~5l_TQDb`2O!;l>8E)3#MJO+N26D
z6SEU3QLWG<wk8D?(08)4ILWyIXLqXNk+`qVN@ozW15X{?`@lzsq_J<9@bzhbA{xb0
zv@uO@d&>su7{t=5_Q`l$l17T<npIRz87mi^eC5Z==ejGM!J%vk)iZ_deg`8U5sifg
zgJSKMJU}p;tu6O$Qm7x4CQxbH5}(`=4aAmVU7$kgjzrKD@oo!3{~VIHoip4Rn-0T0
z-`g6jg$W?L#v5mLF@J|H0ZyDKBp~(gZ1R<`Yik5LEsQZt#SeWyN0O%2XqKCQG#(he
zR%!BJ&)n`36SYg&VuqLngV@G`Y6}q+Qn4^T@bkWwEp4uP^e#yBqhC!T93tsrEB0a)
zoTmNQMy}vCJ|@tnw;L4Xgu-6IUy+!OBVyxi&eiY+QzU7n5fKE_F+xntYqK1(6?2vk
zb&a5bFxg-HaI3l?&X$$c-0_{p!09C#t8puufNAU7w5vy(q7H`wWzLle$(y7sK(Rvc
zAal+rFAJ2Hu*u^R^=O!WAATE$Ua3n)y0sPkD!Y_^1yN_4i@9uQFNq!eTmcX(C;;8n
z0%*(eb0|d*IgW(=J1=Ju)Erb$)>t`s8!%w7?O%LjmDHl3R8okSjcqmG;fsccY-({t
zlZC?c`eJxvarmK(xUf(bC}G*vmw?HUF7L4lDOt+5u9XvtCXTF$6BV(x)PSqQ^anj;
z^{d~Sg22`#p~y?p%nM0gjn?X0)hmKK_`GDTAjj%ECwtnuO?ksc)mb?tvb}DdQti~m
z5}BbltiB~B$m6LS;jNlmFyyeTy2epm6I0S8Oa%D}J#2YERdRfno_w+{!o{ox^Chfd
zKpwIg2x{42S%4gHBKP6}4)@-BuUNkoh>B8H<AA(ErXAIAgJ1&wU@2B5+7#)r?Ct=1
zJX>{(rMR-u)ZXds^57{ET^>3oVnaKY+}BUa5Z6y1(w0&mQ9Bx;Yh?kgFfcw18H7o~
zHhF6^)STL2M{@dwXIUf4bXG^!C8LX39Rl4(s!ekoLXp<!k~>)s9hXNnsqSZ8lD_C8
z)+E^{ZkdUN@k=P<ngFC-Wij<}xDqb5x>iZoFas)^H*H?u3b0`=6O_Cv%P0&o#S-=b
zwgx#_Fw5v2j<InXkCMf)yi37FJVj|H9i{N))U<FjYTUNoT{$wa(bd+{-eSmrWN@VM
zjW2TPjgPAbQ_T08%lPjShJJMA82{Bd>uTBfuV!UUtrOnY)Yh!7y*mE85|{b-uciIt
zuJ+BFIv&hE{^QQA*uQfZ;y@_o3k>?gaXcIVR3h4P_)Q#fuDzr%`;ivv_#mDf0pqT!
zZZ!I;XoG-VI5qzsj2$n_5HZW>Fz0A+ZbLV9{6Zse^8q4z#Yc$p3qP5}IYxi_f#3T-
z+Pj>CyNphc^2o2>H{|6;=cSDQq+y?Kn4+wcQ(#uJK1m&VrcqdqF->8iEC4Ik>Pm&@
zS6##~0@KuFXT{uy50nn~9c3vAueAo|3(9ZsR(1`Q54$R>cE&2zKP8$Esp>a{HY@?L
zVLT)`eLf<+M~}~kB=65hg!`Dz1*%1McdArs9bn~Q`_}eAs(XOQJqq3GViccUy3Ga^
zN^elONnzKjK;qC6Y)sEdn$Ip#0DVf7<Kkd#KMjQwz?T6S^y8VmzF1K8n^L*73he)-
zYldKe0ONzQHSk**fIIzk0|MhHCw<U^xHfGpzy=Q;-lp}No88LrFitF`zinyX?CP+i
zLC1MOwp>^HN82|&&`!xqV#&<?3P~S{0o?my|6hvv|NCSAb4%Rjc5SiiEONXyKj?8O
z&6_%0ol0}-rsj{fxj&(-|M>cr);3ogKi*B!;%2UASFPti#%uoe|NPU)>i<;>=0Dm0
zlK&Ts0#;r<|M!YqpK-O`m!FrXhVw1!EEsa$)JWk*4xjj}hSO+SV=08c%PdPR*FpGN
z39r%8sJqYbXyRVL@ved20{E@TXW=#Zjk^0C98cWyr2DnH5;W@W8<G~?-C`+L<6&Mc
zV;nr@(cNv=vEbJ0_3`p~JR6T^6ZrgokZ0_mp?nMYZ!<67W?n9FS2&&`;mh&jJYJmR
zi~C}Zx7ZNR^RsvWpTD__g}=G0QFnih(-U_Q?_&_}I?LQIQ9g9;Syfln7eH5!#8BnX
znpKsn>Kw6%qlRcB_mCwU+q(!~@Dbl#%kQ0uP8a_0eO{iGI9LY1yWzJAewq9~>)^MB
zKi>ksQutNC533G)t2^Ph8h$qT)xggIzeVs{#^c<<(^(3?<r3YS;kN>QweTyqFdp9u
zzq{aPH_68}8S<yfnsj~85dHxC3i<Pm{C*R^Bae|^x52N-!gzH9{650p-)^9#z^_CK
zBYzi4;SWjAABJBg{Lrg+3H+)ptgdqD+EkvN&OdN@-)?}<aQS+`0N>5;MTYzc9uyex
z1;5G+_>c2+*BbE4_}y-R|AgaL8R(Sqd=41!zr^p&2KYXHcN*Y-#qSRr;Qz?)&luo;
z!Sl1q0DqIi*BSCDblq<Z_}}OEPZ0hGEa!?FE%LS1Ku5^TCIcOz4rdMcqx^oO0e^z$
z^GgPJ18)pJHo&=-wfxio{~5mzSj^=iV8CC&>uV^_yj&gw{44z4lR*v_=A+!dHEa7z
zk43)zn*o0bFYmq#^6;1eemBqOV^ppWT1p>nq;N5R|0|0g|A+kkrv|vt@1+^!{O=6#
z@9_IC40Oi%y~%)oGrzkH@C2{N=Q5=GRGztfUYmiRUo_-Lbgzp!{?;$*@mCw@_woD7
z2KXX=zsmsshA<TJ&E?^cAwOPzKW@?bV)^>2fqp%|-)ev#;OYJ#106K9$@LYx$ZE@$
zwpMRvFz)IL;zXaDc1J}C>cO-6wWsKP(O^)ZH;YqtH^BH%M50g95QYnVayx0oQ6kVx
zpW?B2djL-m>WVQM6y6B=z?jWo=L<kH&925<L*ZaMeQ(d=y?@Z#5!@DvLGsOgcw|Y;
z;`MIpkA%H+T)EeaMf79wR#|rVLh*r6z=EG^L(-O5m<?ntv0xm1L%S0_mL3>)$1K62
zpuf+8j<}XUVxY#-Ph)D!0Dck9(&O)o05=9g13?RZeQ?O)XRrCm_!j)C8Kx5U5A3w`
z1X!d<bUTCj;&_ioU~F&c5B5Xa79axI#_U6uafFT-{{8`=n27t~6`gPF?e<b8Sn@qP
zq9Nd*&yTZ?fC4na5w95eTfF!c0m~q*f5Thk0%r-%Vn7+f@jeUgy#h=e*aJ>?EXGg(
z6b@K6wzf1idutrE>QjwlB@^O&vIO$^vw&#uJQEoEmnYrzI4oA46bEe?c1P%I7ENE>
z7z*8hZ|}@!u*7YlLZnvC_1uZivi6Jij<Ct_N^$u07R`3Y>zoA7m*6uJTxVB(PJ=Iy
zzy%Uq9uE{taM=c1EWxjr;EDt<kl;26UMRtx5?r1cStG$eAi*0YxNMWTCHM^z+#|td
zn{kT-FOuL}C3vv}@0H+-B=~>?zfpn@N${H__-+Y)vjpEK!Ece^V-ozs68wM!Uo627
zO7L4H_#p{?n*={B!DT!6hy=ewf=@{BJ0<uD3H}iYJ|)2w34T(7FOlHW5?r>yQxd#X
zf}fM%WfFWwf-jTcmd|VOatT}@!EF+}Sc1!Amc<fW9`h&?e1!zJN$?5@?v&sT3BE>x
z%VUW~30^J1-4eV;f_o%*tpwj9!RsXWRtdgJg7-@B)pELf&K9KnpkY$a;-5EK5%he)
zbF(X7-d${&E&HGFcMPuCG7R@(l+)9*aFsoaVN_+Orzm_B!>GbePf+;ZFpR3}^kEAB
zGlo%Rojyq6uV5He)#)(`e+k2=qE7Fo@aHg$**!f#;XlJLs-)9fDf~EwQ5Bu`P<R)H
zQ3aiDq;M3&sCrI2Df~$cqslq0P`DezsA^6ZQ}`1YMiq0~Lg5Y!qiQ*Q?mZ0OkKtk}
zKZTnxyokzA;d%^zh{{jl8Vuh^<)`p+4BtfMr*H{|Z>I87_%;mRLglCMhcNtMDnEs-
z7+y@}r*J-oZ>926_&1+{@NHCn3jY$rw^R8k{5pp3pz>4rRSe%r<)`qEF#HiJKZU=C
z;k&5(zs2y&7*?qK6n+-NOQ`%5K8oQIDnEt)4Z};R{1pCY43|>*Df|@-mr?mC{3Q%8
zqw-Vua~LkC@>BTFFl?jpQ}}TV+o}8%-i6`iRDKFaF}#AxPvK8uxPr=0;cg69Qu!(T
z2@E@^{1ooMa21vR0@Xi;om74Zi}q9fMoaR$kGhkuy7#<x&a=5?a>DaSqs2XWd@J21
zzj{lfB{dEa&X0{@TXT<I>l-Pyxc5%P^Jk~1sg23k?zP`~41}!9o%~t6_)ICpnJ924
zuYKp!&=|i5k<Y?|d$icS_j`%gUc7cc;O4#bTu)EW82gQNx%aGn;8Q>W3EXmonmDAj
za9kPlSXz?Ly|jMz|GWfAVgit#0e5ob$fRou@m_SjVwrTEWLjb5h^6R>FF^BlkGhVy
zM~9rLmw^c^&}effT_;YzOL1lq;{*U9o380It06Q!T3qzBYxeGE2MZ}=+2@+2hsJ`W
zYl=NBNV-np6GU8$5h2BCOX69Km+yLI&qUrn*DIWGF}zQmQ6Q8W^(>%UV?oisJe%h_
zi2+U_1r$=2#DDNe2s(FW1BA|DViWGsg)b;idMpI6VECpNEEG<hbMJYvVB8G@Q({-@
z_ysEHIbKkBJ#x;Hc#0ENu<QUjVJX`C9yF9u*U99w$6vQ5yADC-UrD;Yj!YVDykpNy
zete<&oR_e=CtZghfK<*D;FA}g+@ozr+<T4}5LPk$b&k3YB{!57jJi%G7nE)oZToub
zsO#`}ubR81kh_<j<JE+{Ra9I}um&33A$af*+}&kxf<th32yVe;(BSUw?h@SH-Q8UV
zcewMPbJtnxKHY~|v#0ti>FVCQXLom17n4g)w#J8^%sVS8OD#Rm#NL+H!Du{J;SKLa
zP&mxhdVh5$yk`#wlF@A-C)#2KLD_dkapPbg>HDzp@d-lI%p>uhbClxdSbUA56^+gu
zp!x24vqod^m+u*iR`T(~E`O3=3)wCTec<yNXl_pR_~UCpyysLO<^v2Nn=>wZe+MR?
zoxQ@Pz)0+*O0#6P$vkzQ=jWH2kT@wGpMaIsvABY7Uwa9T#*NL<Ft1}njU!XM;6@pf
z{;cm}NA%;x%{>}zKDJT%8V2X8p)y<>l{*vl`<y@y4;nrm50jP@1=xKW&){Y=ysV4e
zv^DOHV2Te>`mXj$EwRn_XtLA7eSgs4$*)sT<oTj9=sVh3p%SH~LfhttCK_`j{i6Yw
zPe|B`3uF+Iw(A3(u0#(Vwv3*=GC6#4A^tYfv23@<8m7H{jW2Oc<H7fGQ5kWZL@%Zk
zHo4B)sdYf=Qv7A;NNI_U_O`f|e55!gf$y0dUH)>&L@Qn;X>s3&USwu2g(qm%JI?kM
zmtX4CJT7Ny3(@@_Ti`pRAN#9itoL!{kJoO6@biu>hB`X|J(I1QYH^pTE$o$zY=$~6
zLD=5k`P`nhN7)4q5%BDc_LiiNAB>8OS&$r01;IC`G%i=b(UE2UgkL*&Up-~D8S?y5
zF4xS3_wgSE5?K?U?Su<vIzr;eA<TY7-)r2lW9~6wWIs$YKNk|%;}i5W{Dv+UM|!Tz
z)Ecggp%9bp$Wrk7g$|&mCP#qHJ$WRTQ_3K0Vdi!8JFuRp7$K-wpO-wINLd9?N4`o-
z5{o9qB1$<=7$EAj^e<fy^H_ye^QcvH>PuZ^QzXXTEvqa*3}`_|8c^Ot|B!7pD@og_
zlusn$E5YD>t;+a%N2be`UsXTirAt`SjviH;K*7<#%^^UivZ&~AY9@<1r^`)-s~PT!
zB+0OS{pG&+DlOma=Wa~MqXOP;D7m#3M)ss{Yf?U~BL++<!T#NwmVv|R6wE*vVXDh0
zmQCr9So>|)Xx0gbR+0XwZ|gk$TsTLk7pEvcpQwO;V%E!+<vbZ2^IJj4Mk?t8tW@JJ
zYD=`oqd?0P<bwcZ@>`m|LvNS6`6R}5W=sBuH>`?My=|&gWQJ76+z_xFHls9na|er$
zRZ)ks-by9uYx?kbFwJR7XKwtg1k~>yB<-&4TISj5ryTbV&pPBpsaa=3H1_jM5?=ql
zS^+ZLDmqJ7T_Ljb7b*2Hn%9Iydr}#LK#E@#JBwM#__n)d4byQ<QC%&nfAE%02tG{D
z=r>!>T$eTZM|i`FV=#>Si0q~&9btJwg$)WR+T=4oc<Fq?kY%diro19j(+yrT9ZBp-
zvO5R8QM3lFegV9!TyRv(gD7o7IDH}&>~DuX*VP6Ld#RgVb&kUDC^cO_GZ*@GVRt95
z`BZ6@>Wl}h6t|ZC$gyE(brpZAY-YtOC*|#XI~M1LunOZ%Rziqez8gqJN6C^D&K|Kj
zK#Oib@leCe9T9RZG&Mh>VUy^hu|(xA^v`}yw#XJ-$&kIP>h0zgGbqURZ}`*KpPcD&
zKe;!YNKO`hoD$YC>Y)k@@vs#*L~Fe*+ZFEDHp+|KD5tzJlHfVAMO&<-@P-wsO1npc
zC-q;qRY!$GDTspt<>fg&+>Tu0W=-;=TXBoTCvV_ejzez)#am>C2uYOdAH8dO6cpkL
z+KRe*91~t#lkzPXB!QC+a779(TG+5Tkl*AEkP=@0SWcz)C-Xh8y=U=zNqg`U)|elz
z_TUzja9x+!(8MZK^d#-i&q#SwXl;>46-2$m^Ekp$#5XG`dbm7(Vj!e0-C0?1yxVX-
z82l358^a9+MsLX{O-FM7ApmNdVxqEqeIFQf33ta3->BfUJ57rFu?dbNAUlfMfT;eZ
z4HTv~LZCmvL1_2|8H$%lZv3pQ?;~1=JoGa=@;pGrbq{{OR|#KZM^^2onUCkb@zsi3
zqQpFvr<={WCUgVM#^|vxU6ayqnYr%u_@5Sp;rChq3;0lKIn&<dC#rdV;ygceWbSFu
z7C^-wORj$YTvO4kDxMrirWQkk&ftdwGY1GZ(G|Vj&(9Tu7Ma+r3h%9_;4C4;DQv)^
zfb@1acV56ULmBG`OR$D}?#Cp1czxmkPX?{xo~&|)g;z?84+t(n&u14kFE+b5blaVo
zb(Ie}EO1zJr9P<Qh$=YMXQgSX9?y=6)5Xwptymh74Jj<^iGMY|899FS&T5mvj})O~
zI(dK2S8ydT|B9v=H{OdX9cQjVtWL_%vUNnFm3?JjR>tqn>h0l=8%q-qMX6apmPRyd
z!!k=Umlo5YJNn%CfoIgDRV3|CW|KWb4N{Qt)Nletr?e{MyjffqC0;7*@$-B@xGG*M
zF;FKFowY|?JE=u{k!!N1Fv*?`IPaW=cl#PPXH>I!uxKal3sNGKBza}!k0cqyxoa|)
zjq`<H*J&HO%QT3VlmbrhNGCen3=BHsxadz^5wN@1z_<`Wx)A-jwP0vUnS<=S_6*xX
zyE2NrfVcC)1eiynO1{HI>xqg|aQbp1PnW9n1grifCI-rw{=R6NZ@)pA2oHUaozGih
zV`p+hio^=E`|S3gU$RrOP>9e!!fnteby{$yuB`KkOf38^{ie=4lVt?y5*Pl!j(dTH
zdm&ns_>`>?D(Fq|F=P>6upo^U+q&=*(S-N@QS*o4*Kf7#d!7mq$zSFo${=S|3|6%j
zx+ot*=Lucjr7ck(x9B<GDz|>N_nBZP{LHebZ-)69$&|x*Nwi7dI%zB)mr5`Y&>f!N
z=ix)Cyn-JT!k%|@bVwij4ItjHP>$}&vU|bvW4E1F1vdH(kD>hAGZkP8Lv#L@^7Q~m
z>lswV41>vIs5aD~#N)cVwW4w!NvaWw<t|b?N$L~ROiodWu-s1HIHaYb<UH*Cbfm_H
z=u&iR=M-{mf9&<FZ^`{syIdUUs=jcUh&W(mhH7xB^f;@@iL;N($Ngkt8WMr{ouFg+
z&zpLb+0WPyH6~71v(SDkK^ZpsEN!;=(=PN=l9$>3Kq+s9jzQVQ`1Y*gn2YiAmusd{
zjG~&EqRz&jHd)HQZu?2UZf69Br4@N><nLrASRNAu9`W|1UxzlNIU2B0I+Z+w_gBf)
zr%7L@@fvEekw{XKEG&)3E@{N!S^fR--qvaMU=rGIDIw-1VKx0#b|ahYNA==O1-Iow
zT9S(T>#zG=+8KzFO_q_Y4tcvN;(eEM(#=*hVEn>^aD#?sxz8TqN)epuT0&anr>gZC
zW?rR2O`|Qk%i*IwwvD&{>I<|WGvBQ{`jNX}KbR`aF?ox1<nCoaRCOklrh}EIv&{|o
zw>u$Ii;f14tX`_21QsNxZ&d%1D-u^c0Nba*#6>cJfsowh7S0J&V9*>NF3K-q@Tj#2
zSJMVTm#>{9A0){-w;jv<qsUfxzk@BM=*nxH4(eh?H&~FCl<c1ebi^+9P|EKzuXUK$
zrHsg2uuILb2lL|HO<L3s_MAty&FY6RS|iKpy<1HT8?5g8g!3re1Vm7UFMGeJxhNz;
zQ*6d8fF=p>MmtCqme)Q#)P549GD#Nw;-V+V*_5TPKt{yrgmyFki(oYs4pV}o?0|B+
z?-m~RY}$Xmoq}Ys5A6EOBk)lYmMSDmFhoCy?qz9^tL0aE-5mBjQ;(O5Eb_{Uc#-Et
z+F<;Z;Xe5@m2~jfHlq<s#OlW9{7X3=3-Pd`U$hf@Nr_rz9)EKg3U-i;2PRW}7!S+9
zr7~b2`_Wj>Vf{g&W0ckwQgv8q-nP_J!#%n{b$zOK7a!g9&{@d=mOYVSPw3%yY8FHG
zb0~r#k+xXBsbxQwMEez$5u%aFkAGuji%0!Wal}e8pP*K9GNdoJVx#$nS>LLaSpfH7
z0t~t9SVi2rjS36k;lFaJOGYf|4Nw5B+4Gt850AQ9jm)4lzbZ+t1Ln+^3mRt}#H~8b
zl2r0D&o92GFEChu&2c-U39p6H^N78$8h*N{GUTO{e5pJtEF;JL-mF-prmNq?&br;9
zw-D4<=M5>Hl>u<H=ZJ1ON(Ko0IePp~bo@CO+{(yk+~EPr$86lV<kiL;W%(&4O9+bt
z+gS1Z7A@OB2<Mw|RZj6$NC{;&4QG&lx-Q!hTsN_@mjsT8bpuqBU(!i1fr$cDNbZMj
z<fc<5?nCoU=IJFXoZl3;Zy*?cx0*dfjO$D}z^V}PZBMH>|4Vxvgh8oFeZt^4*AwcX
z6C+U0f4@2UcHs|xOY1VLP{0<@HAwoGS4@DkNElr`a#wYHf=S<4czi-5Z6Yma@;T)Y
zM^IKxmzY8A;UVg<fr)uthMa(yZEy=z-b>PR{)k2ii+K@1!W7@*yqAl7fK8^cubM{-
zh(j7d$<}2~Gmy&4Cx2|a)%dM%<&ep9n~L(=m^WHwpXhuAfu(qG!?JA3Hxg(@j)cC;
zOBH7z*Qy@*ZV};a*SJi9r|*y7<74C!hMUH*HQ#9z@8FJV@c}#+u=<mr8plN%fj&Q(
zQ}JY*3XOmP$=$kRf${s~H&s9XGWY3VjID_13rnUgMgly3%D*O46J529`nLM0tc|_N
zzD%A9M4r{mB_V!DSoh`nnxSKp3?|A;Gwj+#NjudIN#J{-wtX;3hQJyX)t2koqI#3<
zMys>_y>}sXKTi<S?B&n9=*3G5@~9Y$;G@UuCy<Qvs~g{<<sz=gST;0@;NH=Vr;<Po
z7V)?1fJHrG`&;ucD`AVOe`Q2xqvwjHcvLxshp}zY(kMCO(h@DPing-PI;_mezAaKB
zNpLc1?nUI$60+K^x2qA?tABpu{aF^mhTPHRkTt97V@>fh=U?65%0tb6CklkCD6+>L
zweeIdIzch>$frZ=01Df}Rj#v@O|H%)8gDES6|*sqeL*-tjv~$Outa&#?g}h5F!kcH
zUDmpV!=QR*17(yt_>Q0X6PH^I`2J|_K;f`bY`afcm#c0RWO;!c6S~T4d}?t6aRA<$
zY=KrtI=m!?vom=&-Z!p7Sj=2Rwe<X2MJ7hyP(}NkA4+Eae2LRKPw_zQ2-(IMae?$2
zVkwIUNwJ9+@tEc(zoVjk=&gwq7=Gh(j<{lS#$lp1Gknwqfdcahu|A4}VmvOW1(-uB
zI)c_+^Ip4LK=7;6Gf@vSZU)lS%=nrGVNXwF4MF+i4VV!sz!Db~e))rofulxu(v@qR
z5AzS2H_EvNXFh&`H~L8W{6*Xc+sqJ!>zM}}EXA7SOHaRpZ9$uBrJWMefg(V`F7sf=
zVJkPabY3v!YG>rKFcpUtXmGu5JGX9I!#tDig!%X!{mvEdix?qnt>8zwNW$Rr9F^PV
zPjI$IBm_$?LSZ|+sGY7&e2>U97_)*{^91YZwxm5Tynli-!DZLm1A>xzHFv)pcwbIy
z9C?3zaF>l)aS;yu3c0>uC!PPTTjlMe;j!Hf$sx%XYGg`Od*Vlj0KHgEhVwYJ7^Mu`
z`d6BJU7^sYWjODdYL5tD+~Jjd0A1&&jLVDW&&a<CE5$gxgnEHgl^5zw$LlYRuRg~g
zy^M6I^R+`Z51jjfJG$(z2R=eqVSfbTpsbbT6njSfUspOgT41!qIRx{Fv-Syc@boGC
zMYBkHtPtI7E;jlq6(WNV{LE;6L%DdAN^Je_pILx@l4NCtZ*altCWvspG0ym19?zWy
zB`8bmX@BGoluczC=$|<6n5;nM%-V2ic<V#qUN14cgbWf$t{p$I9MD*^Qo?O`>9Ebs
z;o?ZL1>3_Vkh(K+m>CpvT)r^}0~1JDKgCRw3=hvdSdZ;xvXJ*T+Pd<+0k`JC^sfO)
zJ}Q|X$SlMo_+jtszf0NyZ^OJaW4cfdC*J9qpib`~=`OWI;@;VCj6&R*R_ox3K9!&+
z)*oqiA<S5j>qE(SnlF)LU*N-sxUvnBR%b-5tYd)9GA!JtLPvT9gt~ehAyj`41V5{2
z)j44lKvB+63cD<%CCZZ?n!VMV*r)8`vE>1>6KUVgRf(sHY^t-ckJ+$Vac8PM)m>=?
zxPmim;6CYyue^vI`!soG)wZHd+|{0ro(z_R@jnBPyMm#<?u+}BetTo#N>=iHfUqy9
zyyZFzdXDD5(JAbHK^$rSt~NA%<ZA5m@g-{)>i4T{e?LtqXGlm+AlFFTrYgnGE2JmY
zWu?gF_4y~#eq!=CN%2~^04%+xPiX{%KH*One(WThY|}d{A82CnJwd}hXHP+xROemo
z#hp2O+|+es6!!~ZKjjPxWbXvZpO_RM@rYXyPe5*=irfpLRnf4!ANb)>QoU5?$ZKwL
z--OY%>#cLOd+GvLgL{Ed(2IoAR+AH}um;~%3dG^txo$!{EXhULkEi*<#5bRxkfF~g
z&U5V35j+?A_#UXTYVVvI9_85IVLBfP>+W`Djlz*|kB*(@Eq)2?>c@7BZTIXjHOGFN
zyRU*b^MTAjmNYHu(Ia+~Bo1+}553CNhj^H377w3&_9mSg5~q%&xrrgbZ>fv21Z9==
z2#ZH?64#w%LawCT6?Jz9PwdM1`RyDUoya9bV%|Nl5HOsL!u7I<J76jjw$R`)cxB3%
z0p3tu?Psdo;UwoGxvYjBmFM;^kL1|Yb3a~?O{PCARjdT0ZdG(1fQ~#KdVl;@gv<A`
zORXBLN@ffe#+>`gepU+qN}37Z5KyRSF3@>D^!w>rz#Hu|Gv<l|j9U6?gCO~@YshJZ
ze5f-2s`zdQRE-a}pN27av5aErU0fpkT>8-EUU*y_YgzVQKRRg?n04CqH0Y?iLa~6)
z)P+)6OQz}>;k~`&evN*E_H=5DoRhy=!IVHg^aBWzk@`VZBPL^stN~}Ce~qH3)tRZZ
z%)68sPPy~`sP_wl@kGJVMs+t6WBby%2{vLfE}LW~wB^w$GvyZQ^!J{QBNP0A?}5fN
zSvx3+wmlnhNQRf~<B3h24yRO!`{F1<AZ!u|w$fjZepjXWF<a*7jfcWOC3)@sp8H*M
z)Wbx_K)}b7Gjr<f@JnjAu-3cRr#CAuZ__YPFyrq|&qX^#!x5xqcZo75*~or{U<S9?
z9-}|iOaF*JKa{1sib8mM$#Pa!(gl}WX&Sw=-E{-9YGjY`prscmD{Y)ov!iVYB1qe5
zSIAg)?QM&9#g*=`F;j{vt>o*3y^wiWr9Oe&b;ng9`#@2pC!X=iL@h<59a5FR@z<P?
zB#Z<cPg)=uszBZAZ&Q9Uzwc85E}+D4=)DMAG^)1v>umBi<ycepPneaDR&@oJ;;eId
zaTluCiPajaYr!ua4)IdqiIKnjB|ROwhf4L<?I6egl6_Y8KS*f!S~Yyhj_59a{i-B$
z{o@OxA)irddh(nNy1v2>huY4wSc)1>Ov#x`H40=U1Rih@@9R<Om!4vgq}=&n3WdbS
zncYy8vTW$u>o9K|4=8Up!Hy)j)0^mxlo=Naa@c?7ZSl2LLTH=*wgaj^j@TsZ0Nohn
zNbbJ@r2<Rg=HCNWqdk?Jrjye-sa9=!S8!CXOwcaWbo{hhlWy5)>hPSZu1IfD;?ElC
z>s&zV6WmUen_Gc8H|{VYYAJWy!QE>~4}yo;IHVN1j`G=SYjQ-m3M2WM2rO&l2zejz
zw)>v4e2W1~q!-`yP9nU&r(pgt|MY3@N#>vK`LVNvZT%T!&$nU46PbB1q4>)5Br#V(
zoH^W3?D-_9ob<T`v#;b2%|Io2Dw@O<bTqhq7*!SFx4c+2RE!a$(*(-}Fy-lh`H;9I
z|NabvW(&)5sN_L!f;ZO3<mfz9hhG1Fci3&R52ryEn&R<$3aLYtO|Dv7p6bkvIp<_k
z@%8Mo`EsDH+?gq9ClZH#fGgr)hLHLXGc)<aqVbai?Ii|%9r8bUY(9AVt2Uh%eto6~
zm^#c?NVq^zEEO8}XryCUDG)xFz2Zo&c#^P2swn`Gj_nOr%<VM4nKb~Y#q*n-q3n%k
z-4h>oGM5GkR%^Q7C|*8l%p0)HAvt?+l{`DR0;)r9x$W0SLr5|c#bM>*k2fEYOJ4rz
z8&0L95rNpIj`a}l#7jyk{S+g`IvUTW-M;g64!0?I1WL`vAh*7t*&Uj~UqepeW=Y4P
z4?gaR(VYP?G{bT5=Uo7`#}A&}{g!}fA2QdHM!a=<?2S3AJ!9e4erQ+YjJAgqP8_LK
zM~t$b=)53koIC5MRBGuiZK-Nw7$~Y-rXWbF(B%fFQrMpk6$4vJUA9x1%l&7nm`r_?
zdbpH~uoff?(Wk>Jg8=~M+t~W~t-*`P=dJ*vNnp(16#u4^m^o&6MDuoNOeLI@AG#dD
zB;y56Iluq9Xo_V)h<BYJ9}D=^xHST|hQ&UPKS1up6T8utOxb8G1n6?wpNvp=lyhux
zbTx82J~4mnb3B>ZjKAkwd8$bSbK=q?o9CYUQ^UDPT_U~3&lqVK_5JxJ!A`07n?t_c
z*>Hhd#4$RuTEbd~!zd$&iYdg=AUVNn;TaT9U}J@QadG>Agyjc~qiIA?MT+uQ^9S{6
z*<8VIGUb}305!6Ty9U+=#p3$6W<h2i;oG)?{OZer3!a^qkxI&}D-?3rD}z@A7OJ`O
z{Sa)0!-F*osHfAly!zHka&okI$^HF8wwnXOpY#U@_n47Z$K1<~X0ZqTMf#z$4(f$~
z8G`;JjOGc#g|yw5+HSG;$8G4<lIYn@85I@sv~Or8R2WB<$Jp`5rSxvpU3|*`2=bt)
z;5|O8?U>WdZlviv2eNPy6H$-j?vMA^o?-82^papszijlnZnzggfyjDrc3!gV(cshp
zg}wP-<iX2(Te&O(oLSKj9iO2oEq+3uoQ!MkyJmB?e>4yt*7ZCoHTVLxpY{%oLg(;T
zv0CNEH|hsoH*xNJPzR@cqZFO}FubcU|DKXDXDRmx>M<AJ7Bly=0c-=rfuNFQ*l<B;
zSGOEAvq@M6e`9zgz*~u<*($>#CUvBoiXs0{YuJN0sGcd!Df*jy9VT{u{3@mRe$@sf
zg=7Rvjr~}mwpG$juGzif;3XCnPLU149=A<^hdmCl#C)_XfvaQUl0u&Fh`v#?P}E{L
zy%I3eUg~1Vb^9jzy4ES%8L~p#D=d1Oz^E(JX__-AWpo<}>-*eQ+bnHyBMHI=0(e3A
zTs;A)3+AAh+%zq~9({49l7r4$v$CvWb7@L_kz8|em_d3AqnPyl=E5$;LF?X0vueLa
z@xTZ+a}1D=N<I9gg7PDw@23QAcD%2WeOJ-eQc;V(z1=>`z}<yd+*{gU$er_>HY$N!
zED@qq`!l~wZBwuWace<ia8Z{$f&K1d%g3G=$Q)_a3C^bHP-mi1;41Z*XB_A5C~Cz&
z-O<>zM|p8uv}Kwo%_)D{K%An-O%qh|(9KBiv}-BEICe#MCdiiSwHEVv=$@?=(7{(h
z)HPBA1E_^jU_cw(Q`hG6s2{_L*zG(LZOK`iXyIv$5pYdIxg2kd0k{@1G|;$K{mZWo
z0lJ2^rGn7dSg8Hvhl(fpqB`Y>YRwHoHy{q-eEk%zjpfj8>fPRc<8x5?5??N;2_<^s
zy|ebK6UOH^Frz~ShCY$Re9B!*QL?x|#ImOd^1@<v0l>Gu;MVeX(RWps;H@1eo(<Ic
zfeq+no*$6#ZN`)BJWea^Jg(E9&v!s#ZKyY+n*0Tn$TOCoG8R(KsVD)1KRXW*#N42z
zm?LKTeUW4qXpyuV>vps1${c^L(NiqoxBx2)Pay`W1Es}7BUJ&4x=`n;Nh%P61+<pZ
zry3R`b+7c;V~Ib7@v+Lofd+|rBL;ya(u}fUxO)z3ndF+m=|66{y}rw$XCZVI(;jH6
z+p>9{q}TdWzhy91Pwm0Ln?X_l1`BVV#`*K4yN?X|p+_O|9j=d{!!rOfstcpEUl^L!
zmDLP2Cr=OSTk#v~(lo8qHE3njG?vTi%F?t-8vhko@|5ejE1=MomX^|LszE`PH7qyl
zeAk4{%m2<^(HO@di8rsNsGXFsD%4D8Wi?8xR&%;~DviXiUDw`T;m`4c&$-w%VQwSt
zy0CTcy|6B_L3qf|=LD1XE^o&MJ7?1}ac;36JAhBbK)-DZfycYVEgY6mnrBdh6Y+j<
zXJ_KhfHZ+@vj52Lk%p&DAu`jP!GP80lB@YoRBz0hjo-@F|K?UJWo$?PtS5Sj^9hPU
zwqF}v@t*rGkm4zWYCD*Y?0T`u4YE@r_$T{rlbatBqjfVPjKTMwuv1nUag<x094Riv
zqcYdk)cTf{8R)b)WKJgGBIY-=__Am$Vc4^cEVX%}bDVEX2}og~-NLdY(J6*v-^=1e
zvB^N(-a}e=>k*+bDu@cO0y1KKkX}_$_XhGh#9R`@$jeAcRS!7y&bjC*Y>ZJT5`~#O
z@sQOMquJ@v2Zk~9MMfs2yK!N$LP6je#r6XVk(5H3mMmcPQ}4+!WCWg+51r4SKZr@E
z6=!h{)~$q$e@O()UflHP(T8QEsAx9T<JK{T%TihLQc7N%_O%Cp$dD?9Z4EGcB^L+D
z;v?mF#4K{MV?go8N(@Ok_{j$>t({|K+bwS7^I{4&b3XG!Fl|0%GDe)PZQuCEZcI-j
zo5YdqsF-HC3F1+QQix(E@bGtJcrg&HpP2Z2=l9r{SIV~m8S44kL0cF1t$5MCL-@K6
zw{tMq*B;7A-%}}WM&6V)N50c@<NeqLjUY~TmZuc9ZqiiA3^)fhtA(9GB_H3*_x)SE
za00jF`n1eByV<OH=fuZfN=APzSDEg!`=__kdQ$yJT=re^8|}=GfAUQ{OgSRc6N`}y
z5StSkkvW09r@wvuhzIFs6x|>8pVk8P8Q}mM&5|qMhS#kh*<pt@>StH6kyzq#=CJM2
z`7(;8W~Q5Qicmc|7ru|+31?}CFy(|Mb_?b0#cKNo5?Y)i_^G(9!?0uQT8Eo`dy9YO
zPyP!>d?h!Z{U;+j_ghL;yIzELI%g{8>6O)Qjxu)}XJM)Px;j>2vo|r}V-{HU{QwMa
z&#dT1vzqd!(pT6e5vO`632VMRCFtBiP_YQ?Y=EQhU*zBTZ{Iq3s(ET+)TyVw>6J52
z@6zd9{J{;gFT}9b(-AM4P+HlxvQK<6NHbPzv>&-^PdXWwb%5<T4Jp_Za-5!(Vg}0p
zFj<T~4D9C<T<2UXzLmgzTwZu)pd%MK9N<56@+U);_fK~y*a@^Ccc}a;X3$!on3C3S
zM1cQ6UXmc{Sf!~duO4ID{@s_B9!53LcwAFpJ_A}OZK<xAIW|o!AtE|Q2kG8Fk&vJp
z(%XUbL?&@B7(z0f#6EJuma#eeIpJ5_Vdb^EPVaBUb8V*+s6!Lv<KH##bEG#<!Ge0y
zj);e9%K#Jdfq}zW)%$~*)9L%-!sX0$;^`$kwsGDkqd+po0qYb1ra1o^zxiZ_*CMSE
z>-e}n^`Ub3m8}!Um~W&^Q6H<t%Z4J*`+mZab3!{hx%UdsPDN$_QA1jYFRTGkJtD_B
zYXJe9YmdJuw5ZfYW2495MDyy;7m^W7s9E9dulFsfR>p|X(6=F2Y~uALmOIddyz=!M
z^!{^kx!dI;!x(YBLg=V`c)}Loh04C6NQ7Np1)aX#m&PD{0FL@zHQ}iL2ree%{@=eN
z1Yy^tcN3FI?(0?=g1GCde%Lm@{RldjO=;U{M8zo!3;5J%L@yjuq)%Dr=i{zB$5@^_
zpegK2#vHI(vDcL-;m*D&b}`~sH@$uN_pFw6iEEFiv&#YjpxkCH*jR%6;DeaLvLAYM
z^O*mV+@)->?EuxTKm8oV$Key@tH9h{4aUzIDKRQ+{n*Atd2)i*8EiOik1P)V@4}5D
zyV%C!cpmQ<Yjbk$9+o!<hW|oN^oRv2vmDX=C6xW%dr@&K!hPUe#HV$oiEwg$pZNCA
z+|lQnZf+V=wEN$mHF5nSf#%0w6eYF3fskXrfoELX+leCH&uV-LUYCL)ZO`>TX|@MH
z?h7Fpfp`E}lx!2<v5)%=_9rg*W~Hqg==J3;GlVBo*f$tRM&s_`kAJe=rpVt`x<2Sp
zG9{phR=O7Pj)y<)HSl=>tkbdYXD96U?{K2t;T8PvOkn9<)kw|fUAcJtMDhWjRS>{a
zuPni=zrcxZ;8T>*L)SCK0}6xlnDxJfW;hKShSEB6P7w>Me|44=m)VsSMvaMM($>~N
zHl{Q>`;@Aq6VxfFeZF$_UTv9n41K(#HQJO%XqJ)?$>VYUvLzS1op*BQVeB+2sXrwS
zoS19LO&E0oJPj<1=(Q2GhmiiZ5N#DJdP#){O)vBY9hQ%*4cmQe$?Ey0CSd$jkw=?3
zIWmW+T9SBu{^v_@Vg3N?!?8M53TfMr%|)=?v@tZth5}-O@ydd@tiApd3-J-=n~c~z
zleG-3vl@?g!0+7!_UJ3wfA}-#_1^#z1<@lWls^UUZ*)*Z765%|(W7Ko-dUeZY2__J
zQEyLiZ#Xav?NhWn^5OQ;0;GTkDtsTemILSa<wylu0IF@|CV)Oxte@|u1Lx^JK`ujo
zMYv!G_W-Uk0^fQ(^aBWFq0&V7pMAx1*Jeh^7E*DUSd1a}79i@zcB~xP`Ly2{#;>GW
z9G<>^bzZ$aoJ92H=|!T;`K;F@-h};?HSmV!xdf%F>Tqa&IN^Ckd3E^i_eB^|tXuT!
z1W5>d$P@g}8YFlrA;nQPa#AsoxCt3wEg_vsU2*h^){4jkap?tBbu8ynU0Roofz>kj
z@&&SaL}weD8QRs>!uI#hRJ%*>t7qFq0rTjibDnjlxUJ0R!}RCQ7PhTSyUlSODvYkg
z<*G>5B!5F@i&Jxi!C>&)og_28pXx@kwCeM>*ZsCq6@l&5O_ef&n(YsjQ`_#f#^PU|
z5bEAciM){9e<W$05kW@XA|(|b-L&2BzW2T)P%cnepJ?4^&0p-9wuyWt!P}QXSOy>R
z+mE835EYO*FdAr2-rcs{d%mtv^6p=rAhf?kgJVqH5JAuer0jp=bi2{q@C*dD@xd7L
zFITYZ58m5w)@vwlzMrUWi1vqQz9g_`p2W~T-MzuSB%n24@P^knG7uCjF^o^Qax^#_
zHWvw;Pd}9}3I5W8ryq&^<Xkt}G6)$2b$A8k(_J_P3D`!10ITdId4l)^8E*$qK9Nj8
z01Qsux8eA&F}LB)pVhbFyddnEa^4|9P^bd%Z@!(legKj;EpQz(0bn8NK-4#1Y$hmx
z!44hRT-<H+?)~y^H17AWpx|$wI5mji8VeXKS#_g*<3R=^C4EWk$-tmbXnq)tM_v&4
zZ#Thz9;zV*kqYq}(LPgxpnTx$nTkA-o*-O1z;z{yf&@Y=TYwc(eK~`;Hu$vd9vSi_
zVMb`i1zqcds{qmm*Oa)sm)W@Es_0ie1PO#S<T-zqbSBOv*VEJezliF{8&a3))!zH2
z3sCh%ri!%pm7Ny1M|cW4W9yF!{}r!J`p!qBlQZrxI}g8F$)D+vz^C`3bPd}VjHfmO
z#f*(TVSP9toNwnQ8Ib?n>~x!(@3JR)OO@v_gWGdGAZknr4dR^;jh9j7H3!XuW!MxY
zwEm3$SlM?I=8l%>-LI427_;LJ?u8{M`ab6oQlNS{odiuNL+yMbn&5NISMRJ4kd?C}
z`7tnEC3r^mNh1VsE#dma$YFwgQ311mxC-f?7XqdM_@}ypKK<syHG*Ja<A&bHXm>V3
zUHvk^npa6*!F3s+S2XgOC@?3Qh}hiWKfM^oT{p8N$MrJ1FKZhucaJC6dmMT%uXo%x
zuuSjX9{eq{`JdP_0#(*`v-*}mzLUbPd7nG}o!qYZy&T8t52pKOPyyV#<gaM7t_xwH
z_N?1<{@dKIXe1yQsUHmmxUK{G2r#K03+wML?KKATAD_K>WT;({>N8Eh%7xJJr6~h*
z_ELGP-9cR_fB4;sSHSGtA1Q?DTUQgM0BG9|^2tesDd3`<pd>H{;9lC~t>?cNTk6ob
z5YUMVY0MaYX4iQ%O9Hv|<pr7PofJekZ7hiPPHJDmJGptg4T#(;I@!Fz?Q1^j#B(q;
zcN7HXFjZV%cFFjA=pCSg9pGc6M)D3Q89E?n|IcI9?zde;^B-H~dM|MQNSYUAfHJQE
z@!*D{zrghvY3iT#*?!l1(O$Kqy*~XE0<bK#dB4FQtEvxbh9`dreKJn3wXXuE18(=u
zd6?b}*WJG!&w=H7=rP>s|JO{;IiK_8pHSX^ya~C!?=O-Za|m_K$N&@2j<wrg0Mod-
z2k%~>kkKVS!3`PUH!d_iEOV#->~hRe0D)VnBfNd9>&(~3G1m3<6z1i4z`><7`5n$<
zZoy%3$Mn<B=P8Z5ZA=E3!^JeR0c;<f3+m@Xdh@@#iro{Y=SGbvH_qRFStYu0`F!IW
zFxz^we@p6?J0U>_cxHA?>X}#9FRh$?t|aD-(OENI`cc8#^hW171i@et|5FK`XureB
z?ejsb9-cG2&5DvH;zR-~kl_*A`OyoZdGidfzfmTUJl;Omq||fxZl|{v0FS$J9r45+
zG|}Eh!^A}AMVWZ2c=PK8D@d-xOKfLTF~d7i{UphqXD@i5fzUKi8Q}<nkpg_2qfm3I
zJ>QOT;46YYCBKAQ90=L4IeXAB@c#z_NL0{};7+t>-1*h((u)$9j?jcDDAAk^GVL`E
zq(Ybi2TYWD^?shvj(YfnW2D)I!kNugQ&|7d5pE8v3APv5X{uv>T@|}-!rT;HmU;TJ
z(-){<a&Rk(NJ9RWG2N+Vj9|#I<8Q_GtghG`UURfM?uhKou)~F5Hz|GOjQ6pI(B!cX
zWZH2-dtvMy5;#`Cyd+y!cv9|Ng5SE?1_p!P>0eN_)CXEwS>(ImE{_+Vn0NGg%@Kei
zZZRN^UVW27V~)9E1xA|6)HVZIDOF9=mF74HY;0V06*WDTUs_E$mp5@Ml@|h5LvPsy
zXXloevtuZO8K%5rcZsat&Z%csoPD*Und1@M<8Cz4f7r5E_o6B8h*|cM&(9ze^#jx2
zK##obn|Jq#J__RnLt8^2kJ72TLNZy8HvyRHqu|HHkAHZS0+S?r&cs)A?{E0=#76<w
zTE_Q@o$VV1TxXk?$84WyTxoY^nf+gZ`X^{xR<nC=6naDA`X|L3NBaQ%;;pZvnLc6C
zpY<D*@8ljI5?&;c`TYIHN33j%1Dt_f>*Q6<tg+{|mNp7^+a3(<S}7J-e7U+r@S?I#
zu6enYY*K5+%ON3Ab7wQ-u+xT0MCttkf(YiyoZ1rCW1g$4vw7F6!?K=QP@%*P7Uo<<
zM&=n`tJ010bH@*KCmMi~LmVmGvcPJDlzZy&zx)eneApYIeALV1)P=wBww)8wIbXne
z;Af&!O<gv0J%Bgc9}$hHy46k8mw!VJ|83}^L@F{|Min4+TPbSe-G{YI@cwSRtpPOe
z1g^&W9DTveO`h@wFDVW=L#e_i8j>7)*5f{0!LgW^pH}=x`saRk`@`(P;*xlrl}p{{
zo$$~$A_AG^pxUhM&ih4vh%QX?<!{eXaBT#N3R9wxc%_;rRor<;gHT>&en${WU{Rwr
zb}Svsw5;FCSKl`1OClpH>MuXa<O&}ZHKa<6vRGM~nSzP(zN&?<3XDrMqkB*}APkh8
z_E-_JNgTlbeti;O{CgRdg`j#a-bw_Biu`)2R!MTDrW2pa!tmJV<c^RQw0SKdklXRE
zG#|-CF_P4&7+LR#<}zO9TRLo{r@?qZsjLe5Z-ul?1V?l>oD&}E-IbKvpWiJg+KRKs
zG*U;n!|~?+K<>AB_^N4fxc;-h<qUIBbG#%lxWUiPOtxxV?TLZQVZ1vee1Kf9viKd;
z8SXjcFa+)lPD9o<{_-!i`V8<;n0ZS96G$cuUtcQ;a?@`6zmI8QJunH+HIHL3{vC^0
zr~b{*TyaAa(%<*P-XVEzD!zhR!SpA=B6FHIlfJmK%Rdnnb7e=EvYN8obRu{f5mz`D
z(4x!z8T#7(vbap@uiH&_x6s<!<3c3<ia|s~9qwKRzw8oB;aX%NSeOsqZYW49_`of=
zAhr`+784LbXDn?fZ$ThhCpP%Fv|W?oclDi5XXy+}3e?L0)z`K--N=JXlt{XsM)I+8
zTpboq33nOov${c~p+wb0E>6AW&6N$c&6IN*yYo|9hVoZd*%4w`vgkMqXNtX1TB-!O
zGWv4rwvg+{wR{bqO4`pa(&E+bZ2Uq+C;C1yed)Xy(;TgxN{b=Cdd~9NVLWC>533ww
zVA2ajj@4vKY<xDxA9=G9_SUK`Wgi--w6#~S?ZdE9bQO<STk`VD+xkNXEpaSHRQ|!4
z!YccwsAzS~D@f#3{!&WHM#Kz-*7WOc+{(FB>+a2dqA<m*kr1K|QHp6O>?mfbZ6(c3
zv)<Hrs~k;(`UEjuDXVHeKiy9TN+lfI{x%eUrk|fpJ=W;tUs<2HS($rkm#mw<Vd@`I
zglpGJZgQ^hBc6r?PM`?&iSi8mq?=z@kNe44VLKTS%_@xlbhQ10s<PlESxHo}mQj4k
zFEKcLB;vd;_1NzzG)B1w0}h#%+lTOPc^2mmg6g-dkSRJKM-5-~RtYvr=|IkS7&byB
zX%*bRmy?)=t$utaok|fLBn`~#$<m3%neRV)=z3Ea*d}9c+U@T}*b5_0(xnIQC%7p=
zu*ihBK^6*mCNLa=@o+@>qz6HUt63rL<19>u<L0Q#=>MXKZbiZ@XH&9Qh{wq{+sN>M
zDUIx~ph0VP_&R^j49AhbJp>A)+I<U7#T4^`&_%Ymk$sNWMZ{x)1o6QNK|jN%<{G+H
zuMqBi5*+Cgw66Byn$#C*1Wqw0Q)xtCCPruku~|)jV)wGXRTwm~o0f3Z(Cl{F-&;4q
zH>(C$+dG#>8K9qX%AK#=OPs^i)u~)L(71}E;n^m!VH{%NR*AgO<K|w@)n^*kb#!Md
zHN9*w9poz-JKG*CQs9g$Cu%1vN_N8&6{GnpEodiq=Nm8%S^9c95k}|-&F3<QF$sCy
z{Z@sCsnzHilog&Y5gYZ6DOgO*=gf^NRLYyDysKpw*(e5_YUQsJ$Y)s^NGj|IHG5>e
zVr{#4Tr>>Kdwg&BNuouj@||JLeKMaKjoTR8JmdR8_|EAnk9?#3*=6PqZH3FULFYfG
z+1tuY<D2)W_-eM*m>b3BR!TB$*SW|^HM@?Pm04KA>|H6VR1T*S(?bBka0!M$R^m-b
z2a?=63oA2y7i-EjH*Y2p1;x!q43D@3F&FHJz^QoF_j;_^_q(MhgPGCRp<L!?Vly;K
zwN%FEj{#a0=pv-@?*_Rs&jbM$?+MV6qi78|UHSet{M$+&f*Tm4Xk4(76zy1-Hwxe*
z@~HkK3`dK$3VZoKz$6ZHhlyvgrk4E>Tp(2xt!y*DsgFFNeLG3W?`6<9)Yru8NK_pL
zC|eW*if8}8eITVFRxmjGhw<OG_)v+fR7DsI8kx|Tyz0l;+~veqIv7b8Mh|5ik-BG9
zC8s!{qQpZ~oh^l>it24wPX6nj(O9eF4683`_@V122q^KVP<--g4tLovTp~-SMsgds
zzK=7MS0F#UjEh4LOJJ$gO)w^=4z(qCcZ)2G$V<7re1C>1tHl}`M#a8?Q@Ky*AD7~g
zMI~>M{+A;D2AgOOQx6(+Ka&tTPgb(l0c9;}Uk_vRXHqZ*gHi-L#c}w0&*F9V71ize
z=fzR;Ysy5d1su5tmJ6g2N--i04;`VG(CQSCc*63rjP}L8o@#$LZFocy>4_(tFh<GL
zuq$j&*k^&;8)k`up2Xw1@gft2#vK}|edD<d5lVC>#g?6F$ap+VH}^nY7q=2Uajbzm
z;3ifgvTiTM$aKX%un~_k1pWBMg#Qd3fJv=^-lOVxV-&R#7JF-C@!)_VX?0G}5;UZ9
z#+jPWa39%Dz;Q*MT1-vqP1fYVaHrBqAGkx5q3L$SzdI%ro>?+LEE!%g5?j)qv05_e
zLCJxG<Al0nSSN814NKPA&576It}@I^?8J#M>S5WZ!h)0`Z6`;mm8Sl$v5B=M##myx
z>i*&6?*hv9=~#J*d^in0q3L-;ICZO-R2utCIE{yjHBbB!sIajSnpJ6IW@fM%UV~2P
z{5ala(OyDfTFjIz+9G_ef24ch7=%WdF88{~vEG~A5IJJ)-*$WtgEygU&ZD3=RQj*F
zU~y6w&2q2Hu+{w+nq_ea^?eR1BCa9#hzV7Y@Co_sXIgJzD@Ka!O^vNGi>WYaQTq9u
zSTW{9%(_G6Abyl%cx=5;q<0DI<vnve;-&2X(T$nqKJe_$vwlZqQ(AYb7iU&Xuckfn
zY{prI>vLj$GyKnOSG+%_^X-<%1W-wKfuLrq5V{X%;zcz;Hfeq0b<ZUI4PwatlaYR3
zSWsqztHSVxR1o)47|nVW!pa!pLtPK-TdIr2a-JIMWIlIjZ-nPBTVlMa!Y_;;h<I$y
zU(w>xoGQMVPQ)?D3idt*{_8W|p)Z{4_+|ZNQt{^8LWX#!xwOx%77qRzzq!Jk2U!L*
z^4$1gBmldXwnlpB@kg|&v(;z1PJS@|-VnK?J`3fT9meE^(kpMa*Cg^)9n|aoDy)Ub
zbwXZYv$Z&6XCSU1Z0k{{ba!-B#Ig&E+YW*Xg-Wi5T>7V98|Se0w<iFfH<h~I2CK7@
zkF(|n+vlm)mrg|iiue&y5uG3h)J2}^t%Bw)J-}C;J9^B1+#H7PRmE2QgVQL?R{kYf
zrY`wJ^U-JD8vpbgtp&2p)xp0YLbl#^<yrzxwzHZ^!z<=<`cgCM50niXLFgZ7b7)A1
zaG{?U$S|on*eDlrZoiI_Q%a75P2s#(lrcLqz@Lgz1pUMLKqhZXf67QN3tJv+%FI%P
zmJ3}D4)oqegF~*6!}I{}?xT~t*SgG)FPAF48U9p>b4-zQSJh(0dc5*oU8mWy8a(eG
zq0O8ty%s7<Cum}~4Ra(Rl`4Zl2f1@3$Hk(oB+<60Se+$Xqd9$)^xh@IJ2c_iHcQHl
zk;T>o3M-SK1n>{k4QfY^QKRw;kI}~KgbUgKp<f^?thfjqOryt+DdRk@R25sdWz~TZ
zE`R^Ka{OJvlw3^jvI?e~yN$Y}{m)n98BOrRi=+GKuF~3lg3TVy+W)86CF%cA^r?Ny
zIIq@VL+1ER`6iSb+4xPtETRbi(c%81g9&^a|3{x1Cd{gHn{g2^<s187o<8M9T?HQY
z{!eiQO~N`|u+29K7wifv@{bAsYr3gbD*$X_)ZrdhBez6HoaXDB9#{s~l$+UZ^T|L}
zF}=K^{-pA|%G>I%&ZxxXLz?i%2byT=vX?fu&mo@+vloU6$S#c8I!GRV(`!GRJ69EZ
zLE8qbE(I=<UD5sd!#$Ys-@4z6i|GX?l~F{zSXIy%ZdmzE9Y#&c!k$!ug-`@Kak5W-
z{R{$BB7MW|wKpe1H37S3N@ewgpp<>VZ$LllSV=bPhGHOfr8NaIk!~*-%ERlt=>zt%
z_tsw@ln@jx2_7V#F?5;Gmy~h-XN=&^s)KiAuxH<O8|}@T4g0h6?4_sZsp@kkNB<Vx
zBdef^1q!)N5Ko!I3~kP;3sEeH_<fQ07QJd7ecU(`-6cXMzp!EGR<DlI9Zt528Y&{F
zCY~ThsGy1bzrzFnZwmz;U#`h=T1X?Nz`z|nx~5|Kzn~D}qi`chH-Sl9QQaPg45q(I
z8mgp?#4H&`*1rk^RYE10oml+O;518u2nRgxzmo*g`nG{e+L^$eianZIK{MgTE{=at
z8ON9q0jw*-&V;&~q^MtGa|_##_DqB}CCh?`B+)$UL_T4!vK?HBh&`CHfJiRYyxXe0
z_DaY%)|BRu`EZ>1@A5eY;yu*;;m_J-3GV1if?-mGl`I~k=tHfcBX4-60tX>`G~qPT
zKkw)orj?)VZikgow=-`;n4lOYgF`?CUQ7y_I`TFu<^u8fNa0`yl_)*u_rQ}(Oo49;
zUSbD+QzZqP6k0Ps<r8rCz`4n12gF{|g#Y*GCp*ci(-51Xi2b9WNndoV6nh}6)3-IY
zl+<m`Z`yT_e(EDd4fYkjyO?$cr9A9qmwVg8_wq2dt*unPCeO>Wbtf}l2v{>;!*5~+
zDtY-zRzivz#FK~)Ls}Ea%w*r0zoYdSghrPHSovQ6OVgl1`pOcnnzmZGT}P{yXGQCf
zUxi*?XSf$o5^^nUgudJRu(Y*wbl2h}bplAG<t}bh0KT6IZk;&2xB@pW)?(|HjNj9>
z+z`CjtlGx&TQmHV>9~G9R6#vF9GqYD3JTMWcSCQ)XXofeQQu%Tg!TRF#G~;n=c%BJ
zBs0D8K;=><gSTT)VSVi-GqodnRH&R0ZO(FNQsUtBP0o*XEKExqkOZi^6r6h}XM$V#
z^5W;pT5ojLO=fJJE7x1b`$qGp+QXxk<EIUwHO1rB<mJ}R?a!6ACGZil2hu^4TBTS~
z`$_|lwIwh&z9ARPrYIy1kB>RyU+K*=Dn3uA_c;>pUy{RiO`nI$+)E6(&xZ#NU2bb7
z_{2Flp^(`sb5QOdtiLKY1)waf$10Rq?%I9%2Ddm$D(GTi<wuObnd*vi_+i27`i2pV
zQ`2IP0QI&O-lC3}Vy#z}^1AQ4UNl*0P%nG1=P>E9DaUDLMH-u^*UgM=9fj?cQKiq|
zCvWj;m+qv?TI@a4OJC!jwYTRo-lo<qd&}6{u0VO4rcO@(C4C1;aJT<eXe<7?h^#k4
z5)rCF<`4#E$#)`<j%HiG{*oSQ!&e<(#FmUMn3{}m=R5HSo5X=8%^=s;_7-tE6)sl<
zQQA~_8i{iVaC`mbaNgDX06qP-hr0N>1I2*pJt3Os0>)3Ot>@(+M_)WO4}x?Zfpf^7
zxWLt5Qg5TdHANDYPE(@H6iap*jdQ8cYh6`pvu5KDvIebXYmUk>3|hd%^;Z%=B{`hW
zuyK0;X_9{rW6hE#D};HoasGKjbojCWK48Uuu*Dze{WG9Kp`mrKT81ydRs)2+fL_Yg
zRg6@=3<(r<hQ`JzC#yr-dBs~m7sWh<f1A-mWyp^z$zVp2)5-np1FBNVhMVdUmPGw*
zI}K@AW|D=wEFrVj;c*tzf;L@1=M+?vG&$U@v(%R)Qko{3Q4H5b<d{X_vkWb(w-;T}
zU{K;P?Lr~gEhxjj%3fk|l|Q0bw!K*^>S11&FAq8juDXfg(XIZR8o0il5TtF*Tu{sW
zew1eQa`%}UfN&r_Nz3HeBPQt{i$s$!dNq4+V2LU*8Xl~BAbwEC=IoeDv5WHKAc$+R
zX%uOTmdUHU9aMSK8=Zg~Bmk9wyD0|vrT0{I)9adu+X*frIta>E(R)&J-=P8{=<;g8
zsV@KY*nF@HbS~{#?z=<7R;93Xz2CUsnj7q3r3pIssL*c&+COHVXuSE$Zw5%hDJT4H
z+)a)8g7aq<axTPB*u0^+A?92EMo^KS!@zubV4bj1kU4jMabN_>YzTm$aB`;4Czfp5
z>135Zc-p6@DCNuxc_Nk9W{|M8?pu&}_d;%9jHvn~=7Jk9aM0AceGt|GH%0hc%Ic=P
zQ~(dV`67if?s%(L);A-3)5|~7RfUvke6FYMCq^X<1|B-khN0Xn>q1LSmGW3ogRY}r
zw`uHB0XDGpq;5DDPSD?WC!+ZIkBFjasW+`X6MAng+8>oDU@MdhSbR;T`B~PGop)Un
zcx^wd9Y()JbM>?gV1ffwOnIaLhveR4QZGsIcbFLkBu;TbBt;5+QZFo2C))C}3XX9;
z{{L4pHGA+s`1h3fJ7aL6eVos@6pq>G&Hpg|2PFDGs$Ts6V*is3mPN%awY5Y34o!8E
z4CVqOjpgf@)e;ZL|C0sga^;o+^!$g^Y?k_esbF)i{#QL13;Ij@zk<N*onZ42<Y%LK
zMntSsdH%bQ>I(c{DPZ%!QldPH%ggwwQuQslM?%#)Zao%Gct+k_L94D8ND~KSRIQ^#
zS0D{>{VxcDH}|yon-{EJ``W5vhlSW`fXe@er*8nyE7;m??4+@6x3O*8Y;4<ZY^$-8
zCXH=0wynl$^hI~~od2HZ+1Xfn-&t$sOR_VW`D|paR?0vuO%m_i_l%85e9|r5r6^%`
zp8fYTo;QgLo&d|Z?*Wumr5GHsRcsd7JKNWajhpTtDy{G%#G@;8G4mFgMVVl}6zLC#
zC~a&<<btxQHeZRge|(GT_#V&qn?X$HO(-9Vf7RL7bao>eBc_-G4qcC;sRmx3DXAAr
zG(EaF$+}_(@vmuQi%ivY{f?gfCv03EJ&NtvFMZ<9FJWoX6Nx+|jd};l-wH&>#k<Wt
z^V+}cky)S-<F)Hi>{?T0G$~owHp7Nd&n^>Jx~H8XgmksdJ0}_sinBcJ`Ll78g-^;+
zY(MSUW#~=!=~-GyRf>N##Y?(0MB23p?SdwTcb7Q(WhB$hfP|}qcS~k384-T4Cz>8^
zia4|=CfW^)8u~rFIQZFJA)MCBhc_UGEA9YhCSj;VOmuBYOP-BlRb!|uGQ9W{Gui<S
zNxCm>59NC}-!~a66m&KU_g*Q{a3-YZa56j=SF2H41_zQE?19Km%<~;>R}bZ%E<tgy
z=9usBNVrs%Qn%=+RF<RYUZ>Q-@d$@jqcbI>V}5PM-I(m&!Qrp1LQMK1qAJ--VU%nX
z;+&!XIfsXbzZMD+29mF!^3Y{m<fnxrG9rz=>HL{VINw<4m;F?ur1(sUH;Jt%DjJ>^
zPT_!UEGGIcoDh%^QI;MuG?SEoQH#U~CvB{6Mny`yK_EdvQQyI6EGMEevLG|P?H#@$
z!M^edHnWi@pbkBFB)ZImWUS|<u=ckx3G<#L!<Z0}sb)YHj+FDg&h6{J@0ZitQ+eB{
z!-y2%tca%fb@!RK?xIMytXunmyLbzyo<2LAN4~qzyZm4d`-8KMB6SK?r)2N({GcD1
zv!NxS8YAmq1%wFRyt_2Z(Gl)jOaqZdk`ms8SCZGBiH1-%>97vWKlF>1PhfM(&QfqT
zBH*kom!7Wr#oIkJ21b90b7YZ}g{xwGpSbzNS%I!!$rEq?!}>`)bYN>}JPgm3?}rD0
zgE8HL5$3mv6b2;Lh0pEu7uviv<DF(|`VnKTBpsGbMBB?*d>~O&)_tv|ok{pGIIUth
zl;z2X?pjOprnti!DrsrgzNXU{h6X%q5%!O@Z=dqHlMFA<eA4xq7sM*H)!k_o5;9yh
zr7qYGH2lDuo&qlX^4c_4RII`%k%pwLlvuP;?Sqv+qpOTOtpM+ojcu?HR{jdK3(`3{
z>X>*;e>-p3xJiE`?CtwC`JFxkFFYE_lQSu(>I~}yMxcSCQBpT#kU_c?DZ!~8I_<7r
zxW_oc_>p91X_CrAGS?VnkCK5%5TtrqtN&eJbx7*xvm>qAt8UP<GMxHXaQY~j@<cZT
z&FCo5whpD5pK2_$s(Cn-EriW*ZD$Y4<SwO{U+>aO%wW06N><*P!;CQ7;eCsVlBN~?
zqK5+E(dVDJOqZ1%!X{xCenjd*f--p8Z<EmCe54pe5W1E6+kz$A(gTge15BMm?JMCP
zi8b5e?(34fYXv`F%tH!fX>wF_2wg1h9bDjM2~<7ts;xTIg*q7bD#TC~DWjfl)|<ai
z%S36h(=Y|MgK>RYYTk6;LDe0;QyZ3}+xMa?nry_HV!^rwUz>-D^*3cR)1RRFgLe@O
ztm(F@H(#5N#uq*FC;hhBwDmm~htLi68d*!pojC)Ap|u4a>Ds*zY|GE$GiUE{ND`9c
zm=RsSHLM_v&5Au*2trmixPUCiwRT~kiRB1JAi7U3Ps}qCwvZt8A3>XCdo;)B=I!qf
z_vq+_(ZoGo)wIIBe#3`?U2TypK0k9P!}jG0Faw9_K@O{Yp!<5?@fFMDXVUVZZk2>J
z4dS*|m2VSiq@nbn$ODE?2xFJa*RlvTP)}3n4Olz6g2*FqjvQU!FCB18+=0z*^K5kk
zVQ?!;Mf-ZD*mD*w5*+?PQ}eS%rP8%JV#}~$E<bYD*-D$zX0o(<zF_x?H0nw%nB$xh
z*bhZ;pf7Q6M6GjjDj#{xsEh9)t&M}(oyD2C{35cAq|P4Oy1Zf9aG3uw(x0p7_`w+*
zJ>N*)nul{&0I~dT-VELw-1MU5syj2De9^&YZTLDEm^$`w0mT-7@e(e8%pGDYpR)tr
zjcVWlHcj5E$=DqDp_gsD4HL08CFil|0j3q|ZV_9?FvlyK^*B}THL`n*@wcxvX1-nb
z6Jz#LmwLV&cXFSCYoC99cs$nmGo*&A?oY1iO}ovu0kWUXPLGzOw)6IvHzi@OT%ube
z3B(qoigxyMSS!HJuA^*Pyu0tHMx^a{lQkgE@AAh~9X|uWi}Y4vAzyyOa!d_-_ynsW
zW(Q39!u5(Yz2r|$upD6HIuncfCz5VAv_-mnQ14vxUm_Slv&BCaJbxE>4ra`^c0wgC
zRy2%Wp&tA|BORuM*a+_q@Lc#fO+Pf=^!lpQVLbMRb?rlG_iC^(e9+&c*na&ISn75c
zw>66O%c4U_uplwzq}4q&kf$e3s4W_|m-bWin0?$WZ}f#JTyyeQ4hRVp+Ot0foTvNN
z9Cvlct$}k}DV_7q)Ti>}gwZ<4nyTN0H2xUm3$Gnw{+NP5U})?hJjU5^jQ@0L95}!a
z{zL;ouv@?lx$&ALfGZewi#_dz8h7(>L=YMQw|mErwJXWgFXJ?fOCCByT+5}JCwRSk
z#djCxaC3j_M7}rZpqFuI%=Ai<^3=q)YevW-C?=ji@N)(t8%2|Pq$mI6)g&Mi^W^Uz
zaz}IWcE@x{Lp3|KGp5@6?n(I-nrJ@%O^zB?2xzVIE?f93>juvbepZqb!!^^Hdru!a
zSbd$h^lr7d1!$@X+=}g}uFbFg0k^$m1NXD?vPX>l9q6!E&9F#?J~F6&%p##sIEcFB
zi0F>uu|rSNnAlz85V{mhU2y@LlHykfg}ivn-vVXq9oX(LfIU0StONOmg<NH;`!H#Z
zS|sz=WQQ`B7bS^L8xvs8A9wu9g!JYKKOX)B<10UZToJ{&tGp5CY%F-%afaXAs(tuS
zbF$m%we?TrJfWaml!6TTo5n~ymj;|<DnC^HkQ@RLr@fXxA5cL&dxiTpIgMzJ5$5Y!
zZ#t>+$9|^HLfxbN`YP0wORlVJ_euB1gQVvMO_&6<v9qZXdMx5X)%{c}D?J6Cdc@{v
zOhV7_D7w9=@F+P>B;hxVicr3=#8`V%g;6|0jpL3)E7B2@13pd`=7S*~2E|iYI_DDP
zxTw2@RZ3M$aZJlLi84)f*G)y6_X<hZ&u`HkDO7kvxXbg%&h;MgZLhr$zR%2hXPDnq
z8a)v(9yl|Gft_aBT~(ZEv2g}ZV7T;m_n3<IY60~OLX1>AOZqz#i&?r~kkfKPD)g`*
ztqvqKQ^P0QMUGae;y)@KVu*5vB4z}RNJ9w2y#&ovM2_=FkHCcLnm+kxtmGgYuC_@I
zJSJvzu*91~F&xpyGRTr~c1gp;D%;%Z3R%hh_B2qmbYqG(f<$hICEMj4RZj)VEYuVa
z<7wwQj9!A)sCd(56!YioYi0BoB?6?Pdz?pY1Kjc2@_V;bx>kj$sJb6Y&z~n6GlE>S
zpyha<t6Yq_*BY}tt)n=H<h*GYTg9Rf>f9jQIYOs$C0$^=e~OH418bX`mo8czlAidj
z(B7$tC?Rq3*6`-(cGUdnGG|nQ+7&qMC@FyN&(Bion7ayF%+Pb8N+wP{<#y$SRI5-T
z${hCd3{0mDRxZV{d6y8_LdTp>peZeg<I!ufFL`*IO;yH?s1I2k4R7oIvbxgDQ8Unq
zxtf>qli#Yyrh~HCFGv6#W#3UJCz{8r7inl}KWyn7+|F?MO~!-xBTA%#4Xy;*CxEvg
zWjT_LGKa8f^UEZa)O-nu``dkkJ>u4je+=W2y1GPrW#;Fz%1&8}$xKo@s)7f@?y4RV
zI@jhFI;x5(8%+ki294s|qtrjKsPLMu14x^^v-K*Za;%L-QTxhpDV0Z0-w1kOhd3(m
z6w2A*@Rt~Lmj`G=xJvq@znUQ)g_=|MVX%av>&OMaD;dc)5p3fPWsgTVyJkmn%rUn6
zb5TzepJ6i@bo;YYFFL9C<czG~_1zFwx*YbiuSqWLEj+eNh@~<uR%E+r*Ga_&i&B|a
z*pHZjs!UU*Vt2#!CgpvarDFHX-6~xcqwMKLWMg4icW{0wXY0n3bz`1sKI~20k6d@h
zwLz&JBOTch9A-fFe35QBq1cyrx=6`FyuuB8z2LzLm>=do4`BQ1$1{A?^|><8e^F&E
z1j{|2Qib&g()XLP@?TDQT;lw{m5+BAoTAnSlvTy7sU-P_l(#=uX#ZRlS}!Y-0{^nU
z(Ee=#jPO@CwkFKN!*M@c$hpOjo&rHJ5rVEpwbS&>gnoGxrxCH&bi#Q3;L*)gJd*qI
zC|`#Aah(do_cbu5idUfuLyg8nhvQ@wL3~3DC|CF*^clt{S7Zml;RMO=YYU3Exn2DD
zZfgR!h>2HvHkV_p2iSDmZQiL5ITmAJQSEam#R&l;$6rD?;i{(ZW$CMps8mk|H4x(S
z9$t5izef_^Zs_HpzpxiaWHK#RlC!H(s)(wSv8zc~XZV!NrNU@n-cg`b!)wge*?9R{
zR*Jxvy#Bf_i_OxsR7k8{;oytDylfEsgKuangOULAyFMrt23ms%N6X8|=rBl_0)?Ty
zkR}P{cO-C&sOM$0^BV#}lL&|FwwHt!1+kj%i_|DYwB)#S$3YrNnmd%n8m5+&W<r8H
zRDEL=j^e6}REKH6XW0&L`V07^xmps^3wSi(lID)-@!NTh>G|!SQ+*@-_Y!?!ku}W0
zx!S$RhB>A-^8XWX{nzK4mFA7Ry22V|J|b{y1Q;{?Cs1#qI~1IirV`oz!lI-lyN_7}
zYpr1duK%Y7@S~C|e@k^7ApC#7)c^SXQ+Q~kCc2O1!vG{`fM{`z^ionCfWJDQio_bF
zAp(H+FFb*j=8jB9aOi)iwtZ0rxCESiK=r62JHe1U`(@4p8Q}He?NaWx;VstMqTJrM
zr*+6v^?D*1>$r{Ulzs1xhm<HiOCq+7ZB&+w=|m7dLQYHe7>kGfqs+A8APPa;!C&xt
zU_^1%uW#=1>zCP~{_S0-6Rg)c%^1looLBm55xaHN6MC2{yA&Z=57!@!-KX>F8gK>h
z-xs54qa6o^t&Pm<Ud>rvfrlR#DGNFhkd%tes$UE0>5O11GL^c1Ojn+{Kpf~dfUCI*
zs?3OA6n=|LmxE}kQ%M|@l_BXB$CU0Y-kY<oxJF#S7e=rSelXf)PL=93B}z&490R{5
zRIVSC1rcPbsCAHailH%!ccX$^ZCa;#ZJ4od*yofeC<{{9>_AF)s!`k|h2Uzk_Em>*
zYVW1?CI6|3MyspmDfE8+Bn^NZI4TvZB)Xw`W-(Ap4)!XSkx4qQu#tMlS04lO4oiZn
zRMaf>;-K3T*&!QH)9~M5PCMUWgbC@3?vnO}aC6gb!zW`#PCE~tV1<kCk}R|kjgb*D
zKZ(+j!)|Dyw;6SpCGTn#-?ba$s|zYi0d}jOvtZ5%)+tPV;MX;F+i+f?HS2JuhIiTs
zY~h^L@x6%nlz-H{wg#je4;0hAlf`*+dVy_di}NsJo^&3BK~wnCp#O>9%>+N|Wbwq>
z{I=lrhHF#sIu9rjACy`AXnCWR>ZfE4%+F0Cl)ILh0h)KhfOlJ<Uk3h>u5!@`53?8L
z`Tba(es20JjQu>%;c2HSj3g(G<}qd6-X96KnuHJ7kE3Xn+~vF0IK@&egJVkUkV2v|
z{MHavjblom>Ht*)<{-IQ_skbJ?!~&)MX#~5z%D;-pN^l3O2v1;+Kt73_o!)9ylIR6
zCMC|v;+CYxq!=u~iqytR6jlFCI%^HEjwR~9kLif5tOqlQ9yAyt695To*QdWS@p>8l
z-O#O1-vHi3tHiCyZXwnE?7@HB{{3zsd)X5kQgiUWzWT<C5#4pr1)YKFv&a`Selx!L
z1$M~bV|r`xleV({N%vLPdfoQ)#q!l;c0T7FPOrGIzd0n#kRT)kEV#LzvQl5Bv3a0!
zM&<<Kk5I^EmBO@0Id^o`+9sXyss<hFy-Fp#baf7UZQZKULA49(m_57@L7P28yA8Li
zw&RV<Zig($6mivYHQ~puH&BktbeCVp%b8Dia%Z-18y=Pq<&|+u#ampg#;t+7ra@P6
z?@V)U`ocHnjCGrau+brdv5SdW#fLH86`KVnZzaX8vA6-VL1d-}nM<SeV=D=KJv+)P
z6K$Qr6eetu^c4%0i|sMurAu0+QDSiDzdyF!huWJyQ&t6$jqbn)>=!S(Re>1OHbaQ}
z&!7(k*qAx^s>DbGYENr+&F<F^9ZoIwGg0Q6iv{$Ft3V#Do7OAx_s5ChXntI|?YUS)
zQlh#geTN!aYZ&tHlNw(>nKULQC}6z@5>!H}miAAs2?_Z=wKc0q3i>%)jYu@6FQg|R
zgSb>xMAkJ7tm<X^yed3jP-uZ)3X0(UxZr(p9;+q9Me1mawh(C}a+<CmTW>Q8V^jiX
zuj9M<+ly(6{HIP_q}s|X)s1YhUP_ZnON7F8pInO9Pj_r)N?AZXYzvYxJGQL;quMGu
z>@zL3g$+%RUw>@(_czS_`%_En{`wlBW8F91Il6AoiwuruQy!18r0BIHeaFjXlJoMK
z$u)Jgau9ENk+#M~T%iF2mW}C}4iYERO(Tj#rU1^x3kee1+bnH02CUqNP3zn*naXwE
z<%jaHFDEV0Gu$nYKav)b9igk58D<KmzU!L2P1BDNvuddgX?g2CQR}^n1#n0UOho-m
zij2MQiKHN2w9qFIpf`?13P6w+7@mStK<o~p@R(aQ32`7E(^s>|1=EbZw@2$;%~lo0
zLhO3WQLg?wTX+&0(rdvSO(^`ppq2)U>eEw8vSbT+I*lD*N{HvP1WyY^LAm`4@uB)j
zZO0u&KzUG?sImWC`vzB2bQJ~MjcJl@CV<zss8#u53qBwG;nx?}<iB&Un-q68i<=WN
z5U$A#Mzo1-SoSzWZ?G}axQ`)T^CqYiYa%A>|2z6etQ#Ij=<2fuF?LPZ$zm42f6*ZC
z*NTNTX73-g4jr}*4O%o!2dT;V(^z~<gN<;8Cr?quDah%EJ;X4lDf=SAEl!i7ngIw9
z4>9}!fhczv?r+)&B?aS9=P0HBT8Pt>!HRH)q5h@~2IS$+QI7wWoBj8~m7*#Z>I~1C
zqWV;jv)X}%TCTI_jrzk{r0Cz#FSIBYSZSupho&xz82w2d9Zwyds*(y(O&+H_3s$M=
z+?gn>lF}X(tda^`O&+p5>x&XQ;JliDO{2zGum67nk;TxU%E(XZ<9YUy1=f)-;(?WL
zvHt|<(3Mmo|8Y_ds)U13M+d7++ccN_VI7%E1|Y+#qm%v*Dcc>gGHrzsfcXdgKWfu*
z5`cLC7QlcG!2TZ;w3cngr5-?_{J(*W|9_x`Ut<8b8)tKE#6MG3mn|y4av=ax%X46x
zZQYuoCsQYJiB#=(c2X5-9Dk-P=>HC?dZZ54O-mHgdTD}TJHo`s*}0G@H4XZ+{k2oc
zX@iov3UyG4d~D_hQ8j5@mY`Kg0pZu#W^=d0wk_u{U5|CjRc=VrOj+4BpPL=g1*D5z
z(fwh0n5ESL;@ip1imFfj$pGTvlXo_787d!ES7CIQhTkq}OHT7?Gc5ksK(0Fx*JJt_
z<~(7KxOcbCFPq$W=U&F`E(;nFzvnR=;6e=vfB4F-i7lc9dy%%goT5(u;H4(^&bstP
z*z?IN<u$C-Gd#9xznfn)qV*Bjd{+GIc-`-MY`F}{_A72PMM7;dobV$OPHMx*%<eKn
zj;%7y(kM^SLt(fh0E$c!${c-_+pQZrA=gBHY2Nf0*hPW7!R36IEQ>3SFvKyC4GxRo
z*gfWpbYiYg*10ch8;h8GEhB7lZZZ<nXBtYXda^tD>#-^2ufUc^>xr}cJi?cUYrm*U
z(=vwj+`MxC@QRJuldi`3h{`4An%wa{)@G3Lj_koPIYLt_m|wuVTi+u#+;Q3sE_8lb
z<(?=Gtne~!xFf_BBboV=SuI#ayJ0kaW%R?*=d;d@>4Q*)&Nrr5C3*Ka(V<(w&%Y2f
z<rW1i$LJ(**Kc~v6+HG8`$>Uwbcz>s>@VHu)NGkqUGO<U)rcpi>9H37l&a~m4b&6_
zDSQjk-1&w*YHS51PiEQJl$H0Li0Ls~_v}E`s(k_V1Xx@8`l*Dmb>w&kpiNJb%0GW~
z8(g>sos<+FTUEL+EPG|05{x&DiMhoc!sRKt+3$b91+R3;$u@sV-|=qn#^U%!zJ5wp
zIGmd=Og?--hT1g9&Nf(gM_v7S@!7onw_N4j8{69A&IeKK0}sI~C$P94^1B_|Q@R(}
z;<HBj=cd;-?qVM1&x_nC2x4yCVi)9#&qLhzL~sz<ygy;1dB4<YUQCITSw~(>+->(<
z8eOS&fVbbBV9=e-=f;oC`i8q6ces2nU!6V{!CtY@NCYd#q%R4%9@FASwx_=skr=}J
z8Kqs};E5xETlKqF>O1C%d&)4NzUSoYTt5MBCJoE8(%6g@npWs*tjK*?H2e)RTPeC4
zwxigQt6UD~33Ge{V$LVEI0&=~E*ui<ZPLq1wr<J#sZG%5f2vS$jFnQunOz@Xx@K$c
zCfC!hG^pVLI(e+HOpOohFCmM)AM75ii<&#Mmw{CH4Y!-;iKyaL9N*NbWOLI-)N@j}
zR_npU{0MgF#$~|*8}z1FeXs~LKAD{gf1*(&mi}~j0{@9p`cu?JD=F-dyHTGhS|u7e
z7Gy4BIkXaOf6systo7wLRD@ubT_|A`ot2X+i3M?|fU(4|0W#G#lF?EqVZ5Vy6wD(h
zqrTiKJCr`YNcaqDWw1L6I-|UW5|SbJqk+ROjL;nxV;}Pa^$^7h^h&0zr3wQoWG}LR
zlJ>qpqsrnQ4^(4S&jhiL?{ShV6Z@A}%xlDvT<=Lzxaqi41YzeNhAR^<3;F3m+&dvH
zjIjUbVdL}ZepY$BO`$+-JgAlGe-A;RR#L!5iMR#)-U;GAcbtGL`n}TtVEseDp7L}b
zuRI=K!fylizp*6!28Z~-?|1V#$2Jv<0DP|GjYD#)h9_)k=a%6(5x*yFdFMxeiqnC3
z-=vPVG1!HOfO-m#7AkTj;`gxR4ZqJ}pljy51N%%?wENu%?y*gE1&q@H!>M;Ig~M0O
zc1*34c;xS(Ymza-0R3$q@P&x0B79gCvm4GMGF_hxy6BG)N!ZNtY{ZZIvQW?pa%-=9
z@95tRHOSL}$D#s<<h^qkC2wlq>AkB-`4b-v#PSAri6ViV@`k4?CJBP!;@^A3mIX3H
z5n8tu4NsEgVOiH>3i%f{q<{X>RWxuZo82e|$gC|uO?>bYm;T=Sxa0Nh9Hp(my7R59
zrY7ZF^Oy(PK_ufmg3b`<4@cRy&msB|p0sg8AsQkoeYrE13OkZ|%l1;6|G5TQC9=s>
zZXqHfX~QX9IX)DyB2ZUJ;=QcLDux}=)9&nTXr%&6-ngL)A2j9}BV=x1uOv%3h+9t3
zN1vJC;h+vFkB8uWhrp;V0m*)ODS`MQu%YNYGQRe-(Lv&@X2j9ibSpvkLVNjSSy_PL
z;fu%)sk(7vg5z$2Do-$CK(;XAJ{2@*YDbRKX)1UM7=ZJJ>s0_1>vRZ;2VF<5g>Nt^
zu|!^c>xMh1q%F?yZp>_YuG%+%=B=d1`Srw>rkcJS*)5uKj+JzA@F48O!#+Lk2R)Qr
z?kQr=jG<`0IGTYe<20gZ+(JbU0IRB(in9(83hLulsBF*{Su~jV4Ku9V@#1KxxIut{
z61YB}4#Wa1h>%hpqj>$Blj>A97RA4s1GC+EyF(?ua36thpMaeXFq^uKW|$SX?#qsq
znFFer%1I25SF*}*;EcwxR3qePYmvmZzP<yLRj;gdUq7alQ*emzYH1&zzcb6u@jjH>
z;L@w?uuzG(7N9k5)p&;Cu_5Vq4Z2Y#Zuj?HfI;7IBEIi~{3oR8BP2FOj-$!li9?y)
zIG}FbKN~yMUilt--bhd=(EJM*@8%oZ=|Eipsei7ZvDHF^p(1u(;!fOl?=|7*K;H*H
z-h~_2lSAPiBEjpr<@F2P<<n!TamEf4a<O&}+Qv->Zi;#^uRn^cR|?=X?kttXUaYb{
zsZmv7WWa2HO{PrToqKryg54AH#@G~XQ1+C2&dHWbdOG0o6WfI?AE~za$<H<<;;ME^
zgV{C6&s<#)9A?jWuG-U2pVCUBvp^x)oKUh@`=*j1Ce$<0UQ#RzvreEkn2p}RC8=^2
zQ0Sbe!UN`BRV>Ph=0o_$hp6{XJEooia-^sGy<ke2B0w*x3}QQTxJ4~jB#{2Km+V-s
zZFzmbwRwbjik`26O*R?D`Nm%$z>f0CTovJC8C<kk?eBM;3+Sv<9e6M^sO}H*jA6Tn
z*$4OMJG9XHGGZl!iKFzuTuY@iVFO!F4gMsVu`JtYo%V$_NAlbkJiD`!Xa)_grLu|)
z)laS8b*8Ito5E)LDQmRJr5!?`S{*CGcl%3}@2z*|CchGnzeCU(Dd#~h>Bgc84^;bW
zf|(S+DA}Vm>eN;Knb>8+aZS(pYAA>uFYs&U%Z?^B5r+RFj$rT;CLM2-p<mc_mFk}4
zO;yp;qVnr0jlUa44=5ne&}`oj+8>HDQ*^K{r9i;n&6qF7kk%;zzej}*8Mw=hAnZ|5
z>0x-4q5U}B*Ze&(5N+5rjKb_+BKO)eTo-jC7-dMyiwxhRlCBwONV^K=&(3VWqe<ul
z(F4xZGVtAywy3A_Db;x*pU1z1&zI|l-tCRR_*I5|PwASTeNYg_zXTd!QPD@dqiKCj
zJXFW#4@JJK2*iUXMt-$saGW6bT@=rmEy{qlKgtzFNW6tKN`>pKyh;UWXYN#mXDV3}
zL@Rrk`DL3GrTT=Y>rO3v(V}cZ4(+3>F?$u+Fi0kb(@2&7S$T=Xmc3OMRN;d*e;gpU
ze%!c-j#NdBr=jQ%QDYx?79&p@Jq}_qkw2xww{OZpk?*a!ETp^5@vgd3Q3A^zYkcSy
zZw-5rFdM=R$F#uF$bswkr8j@}KdWGQdw@I@yZ*$^^i5};zfr%4ok=WqTxnlqVI>L=
zTQCTZ@z{{gRgRvH@LeBjQLnf0X&-C<!MMgea`q>s!~j!Hc<7^9>Z*&?hUn8Aca<g2
z-B-2B@VL2_BrAT!*`-P{KSGILJTc~O0^t*Pv6}PR9ag%fvJ2n*TwH}}@8o7}K^2Sd
zLxCE4eU0w1R!nmmwro0GvzGU%Wn_oGCJ?#zRW1<p49%26PNW+w2QV;&tTwLP_Oaof
z<2z%aX6l~ibnlhsL7k+pFOctB0p`Z)+1JNdLvK91RiZc!t9V_`6j$UOH<)}@OA?LT
zUUdw*>0p!9>cOT1w)Jcu7RS4@ehUqv!=5jo4xchY$Oc|l$i&T1s`=;uU<o>^=kRaZ
zKTQUZ@9bv3<~^;i;0pzK!me1&fn`qFshFqx@?D;8&CnA=1;yNiC5Y;F$E_2Ofu#rg
z5;Rx@F?k@qN+}>)(GY}$HKm!1-?YG+UvoUWcQ~81$Ry0n!`$s5kXi-Eultc;Qpo9z
zE)1n;OG(-n+Ij2ecN)^Asy1TLzbH-2`B7Uirk+Ve%gue~0xQgX;XNqA%6?90r#mVO
z-nq;ychI0C*r=fkisYueNO!O5L}De@{Pi}VJ9nz-ERw{!5A!?8y)uc}!C&7-N8D#x
z!%8{u>zbEZ9y7^(#|Ob_Lo6Y`s9hBZy79<?E|15qooFFj9bIUnl+J9mM%Re;ZpPco
z%NE*VFyd-G$$*lWwHVkLv&&D782?hnqf+)t7bi5p3y<yL)5S&0Si^={u^ED$3+L?A
zKx0$&Rc}@NeOmk4+-<FZ0~C?hQRlQrlNH6=P6bnL*RnA0Rb&91TKbrsL(m;~<@mw1
zZqV0_L`mqeiGw1?LHE*MGS0GFjh_U1_^7Ab<lkSQ9>*B%gfsW)M;R}`ZH74|lQ-sS
z3qgQYhZKxNg6`U)YjF}FDu75mNONZ-EGKDcJS*R#XlayH)0eil;+zRe1QeOb>VR!g
zxqz6xRlK>6oU^PCgHLnt#@surv#AguE29Gf3*`BgSIPrA_>Rpes+Pv=6+vxs0NDJY
z69|<*>aYIqGG)|{oUFU%l3BQol&HIQS560DPM1~&Zp`H=s)MzabOqWqAOcYeteXIP
zW^)JVgEXJrK4OUzDd+LW{oY5C0Qe7<#m`zsW)bf+ZHMGs;YiN1)zMTf4ErlY!uHyw
z4nfAwvM&YadGOA%x&T)E#aDnv?sWJV;{~<jr%!E%i%}bBwF7LfXl(gw)Z5rW8@Kz8
zcBwhOe;O3{XGf7EXIQX6JFp6)24-K>hS^2U_ypMpm2jEmDCxKs8$565@(Z1trInu@
zoajk7e!pRbqBK|V!E!@WpG0!sAG0Zs^L~$@%dAR4WPTJXVPYJw`o&+OEC7$CV<*+b
zLgV0K2SM1Cuw4=K76^l;IfIqKUuhpyHsQ_Zk-1E3i0;#l5#q@&W0<6gl4WKH5=h#b
zT`U>mtom9t6lWN`?r!Ldo&&L0Qed}`LWNd$s^(H&iiw#Q2)};!JHNfPv{fCxlcS1=
zK3Co~`4=IVPDUak!@%P-=I<|dGpmhV)10>4c+JD{B4*>W-<hheA*FSf`G2<KgA}&v
zHR-){rdS)_it%Pz1t6DJ=r*<PwSwC^H1sj;*YGykc5+U<kn+E)4S?#6=iLV28RPQN
z!Q{lOw)>n!*3ub=E^_d!#Onyubn{ic2#z$Xs0<|av#EOw6_F<B^Hr^4Sq`CDhUoZ=
zDpQvqg+@}bUL;Gj=r?57?S5!YA$O?^+GEs9rIrl1hfqAngd+t+;l-u#moT2yugvQ-
zUGyRqE2Rf>{_J+U<u-NvDR1bfvSZ*DGFvG-?)Y*=if$mLJl+JyTnJZF{+BX<z(L0N
zgH9@3i1{Jv)QHg?(~f%K6zuM{6l1M=?M{}ViF~CJQu|!pm6i?a)}&<TcAB60(R;bh
z=Im5^69VGIu0(RYC$%}j^|va)(aVX*^y4Y()-2I)KYlyyGoD&Ygz*=tv3cjMk_*8=
zzU#h-@Ks`)U&2vo*KofLRevQbY{{;7`f9y*Y1l7rL9ZQ7fS(wHyw^G0Q2yQz{5+P+
z{GlG-1KYC7^UQ)sMS-)bq(e<yoq?q#$Q`mdBS=!V+5BGH$9}EURciv<+MIzk@AehR
zowio^UQA<L^pcT7yDO?CO$|YxXT?B=n}k@m-R#5N!vm~2Sru5oEsN<n&4&TU*J89!
zy@R*t$dtW}wv^$1hClIZ>xiYSzdxM))nCReA(`Xlib3*`{i2y`1{yk!n*AGdW<!OU
z-_qT(2Yq8Q?bDql(KgjAN<zUA;UHh#qc4<t>oZ*hEgvWAx9o<E4Q5QMa|Zpd;dd#N
zbS0!>*`TLPtYSkn1?DlZ(5F?GL6yY}&8Jn3{1W&hhL2(Te{UxZ@oOh^D()L>?Vxj)
zE|@x_<n9?f18^*f2>#e!ru!|_xBL8ju13IO$eQoe=@xcX$Xr^O+gRl;X$i2jb2;7t
z9ZdxlY?a=j92NK7pVpW^Je!oV%kx*9V37D{+1FQT<$2wX@lD6MRk1j=fPI;A(%6_Z
zH5ch^-Eec{oF%hSoXknnR&q1vpn~BOoYEmNbYV<u;p4qz;~L>n*Gp{@laxuhSNRr;
z5prB8hn5dJc$br!h>y>4T}2_|fn%cnGZ~M5eDW6igiag6B8cxqZZ|p)Obn{O1ucrq
z_;n?hBtS2m%N&F|lJ7;DNierjD958yo+^1?U%8XvtM%iKi>S&zX^8cg<@D2ea3vr8
zPwm?&%>h61K9?p&iTYY;=?cCI3Yc2;>9>boMf@zSlvk;I$;WT4)UEE#HC;Ii2JUjq
zWxyfNY$3;N(Ucx`Vz&I(dFpS22g&iF{4u87v3e#lUASKbrP|-<ZF*yicVSJUwq0Gy
z=rRk4W#uj#Z>}=_^Gu?tY$B!%qFVU){coNGn%VoUr7|Da0jnK&f66O)KaM8F?4@AQ
zso+S3q@<qI%#%K&1`P_yRw23Htbbxp_$-2f&M{`%t07KZ?b%L;F1U<>*5qF^7+<4X
z6B2yEts8P#`ydtaK(Jt4-lUu6-n-_exazb0_OadZIMorav}*4&6_@cC(6imu<FVcK
z@p5Cn^}+959r^5PfyJm|9PTEQ?0TU_(K2u9QI|)?xvkdnKxdvOIN3ETbMU}aljdG#
z@vJ2>K!fgV!PPd-dj89N%))eKam3w1L-xEHc-NULPT2+`7}on-**x6W_CRa;o!Yw;
zmfSN<kAeE0Z(PuFGa?tXJ78O!q9GCXo#o?cj^AK8%H%+U!r8@a&E(zcgd%PI@JAWy
zs{XE)Nj-v@G4}7Yjv1~tghM0l9A%=69~+U*_2}{myvkCSVqV|s#;|?lgEnla+S{B9
z^m3CAP8po_tXaG-G4#jf#Ib=5vb5*5>S&E&gr*tXUZ2^#<UxFW__V6_N%hXjrehZg
zj4cCPwh;tvgu~+(pHsTXHEq-2zWoY4FdsvQ`Q=M60smygp?qOySQ`Gmrpf$G9Mec#
z{s2?`j{u%x(q@O0yTfYPVy9|m57tKjM2-kVPBkcXsF07dlC@6#ZAD~WL6g@gEI$ZS
z06IIg6-77mmOCa@HZu%rVhJgF-&6LuwJv7F0+;W@a6IMEKw7vh$@x$w>qiy7n-c?*
z?ycDsO>b1*pY9ym6itQXgA$d*7iXJ>&hD&2-M2rQK~|dSW=r?CYQs<6KUkrCpm^7f
zKi1aPSJsbKj)~~CyL1~=+xs~Xy(2dznColm7Fj26tDe$7z2kN_U?S1W$P@dBxs-ab
z!Fm?dZB6W~WbyS5gHDUs6QjK8Zw}98VfMf4j`i<K>H6H2{sCe6iz^~D{@|Hg+aOr|
zwizK%N58hn;(-1#`}Nx!7cI5NdL&ytb3@EMVQc`t-O0vp8yI_uOG_jN(~*l)gPv|P
zh^C8AL=UQz$6BAzfsItx+S|f{!01JFeK;d7@&%Nw^)2Yrb>=Ya)))BeYPyyxp>#+s
z<U`@dZW6h)Hw6>$$V7{`3<><x#H3=|Xj0g)<z=!A5hy1x3%V2E15XTzI2`T{vt>n!
zfz3VdnBkIXv{TQiF~{C24)BY+tsTF=v&!qlFTJ?O4q93-6;<wRNuO=~Y<|$FAD*im
za;+R<R*SCCK;ZRa!Br4gPp7~XrTCh%VqnNEMOPRJIyH5*EU!y?Z+icJq{K7$UaFFo
zOIGma$H4hPejtT(ubt{V*j{g%b~Sx!|Jiu72idJo7=?I&S9v^b@~j<Sx7oGDwYhG6
z(1tK>amhX;{Jfx+iJ>LFWPJW-sdpJp)+xdOr<4rD(Z(Nt7jK%KDw(7cWfeo>>zAuG
zRKjHMLkGk;c{+S4THMH_a1<%y6vP)3(-6Tk3j~#}V%r%86m?~ja4(I~oK($J9*NTA
zxnp2njEjLl6AbwQ^?|yJv4DDcxy+`B_9@Oy<-(OV&l)G1`d`H=?vB9A`8**F%2C4N
z-e(i0u`9}(kfXiN7esHxb}!eC>U3-PC7GhdNh~4eW<-kwR460+YUwcvL`-I8)c0!1
z=FAA0sib%3d@LgJ&OcAJ>Qc*-tf(32O!M$rY;GqS_%L?Cr<H;%HpEqyRvT32i@s2z
z30LsxFdtQa*0gDgqR7ET&#p^@9iNmD!yBdJNkB~2%0Y{u6&@1(`(-Ks(_H=gv0r-W
zSNhv(F^pmu%_Iq6^Wshsja9ST<M5*5y{Yc*Zr&H@N-6mqTj`9$wuAI)Cum)(Xtsp?
zw{u_YqfBER`4IBP2bc+NQfEo17v8#Hii+)GI#nk$*;TKi#)<nHX_1e`WK2_W%1B$z
z<8X~_<EgQ-Mf?)`U1EHyt^?hp{+YhrKf4%_ow8^(#S7Jw_$)p8e`zWR&(Em+oK7PY
z^;^bqNot4!I@s^^OL9ScAjWPEt#jci%VQVsgfU+DZlPttJ`djn#H?`yw71leF-Nl1
z0N`JiWe^{ADZRJxGVUZ<p-&JzqC`{tdN{j7mfW^%&UUCp-DXo@Tm@*L=0=#`j2uu5
z!Q7sl&0{Tn@a2!xD7pp16p%-x1Li=I{9VJZAhFZv10I$zu{yF@V;3)`OvP^$`K!l=
zN!Dn0@#Gz2MeldgEdi*jLf++PujrF{iZF$oqd&QM#qhjAD<nq&PZC~CZ!@r0tNoq1
z5Adh@$B9ZQGulP%fdu5U>Pv!XI^t*tKfIiTp;kG?9^w(_g=y--qx_s9qB6r8g$;k@
zFnOtz%{R92Ief1x{i49p|B}-Db@Nr-pge<#6^{PGd0bI6z(CN=E6cRK%3CwYf7AT5
zviDd8sx<wIg?R{uc?QbvbmcRl^jSsxTu^c~e8yrSr|;EAC)x3bxF!Ykr>C+fmGzou
z29fHpa6zhSIu>b<Q&uBw#$R?hVwL9%_qJso-&6Vtbbluj&T!zbzG@EX(kXa{lOMDk
zP55@x{)9dv*6Dg^5Hzd&>F|Xkbe6#{ym#I1jnb7;93cg#Rt7nVf8o#Bqea-*VzDcV
z^m$~<r5A;N6*p%<Z_EKoLKfKEN0+BVA+yti(ckRia5)Q?!?GG$m1P}b{K^)1!(n@W
zc7Ye?@i$dBmR*Hu*JGJ%7~49Yk_w3&0);(E>{V>Ffpd{>Ke|s>{pY-SE6yL|5t8RD
z{zz@foF<PYY9?V8yHRAxQNHOKQboV5Qt+*$%OQ#-4OH}Sjz@v2BqXp1&PXF(#^g<X
z3WxKF6&@bMwmbW)r{eYcu&C~C(GLP~r-*%<`*M;aI-szmHHM@aX$gO*`a3xHjXpr$
zw^;7IFPHnpFa=h6`+&ygiyKOL*3jDm&nqg{=H~;rw!|uJDwBLhzvyQ$!d*mW9Ut63
zqWo&o8D$`rT+@xnj97R;zW=qT{)+S~>wQykWiD~>Ew|&f%~Z634p(qP)H<Tjm2%Os
zHP5}^ZCRF8HQouY2hP%?8;%RR2M*_IfEnzyjeu@X6C^>@q&S1Jxr#PHB<c?G9FX6|
zhp3CV3V$o%?(1RgVYx&+Fobyb&gV7cjl3hax=qJ&dgn&^;J6mYdtb)eI_pH9`?yhg
zw8qvS_5e4m*##DsD+m{ae9goYaEbDGFH!Bha{zz;hV*eN{a27T(*H#5>J_PX+4{*R
z$YJJMvdo(}z`36Dqv_k#T@T9B5d764w&aOX`10bso-1xX-O@6`{&ddpGY?V#(lxR5
z6DDO&faE}WinD)HI6#8a`soAw9WVX;_fs0n6sOH?53+B1xS`(Eb80E?$7W_uyk*?S
zEyEP??6u<5?kgjnfHT!X=h0K5S5AOovLJQ;b4vK5y0d`dz_F&&J1xM{cgnDmb~_X*
z$FQhk+kGdXa?H?!=~$O-B)}H+SWx5dcu<eq*W+V4(D6tObKbfC?HgX60^xEO2)m;P
zmw5!8>3HOc+cC9GWxxUIy{gP{W7$fe(|hv1?2LuTu+?kjur+4H!?=CYB&KCM@nnuE
zX5>+-@@=bXV||#5`P0gUv)=H&$(Dln;||VopDkzW9~L5V=gnc-VUywuT|6QaqZUig
z7%hIy0h98IT3;MIP(t{~k4dx=V$Zmb!N@L`JHcTS4E?I`5v5jn`?q*5C-aw-SA)Z~
z#1k=W1~+b|{jU-`DoF91&sQF_*!wpIFKF`qogVL0Nbw#`SFSe!!~2uR_7`KGovg3)
zctj2t-AV)djH?K2d|TfxnhlKjwm!~};x*oSktW%*Km+U1ynyC*y=-=b*K_e5x(A!b
z$;EvznjqmAE%um*lm;5AbYwB+l1ZQ5Jw~60qk)V1ed@6Je(CYE{^nWjrtQu=;T%eD
zKBWk{?k?#Pgh{o<h1N97p2uFDO}Oq0ZeH!%A^YYQc#c1TuM{d@@4jio(6I+k#IXY|
zHoUR&5;<e>-Zgbl>6uBQmYwcROxZ8wZn>_~+!e#>TN*qwHTm76uNn&pn;a)z*ANik
z`U%6ZYhW-4g?=ru^&{D%#0QtCjWfVJFN6!YQUipxgj_0#879Y{Bx@u5jR`MuGa@f2
z!B}hY&-Xf`{Pty$sEzr~fYJw0u2WkUyqN%w9{i2htZVL_Yr}I*2^|tMA|3iOB0lL6
zSk7<%Tw!%AKPE&G+Kd6yM@Ys-KVVyk$yKXwkQa#mc+>)R(SX<n_*(X{BwZ%6Ea*}O
zaAAmaM!RSLjD{a!52z@N2k)wY)B^~a%m*TO78eN+^QoL6p6wn5f=5F!HZr3(&U6m|
z54nJi2fU7Dz+OSvMja6MLv%)j_h7aqaU6XZu8|skA9Ae;h@K0ypbKcCPZ_`g4xrZo
z*lu&h?k}_9%Px!5iUCdemjQSHycVNz#^Az8xbf<KM&wceFopq)TRi_t9yG(hp<Z0U
z0w+wO?x>Bh;JU&tm236J{A<8c8n7$_$XR$lK|23}{1EY}-7&PJ2KQz>0G+da4Sv`}
z#u9tU00@i#R)W}#Gxn1+Hl!`H3h3mPhWh}ZAwkiy$R#DOe9VtrS0MKhkwBdc*Zw6?
zk&xm01Fs2C8^WLSgRpBb|DI26yRe*X{>64L3W1t6$lWi1+Q8#QS_JQy?F+VKhCgje
z(6#+nN}GS?op1n^e)lXK;f}d~YNQiDZT?T4&a3tLLh<ge&gk3PKQ_cp0D2QTfR<}f
zR&e)uWt;%;e*+T9PXZ)T7D%LOBEZe&;$OSWf#SYBm;zh`oBqS?pRu6gtPSZS#J{$@
ziT*S2uLm4Z{*wsFe~FNs|Eu$ad(oNu^K1H7yk<XlJRXUV3q6myuXyJub`1JJ%Ypv%
zuTM>1XV~1ZuDMLyXUr#LlT27y9wWHmt@S(tNp}H~&it>IcYyAt?MqpKpH#;DO!tTh
zk;tWiF_FlNKXBqb`iin^(dgE*{;%WW0|oY<0y41ej_0=r{jURDO8g`0_v=TPdy!;Y
zf-ZeE!mK^kQkbPA-;(pFI;;(M<1vCw+w%v)Sw9LNFkF;?&hl?WRPlj6JI9I;i*!i;
z5tX-kqXT$<U^UJNkXjb-ubSw8&z}V?%lrp|3^14dSBL0--JQa+AQ#}lbZ04f@H@<1
z%qMLo&NASjAS{1&X;}r|MVqzHJrfuX&VbkabFO942sc@aMmck1b{I(HMFQxj&2T!F
z|9TQqosGwiZ}4^z;pbPPi(DfAcs-F;py5or-mn)rtUys?TmF>={a;zc0c?E+%L2cD
zv6+A<ARq!U@WJ3e0t_bzU^v12rwdQ4k4~Z;*)<-+-*K$P0!}rUKz{`q=09VC6u2SU
z-$b(p@$aNfv|Ko9hOk^{8IN5nKI{K)i0T7jP!M!1w-ZX1S^D*L>n;qI#acFVeLGMY
z&xp<@C0e_RIw(6`SE24!RI#47C}(BU@ONX++Bxuc?{(KJpts#U@OGCTEO7$%Fg);R
zmscCC2xVcHj^sDbzu5hC;CvCM>kR60A>CLtenZBwUtMV}aM^5PjKyE};PH`YZR;rd
z6-|`jP-}nLh}?a-)@<(G8)tupvV6oNug``XZDumvf?Vfd71a<S&;VoXwtIPGiT)Bh
zzU5%EJ!4F5XMJjW($^(_-ehVd>h%=ocAHRI#9dSPk%H18bf?WYB8V`_vvT`JE90Ht
zFx_oMKz`7Jal8MfPALNlY%r`ixC(#W{DhH5@qrf7^*B7%gMPc;vc7(uj<OS3IAGqx
zKpOBx2}ZX&fYP*)=!N`X#My6PCmHf+J3||g#?wo^mfw$DMsPXpiNR3$e5pci_U)NZ
z7<ZssmjHbbhrRaM(g^y>su{PY6APZF*9QL3gn2$|IJ{FkL%Cs;d8a4+NKN?Y4ontY
zr~H^yV!|0jLQE3COPSx5wEH+qeTBTNqiXZ*8NA9Pj;J7%d54D`tqpYWj?*VO){JOb
zPkqIC)%VBTX6BN9zG69e{}-GjAbk!@HsuwC?%6O<(x}^!aQj2+M)3aWqx_)f3iK}-
zo7&Faivb3KukP$uq3q8J-O6XOllf}5Fr)#FDbE4L2h2gQl)RhsRWE{HJ=cxjR}%s_
z4u}JhZC<d`(3>4-M*{fk(VDkLMlQf8KXem5YC?T|_1{Hh-t9bULm9UNRIEPGO8AIF
zu#W0N9x*1uOMD}pec3<iV*Y-Wp9n~t`&ek~nLpTuPDgT+c~%cF>T&Nl+=dZk$21>p
z&En8Z{@D4nUF+G;`=KWPetX9a(u?zWXJvohZp$fBq^GVf<(Etj(507zQJ4$p0e%je
z8=6g5obwMT8v8Ink@S$eoth4}I{ds;24~-^S7NGk7j2UMRN)g?riXw&iU<m*OBDDp
zwT=_K_RA9#n0k3KAQqI5J)_0tyl_j3dAYcckAXRNi;b~ZIm5&i959F&eZukaBi$b~
z;2iZV?IKFJsej;O-q(J3W!ztV=w#U6KY!L3b~*P_8y;D|D~f@+@KPLhxp<Zvrsld!
z35_^^mK?r2_YxnzyLc8EMmYBp8m3;`?}))(+i#9hY#1@YWtJJ>63`B*s`4Bxv%da@
zBp@G>Zh5VOB%mLXZguU9BtVX2^2TcP`VbmrVRJ2l)GZXUY<Ydh&ZR4nxiGy<<L{q+
z&pA@A@%k(fJ5>bD+v3{ex79!Y=3W<8_HvPpb@_xDXCc^fE@D!)7ts@V>!AAkgL~%%
zLr>HH9h3(Lt^9l7csoJ4d}jzN&t_qh?kxY%MX)YRz#y+^J8AxG?JQfsgRi#l3i?fP
z$M;cW2+49z5b0#`Y5W*WbD!za+I1`ZuXX3XaqlA62=gb+ef*J30?mEAXh4fg52hGx
z0;v~k*oo-KZeNe&$ewrYJ^$wGz}iM!+htQyeD~>3`Axv|r`qBBE~AqeP99)x-T03H
zs8_#UCw@nq>u~e9{e}z3onh~|fpl+)g8cn-h`8}kncjS8f^O(9-C)4-(>H;f5Zx`}
zOvfItokS0PHp4QXdKRarHDgsfupQY2F243Iwl@AFJNqEwWw1Hw)Ed+BrX9=6`^Up!
z1cmKMd4||>pT{xN{;k0@mRcr9i>7y^s)-8)bZ}elWLyGP`;l$K6?gI7zi>E+5xzD3
zP6R2A$9iK~M;=uBNwIF&ryvgR*bTi{bHBqLMlbek4XPDyx(VNrJ?;#%ck!u)6u_^6
z$hzZ!JX!RtQ3QxIzL(FAO$36J_o{+%I|*;}DOY-qKE>66dLQ(md8`TM!UC&qByE6X
z#_<5B$e&#cT%->=i20e_$eoKIm8877;BAKzMMD^YGU*SKhd(;rdhB^G^1X|m8;=Yd
z&wS8N#2BwGIK&N8I{;HhSRnb82Z-7h__kFfU3g{pZa}eq%L1Wu@iF}f1d9?3Vs+!`
zPiBdU0{>bQfK1M<ZfyTv96TX&ZKpHj#I<<*-oNOf0dWp^bwQm7Dh(ftI_j}2ylMh8
zFY+O31&)Ja662%kH_4y`ArIl=cX_N|FINMrrmeV$9q+lpHR(W4Rc`b^h6p-R{yq-4
zM+9F9<?XeZ7u1VtTL=^skMM>WYFv*Pzg#I1-E*T}fpw!T1wC!{mH_~Z`LVkfmNXYQ
zBz#?<oDe>;^kd}mk5%|MR7hFaWoG3i*74N8Pwb04P$S2mU3EP{_xEl}3hdo~vBY8O
z5U;{+&qHn}pkI||l=rVZ=y|T2%5U)g?D)tlA8!ju#-a<B|MA6cmc;NSCRk3V-H*Oe
zXppO|8-7j)($O&$^!M};u}Q|eQ}=RDPC#I%N)uSa+X{@3J+7G=eNFQw-wsIK`Wx|O
z9tT(L-e^n11I0L2$1rpZMC7Mcslda<bV~f|8e9Hf57H9#HG@KNO^rI*@Sv5s%e7#U
zupDu2ek`Zp-c`3Dbqp1-2sJ`I<QF|;E#l9z#iO~e!s4mHyuwm9Sdq~LXF@n3wH+2m
z7w4AP{UvmQ_WHM-m&^04oAdK`_vS}mDfM@^{mXY8j!xd`kBjnrt`Xuir;~AXb2>LZ
zRQm)61_W<kQD~k&)C#VSjNP$hH8+`a25wHh*u)8X(w8NbFSBptZ2zDf(B9$ma&@)G
z1N(*McO>nS=e%Dsq<wVpeb~o2?EO9U!U6;0;y($D{6CJa0mzbGjqdE&wr9t-ZF9%A
zZQHi(9ox2zogLf89l!g(uDadnB;QFo>8_;imz+~|68=F%qlCB?`Sqh*{Uy1tXdbrj
z<t9gOtNgQ7xG(Tv&fHLwHV3nF__gAFge9GbpWmLmES5oga7x{BtyU{3qjo*8)eZWf
zE^nzpsjzKTfUC!=c9;X*Yr*f;!;jq4Z}`n5TKJog*C*mZea_TVxRCI0&l%<mh9Ga(
zXZb-zPvvxIb56Eo{JS}*Ee$ixiBBN2;$%c7`5#y9+@#NN`+aAr&i5o2wP()Uwn!vU
zNW(|V<5ca|D94rIr0)8}{>bC0yF3^s-~age7F512@8D4ZrstN0!NKe4BNJ7oqwmO#
zW|cz!1o+f+C+&G#QGH(FOI0T~K9XMA8u9ehuaEZ*p3j49^dDOpxsrW};dbEu8B;AZ
z*%NH@qxURhcd=A}@FvZ#6=&Jz&bQ;1%V4b7xSWGIaPW(u7f9TXKq7$zyWGA~NBk{S
zDErq+aID=I`pen9IyZAH0*mV^Ys<Uqp^Ni%vIvhmyXf@Z0+1*J%FT7C>Yi=7uA9xX
znvGWLwl!Eq)1S@0(crs1A#`5}eT4?Th1NY7{RLo>I2%3hS{oOEY`*pfHL&hhmsL4o
z4%Qc+Bp|*Lra65-TCb}sR_UNnQRf_M`J*qs&yHE$l7C&9AclVkC1h-MNT{1imzTZs
zaBuK5`+g;~b=dax(S?*_(DG7sHw@jJKq^9TZ}@&0AdyH(M(lyNw7sK|IwdpGxjubi
zU?V@?6vxMtqM0fNS$_A{wHSib(jysUzn_T}Ah^U!(Q_7eZ7ckH4A$GUtz!e+<=M`L
zL(kC<%QR~D6_|)vdM;v?(yRZ$G`&dfqok#h|0=4eXB~-#C!5emvzJU1pYXyI=uEs8
zMX)OD^z}$Ks1jIS-Y!t`H5fU+ng6hbUu{AZfNClz>7JUDsWDDj`4w^ZTdA(D^e+QK
zfx(N4JD`y0`MoE%hR%Z#BG1svUr0(*Q_0CRy$dE8KPCGR?=LAu(OWiS$L9yVMu5+i
zZR`ww%@@RTR4s@7ErvHN@^jmjwcP{kc%x5X!Q@bWA`PE5WH5UO=r+XnSE~c;Ix{&`
zQQO*@NZ2W>>?@_5-(P!W$F+1ca(|^`>ZCeERl9FlC;XG33!0u(_p6GE?N)F=F^4Hf
z=VDLSHqW2yO&z799_Ncy-8Z|N>!=ykPL9MFPq{z83$T8JsCnx3^ys;8$Bh2dTH8E!
z+fiz<eZa6+5>gVXTwULG)F%nER*`96M5z;2AeHf=sF?RFR7J6nhu_FMr@o>0m$bht
zez<k0D#g=Q=Yaeq{vc`K->f)9s+<?^KnAt1%V&bIdU`ii51d#oD76eQ;V)0c#heEl
zKy@Y{Qr(PI)-`x81<wNg8a~vPijCx~Rvi&T>p-@Vb{dYFiax0@rs-Zd)XpbHwC(Lo
zDEf$tn-zs?W*l(STgVin=%ru}d{2cl%F{&fcoh*D()#UY%V?U|EJ{ES@2{%cv?|Fj
zL<LvDDFjZj*+O|lCx^6!dGG!^D7tZSV-3SfiM05}6t(t`I084uG`*RdZFOkDr4aG`
zYQ&{{14_vz=551$*!{qeaW^sDdRk07vNV0WU~KPde^AyBIW-xU6coRomrw5OG$SKb
z+h3rr__op=j_MS+7eTn$Qs3}10|99siLRSgt%4)4LKhqL^}C+B2ZjN;oFV$W!DH&I
zxzdKG<jtWu$vT%YWwWFxgrYKTVn?S>t0Hh^h{qmmkL)Q3chH4In;e$C4ib>3AuBzt
z#}HOF?Lh=caaHtCI04jZYt`e<1qeQb?Ra%{l^vxZG_fT1`>~q_Lc2OBQWdWz9jSGs
z=B}b1=yHHa@T_?E{Pu-4h|Hd3dky^{Ir&5*q?#N{V<J3dyr@2w5LNN>uf&)@vyroF
z{Zqo~*2K;QSl(2`F25+Zq-8_e(oO_@R4lH)$kMKmoHl5F(MQ7CY*tB0!!Jg2a2u-Q
zKUCAeZ9{!xIdD|w*T8D@HywlIgC=~i*ey<zF`($UizbOpm*XvL!Rw4Q^(4Hb^_P>L
zTZstZ$W!^#U0_+psTB|79IW}CD_d_~t{-neL4&N;gIcjdGWZ|Gw!Q3w%AyN@eD<c9
zAQa4BMDhHnW1S2f5D_k?oiMoiUO@B#hv|wt=gDxVY?sHrsSYMk$yFEfD`nNWrJ<n<
zt#7VxZ-?6(Y*fbF(;YD_y(s*rr=g*x{9EI<PIIq@GkPi-MrSi{%x?O6@HeD=%)zrI
z)cEWB6_?`DON7ME{zcaKnEt_6qHZgPDj{ykxz!w*wm$O0Zx@%38@trr;`G6+s9n4z
zxFhzDjJxev9%`gJ9BSkRgL5X#na?O+%4Y`}b_CO!q*=;vpQ7hF#37AvicFXZHR|pZ
z_ZlRQph61j0RXuzf*pCc=+Vtfz@RYypy-p&S1_JF`#s#P>Z1<_>&&SeCkp)g^<97e
z-B5$_SXZ6;&dQI_s{aK2a)=pf3wp2EM^7b$WSfIH577Fl@CyA32!*19zeEUW$vwJQ
zt7V{qYQ;oDwqGXra~Ox;BFgX>adXK#OsCg818ZrkSt68UKGM;`OxGJ$_a)r|@Zuxz
zX@K~G@z}n7$JWsvac#|J%*V+#9_8Y05D(So*dAZ%6x)yXW?Qwgp<^Qax?M8OfgGvB
zvR_U5PV(iccKpkNmLn2)_YbExQ2yYbT=lD!4kpTbmIFNGQ=}`%TAMQ`yk8I1%}IY9
z^$`OZP~hlfYO2({nQ+rPR9^?f+QzS)GL-iI22}h01{Mds8_;V|9&~fO=MAG*?Ohy~
zR1<GX{b-cy-y+PgdNO4@adP8~-mL+o>%T?5*v5zH(b(B8*&X4Yn*ruG_SYVWcD+mn
z1}QM9JsR@5r6$U6L(|c`8sve?_*Y|fCtudQ(fN&X9l8c3s$XVl9*ywrJAlac`M^Q`
z^Vt@h!R*Mte0RjlMo`8M@uhUUTpftG3~4iQ^oa}3uu5~TynbbB>BD#Oclaq}>_1?D
zBn(d^rANbc_{DI0+>2IKwn0POOJbT&&-D#jw*_Wz;^3EmHL!~R{AqlKy*93Z69xrs
zv~CTKche}B@0qksI~L{BhXkNJoQ%5TTj2@HSv1<05&k7Su)KlA#iaCiWHQ|bde&+(
z{kGT2LH%l2-J0Ez&iQOAfPqAgE)%qzg<+Iu2Y@w;+4o5dmDdJCFt=ZmF?j%D{k*_)
zz~O-=ZW1L-m6IWGzXsFT!Wzq74pJ&P)`mF(e%E4o*8F3g6e9n&$Ww^@?RdzB3091y
z*a#(VZfU;c4+h-Z`3BAiYd#yxkohl%MH0uTU<DzdxchJ-Ibq`iHWz<|KI5?&>Te%r
ztG#Z9?4aU-W~`$rXLi=cctne%MA{9e@AA7oR%i+$ZIm2<dGeqrHO8FTh~azMFyb9x
zrv6r8Dm%N$GF%d8C{&$|LEP_^cXB;m!*_&#0)p;~g^OQAW>cH(*WS^mt}_aM;mwmQ
z9>Yr{Gg&vUorAPT$9I?V;M8Xu9yG&E;Z*W%G+DRIpt%&l37Wxv##shXin0wf?bV45
zNpHitPP}#Ucdi2l(b%N`rYQVGd!_=bRD0@@06S>^(2$$d+k3rJKWP8d5CPh~qn;g5
z6uoU_57EJ?t|GmZ+xyyz17HL?8|mJWu9ETpgY^FeQBn9M1{I_|JYSNj_Kvm`SWxt?
zm*%7ZY$$r~j(GmXA+ssBo+#T?dk<tE($N0NA#;hh_ZpN6gHy&-=^^?wdsAdSfg$cP
z9-iA{z}ntP#fC7X-FlPyM276p?8%V%B!@60--1Bv)9>*mp2@O6<&E(Mf2;aKD%F|x
za_hfI_Nip+e^P$zAojhkKrO8#Hb`}Am^{58iDP2oQUE|g7FtHapsoe6bhR$Ug8HHv
zI~&)33_L*<!$v=$`fv=Mb{cJg-H2i};ld(y;~ToDHuQkmi(~2jt`6Icg(|j=>VT#f
z#?n=u7Q2;$DyE4#Lh|7oJPk6E4^k(NsihFdEU)gxGj!o<$ecr-6<(LRN6iVlWlqZ9
zjrK>pJt7qt@E7&qnozifgsLNYh4#bTqmlFr@{hhv1tysz^+p|10Mw!CNME4|gaB4R
zGgPQLl2_<2u0j1@!0ZHj*1+ufLI5pblD{b60%)Z`^Z}pr%@RT=#2F5>_J-1zxJUi?
zF}Rle0p?ER0}3q69eI}Q6}nc!gAI7I1yx7<3cVe98w)i4hdc|!-HvhK2d0RJ*aOR=
z7Y3LC(X)|yLxFH!A!|t=V0FUn0l*ulkhR2*^okE;x5N*ya$+6Hdr;bQmA&wPF-tFA
zG46bcFJ@M5_PF?2mX6kW3x+l5!3()GTUoC^MKi8$#yU?TH`1TD*c9dzDQ*37{^<36
zT=e$5Ix==@#oxUrI8$Tp!<l-)y(<s|PkKU-2*4xeRV*zzlYeYey<Z7hdw;Hsj*j|0
zujyYzZss9_BaH7#bAQ7xX>n(;C>GnY==t^-eYw|(BxMAg>^}bJ<>-v-@*ZNyU7WKh
ze{9;QkPp65@(lvFXX;yU)p6$iVE^qg)^ID6SK+sbdgYSI5NL(9JNtn3rUCa-lfTFy
zNdCkmD17`hfpwh~;nGtY$`0a%Qr~uN9~yP~H(4qWHd8cL>9YpLxP1_dWj3ExD7aZJ
zv4l}ww<v|NwXhHUEUUh~bmxx(l-*h6CONI0r!Dm6HsH5|496q1PtbJj@U+_nrVY_m
z$?@r5h(TBP+!f){IODg!v2UUCbaBQj=J;cs^aUPq#s$%i(vzW)KFsZyD25;946}10
zC9pkjnzhgrj2z`zKbVWz&d}Of@EKyT-fT)PaxgmS{^WP5=EJ87TEHQWH5R>9GjV@}
zS073}X-naUlq9GLgv)kdkw+L<?TvZvb7`<|y<w-=z>#*nJEQeIw^J}DO|&C?4H|KN
zQAK?wVQlP0x`9ksly%raZ$$D#HajD^fo9v$rNGm6@{OwG7L-?#<t-5SV1e@Iohtd@
z`3&v9?v_`?Z?F(;)@PdLxOFp{{T7JA`b`)9uJo9=hu9diL7}uD{$%LkJvyIX#@kuP
z6vn&osNd<Y9Gr=5FrG7>Lzi=ey%L`jyj;e7cgArG7Ha?dn7g<o9;eHhrFi9Yba8Xi
z;wd>*>C>SZhjaC3@NMFN6iqy8!T6efX%=#(8@BEe!7pCReIYbO@r7E=hxaob*u+{c
zy6*jwCcc5PQTimg8718aqaM&Kl#&J7tgU=ToH3@C$?$ODjGMbCx4fG>{HuztTY!OF
z*Z6W;G%SfMFENSiEfi`ns9MNWDO8*>%N<;5Hnw7DM1+i|!-lCyq#n}(UQF@&yyu(V
z!1GNA>UIuk7`p-IMzG{PBwy2Q$Q5@vt3a5a#N7PnU&Sxf9E)=dg-5E<SQ4+dpYjSR
zNlZM)jQ)JIQc)mC5Nc(5q{q4<6k8b|@{f!alLOc?qd4O_mvu}xzHdk=<Ix(YQ$J4~
zpFl2+b$*t1L#=ubqt+0#-&P;YM;t^FAGmDSGaYjRX}8ki&KXq9#iE_L;2PfRjC8!f
z(}(&?3ELn2h|^KCMoP2Sf~_N!5Z%nc>g?u)D7dEiMJW#2u{e#&t<L#JYs;a}B)sbp
zT|f`oAqg;<;v~*&A5(i`&hi)xqRhBmk9K203WrJ$H}ywPD!v*^KcuXX<49;K{tmC7
z4w?LcODdf_6+y-uV|3<9xmH1YEM3CmG(j4;s}s_Dz!u7<k2v0OJ#^1=KZM+NJmkoA
zJG{XM{$4R9{AS|uUh~HAU$27=Vm#5xA-x@oS=G=3c(%~Pd3{@Um8_ZRZYyyntz(y#
z+?Pj|gL0<x{lUcJEwxGZw-pakEg8AoZ<JUnXUzTKfa|iP;Ft}}o;2d(jHhD|6Z5=|
zGd`7nWQ|HQQc(+KRuJ2#DZ+Q)G=_v6Lt2(lMBg$G3=rW7Cg;o4j$69uM!D%4ana-}
z+(W>weQ@`jo!EMtaF1>MWAjB(73ZJ)IFrcu`Jz`Sx23zJRq;`><Yhnqj(ogteNQ_E
zVni1#&1fD+JW#<eF#=~_V{;f#$IE5F<U~Q*jKzDU^E;n?5oKR+&($#sS3^0MW@f^3
zPlzY?Sa%#^speG@gNxusT0U#y0Oaf(t%NFxlVsl)AfTTks>d6dh*r|(yokpYDa)cQ
zKZgmamK6^j7l?)@R;J55#`{nZq5egdNgr>{{VtOvIsQ1=uD6{dN5GJuNH8YMvC~>a
zFm}K)T`qblLS>{b-HJ|~pj{C!ke;omxFe)_&tt}QAV-Bo(GYRu&t2)c7cH~;i)Lab
zb(oFsBNM!Rnow^S@0%EWjIFV=s4?^~a(YulsisrbQL`%Vnv<LWQ?-cDY|gSeI3k{X
z`&y;WaoU5CqiIocu7*)w9gBs6cm8M2u;bS5wz;*3cKrb_KoaagRZISsJ^NsOK0p1O
zfM&&xW+f6^vPD~d1-?>VunrbNe7<k&7=6xr!ARg&pFYpQSykTCV<>~rjX^E777vIV
ztjpq}vmspT4AukI3E${QGNnQ49d6nvm*g*Ib40r1>N1~s)wt<W{D6T0SrhsLLGt#p
z@y()7i~%#G!0ufShIyU+csBM+qlXjL2%0?1LhFOMv;HCRt212aC|2URf-L$a9gq>+
z%!a$TO;WN9_{HfA#N_WW!wCL~U&hc_d+qW{3oHevHj4hk&9+#c7T!8f`$x(bM_Njv
zZKe3_@@QMO*pRjLQLYOh30_%V5ZJc`N)KfUppa?7QTFOSRYk2vFZ2nZ5a_xYqnu7)
zpc#jA7I5VL@^vQU0duX=W{d|Zu2_byvDp{(Nm%97v70(u5oe>arFW5c{|3mb6z*#T
zeh3}5q`v(2Dc`rJxvse_Q#_~>K(zuh96a7djvj*9Wo*e{?PS_k7T#2O>t;{gu9^GC
zTdw_jdde$_TbG|Xeb`Vqq>R(3=_Og+?J3G4om+5sur9$C_MqS`ZixejJKsd;5wn4P
zUNCpt>J-K#EOv~o+oV`cP6)E7fgwJM0N6S9z*-A5Znuc9({!9w%0|pQdoT$=&SUew
znj>)00&Dw=K+A9HkHWaIjN>6-V}?~XYFy0Z;p1!uecNtVw6^^-MAS51mj1kDho--I
z+(f*tCoJ$$I{ak)-D0+-sraoUpYQwQfsy|mplj^2{!yj({&f46@`)qMp+$$<EdpjR
zX)w`RDE4-ETe1aE{fbbE#*%DbvvcDapuGGR8&EVskwJl=d3F@c>Az;U=y~3N?DF)>
znOCL_E{`8m$R3qE+=|)WcqwlL0oipp%#GPTe<@_#xoHtF8^L17#QybYq0PH(y|aUl
ziMQk|?DZ57Jzes&aT64N0J<C1|8{@fAl);1FJUmQM8}*?;0E`#!ORXxQ|?GTn*K|d
z!MczHu!GS!wq52l7NLuJ+(eJVfq>n@h*2)|)4~W2;;g}#SySE)hFK9eqK{sShB_Ct
zxz+G+!1`d<gO!(y)AEQ`cI27B|3;S79p%+K*cszhjRbSWLzCo{nS|pzy~7+NzOmDZ
z92zH&g+y2}A%b_M#l~@~zC9^c_EV^0fNi{1yY!;7%C=28BeIYYELV}4XEc`Huk=$Z
z`%eu(5Lr<qBojKsgc8xedZS^OdxerimJ(DPKfV9bbJP1If10#D)gDto;MhOMnefDQ
zj_YMpfcOLv4M2%;jupcQ5;W==2-fa-!X1Uhb@a^?<q=dIq255|W^)r!4)keiaqpl>
zIvN?>u~^Z8hGVUUG8CaM47_xC`Rv*-5XKRn)xUQ0VRH6D!$!*#tf1osMCzp>K+F63
zc@6K&de6JT`dd>r|GwJ~k~ROxN&IT4JV8JE$D=}(no0cgm5<3^jK>s%>I%H5!YCQ=
z6HVe5Bi*hQ$13hjgU>HQ%vF@TUy0H}oX&C^;G*^O_H1mpf4^lFH`s`h`D#OwiK$8-
zSe=b@>PE{$<IdI6@<wBFyx?hs`7P<Ik3D<+?c0ib+yQ-t@P|<=ISw<FIuIo%rS#(d
zAiobAqM<ItxMB=~kTdcVI(o&#byJ(pNcy3S$b;YO_&3#;el=zJsm)@gSu#e2S+NU}
z_-<W?l@E|Z7_jvmF(%FOVHb6X(ibS+F|&P|mREbS+6ouGcT7=sB<Um&gh@T?q2-(P
zakN9}MGE)``*gx`N42J=O^LBe`=((ylryn4sza?SEy*decG`^38}W6wc`H}WvM0Hy
z+n~owf_aEGJVp?h#uL0#Af8!!fWqyvN9PxA8yj&yT@%pktfv0Eo7C5%Ck%Md?D!YN
zEnGB<X_YfRz~$F;HkK)C^OiwL5;XTq?AccqtwW=w+4%`%MafRyerEp>LB61AS<$RF
z^>odP%ao$PQh}sdCfV|r`M8+%8Ha607eLUdX=np|;hOz?G8@WmZ#|dQjl<nR$DJ{r
z`<9f26#In1?yBCz`g|hK)4FhNB?J9(KnM4mOw7M&k3)b>ufKbXhy&iN2k-ZgL+=h_
zoIi)0!xGVjp+;3UL<z-dO?bKC0<RisA-_wicB~^PnF=4!39YWKx*l?datHhfr*)K2
zqG=MLtHEhe;EURWk72K+(YhrC7~$xNhCRw>R%#tKu)~%Ag4{qXJS7fVQUK?w200mn
zkxa8jqkl<p*yLjz=6iw?%I@*c8Rl5RHpU-f3QpXh%2%twM1*JH_pRMt^ED=(f!aeK
zZXcF(z+LNAD@lWeoy8CWllX&ICXN^BPMSfg`MKz_H-b=VVeio7AKX&CpQd7$R4M39
zne^Ght;_oCbXAazb=Omk`8=lbe=wt_m6YZf%|zn4epElgf#R$<&BB>7|08PY=(Jsg
znHJl-^_0tg_p$}!q!2&zvWLrai_X_CGbP37l$(;2OM(}}Xu(O)Y7eHWm1J)D4VPK%
zYG!zQ&g}J%)}kHS`QHHKE_E}`7qYu=Mlq~l0#%A5*ACTv3Z{8q8XB3IsJ4uK^WDwP
z#Liz!Cprix+aFGByI&zC>c1D{zF=OvEa_>qi+}gtUR1keMA{cHI7Xw8q`9N=vW;}h
zunBR5X0ZvC`wN|?uF3RhT;hs3S0g?JwH1^Q`MAo!h#EE9@!)x>w|(+RQ+KkPFkQ%)
zs4)e#3(O(eCWF&bi}8?)X-h6_CnQHQ{6g)^ofys^c5<o7tVsCM{6&UzrvD~1s7*AS
z>f6h}kb;unT%aX53FBP5+EBB~mbknZp0B;Uw?GojU9tv0XHHvXAbQEQVnoV{J_t|B
zbjnhRfxg9w;=d!!v;tb9nJG2LMfg~b1~ZW@HHZ93#KecbPaUiAvu%H#@J(VEUe6#*
zv{oJwJu&|hQPZ(<`t^ir>xQ@5<>~|X`teb%Q+KOEu}n)sR;}We#R*^YzE_zt8EtYn
z+608^cd=T9evGJ_x-JwkKMzUXsA8LhV^5Y!p7b}&2nsww>h9iry>w6D%Nh2{?P+9P
zZ5a9e{z;}0%f}~61u-#SCKdZl+e>-aEK5b<!{zdPW+G&0%QEWhfW0$~H>c8s`80mW
z-z#pHpbP_``i$|fX0I%aC!ZJ20fTc^aUWZmAu&2UugF~;<jT=RT6x`QE7|LLxiJ<_
zbYpqwiAqWNS=pettSn|O71xNa%Vd_2M{#m<yK~4$u41INa&#QKkpdPX=~nq`Oz|f6
z6Iqsad&3(PgBvdc`<{{f9-<|nyVma#qH-(SiaPQi{Aoj@8lhZ*{YHVWigI@Jj)*QI
zjV@wS?Z1?K&4;|jC&n``EQa+t8zoLU=3ym2vyM?q;v9_bUGTLQei?d*A{~PqhFbn-
zRiEJ}t|K#Wr~8*oCzUq>M`l=|DR^T1$&~DYPa;F;*yYpB8>!?KdQ0|*)zhLJoEo<F
z-XuR`ftqFu=DX)2W3A+_k>P0CtdHu|d1C)b;{FYpv0$S8j$%#NYDkqksiC!F6VKOE
z-K3%nAMV)vYk!V(r}{o7`u9++!E;<r<+g5l9&8)A;|-^!L%K4t$M_`j+z%Rg4NDQd
zA#;iM42~~4tmiiUGv%c4z0x%_SUk}|nTU{bR_K=>IzD7VuUfEF(El>RV#34)Dv108
z6WXN4iP2sKvch6QYgZBoLUKrmbDMP=Rtm8xg`3z8>2!TjutVH1VXnkHr7bW)vozK*
zwSQJQ(kMaO#=SgUWU^2boCXIX$}k!%^J<IZbR%PhnzHZZ^Gn7=vfkFcT7Sj%_V+QJ
zy+_X=q8tob2xrZAM&f$(#%jj!#Kh2uC6r9MJwq&+yee3<S{jjSn)M_X@4+=v4ol%Q
zAR{w)NR$$F8zSTVdBsM+U(<P8L5_|=`(YwlEh8c>46X{}H1c0W(x$I@h1j*Y<v6$m
zEi-e-VXaGrDR$l;VhX~&nY~u!@>yXr!?OuVuHoU)opV~W-Xq%Ii*we9yn@a>xFZUG
zUZK<;6t>Uv9Tt)Pdz)~Fk~R%67E~Y>Zm#Y;z3IJg%L1yxsoxBGD>I~b^dK==|5oh<
z74_da1iCTt<IvAT4d*%pYU0TjQZQkn)i0-&v?Y4dkV%6lVoOa0Z5h^*$ToL)Rjg43
zMiFTE!F#XQXEWDjGyWtS=Vtn*E3$8xTQQ!4mr!o)$#=KujZUYToyDv~vm;bYOr$w`
zAZJKnEX6US?pOTt#_)97u)%iLE4+gX-rV_f!zk8mpRwp^9gj_*sWNwXP)EeMTTM%2
z@sj@RBKM#L%J|`$#bvR8r;;Z|%FaMP^fny7ZpKYdUBvr0mo|>Z(5nE8$vMO^{xR6E
zX1^h%_+Kf@_Pu$2BB>A$p^ov9-gixeG*BMxQ(&%|=IgJuZg~%#o5SXW4lw_CnG=lx
zUN{C>g++va^xFJ?N}pYoBNuhr3=dK37B#37KUaU%kG;I2OheV8QmM#it*9?^I}ab3
zCzx#sM`?4?ocZm})X{E<iN^7;aS;GGQ_p>*We6V{gt0C_n+PGGuN_%F(=0-8L5Qy*
zD)CQW`43Xm0_J0wT}C^*85PxH5G*tuKfIuwcmi}%p_#)3M-PXmd^iIM<j08`H#C@+
z^O81j9G6+|ZBTe|H`T^wAhM8<>@_Gdx$<eu#`p;CMzbl0Ec}fF3^;=fOSE-`@(x_H
z51wvCd3C{Ecns!eQQ#&p-EeGYbVPQUBPsdX!*V<(ZY5$E`q60>*o+2OGpx$0F39)z
zm-v1M8BecUa@w)G2D_?KYBD)G<WH7;_uMJ2d1d3%_*E&Ft9+?%-PS3!8MA$`(zRCZ
zOPqlG5fVjW<mfd4N%LN?^}_Z;!&+-rsVjqbpuu^u=B>UA%~wRrPrpttrPEMFjG|{<
zI`Ij?^{=ZfD>KCQwiP%@&oVMCZQZR`7Tj)3Cj{4R+PmKN4M7?zyy{=z*lUov`|Pn0
z>GIjGO)>ZAUqZ*}7IwXx2)BUkvWaswC?N@qqK}mSR3+KYB6GO-7x=xgIrK_0rxZ*`
za^{nxmPT*vYd!O0cz@+tE^D5?^Le19PtN7)XaiA>{em1_;FlbJBhPevk*Jq}(iRR*
zod0u(@Q^^5OeTt)ikgWHgzaWDTLt-#eddS<M?~0pzBdI4hrYydVi;2Jk?kD=a2k<z
zhSDIPQ*hqJ`K>H=53fkW-UuATmI8}S&)S#7ovv3ODlormj!q<vo4s_cE9x^Y`vyl-
zZLGOkfacZR6}%fFJ%|Px(_@&=MhwL`4s}D^9@4>%^iZ61n+bbS6Z1+4!~qMss2=%n
zD2`Ech(mW8)K+;1n=T@l%nw5w{CoQR>ySHtrNjoqSx;4rdhd8z*m09$dg@N`hG;(Q
zSA<pdY9$<V4n*=w=E#ccgdb%FvohzxEO}=bnH9JVqiGY+jOUuu5p#`=?C~1jX0oWo
z3MfH-9TUiz2)TFmOB$%dug)?{&_e+<(`n|<b}Dalw<cN@dQ&UJ+o!Do;FMO;B@VfN
z59@y9Z?{B_EoK<gdi_Nch$FJD{KpORM--I@)OL|`KPATqI>3-=#bZv<3ydIyA=63!
z*L4A=hmIVdz6iSC{E2M310s7n3JL(`d^OG~{Mw%<U8mI?5y&k)o3M)$n@PzGQm>|V
z&Q8z%8#ox>@K!zFSMbH#1&&piTi6!!r`J?6!tczI#JmS7&4k-_MIutxMq61Bru1VV
zuS>x{0~I5Nf2s)^1glZk-=M62P@0CF1qfQucB27R=0fc?LfxG6Xp5k+#6G%0oG${Y
zr(vPeVvw0PUBpfRFa0>CfIc`3k>s$x*e==D)1Sf0Xe}~Fa?trPyV;iGOI1#>mtT6P
z2Rv;FKR``-!@4nv%|T2~;xF%yBtP%{e7LqZ;GpQ#K^M6CeEDzqv%45~H%{Ee6kSN%
z&Xy{%C;1XB4E#vjwU(T8qN`!{A7Y>#Oi<Y8AkKYLu0O#qKQnbZUr;7(m#esRia^|n
zcVTFVEL1-*5LRHTC)7QNdLZO?g-awLpS$~mL>bE<+oN!0aC~4?Frs^wqj|;j4*fgz
zM__slM>=Ed9jow%Ewj|07|qhr%J(`usBP>n<hODP<MFvyyIrJ%6c%@%pnA>9OB;=G
z2vlnRA@ih3v-KbV>4W5tZZ7-HUR50!Nc5VLeI%vcXci)a-3vdx0^;v+&_m1dZyEJg
z9vjp<6$|U>x%x1(HS6S=-`v^vz3ba*M0+~LLqyy3rbPsi1{?mA8+TIhE^Trb*bv7N
zT$uX>l<ehU=hDwT4SJ6RJ&eS^xj7#PQd|Dr(D67<ZRJ{MKT(g{W_Ljcg9eCI?3y~D
zcLFEV-z&6+(y%3k@yZEIf&&88dosvhd1g2`8{2WVq>_8+?N=<nS5=?a)8e7_i|1d|
zs1S5P(#UqNplSyp_~Eq4PMjQn;=rXuWUkltu{CQMk6a}c9uf12s_{SXJL)YWOfEJV
z^oU<K?6zz9Y^Q<5wE*C&0;<4=9>T0-9Spbhl0~eCvpKs>1J+-$aU(;-y7>FkVed?T
z_gtDXn1?Kv-6sj-ymT^vym_7(D#A~%0r<$BEaa3zN%{}(#aM$;y&(LHvx^V$aXKT+
z$nophu2#+q&IQ|*`>_taP#-ASn0MeXRI4z=kkUpQG7Sc>-=6Y_LaCt}xFf30?x#lZ
z4UObPA~aQMh4jw?F`Ffj(U;;?s>3UrPQneIP&&0OT<aP2S!}_W=ot0uZ8poZ-_&g;
zS6pjXSbW|+k1*}Ql2$K7NbkELxmwSHp~9Yt7hXkNhe0`h+s(hy$|#ZIqlmdkn*2$i
zOhSkpO#A;u78tiktW@GIX`(ENCKWE4xJ{fWMXV$mCR%28{PHNc&PHfl{NcmTQe8`w
zk2o@A$w@}pb-Dl`J9}?Fz$ZJ2h&n4;gdgiptsCJyDRg`pe2_LvvtJ`y#oilvjGDd`
z{(2_VBV~&xDgU0nE#g;~OPU#Lm@)WJg``+D_S6x-yB-MM$k1%d+JtQ)Xe^b{a<erm
zD#(UIQD=|6t;|GQ2zerq_Ee$b_qF=hp??|vIpj%(?arY&$7k}}WlpQg+TS#GT{wA$
zoA(N3hpUh(6KbnlO$kK;<<`ZNvAA}Usuxc(+aP7|odVvLYKPYxP47Pb1U*!N%?!_e
z{!q6wO{SW#F)5SuV;uxG^C4b){BkYipEI`#>}@oqE{ofw^d$w`CWGl6-wM2(^l58S
zVGYixcW0aULZ0Jv!WEf_H|u54k}v)bjVv<y<eG<!%yBNncg@*@9u?nQZ-4<^W;B-A
z%@RUVM}5kuKucdd$XFaR=<tbk%TSZ#zZke=XG~MQj)b78VQYQ4Qs(m?dc0-j6vC}q
zY?pVw2;RSKE8(U))>aouy7I)k{(9qtKc5-U4xP_TdQrx=mT<J7k~~ttUe(P9YcDNf
zT1s1GvN~JQm8}DAw#xvx?@n1p5^`0Kzn!y<)f;WuMrWImb%YBUx2C$^>JCR=R%=rJ
zW~M+Wdha~pc~*^KCE6UAt^EZEtXy($Rmbjg6kN42XX%>Ty5-ur;j?KgcFZ`*<MxFF
zEFsW&i7MST)ST8ERU|p_^fwlsyz#g6;pvbHL6-~w<~`6s#NvhwGJ@{N9{!jwnL?k4
z)H`19UDN%6)J_c+8kX7;fqdE>zT(&vnF*$dKzDFG+-9uH-4PxTi2M!D0UAe)Z-4c{
z?Mn|n2WmTtsp{duMQ*p51I-4e;0Rp79#8#w91<(v{}<1CkQ2hbt=*R9I(Te&vxbsu
zzA-n+TGg0ItKiIbL-=niF=Z*L2XC-201`#)!i;$b2~Q0@p#`*iis`<%LEUzsX~#|U
z66uakZMbs=5D6rH>Y@}CerqeQu_s)YjfzfHUMOSDO#TCH<$G<0=Pu0K3+~S|?7z#f
zqM7MOLAVkFI0NHC|62-ls%~Q*Ed;BrN&QZ{0=bFKq5L#y3N;yAAM{-O`EqlVd~p`J
zvx48pv%5iG^<uQrTfZf{8ltW%j9BTzp{9Gm-GTEJ|BcR9(nQ=~MRXwA329ElgxnXk
zl|w5kc{5=(XMl$Y7fadb&T?dPH~h1GwKh4E`T7NFDqX~mOrwR&v*9S1(1ZBH6wpDF
z$_Nqo?9C0gtuMEq9>h4swzM(;xm4L-XnZE<dVQpvxkfE}(()`r!Ro85cWhCpD}bz5
z9~dm<HtjM(En(k<*xUf!*N64U8)O=M8;=EbFC_)imaH&uuusA06V?GIN89M)AbnJ0
zSS`OoKPHs|xayv$WiIy-o!3{R6v$ZR3e~SHF=W0ryf5LAQ8srrHCM=MMPk|-BDDR3
ztvjPnsn9`Ivt`><NrJoG+^V50>n9v9s5UnA@k%V~F=%@vnAKPn48wvd`i7O1mld-!
zXK&o!2XL9m);MD9EVIub`*=)FDVDiC?UuT0lEbD_p~BILBc)<AHa~i3q_J+zmEU;h
z+bWO&QQ%-xbKze5NhN~|p!`0yRUnG)tJ3^nZS+{2Vujg|j8rJ$y032lfKL0IqMc<g
z@U&g@{y?Ag4M@wN*e(!G*NZNI3s?+}*x(&f?eIIv_52Ns)Dlg{3P$a6Rfn)k2kkJ#
zcHIj2Azs6cY6Vgpr9B6x5fKoF^TD3a%6A8ivVk1)qx93vnRd=@gV?O~eVDbLkvC*c
zk2^b!PkZ_Fdyp@KUo8Ey#0uK#AeE)1uuREYEtkt`o4k{vsXTEyOFSvu;Yw3pMCt2s
z^c#K2YOtAO<l`ab2e?^hQtB_spaB^$A#IN&Fgkv`7J?F-{M6j#QG#QaD4AB?;3Zo<
zQD$x@+_H4n_E@!Keu)IV?$2$vvt&&%W;+AdH10y1{kxOQa@tBo8-g4Oya^S)^8oVA
zkIuxmXhqy#6{O=UEK4iBPRW<}O+ks(H-`-~mnh$9jkVwii#wpN%?oHp_Nx<-qDvQn
z$|Rc3!KsOsaxJfj^0v<s_kqe9K6y3OdBIvdhP3k-HMrn9(_k<7?=M&XAjRYQbQ0%W
zN!aHg)pF_g4M|zTm{Q*$=l0X7_R$uIJI;S<EhYawV60(_Sab#r`XVg_@D^+cNilrS
zR?&EUpyRiLryfO@y5mw+a%sL@hu-t((0@Kj;@%kc*7WWO>tBVGzS(NbRUVXXGPOOd
z)ervzJOs+cSIj}$h11ZU|777n<P5ToplcWLu1(HQz5`P>CX>T@F$`DI0~4sBCg*2J
zPab?6uFxu%<GA00fZk8Bh6A%hJzb<c4Q*EVWxJVRJT}u{=6UYF-h~0Azj5vX1!Lt!
zuOLE;%a9~Fq;f(|ViaLcT9_c6CWVR`o|q=Z&XCA~6t%<PFOdGj+g$Ucdj4xlt&0Al
z2gBZaMEV+iV6CNBE8#rVv_>`hS+B<3G57td=iArpn2~3r7I@IV1o@bM$A5Ca-`((?
z<m6}DR)o_ImKxo-<v*oEJxt6^!=uklf7a^MtbPO++CFLx`h5|&{H=1j^69iBeWqi`
zJmP!_fAmhI722sRQUrGj4l;Qj8{?o_dhV;Z(eY2>KT4lTLHyjED({vnrL{r&8&Wk)
z_Pt$6U$-{hZKR)}o;Z2&(lqmx_R418_ePCLJ6mR>t>)(Rz`}9rY&teWxIPRvPc>=I
z&#G}-(k#VDmE&jse8W8Wj0=eG37;08LzRj$*FKW)FPpNz)MawKVN!|c^>e(e^W|-3
z`EYmTbve{?{1JXTb{YS<pKnd1_)%s}r1Md0?L9$gMb5M)*a*n5F6aX&%mylCL9C(~
zQ-(zAkIiVA5K8hu>Uq92d3*SUDhRD>vLAko+$GNJaAxJGehMx1_v*@NfA#dU-R4>E
zCXWWXcsVVl`BHGrdFSVQ)>4W0Az&&v=@L@r9S^4Nb@FTKoOky2wg0i4nkRY1H-0+!
zX#&e2g0MdkSGf`nXsXfvdi4DY#FynGD&yZJ6zCP@j~)?_rne)LOuzPiQTgpoh*}uX
zO(7~0nDob4CG5<3lM)(hv%!QC`;kKBu7IhHl#v4omORFS)>70*%0HfH!sPzK9U3!I
z=^Ne4v{XWmxI-<TVPJqTT7(}>jx@M80Qur)EIh@|rX@0cpoWyIoS3WJpn*&}y(yaE
zRZaDR+!1?BQWgx9U}F(ZfCY?y`}yGSrbJgwC%u9QsgtxpYsk3~mtaeWHav(I2o$(_
zi|O5KByf0alc4dN5XB@$hW9CCICt-+ElSKneU#YNH1;mo<}VUy!;a#=XKwe8o3r9V
zj%}>mo6lz9l5_ve?45rEZG|lm;VfEU@bAZ>?!#Biza8hvEy9NGV4}ZJBFRD=Br396
zaE2r=!W{9Zz5l2)T0uXF=wsWrXS)#Y3MtwWFQ4n%mK=>Sb*o!4b&!JjO8V_71>=uO
zR%szcO|c#8A!XFC_u@)IvAKkXvl=nRzHilQ10-|YjC7E7{A|C7M6&adPWy+wwF?{m
zQ@C@JFnR>Euz~@bwFKA3rFD3dBrU2oew1BYso#Uz5b9)@c=3Ye+=GHT7Vs8eO8yB0
z@0%Q|CkG!$z9%=lNoNQ7z}by3Ck<(B;h2B($olKrX}t%bwP<TkGnTpQA09#5)<}0%
zk8PaUs@01i0h4>SFS@pwef#{LoypZT>=qqnhi6@5msU@hO9X1v{+Fn)A-1kfNzp*B
zUd%EPA?Jdi<f9@S-*pcgmPEO%PGAEvQ#&f&ih?>@7u?xSE{YhG7-NvP(4K5b%3&~E
zG4@pk%EJ%T(A^g<EcgThbfvVpy}M=)K`WP>8y6m&zkhIA|HQU<XsWzBNmTiE1lPED
zZEPrAk$06RdTP(w8$Y+Q$M+P%!!MsZOt=XxN0tHWYHfk?E3Hka>tFBUwTq+r`SjA_
z_OT0t`L>&V6=>(Pcns9>iQme*mBvm{C_XG^##UySZNeOE;%`pub9hLxaz-P+vqh{X
zj5kR4EWC4=LUiyY7mG<0A-Dj=dzb|!><vonhq1w!nv|izh!7MbEp3~OmqJpeoq*bf
z86Mjpv7m`OtQXEhnKBegSCQbM3$j0zakaW*Pezrb36aNFlEcc5{s|<0*F4ar39;}c
z2>tUxc6Cd8cE*d|T^S|Z!XOwh;lW#{R2?SU{-~QkPg6W_HITvr-mgWDh5>L<ZN|Ml
z^a)c27}@sIRDvQ^c2bI^x%(HIaFg{E6&B8g((cQ+K~x7?tvz#EbjrG$OS-!F+ll6%
z#dY04m1Mocg`x5{mh+g!MR=w|cQ;K{%~3hF{AF$uzU=8sr$!}F71^Y}n8)gR@kWU0
zEL`bd_h%_<uq{4PUz$e~(&gR!)~SDZ3NtqGn={_Me76STL@iY6jeW~wBF+YFa&7$V
zx=cf6{^dAUf1adeTU%-Uev3VcTQ!DeDs4c!At+jV`#8+Xl3?}KanXt9090FV=ZXdO
zUwhE5OfiIjdbylQ)^Ytk`7{0-J^s--lDa|1iriFctxbiuf%uO#>Jgc@F=fJ~VW0uS
zm$<ig@umQ_`}kEnSTCopOXBu+H&?}A)S>ef!~G=LU5qnTBbpg_zOI$=>n-}=uaG`6
zUU9(R9jacVfoBWe(|A`2Ch4C>GidUEV@eb@@OCMbdwLo?m<4csoO_UH<iq-yQZ54v
zL7gRD1@iM-T1yTYjcrp<Zi<BnF}$iZU5X9zG-}L8vsU)gTdtwmIa*=`r}I&g*|b-C
zeVE{$dKF30kw+3OM}zsVzxm2Hk4dOt2d8q7j)b$iQ8cDvP(uyFvT5XqmWZx3nsur}
zHmy)G!dx6FJB&O}kq9O#zl!3g*w9$E8jlgQ+TPw{Wnbdo*|oYnyV^FdZ5_C&GEw2d
z&&PD@eGiLB@gOEwR{Ogrby43)qGKJ9Xxswm4ovsiMg?5^N{X}!2KsCEW?^HK9IP6u
z6(jFcYA@B7AP}Hv(ixsr7xhw|DyzQ^;orJ9xjcjGRIqjiYl=D-(5O`+J;;)(AD_%P
za9OPQjQ+s2zU}u$@F6r!FrKV-a*;)H!HE-GgPJi#nJKg<{ho9G#RYw2gB_ZXBk6#R
zFrAniQTw_~%%E8+IbM;fog^Dc$`wvu?Uq@ukx7ypQF2^?M*Tpcbg>UBg`nCZLchjH
zlb))y`t}RT%*<2y$c00#!{$$-NHv4hvYMHZ7ztSwjQ!f=Wtb{iYl2jFtMa1&;*K3f
zQbNnNTd@y#bat-J<6*4_t#Gnpbos&_Q(6tX6B(m}H<KGn5J_cR`!pE`gm!E^2Qyi$
zmhs=oII~Nm#!+)V!n_5nw{J~zY|Te4`IE*$M{_wU`~6pOF)=ahf0A)rGE*4IzG_{~
z3@GfB*_~YLZZn!uZ|3SdimM&n-prVuGlz1n?ditWLGO}|<=$kM#~5=~@>-Y|(=Lv|
z|HQTiFX0V1Vn#Sx`O!qVoZ#UXAeDr-^M@Kaj*)jw&eEDw?4mX4?B<PkLqN}eh{^Ig
zVZvjypt+;sz(q)@!awxrbs&7M{&J)qxgqySIE94zPVzMKL{ddO$~VhWQo{<4u?xrn
zWOO3&8&&o}Xo%2mphrnK91T=o%j^mh(BCRgIHQe12AwYNN45MpAeCY%)`VMHHg&p`
zk@BpO#KvYaDUE0^S%fYlHAe<`bV;{nJ4rqt{$<vab~#FM8Rf;+mwfzLv?KlqQ4~+)
zW!MqZMeBmDW_5~>;e}YbRGN50_9hAIi@K1?uq=`?n<~oCJbpvec^~TK*b=iN>~aL#
za)yWPFPJ=QoaUmZL3H3vch+66II6!~;yj)-D-vz;K_}-c(7OCmJHq~KD!ghZNEd!=
z7W~jS{px>NxSRUVb)o>O$(&t0DN#%ie?(l2nbJx4efA&8O|Yv^u87M8pb>S8yLVHx
zEv_yaST!G<H*Q$n*t#jU`)^qnYyWqMg_?tlY*9kv#J<$iN%wjjeIU!sD}vgAbv${l
zZ<@<8fZISyBDPK>%VoQvq<D1gJ?rf&zqB`;q3EMkx;bvA4p}#L$5WM=U*?y-Wum`J
zWDLXKtiBI9ZRe@J*4fdoqy(-pmuf4A7Vp>0lD3I!M-v3I5heSo^3eQ~7LSao(t&HE
zDP6j$ALsM5H+xvqXAXkb+&@8FXKnd<RGz~<2M5}2slm~i;t`M3c%7w}Y@_~2)>bs{
zSyY-M`;j9BTbj?>cph_SGTDfD+8L$EC`l4kVIcGSy1u*}ji@gjre~0=!kSR0%T1i;
z=h-&uYoE+TN805Ict-$FDxz((`Pp=j!I)D3cG%Bn8IuojSl@db?7k6UcjQmIBJGo8
z$cWnGWKhy?<09>`_vHT!ApbLsw^;O?ONz@95NIqZKAuAceO<y9NXQ08P@ryU?HCnR
zy}9XAAE*Ki!d?HfLWup4j_%G9;9)z8*PN_7o{P=8N7R@k)U!1n3Y`>m5HV#%SA#@Z
z0CF`9#I-X-RR$`AexzgBP!|ux;@`_wx6bY+5{W~G{2K6ZXT*~xMWw}_@=oxC{?zbs
zgIPj`RHOfzH7EOEPJL-3PR6>t7~w=qW}XWjV=9fzm(m!BNt9`g4Y7@N!nBtuM?C2K
zdUJW>tOta-c#-4m?F6*+hw?0*vGt$vECKyoEd4wI_Mn6(u`RG~{jU87{agK+p)ox$
z)!-PLP`<mK{eA{NJRol=`o{tZe<FgG?|}L4!tBa!9_IFMEPOaa%tOk5VLS#t0b#lV
z;z9Gl2_RN|ao_s=L1sgc{jS^#5H}%pKwUQBe0QDug?0fqWlJLc)VoRsEDmVgQ#1WW
z217MxF2Cx(TV1$+bYDKZc+uQS?-Ds6IzR!aZYgh{cnabNp88)35b>ITu-1VfyAnI$
z{PkDzr=a-|2p|ewkZ=7DkcIlg>p|Xyh#!9u3+C?lPeJY9ksky95r**J)`k}#cJCUV
zg1QU*{A`H%WT?jiTo%tms@(+gJDeNOL(+dzJqC^l1oJx#9?V02fpA*kUFPMB5UxLf
zJqG53fC`@I0gXW*1Ks(+eRo~o3lMcUz&`I{a-Ucq11YY;`5o99e{=(|PeIf10{9&S
z9H26Iflc?vyifq-6G8)S^8xQMaE%Yq*8ry-z5NvY6x9DG-cK+65Prtj{Q3Oxb)<SM
z&Kt5*&_1AW38;Dxu&HW*O$FIcyql+9i0A`9F2GQK&UUJ>e*+^#xc+2)3VH)-2k!S1
z4{TNd*d1oOO1lKR6)IToH$(G~Z}EbJ0D=A=x3~85_JKE`-{^qbY)QxiTbLO_@NC}d
z0(<3w7)N9IjnIxr9<!5giJnCH2gtKb-@y`V30C=x;~b(QwVp(b9x)Z$%g03NI>YWP
z4z4<k?~a{2cMZQUoLYgc(ca>{mxFKT-3Nv8lY@tN7(K?J(e2e!=lsSa7TZfh_<Vu;
zPY6J{HQ>W=ULkyMPPC)UXKQrT640`158)qJ`?V|Z^NU3HbHE?*!xQHVe^(*5bnS#N
z%P#M~OZXY+PH~k005VL<{}`(JqUq^uEl8hVyRHx7g9z}{`4;St2;9d11o;hMe07E#
zb&fSWN9-NLD*M_+4C)hA_kVon80gbc_rF!oF*L??0cNzk^{)gXw#_s&aQ{~<{D<}Z
z?dzCOyUgi|`Nn#yjo(8W6GzOpUc2$~Y$A1Tal03+6T2LLas76ll8v#s6<}nSBk0=|
zz7N@l5FigP@Z;-qS0ICL7GHb%-WL43sLAG%cZQb{>Z`Vc{W*>NllV5dl$~9BNC4)m
zP5SB61?@64xb*Fr3c}welmA=%b_ep?RQD71$nOvZ-ru@#GhL^04-sSXbHYpR9S(ic
zH%edqTiDYtd49#enK#`Mzp(P#!ctw`>fRoY?gbl2^%l6`)aRHR*5}H4#lZTC7?<`Q
zvvYbbpmk02Rk25iPdCR^ZUHDB9rx|`#qqntvV3W?oLqzY`Xys))H9{+fBVWouD^I{
z51#KI!CZ0iy?JfZ+)$kL*zu`(T;I^tv}8XS!(f;0m430NWLI}@-_=vT=gi|4=bZ6{
z$p4M83He_yz<uDd`T%#d>01~BspdB232ubHmsbcsWCrY)Z##7PpuVVZc*6eui*G%>
z!m<nB+5TfMV3%*NXNY>CwEY&`*Dkl?x0#p!*Uq^7>>LQoIf^p#F>&>MD((B`qd)JI
z*!%hF7nrZYUjXd8e`@f~-_eKs?oHQU!Qb2GhUfaEb1HhvD}9oARgY)a{)xQ$Tng&n
zB%ZTHG=w`hx6Y|IXXgTHc_LVyuZSPE)=!_wH`#iSecI~&z4_<$4fS>(!QaJGHkVSs
zHv;k9X1W$w3S!VsS(U)EE_g1xn;^$`p#Qx60YmXo-8bj;&Bkc$D)8wX`bXW)js<Sx
z-Op|icPQ-6TN@yN2;W?Q8DOL6FB==fx;~BdF02-}p}z7zmJ#@V@4qhv@2;J#3iQV7
znj151y+MABKC!zc13!d(yjcN1m-k+(k}jzCEo55M_O0<38r8W6j&5Z7b^i0P!T&M!
z4bYKvTifBpwl%SxOl;eBGO=yjw(Vpxv2EM-#Ma;My!YPk&#JC{&a?OP?0vGTPuHrh
zK8405c7fSy7;w2Zt2I<tE0_~kk{4MipVwO$v=3P+TMvaRcat9$Rq5ETLt@+!x3?~v
zV(+|N8l7PO05sj|lsHVqyQ?hqB8uGQmYP<bc7}tXG*rP?ZexeNtFmv!d5<4a>8Oyr
zJ(}`-eCianPn;m9q}?>m7{SN5H1wI0e?PU<ao^;mR}hp5<%;-+%q#2~s_z&o7STui
znR7QtiFAs%im-~@38jWK3fYEiL!c$|ny8N)GMAiN@R?}$I0%JI=`#o|=po3auPI2O
zZ!(Ar(iMeQ_!(^XE=UZL@B4d}T_5ULZe2iK&Wub;3WH=B%skCg?Y#pfpI)G<585tB
zpGc5%5PY9$p9_p;KdG>u5axGS6f#J5BwZtk<4n}<;A^bD{-9w<xo9bx@Z&T3f@dZ{
zF>ZE(L0jUExVlN=QW!~JxO}-p9}ENKemP{_;$1x{4*r(FYqq|2sH^1@KI!MQCS5hD
z4l7H#Es5^sy7TYoNU}s(0&Zjmlm>9S#F4mKTQhYl6gq!w=$X-#mDLt7SyfcNlvQ-R
zYZT6`pMJoqQDmG^zg3Pr3<cW1INGhMN$OF07vHmE3vxQ3YDcXra-}zfCGoI_KaXuG
zJ<fvu>Y{ub!#;|<Cqpm0f4-vNez!QVyXE5*8NJ)4b>rZu@;b#Oot|!WZ&5J%l6vWF
zOy=gZJ=`r{?H_H=oV;b1co{Mnzw;Lxy}KjCmxK;UL};dV_vz~IA4&{K<a6&#I6kK3
zzQAM5djG6U?(m4Qe5*TLeY7U6^?Axw;K=fN_5r9PxVd?*+`hjGFni}FZF94AdsW9F
zd@7HAMZ6YL`G7u%$=s}9NngUb<sOj_<4vW+2s0r@xD-9#iE%qIFG^gA@_h}0$p?(W
z>$R{8)aKT4qL9k)>J{tOWwA;qqP*i}z~3F(FG<|jD05_q9PLt-ygrFSe%Rr5`cD<F
z9Ggb*K@1(q9+-;9ZN<j3<QCY!WU2`B==BZ}$=txE6F%LtL$ACfic5a)>Bo_dPWNFs
z^SVfL1KA@z$A0l)T~8LeKu3sjMZ7Y)J}`-<>~let1V?TtdxzhUFA(Cc*os%M=%JJE
zc#b-yP8aDW<I64`s=+780Y%bibfqB1cS>z52mrT;+m4mA;Y%(*-b=853Ano-joRW_
z`@U`8ff1yy$MolJzspS5JR{b+$PGi0ckj25=;9myMWJ+ukQc80v9d_qbh|gr{z7<%
z9txwv?e@6k1Jh-7DdU~aa*6rG?c=H0e45AX-PS41)8&MeoLM?MHTjhMi>-d(?T<GV
ziDT)MkD*<qM)fHibqF|?g^;h~%d^6Ls+zZnj@8ZPcH8ZXv4;5i-U^e5o|V_SIur45
zQmJ~p?}Zj7M$nVQ;|(-4Qi9Y5ys((E;&8m+k^Fn#W&0R$p*eSdvG^~9&V$J!(EgHv
zf<P8mq#zAKCM4Y@;TIG?-fF$$RXtaFCijfFKbrD%Jf6x<e>-A#G{d(^p=eAc5jM9H
zRNDjDO#OY8zfQ{!i!xgXg-6wEO7uGwl9u?7WM}rzBt#?pI>tJrP#y`?GVo@C#$tn@
z)d+1l#xnfp?4c@^&o&zkDg#Xcjqa}!h*C+k>ilGlM6c@KVAq|`y{F;ni`EzR&@~n&
zQsCcaxxBEIvInaSJ}YVleCMr4E^0~I^X}%~Lrd!p2cBkoYU4GSy`$0pz?p*;$tn<S
zQ$Anl$uKSDPxb`_94;$b(9PO#Hiqv@n_9p0Yi5N)3+SASX{}tc8uX>O^+iu$KcqTo
zd>VGml3_zT?RJD4g5*va-O<*nBzFh~^3~}1OH{KrPKNSS1bRY<H7y3}T-H5-=YIL<
zUn5xDJ(_~(^#8hH%`xyEg;uLq>xaA`zob;%#ea;YT(_uZ#;|lORA|Er2s{M+q1f!?
zbO%jioW58bPh{;K`1A1rR#e79iB%_KLtzVCWQCby5`T0r`V`7?c*m8&t+3@8xq*bx
zlV`mvTc20FCtk`%($DgnD>z$Z(#n*6y=+ehnH)4`d00>(vl$Z+bR40<JnIgL+XB2E
z<nYMduRD-V5R~KoFK-H2@(55aC;L)$cZ)J}2fLrmY?_B9Q?!{LZ5g6NqRj}^KXq1@
zSXh1;dO3JhnU`_Tv#_x8v^6{{ASaup;R`RhqMmE1{jSn&{cU1$H^z*${nf;MupRrt
zSMY`Lv&R&|Bkt#i7(wkV9t!DZbV-dmCz#?XhV^{dALsVa(u`j;Gv9A2+6hrw4D2#E
z`9Ow&kK4-_Ihl3w@vuUTm|fsx&9ldEYr@M<UkR}nXozF;*OZu2C?vwKSA$qsNqQj-
zlQQ!rgv*7|N!*_?<VkGif(JDutP=hx$`lhy^8H}qWiHb5C$(xgJ!BaF^B8a*`6<;M
zI5yVfIwJL$KX<<aSvg*JF=A|PcszA`%`#pGk>7=*?sA;Pk)AyRnwV{b;^~ZlIqTaW
z_==B9OO3s8v2ZP3&h55p$IUwpmP|McV<TJ=_e@_xRK0Q?GwYwZ)S0(5))f?vg|}V9
zS*)pF7JN{lnzxSs>;8-_Z#}<@DHRHxBc!1tbGkr_!V$)~FA0%bXcJpV;%YeWW^Fvq
zL1WH3R6cKVR;h`wl{|=5%O!~HkkaQ^m7zE)ogZeOY-S7$?3ce$j9fbo>A1BW4Z@Ed
zp)oXGYbP-L&c{o5FOMcXfmoLvaMwBsP`-BibP32%qn;qSOp-*zIxTob8duLa`c;Mx
zs^nYIZbwHkDA(@{ur`*#UPKH)?q`07yM!XeZhVT+*S_?-k!oWa+2{N{auCzhhMeCd
zd(G)UBs@zZ1Cu6&h{4^ztJjFFabGXmG(I<MucDad2fER~4in<TlQ3W+wVU-sc$<Ms
z;S!3wEA!2xHC#}w-kESH9wlM<?^3MOjXy>SOE;U!6CHX3?zWL{9h^(yRko=Q3Y~{U
zF^ee^ctDr36PIBx69~N&V1+8WZo!sLEpk$B689&h8^4T&3Fyb8j6D^572<rAa?^*8
znA2l|ID&)~t8Cjw|C8=V9)nqqj(@}T)~7ks`|yE>A%8x*z~-10!%y7zo4zcf#F(K5
zY6wb^_mAu2(>E(;h7FPa_@Pdgp5|`$vF7gO+YhfEc1&yWm!j=fq9L{oAET8F8IBNI
z>p^=4nIaRLlj4+gcM2?}a4ZU`mn>I|w3g%1o;qnMDpkB*4YMlUuC%7(^^shSKvaA|
z5<TL^B-W4nWBGc0lm0->G%u_bOO(hGsjh}^<$%XJ@q<fX1WjDXiM&!mWu(?(3FwtZ
z@o<zVe==D>9pbzxEg^rV!<%fyU<6OWm#5mA$^^Q>T)?0byNE}5tuqbMY%wO$ee>91
ze#!ipCGL1~kq?>{$T@DS-x|v}yrO7E*eczShWO-3gPuXpxg_TE+U`h1buhtw4hs>T
zd~D9J{;c<_77>e{+YQd+{Vs>oi5h_7kS6U=sMo9?n>%akYy&ohG4wlDAxo-tF@1vh
zo$>p*^O0|j&`Y?Ja%`2(kz23LYXWa6Hf7LTaZ!P|21D_<;|_2_+HZw^_B^ID=#uv7
zeKUO{4Q)a$<?$=whXsratRK>Jgss@oY0mGO2dfj>kYKht#e<&+i0y1B=hb$Yuvs{b
zvUGdkj&)JmxRAC}Y4PO20sZ#Ox5fnu!80BQ$PqVX6c5TtUchPN#T&}T)kqFl5R}3~
z?dWIUXiYvxaPysHa>if+6Z4uyx5RH(NIVbB{lR)qd2XGk13{YcTeJ8kK#{~Xs40vp
z=E;1%8=c<^<BdI;6*|;;8dIyS^r~tkV{TawtgB3ex|5;N*KUD0u*Oe9zk42F!&`|6
z=@A(cEIZT<$@UZJLRPP9;98Z>+u<a6B^ZB#L7%eV9*LefK0ngTxY`{FQk3n<ohjl5
zD{c@Z6&Pw#(zlmKV=v_v4D~_x-tbG?@PFzv+-h-;Qyk3FM0OxrVI+Yrgh()IffUE4
zNPQb%YE1?u`MGRL@~~qPSlH&8Blkl`dKOz*sB}|w_JP8DRaB3Eb{v{3y(7+dh&hEn
z<p(+05GIC%YbrX|7j3N}hf&GzTZb@=sKM3V(H^uDOeC^U>o+!R{N3%_s!=exocH8b
z^*a`tw*%5F%{Lp1%ZbN4#!nd24DyFO`-l-Ggbczq_!FkYMB#J6)ZNcwVRmd%292XG
zb0u)V<1tJ#E|ffHpv=VoIG130a*ohqvz-~@1{yBB?t%%o*xtauXjmLT1o+rC=ZxW$
zpSR8kx*$TQu-h}YO=TYWGB-W_n18n=YS?7?5?Xj~Mw7Sw@ls8J`Rk|Ze7T9k;ieb9
zw9;7jF8_39fb^eT97gVz)KSkDQ@o`^zmjaHr{f*If{3?3CM@;XL`!$NSO;)*!L3mQ
z#q<lp54uk<EM{)vuu)q(d05THTgb%igQvG|*DVa)I8L>EbuSFf<J9C{t$TIWeFZC5
z^JqRWzo#!2`jN{x%^p`P{c&;dml@SSV6#7Z7s;)1jk)=DKp*-Rtv-t;31ej3xKjEW
zLhy*M1JTk@qEL4WEd|!6F1B^X&+un9CTL31hV`Pp2ho1N4xC<~TWJ*&TFw}x*x$88
z&tc#g6{fXPwllqI30YiCKc4})u2z>y8Fh35_svBSIQH5hEcu+S#P}JeYY^~^UI(5B
zNAvb#uxdb+rItx@oT~4`mg{=DHeu~ks6N<eWQA1AlsZMu*eYXchW7|QN^a$#I)oMB
z`>kaX<8<V!fRwoX*S)<N#R%9XHtsg?aj92Ocm(zc*P*hk42Mc7Syn_j6ge;Ju5c;Y
z&$*HC9)~gMZ%Qb)*O45o@xj0KkJ(Rh=3%^hIO$GYsRVqHA3=ai7ruy3Qqk|pHGvk!
zf$quIK9}YoUr{c1R8ISnEFoVLUw#39jP}{E(9yu<@o5-Qr2B)8BsRrnHaF1q3ygex
zR{*Ds#OXw&^hE!=F&13}i*tHrmqjEER$&mEOcR^X$0T)PDn1&DufoJTJen)OQAW~x
z5-C0k{u8<yNn}DVJhpl$%UVPe;{kvHagl^3^dU(AQM2w#WudV>UO+^S$e3=}Um@bZ
zq*@S4&wrDhk5P$h;NAh`MB*AS1jck>DFO0RDaL3;H260Fcn|;s*#E*lVyXgTd-ecD
zU2w(6f3^N8KqS0XT!Z@*P*ElSm@XKlr^3HPViQ;I0&GR}X%l^DEpEBagw|o?VL(lT
zzeG&;%zk5pifeQTdvivAD8FxrRPC3L0Qz3ANDBCG?QVyjP@v2xbY+D8f^9Yp(2D5)
zejG5@B`dr$S?~_v#LcC}iuuoW8LJf0Y)KmhBHCsfhoGNgDx%wJm81=Fh0F~y>|5U8
zB*LKWc6v)_f$4>AlqR5wYrMxeyzTrLgX5}u{+KsKO%$b}<h@{AJ&2>;mm{5g=Gg5+
zF5gbg2uup@SCJ=3&@;X-5G<le*f?2M0aiq(1bhfo0#U-GI#e>Jo4M(Y<y8c|8I8Pe
zD&tZ{;;Nxj!sIGNmmyNZh{P+V&mF$Be`}<kXR~-lVc8q_QxnR*Jp}x0XmSx(b~w&h
zpbIXpLG$nQ&wYqploiB(U7@IXQC;VI8m5Iu87ZQjZr`O2nD>WX_KM`~Fpnb>!$r{q
z&yl1kh6)Q7loW2E(<1*n?eiIad(kY*f`1vGbBu4`NTwDZD~Q(<xm7Qsk%jpV8P~wE
zITjZfOj(~b4I@HmQk@r7E}Qw>@T+>hkZ@RT&c297(;c2`g{l&{zdhQmo%6XXn<FtI
zb=unR+=4jMaeSvO<O|I4R4LyZ)sj=)h<}W#&SsD16VE)Z0XnYX84Mb#r4P~@-j`KI
zoD(WPFM_XJ%Dye;df7B4G)aWLNumE$X+b>=I=*4W@4Z+5dD^6c&_v;Yihs<|Z;gS{
zR4VTtkutLSoiighjM&7U_tdFAg4m=!y5L*|l`;~B!XtfcbRhWsFsdFbMq~`?NlpH-
zv2iiJ0dsFmJqs@jHmgBg19tt<nuWV>dP+0#rEO{$Ll^MIpxa$ob5*i5`wbCI#NdA<
z!SgeV_;w+XEUZQ;ENSrjY}<@q(%`?7xxx`thBr|WP01)=rG{*~kON(_i278RX~M&x
zbzm_r+?xtwlbioc<M9nG@ohPk#Nrwo0|cdyF_T}W7FW(CFWZ8~?dG4MWiPf$K}myv
zDV+E)2qv0!x6SXM_bW8?4?p*?fFX=hP5!x2hIu|T*j-)xq^@$Hd3~Ls3&yzZ2)K5=
z(w)eA8AijKXWP=5C?mo`kDM^=PtpB;Pb@vJB+SLFxX2fhU?U(Os%CB7;`6}_Edi2V
zzCl;GqR&#dvuo0+%vKrjP_Uk>%s3B(eVJ#);01yyiD(m>V>StTo$bOR%g5ECOCOqY
zci=mC8zpU7wd)h=6Rz`|2FlWX%1f>k+da0)?zsoVB3kAyq-~M7$1XQbFOged8<ZY<
zu<O7=Q|LC?h@WAEyOp1~we(&BH{Azpv(Rm97DgH~&>bgXGTMVCuzF-(Y3suMPT3&?
zfl*gIYJRfZ1YYBA#r;?W$83YyPTrV`XPVo06*4cugrvVaJ2!Y=fpvH`p`qQbnAYiz
z`H;Rt0}*}UHdD%y#&y9d##k<H`RTwzYYdLU=@dTGG?%}gLUT>!v*HT&1GNUQ<JLOG
z@9XUeK(~ET8DL)<2;9=`<fGtqN3I`V7=vU@f=af|yI{C*DntKSbarq2^rdvq&_q~_
z<+?;{>GckR3ianZ3JM&RZgX)U+U>JxIH-N-Rt{bkGJFZYzCL{=crU(r^Q5_3Y}G{=
zEn5~!*c_i>5bMCV(q^p<{q6~RrQ*S}^87xWeruPzI458rzeY-9Qp#-%gZnccx`f+!
zy)$Kq5@FQ`ZAM^KjC7)B<*5ncPD$tpQa7MLR>sC#%xt=35=Zjf^GC#>>Che$?Q1q-
z<WCb~{Q3w^0b!u$v?sqQn9eCH*+>V_;|E*H6wP)ge6p4bDJ|bCR=XhTSWTykcZr(9
z7ISa&b^h{|O$FSOljZ5mv_Ht+jbV)w7<l8pPF}}gmCl-t$}C4!nu6n{T%t`c1E<oZ
zmxQ#rF$i*3*=cK&S<D1^#lz*{ohC<BK1;AB{?TO1m`tg~(_oCHn*-0Q<5HgA=*^}y
zg(@%5LU_k#UM`YDc-LpZQ(goJEA4ylvMd{4{Xh7M$Xvi3Yu<=htEtl+JE7WXP#lv?
zpv=&NjHaqXm_bL=@7#s#y={%tIGeE)jdzvACR#V+WYM=FrZ8YB(|rSv@E+~XgU4~6
z-;g4K&3^S<`Tb_K(9Rq$j+4g8?66N>$hO6bf}fijbe*qh2=bKYO9*0mvmLq%^E|mt
zPD`HL(>r<nHE9t%IrhDOB178r&Ds?3mDG+^jAfmV6g?u56bI!upNChK^0;a25se9(
zfv6?i9;E&@?y|(%L0Mj|6bKb93pM;?%TYXc<r-4@so3-Q%woDZs-C$(7$&~1P$oXW
zdo|cIq{fKQIB!!9N1*voU<O~mveC%L6S<`FxQ+deR^4tKCZ@%Io3BW`6tqZ*m8T;7
zvB)6RZW{Ot=p-!oJl+uv!5r>_w*9MwI?Ko3ik`)4xxP<nSRy87!7FUN$nGcn`>VDV
z>3SjQrh$~E(o8|@X++xIjDvLpU0WZ6usJD%&uRO{_vNawl@T*?^+~ep!>m=MHQpc9
zdM9sjp)y?J%W5F_YMoN_@7Q)PA*6mhfrpz>degfh-k&&5)>7+E%d1YGa!T@tOKMr$
zefj3AV0Q8d?~g))R*dXls1UYXbw6x;h|s8Z5Ge>RHw<t*ZhbkNXv>Jda8sGW-5zd3
zIn{+T!LwdKyM5)9;JlX?W<)1WJxlhM4@)1i#k#^O{utf4z!P6^rLQlzyvvt{zCfqC
z$_7XV9A>X^(_GO=tRCXIJSk#&_PC4DL4ej}xKyuf(h;~`D7;{m#-5~8`ra=S?y5Z#
z%reg#qkEW|BJ^(nxtY=(C`?P+VM+@q7n_+Hx|7$PEPw6Le`qbJAI*NbM%^N2a)~n9
zFAh_H>v~5S?g>p#NYmr~d9~+e_TEH9*IZSa;Ng@M<7S<wZT9!-g(Ta2j4QSWOSB1n
zMOpXolsp%(EuYTo^ZUcREk8<|g;;07Rruv;;DyvpSNd?VRb^Y`agz@E>U)%)wp?n~
z$=lf%5w={a1~|R*ls`UR<VA7w_Mtp`NXO(6+J4LoWR2`OSIR1=S}twS%^mabraFHp
z2dS0&%Hh<_H4KLC;8j^TR}$&`*8qAO=Smv2a%gw2_iusj&JGRBShY&opv7B%43U=7
zFcCjC2fOarFxrV6g`AyXo%sKV_p(E^6TbN)d#B4M&TR}Jpt|@2fs)>(!^Ropp?VPq
z7W)Oo%DCJ=xoY=lOEs_r>AUxCjHu65u^9)p^Y9#3o8=(2Epum*KtI1d!t<8;ot>Us
z{9%+k0?j}x<2Vw9Q_x~>bVsCdu_ZieN(r*vpM)qVtexJ_<X4_9dB6kBoLZld<uAT>
zNndvs73u86%WPi}%oI>le?~E~w?c}jskCOi(?hU?#@H^#RLS1_g$$a%a~_LJ@d{St
z$KVYy$9VJ1C=RtN`$}vK0=Z6+z|WsU3Ar<jAq9$YFQ9#+{Rm-<?}KgEt^cVpT5+CF
zrH!EC!lS(UAdjYnpt9>x_SJM?za*s6f?sjrQ5I1fTCoB!)u9!3JPTjW4X++$(93_l
zeg9*0Gpcy=C<g<ye&bgV|7!r=uTkX(Kuk?2RXcvgf1>06C87Oa5Nh+;zmmp0$|8Wg
zR)3W~dH)Lo`XYdY|AFUi7XM0CL;#Eif=d10-s{)!`XaxEg7#?}4^OSd)<mZJCNn5A
z0|PSyEFlvt-<jV}4ze~(;NdWhi3sNY+916bBrs5_@Pf#ZXIJTsMuQh5mIE3OLtz<W
zjY7+qVLTSo8{Fh*=w8AhU%@aKt>!<~f6mwuNOBsdIIr6(+OIw;+5mrDwYE*Dhx<a~
zhP=OMLJ?>kpJ$j-5oo2Z^G#wLS~@BC2r@LxpJ(z_Io};-%U@;y4@iEre&cs+oyo7U
zSmEN}pM@ms4ertwRvPDH*FLEv3r^6Y@sT9zaq|8_k0D7ITem-P1&eXUE7AFMKL04u
zMgX^&$qSC5IiY=$hKfcU3ezMS9Vr(|rqaKqaN8%s;bOanZjiSnNg(06FSxpH_Lb4V
z3?2gW!7!KzHV~YWka;m34#U$2D=x{C*Bp>O_|h^{)r!u0U;G0Kg_~lr8d?WGsmD|(
z6@pYw<$wc@8E(|?Y%X@jUP6K$pT)^dz6;wR0OIf!h896naBN`aHcu=J7=aeyHDNc+
zqAv^rPfpwF<TfwJ60x@`0NbchgJL60)VB;XrxF3yB_%5X0*(Yfsn<+TIMLg1Wg_4<
zczP?a-I7xSSra-*G9E4+r1)G-;2B%Lv^Ugn*T;hzGkH|XKO_Z&C-!_cpMTZ+L@_Ul
z$dg$iZ(0zHsDOWzQE$qCfy#*fxG-$rt^@}>LkDo(zdAZI9E5b8>J3&QPr1YD5wC!s
zay9FNQXwxiI(3Obz~2xe3iT&Is+OXFUnms>Z*Wc$#G4m)TD_jZMnnx`u+PXz)Xx6&
zSU%4n0HoN3xDO26gInlB_*&Q82b#r+Qr@CVn^8?TIVX>Qo4S}RTgYEAdnt$$mBM5>
z_@6y{PzVT*rX-#p2|D@@>XxF5B-EcjDdqnx3x<Lm0&eEy#+iYBNXpHhm3~D1>#?{b
zj-r_1gRvkuI@0JFwfYGqFR2!s0#y8qjtM0%2_IDyjDm4|(m>izad<)SX`3m8J4Uh&
z7m?{Sh<=_c;Br-|K6jyOSwttf-zR6dSv}!nV_4<`%aE_UTuouTJ28=V;g;QDJQsQB
zf*geT!tCYhA>|f72>KwT<I?XGdI8FlYlpRQvtHW`p*IuL=X-ITN~y9t`qwair-Rph
zy^|pMg>}eG7mSD24rFSi-epI1NNjIR)UUp+{yLuV*Z|VT;AWJWnptQ$sI2z$qLX@f
z*RnIic^|Hl(=CmMf)v__!rD(2+cA)MUi6*0&H|levpTFYXdlE~%d7iVK^1(M6RcsF
zc9=z>;56onG+kz8xi+fm!N)ttyCelxrHSyw$Wq$5t0$EsTpsy>#2cNZ9a?l~*F34j
zKh>3&adbaD;-?OuWD@EvhE>j@46_#Hg~l{a4S;f2QwjczqffM|mgOrq6zo}E?@yFy
z300Klu%E(}^?g<Td7NnK%aEkJSB@r_7Lo<~)&hJ+loQvM>B~4^Q9C#fUiHJ6jPN*o
z3$qgVnAS{PH(_v^8dxvqGMxVUijHkZNA6oqMS6+t^N8kyv^5-ZKEaM@4;F?c;P<)v
z+h#u@+aZA2tMf-^u=|;6;M{!b5qw_d=AFR3dvjcae233Nt?08#s80gH2hH2MSn(o-
z1uk)1GHQO;3kHs&xPbftvmC1Vazx1J7y5Yy%C=#BFsbB0o8x}z{3E=iV@g%}jMzHw
zR<^8{8O*9EaNTkO>~zQ(AB}%o5?kZaB9J)70eHQ~qp1@11Gw|t1y<_@*|OO=w?I*!
zdeyqx7g`CSh7EHqxQ_$t@Mc5fGV_86Itw_~kJTE3=0{_Y&V^*lO0wO}N#@dEC?_4d
z>JVKSjvtS{_4;6}sycyOV(iVqzWFWV)B^fY2-3CXuTs1R98a=r%^tE7%uO(Gr)tLG
zUuW%L&BQFP`CevBI&Z2`F>R1;Ff<hH#GAG>E$k?0*QNy^TRxQu)H{V^#^7F^5Uq9F
z^j=^~bvhMvnFbI9WVl{wcGrw+jPMMa;YuN&we_GG(#yU}H98e1RMy?Vm*-q$17to{
z4?z0@J)0$?zIAQ!vXA<7zEcb{9JD|zx-;(k89tGrH>5^|(V{_$Vuh$hi;L}abb9yQ
z)#SSo2Uz{UG~e4T7T{s3v7X0J8ccVj_z<^I^uc&QxXci%;{AR*FT!e8__Ehe+V|@i
z8=0?E$HV0mPp2!I39@)uQ{s`Jesi&>Q~FAw60{Ud?-2%qnWZy+2oL_@Z+Y{{8srSJ
zyf?%e?R5M7Cq41?x65LH;BbR!oX;vUidbA{q-dL6ZAmIV%#%aF+ZB>_6c0-8gYXw0
zr(LzK8NMI5rP@5U+F0H(*}1EYayQ3--^=K$4$x2h^Cz^AH;#2L`y`lW54Tlt?r<Ok
zT~pw%CMu2YCzbUnY1hSO5cMDz@7f3SQ(DeNPbuY2P<syWbAA1_^38Q1+RU|13EK__
z^gy0<@HY|<?_c_2C@Y;v>abh;OMeD*k<_(Xxmb*Uo2O!WE(&f<#kn#cT*A69XNX$+
z!*PSX$pkkc{|N=oJ~4!~nC-5dCQ9Ve(q4%b>V#?c*Jsx?GcXDxY9{9xt<#Wv!1W8Z
zP%{`r3xz!}!@B(u@=hbvWiLaL`;qHcdu6Y8&7J4o0Uro|9{nboI7kp{k91X-Un`<5
z8m1-^#t5IFHT!fQ^Q!kz#|^3Pzh*LzuF(VGaQ4ivJiHL}m1fl!3M6xWcBwX);^R;Q
zbMNhu$b`4NgezZY37LeG4<!qtF6|QjTTk7J+@UTztR9QuHMt?A#yRkFGNyQ*Jogwx
z(!G#Lb`r0amGwzl*%6bN3PdB%)BBsbk*Gj41UhC7nt|*XQ_mJY@J26?rze;l3h2dt
zJ|`IW31{bx*6+r1OS-<#ttn)PT>(M2V~5CV9Snnfj{PWJ56o3YoWlESAcC&bbfm7q
zP)pUc4E`*Rq2Nd!ZWxFlWU+CfoU1^h$sg#${RSu3JvmRIFek2wyuLj?G~$_Jfju{H
zD?#f#*b32wip`GYuC?X^lIce``@ikW-3|Osx9*1AJqri9`kCp!XD_p0J27|Sfd|nQ
ze@YQhy;*<?k=-u_K7unF27O)li-c5K>}<zF?p{I#HERWZn|;+5b`=hr@$ao8>SF-w
z50V)sQn~P`gjgb_Tk-d$^>^+J){!$;4}iMeSkv!{-`KkuSpZrc;&+mv85+^J&Jz>5
zv>Zs^u8#h8hMEnVbGs9_kEjOH+kIs@4EMaan~2|a`J8=~rM}2u`K{!}uKTVhTqT<N
zXqs8i`m;}{-RF2Ku@W{w&*M2~&($X{mMB51r?#UUtaX;(Cxb}OBx7IKA_hlw?Wm<9
zWeYJhgMA>e;3-G*&D&I<71+vO6WPaBe;c;%AX7l(-ZbxV>Bm*}pkRT5^US0)%!}s)
z_aD;~Xm?EJdZ8$$<T!H1#@<O9qz1v##h>iVqzC#FSqG-3LEL(!hfvu9bjRn$-d?iN
z(V$`mdwK2_*|UV!q)kDu7}*1lgF2EVorCEmr8pxmTON+jIo4j!Ff|V#v#26Pfpev2
z(ucNdf}w1r`||>%SKIw>Xrfh>vF#Bh6dri@5{pVS)gwvKCJ>;d3CTZ(woyfX2j0;D
zY5h8^0nLSPvcKBFX&fEKUzW!OM<A<Xt)JYqJ?h$Z*it+G6G?o~tGvZCrHVZ?5$QMb
zY?r|hf<=Is<g$z#O^mU|q@jfI(0}qr5Ef<9eQ&0`vGk#T(e$wOmHbCfI8y19#`>k7
zFe2w-11m2-w_l^_9{9UBbXVz~TRGd**(sF@$G4rCd4=&$xGupa8cxT>h8AT@=wj~8
z#Rg1X>0S13&$TC@N+hS&*H1|lv5>Pe;ezfj$rQdw#I?x5w{lXn=VeB=7*G$?-*6)r
zYv;QKAI|ag)IVM)QWd-D-nk#n^?tI^=KExC$$WT-DSosqXQ7tsQJz-lQIe~6EB^TJ
z3-J`MRKB~bSLWMdt*xDszyq~4b^N1TzR%|PA;|-EG4TU6DZu`Hc`JCZuHy@w4V>ON
zF=pPpVN_|XqwI|7JgcEJMAST6h>55%g$l=tim0{4sW=5gFJDL*<ijiqZVXW4av!aS
zbJfzTx3}D4eCnWxzW&%5b`*bY`vUgiPEA2`f#4SG!315we)aS9H3e>N@e^zJn{M(O
zPepCWVvu7{`iO`f)@c7-iF#<|OT0F0Uuw@_9wTRTX*Jxj3IqO&j>w=|-tuASBX}bp
zH4J)#EroiPpERem5+)Y8vQ7sYRg|hluUqYNJ*env{xXMg<E(>zhSZFEp49b<uicp=
zcm2z!o{lF*b0?bV2GM~uES_-6QX}RGG~ZE(=DkN5>v3*Vh$f8?xAa0lsoj2J0?9ld
zSCy+Nb9XH#ueOe*ZNrWouDUpV6b=q}ezoGh&Kv1Z4M*Z}3Cv>cb-2GDG@1`*YM+R_
zf8nNJr@hgn+~M{c3A-~CyE7{bzN*ymdA>w<-^j3lx1t3&&e3fjgVFlhFSsLq<914e
zcJpA%cFx~CqPF1#dsd|_iUxOFLftn{M?a6*BwtV2x`p$$=xdB*;$xhno)%BlJK4OP
zjCP`;Lm*isB8p~5u_BKqdJc3#4Xx@gnivu~V?xy*&vcw_f?z}hP<+towwGMr`MuXF
z!DT9}mpWorrGj!9ZE4#0)G*0(VnIxQ8T8)@we&bGQB8^Em*`EstaA8bTM&IfZz-6#
z(CBfSol0*hje4GC@eGL}Y%z|k8f4uc;7yu_`otPZsbp;<A7=Jfa(ZTsvh8F2=wiRZ
zlpA)H&TY3sx~kaKslMSHQYDzmEw~SKs<$DLVxU^9BnuH}x0ql2Vdj5*ZqPm2D!ROn
z`!Uxd|NPNPjA6eRgZRlmrCG6tK9y-8W(xRb^-kbJ?Wg0oyb+J>>N_~*f)$SL@+W(M
zMf+3-jffdbFO2E(q(`c`=(EK+LLlhT{%O+>3{o-7tqYXIM%m*P$6-OFxD)<U^aVzv
zmTUSTd;TCAljt5q!=$G0Mo~i`#<8_tjlv+loJJS>C5^sr%q7g9?at_wzvvi?=}x-E
zj%l>~h@P50!tFK)-{8WMUQuq7S0o*iS5ApIPQ=wtzOMCd^e>QwN2`6BZ*1#F2KKiN
zD+i_~&Qgxe+G2ep2RSU((+{4PkIY0U*`P`G=UAV`p>%yF^3jPZ-iMj3VN7dg-dvQ%
z5z~K@9%XO~Tnd`3q0=tV9;b%7@n<VO?dc2edZ!IV1`QtH7NepwWYal0j0cOkdL9w_
z@S7#?lA<tb&zV#7&tKN4h(XmjB<)ZEOcZj*!Jwd8r%uvjoJTu_y7*ow>=ug%QSo~N
zb)0*RTYR6)Nx){f*6d>TotgJ;V|2OAnKdV2-PvVNMKfsS7JQWlSJ=Y3hFYT}0A~R-
z{Q;o%gUb%UE!K(meR17i#TEkU!R1k$_prKyHY~Z>UuEl?syoj2q-KGtj>(}}Zsako
zRjh%r^&TqQz@l3=aSGeOpj$RT<SH#w?yc%41)vECg)=;~j+5W|R)5Rp$z6M^Y6Jft
zAm(3=x_hQh<t?S{p<JN4JC6FHH6rz2Av-`KL3f;k|C9B8Nou|gCsf_~2B7}O4aje$
zq=0`|{qFo<9v~}#2XI`_ins$9e~WNPYW6SefI}Wz2k=c1jup1XQUd__0P>(>?l>&~
zJpXU05@}PV9m%#yyT^qNe69=y#yYRcV_Ja6FN$`>+pfb?s&0>gZqS-ccvB(#A$hPK
zQrI5jt0DO)J#9PS!XZ9AGbrp5i6ecxEw?>v2fK&lqRaFzf7-~A@9L&k+};Wdp`)Ct
z>`Epukq(R_ARtQCB+Ktg&Q&iQ8Z1qk8zj8R;clCz?W>oHuts9w6&^j>!+ofJ$MJlt
zN(N0&!%_ddLYHVVVX3$krf!9bRsqhGu!`7Y>QvFynv?OShT#`k?hkdTz(Aw6Gl4Gi
zNsd5mQ3L^=su1_5M)RyBJ~l67^$<$3`on)beMn(v@-0baXH0R2y1)>;hGt({PvNxT
z8kf=zttlE&JVLv8;*xIMpxT^<HR6>>19(`%zmgcd=AyUd8`1>Tv#sh*i9LzOSpkTv
z-xNjey3CCvwDu|vYB;2c3|P)y*$6MAT@un*R~DN}Shd)agDezAaZF@_%1`Y_(6%@<
zum<2GY<K)IJ3TRZg5&tzG5@?MST*<n)~MkzyTctwf$#mvX6mp8%s;R|cwk5%wKarR
z8TIB3uI<0~cdFVZ^C@Q}?T#TKX5d&ARU<d>L|9i|bER=dMyA5uw6nOgk!SWprEx>x
zQxz-tZ19cE&L|I%?3KAD8ttSIC+t>;hfoh=$8pG$%^dfm6w~y}?5jG3D}=nM(F9X*
zCBv%ej#wv2>kUMUl&ObMfAy{m!xws)Y2ahkbl@rjDobpIa~#x0Qt_baPu>p6Hd(0A
z!K!%|tC`0>1XPbYRijh`&=_5Xvq*{i0*g&nklPut%N;_jJ+<<|s>y<n@W{gI!OLo3
z(&n81ZCkOs8aK5@cSm_y`d-|@r2V3^v&5UK)n4Z+|GmEnup2n!qCnoGA{j`IM^wWi
zs)-6}QHBzv3*Qpe2%}DQ_*y{T3>pXg7?*0u!Hsfm&$Q9)9KyY+<@Rl3WLe~?b8&BM
z{dq}gY!+a5MQpB0UTk+Bbwwt>WMzNL-))af`bHSi)^v;mF>7u_5bR}m-U^fn)7EUj
zr`7ypfce<g@y3I_Ab*1&k%DKsNM15)YG15-CIta<I51OiHKc0o(9&TbEF7x-EUM*3
zHMc+PXJFsKw;r`3autNKl%D6f!Pzt<Z(7ei0rlj?abUsBVA%2ZS&w+RvjbU`Mdfj!
z{9sdqRA9A?_e(I7C{=D!w>7%yn6h8Qg)3RmuAZXoWIH#Kl`dkwN*C^46xCqq(cYeN
znf1gu&K<<B&{$^jVm|L<nNfvnDM3pveOZnRDECWbL_Ywn<{qu+RW(XAKbl;d;TJgG
z`h6B7G7;9SM6^~3r?A<(;oii!#&+oetY&nGhIUa;P*ch^Ru2EBkB4q7qSttx9!=&o
zV&C=3^y<B}kQPoBg=znz<M)~{%BK#IT<G#dIU=W{C5S4P<<eN%=)IOgc{PzncSFco
zx5ufVE)BE|VUD-ML)9S6@v-r5RU@~&D+NQzhdmB^6>YpqYv-Lt=%u1bucwYSHXf!<
zyW0cmb-Fggx#gS6ncr+T#=%59%#~0nCoguvLQv|zT-xe8=avL=jqw&lo#R9!70xHZ
zh*{G9I50OBtZ6@<-W_3P8@`GMw{ldDNX*W=efz0)(x0V3ct-N5#S2~L4ld5#5{h~z
zL{mxY`%S$Y=mE)umY~_pdzfb&<AF|v*0rB~AH^s_legDwd3fQMSHj(Zq|oL!^Q^4N
z6R~N_j$cJO3Aaw<W~W5XniH3BTWTX43CyXjGP?t>jPbFpB22n)%U`?Q1y84c)?Z!I
z*L7GNll5&V>G>Qzlkjae)7A?C&#*8_TMyW6nMv#4KqpUIH{VRt`ud7))yeJOcr7)y
z`63zJc-4z))rkZ286D4Kx!0mw0k^7;8`yj_F11fo`?{o#Z@eBm9WpfgNT~$?e6FNv
zTE5c%G^Z~cCN3YfkCw!amb!18ZJ3r}zDkx`%d1d7Cx2B7w-Exg2R2?^ojCzIuK(yf
zIkf_swtt(>8b>ZwvH2R(gxTQZ_m1_k;b7*Te(~?^jR{y|8TF_)qNo)xb|9$Z%59{4
zZ|gvA&Q;;y+a=pZOUm8#bgQerk)&MlvUqhpGv)igGDBzJ*kX3PQ}3IO=ni?$ad?R5
z)V@n>f27>gex($fw$txJR)4E%GWG0>=E1xu0$Y)4{+_-%-er=$YVKj=PUMOSUSQa2
zA3i~YUA7@OL4!{^)z27NMnAOG%!WdLVd>j#9EPEK&%)~KzvViww)$o>wyUl5)NM^w
z_#NR`lLP6kdmhS?Pen^W3YmHf%2LjI_p9EuTEGQ$+5L5Ez0r|i)AeY}`)U!TbH)gx
z2rWeb5{e`)N?bXZ0pz=C<>n&j_v$Kg)g;1pesokeO=Rh0p+UN+%d|9*Jus4hD74J)
zzZ0kkcNGo;5Mg(TD5Buf`o7EN@|K#lT}`baqt{0GTueDlPn^z|$~{eZ9Z$I?0NMY3
zvzabwnx1+ANiqsa+PO`DZ{7Mf{srgpq}l?EYrtl=g@}&GVH?J_^PL9BZcDybvE~`!
zx@F4^tRGqOo0p_oMz+ML4LPhL-TyYjq7Ems5`4<(Nkm3PI;$e>74g=(cn1Tb!#<I*
zV1cpVz@${V7|uDPoWu<;gS#xq&|_f^Zt}ieP9(bNa632Ipg~7s?HrWd+pMs(W8oHn
z>+t_~;pU(y3_B7d10t@o!h-&dIO0TtpAIiO{%h*oWrd*uQUEjIsQ7A1M_Ll!F8iGk
z&cG16Z-6BTWgbyNx-jXuoH2SMPW)dCXC6^jT8M)YE`ESTlG;sEn&e3Z&=f}gC-%Si
z_P#lQVoe-w4oO}@RFxKDXNV2xBpto}gJ=JP|JD8{-eB-=B4<Xp-M=yvmrqKD*sY-d
z1X2J0WZmn3aH0Q*Ui?pV%0fKc{J@7eAuXgV@-HVVEd)?)@poj?V<J>QRSSR=X!D3J
z|E=#f6lV?~RT|L~k?fhV<U`+0h)G!z@A*6KfQaP@nhSwwlMy;8)Q<1y2L*>aBnBAZ
zwAeLyBl4ICw4<%k56TTl^5_YI5-=1b<$E-XmS=_}-6mVb*5yZtBQj_IwejQIfBt3k
zFiD};nV3ZM3x`B3g3c(mh7!*G2SgjTuDh4oTTZ*wGe;&bZE2!^*caB?-Geg!NeLbh
z2|gy|2Fym%q#Uc0BKAEI0$l&hlZlU}B!0YMW=yCB`4ahuJLGJaz|UhM;o$z6DWS0H
zCc}MjViI7oPu-vIX8}0#BT(p??vUofg5(H9s|+mekhMc6l0R<|b*p5yu`{@X9Ge;!
z`Zy4FpAi4RU2+GNEu$(oCU2_uq=g`G-JqZttNBe&jqOLCGDd4d!0dAe6?ME*hT$fc
z$!I{DU-y59AzDfjObkZkLJmm^MhvY-QpxHWBEhUw4ye5ubA?rsFe8GMLGRl%mZXNs
zpY53v6#p?&YwU#HCyHK)Xht*;VlLdif+xaUptPXtt}+_^^T3p;ymYy0g;3LleH8ux
zf%yjvkqdni-}nhlkUF=>r?sdVk#@1bS8;w~@HIFA33h&VFrunr*v}vr^ggbLu&;u-
zyOpDa(P|*i8z3Q@;P!kBgcQ+;p9km(p&2wmCsev8#zY&^oX=z4#H0oB?Qy{_=z{n9
zOcNsYv)MF3-k4(0=M9JjGL{B9=z>-FI7=o(17}eAQ-Uo5J3sUfRaf#C=m?Nck)pK;
zU}Vq*HMaz*VTfMXHQi{rzE1?Sid-f?jr|G}{R52-$cB+P!U3~8eKU!FJWWOwl0PaC
zcDOWiKseBF#&k$HaGS(1C2&$G7AyfTsGWH<pk&8o&O^XepLm&!Ila@%CqzD!qO_0I
zYB@6aGzgQD9B9Wr$;1w^i*)O9`L0K1Yb$cx9UtL1^YWB<M~{$*8{|nk$XvijWdz>`
zeMlJO5bS_RH~*Fvcs<wP6ALzc1nh-B0jj=S89G*e6SZMY6SWPv=Av8bL+TD`Mk0(l
z<PJHUV$Rdg9TYkSIr$QsR1Nj0RU0oLPBgD7-Xg+ud9tBPL<T?Nq>pxYn+q4O^<9ZJ
zfZQ1S)J2)!CSH2pOsHcW3(&Ybc9wDVNBly^O)drx1w?rYSGH8uc{HYC@f6i6)H>PS
zWMj=W1I`EZi)Xuq(yf!*XucHF`e+l<HGzIHGf$S1LsehjCp%>>ZxE+rtu%T81F~o1
z+t;ylf+l@TUU$E&OUI(AM?lOZY*$USk$xw|?o3INOHu!fg5>-4$}LFZk3dufL8!jt
zT>%C(=2&s~kcz7YbhuCmB)|1S2%O}429)#-ME;#$6ET9IXL=DQkBa@`I4u`Q8Pp*w
z%glf62?3%hlC4Zf8n`L48+1W6XK;?+T8Y5p+tE!d+7VR4*tfY8n_lkLA8F)j5O)V>
z`Z@21go6m^y-sQA{+o@9Uw!gJA?_gkHaNn`uo*!5Ie7ua+T~CQLM-q^0aeD@kd>OZ
zZ{lG@VX2{oyj}$?WwHO;q7(?~Juus8bBT-=#Ma7Ue<=`{7xK;tdKX2$iU+=mLp&fA
z?_T`&>H;`U^LXb3yo&%R#zQ>(842xzSX)`FlL7%2fFbgi@ju6pve+siZ{vV}6yl-I
zNOb<HJINDh#QufY&PbHx2~G=mnMAz@in+lbc9%!N9^$|K-2mDs3u5ntcn`ZmOn{7l
z4Nwh0=wF9$Ic~#yCBOlY9|4g5MXT=S<q3Q;r2(pdIspR+{Ktt@TUx;TE}IU3@_7Hz
z0#pz4mva6$5C9GIg`h5k62t+j1dK=%Fd}&ZK>Xg)UpN5(<Ndezf8c1Ah0rfyvcKai
z3jXi-QXu*j2tLCA*)>A{W&jKjK-0u+@C*PD_*6U;Xb<1b+tA#Cn?;4apBqI2nXYp)
zwZsBmua8f@1#maKzXYd=&K|`detn<|lD(4k>v^Th6Yv3{JOfi`sHe#Zct8I;>9e4L
zby#dSK3l@v9pxkg<FA6AmLZViYkXEpA%)tz8^7R)mf7r0LI*2CJ?z>VP%i{GBMd0F
z91CR{@Wj&m6Zi+AjFrd#<x7|?mgZ|)Z+BlrL+U{t^KOK<Y#>CB3RcM5Ek)$U?LZy<
z5}C9N;R$PvJ<M<+l$}ADBg$|AB1fANCC{d#r%RbXss|xsQFlcLa1hX`8_N^WVMU84
z<?(h%C$~a+Vl>!HY=%l)xCbX+V;IEe{OK5rQ)x!u{V{;E`a_42Ekc1{OWH$5fK_Xi
zQ$*MsWF?#+Zzr|N7v6%85#+3&OFhW8y2CVyUJfX}5V>i$Iytt)L4R)Rj!{{uASS9f
zCI~6)XdesSh@^tUpGTl9mcUE%GMK0T#CnlK)-+z!`x%ujN7@{4P&iZxM7>^@i-z9P
z>x)VoJvFL>GcCeeq>28Dd>H7K@qxboSy7mH$gH6uSF2Gk@aw`R0WWD?V9M%DS3L)A
zaK6k<vK4Glu8yoIF*;!}-Zx=$&96)jwbuIG>G4}WuL3~4Ra3%EO0@X6KAbU|=3)>O
zw%Z8LZz5=YK)eh=PaHp@V476zrU2|=_t^ZUPCKaD+^OMR5PcBf6vdYgX{BKE02~O@
zb*g|2Rtuqe(rnctY-jzcN)Ia|fPs)I^AKt7-;A$t0FJKbUY25)7NL?t-i}z2maThc
z%`M`<*K6SY?Vr0%R-r5~N5)MUISR5u-p^=J9UqdnF4Ggam=C=#D22Q%zJ?3rb7csz
z3c2EiyxgMRGhq1JV<7isVe$lB(Mu(qH>ex3n$yFw7sh5uNDnuGn=^nFuN4S<;98%%
z+<KYH5V~W?bP+WGHfNVhK5XjC<n~e3=@X}%10vg(kMrvmAmo#%DqYB%Ov5mCYg<p1
z$NRnxJEp?#{jAs6v30e1dUI9B#55LQ^wtaVp{1(6jN=+q4$zAhIr@n74fKwD6{%Hv
zcX?!@H4A&a=2|f_(VYHW^YHAt8iTm46WE#0+Us-9x(Nf(z*cDWyR#`rfo*-(0@H*X
z3DDI9ksD!~y9~oLX_QGk1MDYDPqF(cGOc?R%8imHmmV$9SPQLO?pu)xc!9NHuVUQ+
z#~lZd%+Wt%CDTv(sH6aY7$pXTV7Fz2Ld#9hS?d<^u3}ZA1H|vHpdQS-U>GvdpSM9y
zjg*C+&bG0Iq^dWQgQnrOeIqA!*xpi5V&*U0t$S{iTco}^TorKSU2RlM;!?3Pwqv3y
zD@qoseL32G?W4B2&|9x3hjXi@V>-5ED*?BJc}#b#HKCo0iIkWGewQ&<z}|_C$T$@f
zDO`Jqjj$QTEGW%P_9I1tvU(CNWjL#Qe4`%Oa)cYR_EYUNyKlbXL(iLmJ+#v6z>T$O
zIpEk)Ym9;J)=c>zoR>D0+fgH6e7Cp;^Kl00{^75e-BT;OqxMB`UD*g&yx)h_?czCE
zm1U^b#_qbZ_X)JSZP#^QE$|A)hxOTDPc4uD0l<CUSM!Wz1|R}EYPnONx&Wx`hMK1n
z`CW2c_PTPD&c``2Qm>k`>v?fU46A!C<fR3mF^to__C~Y=Xhi6lV*nPZShT_DZW|n|
zUCQdIwR*R>8iwm$^X#~;w2$+H+O71FEMtnm+Bq^=6P!bW(_e(_vFO%mnzjJbbG8bc
zJF$PBbe!BUUB$G{$w$+*tAJB2S8ZO=Pc}ciE^hfSfL2#=?5$WjR<QQy4q;)NPiy_8
zr|-8ePyUs2_LFCE*5mcA%o$>WnJti|Cf$7K#{&(H<{7wco1fM>sY*kZPvzCJJaFc)
z^!XxMi;m4Nj``U=X|c#ge)X!s_Z6G-%9>4h!%Rqg{Ya@ab8{$bTXx!<SX#Pa^yy*r
zRKrLN+yV5?o$a%=wAHfpuq&#S3BV<jFG_1)gA9!$C#&@N`ss-QD|uFR;g!Kv#NF|o
z3(@7YCxgN@XD83fbaS&y$&D%N!bgv6n};8pXYB~{Owl>QYH%~ZMU@te0*^6&1nuZy
zZ`R~)p~`(**rPJlWKTQdEgY0BUFOQx_5I^CG0gHR(Du4EjzY`r;vBHoH~c3B59FDx
zYc;8H3-fu6$DPT5^uykZ_bn=4ynBn6FZ6hLnx?7^M(!!hnTTV?*r0>e7FM7Ne#o<b
z-fecdRwx^%t8i=!?HaGL8O&$2!f{_`Rq8P>2akD(tHoV^exi`y<?0zI9<a7L)8y8D
z*;%58&<*#>jX~&OTbeER&BnwXKd&cEZZiZS^K(YRYAt^38-$CMVr&3^@LQS547vFm
z!PxF?T$OAz%|QYt40-urK2Y$D`pOP?*KIdzUBFXW^ZH^Xgwtc)Ltq^puZ09xB}B8)
zda`mH!-H1|U=2yuA4cfe3M(9Ce^T2<u}aWto#cj?jY?H%vmxig-ZNENHh~J*pDM&?
z*{<+;sL>BmETY|4w%$8rs`IIL3lBQx^iHc2GbTOxYae}Godt4>(Z4q_b>*{ah^U+$
z($;8c&S>MnI8E`3mm+)V>Cod20gbvm)@&~|n%YI0zsRvjl__j;Jb1Uk5Zb7zSh}1q
zUMn40uP@%OzzvPSdq17^VLznh(-b9bvWP#V=YI<3=6Lu<pi-!z=ZQQ=<bGh8tEMcB
zN&KRnF8F^OT?2GvO%vS>Hn#1}Zfx7O&5g|sH`>@vHg+a9C$??dHs+u2KQr&m>#nL>
zx4WwQygBoxJL{l99e~vF>dEd2Vai$d@_PHN&hE-P)9#zJpjZCR_KolKR7pJv#EfI#
zXKJ@69QbouHk^e|NwO&+18FOi!jCC)K^5U{asbuux0e20+N*cM(0nh@Xv*uAd9(=o
zk+uS1i%{R%xJ%~YVZPUof(TcF{+x3r-V0onjy$i5@J8>N8>6%AP~?>gJ{O$+CkR4D
zEWv)XaV-T4NW>qo8*2pr^6#r4yzMRT@K_szC|S%z=+704X}~6e>78W&umk^ZZ*8dx
z^yeOR{<nXX5#IFA(ZJH2floj-_+!kdr>QLDBSbC{ysJ|0XRBJW62jY&vN{-sF~2-m
zXW4Mv{Wo%BjX&_RQI`JPu-ibYG=wA+#{YVDXTG=EBEU{E3*LX=`*Te<t=(JaeD6~}
z3h#Wc7TBYn=oYaAy<UM<t~~wuma5l(q&dMpd{MZ;=YT#AFe)n1m(q}~+_Dg3Vr-+h
z+_!nb2O#WxZ&#BI!1^Z7Doc<ng9szbaS)y>5KYFL&~Lk92WC>DMCZMGWwcDJzsx(u
zZXE*yiqR1+&_Cq?U8nVyQTTb0IE%7ke4D(UX-j%dFVqV#^gVq(8nF^KPg+%n_yi!j
zk?)0BLH7_~1bJ-e)pTzkxL-na{wrI`4B*s`>D=5?KDc_bdSIO6C9k(tJ$KenaBm*(
zH(fC>n?rHZ`SBs)llrJl@#sim?+`uU(aTP1@N-pE|Ag|UO~E77I>jWY#3%?rZU<hY
zWa;@2x~HAAphi`DfW5Qd7fD(deDsOy50@g1@gJ_iO^kyPl|loH1BQ8ZSfyfJn%q*e
z9LToXykAU9E|+Z@`6w(z1?}6(n6tbXX1eipO?dz$vP`uX8T{@`i|JKg_m*BQZo;KJ
zY+2pUf6dj(c%V`vLSiYe;>;+PK6or`yQ5gvN^>u4hO#S<=YQWA@MUX2TlwHQH7c}I
zD00(DK9k0(c1~TZ$k@{L5<IH)pGV64=21Ct(IwJn?e;|>P4tIJI0J1E5rp(99un^-
z#VMEdBSpcASM0|!+XajxWLd-E4w9{7ffw>hTjS8U0M)gVZxFLQNuk|YsaVL6(mZ@Y
zuW4zoitT;)a;(RJlMg$kXfBSo4oVH0u2$&Ur}Ch2_G+?ZmYe@4hAaTpb|H}O5itX^
zt&SRHs|R56g4Wn<^){IO`CzjYMA4bQ#(aA;_h^6ocaqd8_`6rbh~7<kt<p;@LUwcQ
zQ#rBjrmjwQybFfi7W8kUAWou2f4li_KP7tCe)#L($+UUm*ik&xyc5i|EZwqO>Q~am
z@j5xGPE`fa2W9qLaL>`C6{F0#&seha<ch?^Bij)y1Yxm4td-z6;c_@AkJ(Qhepa4s
z=Xsf4aTxKKNys@u+k-^H&!#DqS#INA;^ddAcfrs6qfim)Ei`#WdfDPB?eQ{Aqbw_E
zR|lV3wb3u~nT6kFmHh)8C@EKucsb4Sg}ml@)0jHOGH0B3pf^wT<ICqs;543A_)zZ^
zOi%m#ux98>y!lmaprABnRVzK;bE4xl?rj-y)`5?D9T?JtG}!j9ufefEG&}pGB$m0&
zhX%XSO<St1!n$vN>&RN(73lE1$~HI^MS7Run2e4~?)S~aO`}U{&%)a@Mo6oEm!SIk
zmbAvPQeXS_c1?X{4av?6gq-y6TxcQaDU@DcxB1@9PQ!G*-QCD$X$6Pgz*%8Q=i%E7
zr|B;X4^6|ZDEZ80yW`WfwoZV<Sd)Af4)KJzZu41l1~g7g*2?cwtE?QT-I5jJwLWs}
za_YM<`J@FC4fijD+&yEXCQigYOjYiX>l1E()wxIuOdQQA8uO&SJLyf6qM9Lc#`}6<
zNhan}XMGNINGjz8+VBY(M1x2cX&(l(sWAg(JD=rlZKNOkMp5d<)Pf4h?elmc<>dhY
zp=_4oF!8Cc`4DI}aN_YU3Ms1dBka#H=?Z(EI+i19`e9Pvi7FdP3l6o|xetFwyAlnV
z%4d~(YQR0D>*?r4YlTc`dH{bB)54|LoVZ6+X6eX$h}%_;rd{w|qWoK(8JXQLS>^M^
zyolc(Tn5%U2h}=n>u_J`v~KIP9_zH4RnY0t$}WwXFOBX<4Mytise|A8knF2XZ0M(h
zRK!?oX;(c-$F<>TGo}Q#2qg7yZ7G<83C@&zN&0)iRZjW$F`BbPRjVVXQ?^hf?{OBc
z>R3l9=nnZ_CmH3oa#(E&IIRkrPFYpfiWd^z)|{=X?XA`dYIcomWxZxaDzr=b)&y3r
zs~a;XQqA5IL=DPT41D<COypi2W86x*TkK1?fWzBjQ@kT>8PuAJ+fLOx_u+gTB$(80
zypq%&ypkoD$x=yvEQH#67mLk*%bajpHfF5c8_WJ(mM8z?7kvw@^R+M`>9})^RvqLT
zsDcJP!%nR~u{nfJ=r{(PN>wd}36$xlW)j?yxU1rpm3`BE>R#X%wAx?X>dViIW{B2?
z^mtX#g=W<dXVswcs0ilOm_3f?gPo3_CK?!yG8xcUE5FR2wX7zWf25f`uOg?n+FL!%
zsGnfB@Mam{8JDYyW@9NBPr}Q(bTjP^85Dv1RU9LG_!O>v4;;_miPf+y>97d{FP@%%
zmP)ZY@Gw?)mYT<_OTH#g+mxxH&uQ1-;4Tju+H4e(R4zKXJS7!G6$!lzzE*%86`hmL
z_yY?Ck$~4QBi!u-X^ki?l^<s`n}s|XW*NG`Jp@BU!Wt<vG}LywoPYUjt7NfQuh6ZJ
zr%BC~O>~3iiW_Qfi<`Cg&-|*=8&nP+2H3{p9!LLv)}8@C?I6wO&$n}#C3?d1TWz7i
z;m(_-@M*)hU_(STfp6A`2x<Wzmi&u47DL)+6^+qjKn$yctovpNt`G<=?dPH^&BfM4
zKGwOuSSq#`*JQ|V?}&)cN_6NoSC4<StBqUOQz@8&R>}?RDQQm#-xw1MBwEryhGVGq
zB$U<cQmu=m(Ha91p8`J>Z0;%=DM%ps2mRV~jk2_38x@L%RuqSZbdR<$)ap$inuDVV
zSO#9{i^G$g{KX{@(XBgOPr6bTv0uIP+BOQP5mu;pi!xOhG=05WsA@)wKvdRgkE;^y
zTjMUp?Vi6UZpkO)w|+9q(T*KFLU>3K^x7&ux7xMvVwTa-Bb<bHDPGOB#w;Juq5iP5
zs@y`gvNYrDk@|b6mc+5Jl<IyGYoFvQ#)B`*G2Jzndg?#b{Joj?hrZ<1M;rdCb#*jb
zlf$o>ovtkh8+f+4JNIA$eEcO0TfRF9P}>yO>o|!paUh3-b>Sf0MqUy7`1oKov{HFh
zXSg)I7LL^lDub1k{;|muQ)8>?Zz1P()S$-jO;?h8;bGRFH3%|!Dw#*JR(FHN4Oh>*
ztImc~Lgcb{YA_9J>%UhaNRk%JTKN$mPy^V@ppuFni&fu9G36fP{WBzX(*Tkj0%^lM
z!4aT!EfBZ4tW6?rZB&irFD*V1EjDht``r(WmjR1*Ri#Ck8{l)3(r@%p@*5>_n%Si#
zRRw_u6tqY4jAIL)%00H{$44cHM>5aCH?R9#y<!;irJpe7*KSijvO#tiCZVsoFi|`v
z!6T)N#O<%nHF2S~5#F)x<_7YPu^Ly^aKy2N4dOs9vE32O74#l{rSva`Ohq9d!mw6I
zzjII#=wheB=@~n>N0gY@g<4(&roy_NxW1gI#k!|B7?HX`-@97ZBXq-L={VB$zs4-t
z3S{#^b*^G=;oC*z%+FwAo`9|bP|G5_)&rB3X-Nb4@<HQv8E?9FBk#X09*6MWMo*?Y
z7B1>*ZB+GJnrdm+mp65q1Cn6{HDv+DuG$1$c2`D~uWbynqLFNTSvV(}3?Aqabq(q9
zos{d^mqzf#Vx*X(x1N`+U;E_n-%0dP|K8%Oe1k+Q<%WeuJAM)~?zHHyJ}PX_<0_MX
zfQI8z<sB-Kzk`_@GJ?nCmzU9^9iiaThOh1$TV3bNSHhOtlGB<!H0im=VIw+m-xa={
zDU_e09x6->V0UVPyq!R?6E@)NM~-_H^~#27usyp%DW=#!+dU6*?xk#W7MwW`g)w2y
zM}V9jDRlLKMU&q(IG-E!U9}ZNM1_S39BO+IJ5b}TQWd!1%h5Plium}^3zba?p|~1Z
z_y8zs{ce#bqZFayhmK>H5nIN7qK#e^6)}<E68uuk9f+?bdg?DIW*W>T21|ZPQNj$O
zoeLB=d}EL<NPc+`>z7hsprXH^2&sWt#QlA{|BYP9-9k}uDvQ5UxHL*wQ!g_EA!oW*
znj;>)EMO1LtJCE}=TGoYZlU}wz{GVAk@<anA)-p*X5rWzb67Fb7VL;i=}+i=L)etQ
zTjw7e#S{0HouZlXlD!0E3%#s-Up5>=IF>we1)YB&6<vvRz(q8Kd<tB^>=6fsjATRI
z_e=5noVR5iE*E$do8C{}zc|#fURYKX_zy9@4Du|rjzu{QvtHg&mN5?iW9lf`sPgbk
zK49*%tHdn>piws=1uh6dd6?2(QNf8~9BgM(g&LSW&N|VSmi9~3dYUxUddT`Z`YrTz
z0s6&Gc!#)W9K1Nu`aLM3@9+}svD9Q`>PJHu2Sa&+*)rn-$Uskjk*S$CFXIEGVs8zL
zOSQ<UTORHOTR>5?KH`kHWtK$ECl77Cjiw@Vl*m*9!p1*<n2xR#3v1kShOi1&j3{{#
zh$2g<=y8L&Fum7}=_|RWNTDHMGDI6Sgf9Z;Km=NhpJE5jbGRO{H~V)DF^@FB>n~TR
z6iFw_YvYMP>B)KCNBdC#K?Q3IieA8megg$5(Kjp4$ZnjkEShD*p{A(X1t7>R{II^S
zcIJL~9jh?jFt5^}^J+?y939DR!p^Xqx9@8O6K5OeW0;WxRaQJ~l0GL%7J<BroMWtn
z64a_H1?z`+-$<xqXEY>5YG`aZCcXq0{d#uk#4A=<Ma0^vRbc6FrfF5+Aaay#RIj(`
zS!6`am}m$(Y|>%W*lJ#Lw!;>gq_aFiE$tGtDo1Ux6UpUn{H0^zI6-<lz~WmGi`Y&!
zYd!J<!KSVOzSfuT-Jf_^8&nM$K2;Szn7Uc4=qDqblkAn>`-Ka!_e9VsMI!yuR0*F=
zPvkzg9VOy9xFSs3e#d-Hj#^e&LxuVHoH0bu+Tx&AAGm34IInwA1j_80^W+$$y1z;{
z{Vwox_owR|0~O{KN`n0_&aeD%x1F$6fAcp`w}@<3Ka9>LSI0XHD%q-WXuQd894mM?
zF><n^^yGl5@_875K6$HBYk2WD5wF9@6BmM4072J)%y+)RIPF#z2?-}_w&xyALIy>Z
z*yk&Ie(p=Z*AO5l=6dp<;<Y@!9Xz~0`P3uJi%@xL)<%luh#B9=%Unn6U#QZqM*a%b
zQDa@v%RiJjEJO@Z1wul)#JKh5L9N5C?Ive;2$i?9-VU1Uh#Q=kyJG7m;DmZK2pKqQ
zMsxJVhQ9E?*_hOzMtF@-mh!S&80r3-Z<g{Int~IM_%+sZISKWSIZ8`Qo>hd?eL1>)
zIoY4L%u`y5SnMt;m_ERGeGy}M^okMg$5Z=cr79_6qbgWQhS2_o?n*<vR-zs^)hX0v
zAv8V*oJ&MFQ|(bZSE8mnkBl=eKSU)eeKc^u1MupX+{EYseoJ2JcTjMSMsw~>9&zps
z*7DjjwDkkW+yZU)YsND9F`L@8XXK4|aW>Q2GD#RGZ$3NSKZjZ4K%A3?S4~HZgj3J4
zn4eS_xS*s!km6*l>b2ny&yq79q3X;n*S1xO)LUIH!@TXBop(KG|Fk;`+UE}G$xj@4
z=f|tvcNyreC(74VGmDRek;%{Ke}<*oB)dM*fuJ$zNx{nLbVKc9pAQrlKVU!a0z#X>
z&rR+2MKEG2CNogAr)%;Bq;$<&%mh^Fd2;#u9xfPq$376%CBDlTfz<gBwf1*C+bwj+
znV;nPUOnOtOWs_rHqdbNB^(|YbB81Kt~Mz+`BFG9s5Vsb(s53ga><wJ7ks-`!u$z&
z!l!WXLW7rZ-Lz_!`L6n@FuAtiGJ2I+6+?0$IbJh`bDQJz30?>HRBo_Z{d#Ze*E<rt
zAIwp8Z8*U}Xc809U}tOm1sNJde57gl_`YHu#i;(zU!Hp}_Q$h%J+6O!Aowk5;PlCP
z#PsPK0!nnj%h9+-88_`P>=ES6xrzPxOiPQ`r4!#}fzABsEbik{jtffZ1#P1ba?(s1
zb|&4B$iAv_3l?PDuu^@Pi@X(_X!xcUB@S7YJgUPMZd_;n3bm_>y;Wr}>d=47?Klz{
z1PS+$9K2VS#8u;jk!-?8t{toRQFis!i}V=wycCr<Zq^x_t?&4?X3Oc13cm3VlGJfC
z9$e`g8zK)ov<Ba_+npjD991^OHmd>!iGBfhy`%Oylkc84FJF*9xa30_cbjM(M)znN
z6^r;B@a%yDCvXJ%qkB4?CC(c#p_za)``VczJ_dk^2Y=GgfyyJTr`~X(Y|maD#nDes
zJw~x)LA|X#Kj>Im2TYlRL(Ei0$uxu?%Z(_bdn&N<@We~3%SLk!%EUMbp6a(Aqf3vr
zcW5&M97jcb0>`a7J$s26-gdX*zkQ7Aly+iKgf>gJZ)W;0I4%-zpP};ff2$CaT=5$A
z?A`sw`RVCXWginbbhLkdIFU3Ix_o|GKuj{JNwXU|71jur+5;r>Vq=X*E}}t!1A6b>
zp0+D(<$)6k@Q-7BVYIwv0)+f6l9l(+UykLIbY5%PWDQM<i-fx<me8u!T1G--UCp!q
z(8EfipG`fV0zOX=zMmqf)D8VSJAJuJpU*D)cwlG6vbNEzs4(bkD}LlnHSiJ8pKWeh
zJREO%q%=&uFvIIHUztE;XS0Xrj9i%oud2!-`)vi{D*ZJbdz_LU&37llfkVNtgpGya
zvmyEJM1xqY2dTW}+&cNQyG2<iU7+R0K3EP^@P})x;Wyfr-SY9M`yx`SHxzw8h}4pd
zxU#G<?hel?{}0y|SWe<GEpn+Ch`lR_pc4j~DOeP_M5B<$L>2tC+7V{x1Qx0bBiwYs
z>V$2%P50W>9C$VwkIjo*;tL}<0dhk;OM<YS;lLU}U<gBAOR^<KOR~@C+>qr94I8|F
z@!#RYLen@Hckc)r;xef%$*M3LP&v84YCkN=?6DhA1&?w=*6+Z!sbG4NV5K)kFxU~e
zq89lAt<_1yTZ~WHGB%76%*4JZH{@9bEG9zM8Meb|K;8NY_BAhz(7FYu6DEk!fci$b
z%Hiynmm|a{aZHWM=nA%p2XhPuAMIeCPGDPoFtl$;EvRj=#@w20iNTUA2HZBm{~D?a
z7GFWr_cL-gknO985Qx0jP5(vp-vKjB1zS~v8D<il(l{@{xnbQri0F%-yZ?|3!Z?N6
zB@@XK>R1-Gar)H>)xQAN{RTt7j}L~L0ACXjHN>?`Pm6TOgO4R)8wZKCFo<1$Fn`I=
z(#E(mmRYf{Hz;zMI5(;m;Ij9s6b5v^Agi50|4p7X{rb1HCTfBvIr@EgFy27%MC3CI
z82nQK47Lc4A7~le{v2=<uCN+VJ-8d}_Bjyj2yRe9nxZ{hMb{)Q$@A<e49FVn`a%2P
zh7^Da<-qHNO*xl74a;Mk^6#dLbV?FVso0P$nHGja?6%8;i7<mb#DMdm>%VAmH%RVh
zggk~l<LB{{`)%eSkv#?e7gyE)xS)a=zxDseIP^cmwY~+N;m-d9L4w7ec1=16tz*$_
z%0hZJ;Cxy-<g#?Sbf!R>*=_lc{M21}V_c@ge||&2>MdpsVD&%$fsg%%2ENllXr+?C
zXAyALvjdx>yn)d_HvjV``CotaGF#K{W8+y36DICa{pT(e9ONf(>pZ{*XOyZ9S@X<)
z%9;NuR}TWfojdkl`Rc!nw4na4vnT&6GiHFxPt@RX!E5;+m)QUG8NUM>9SP1`e3xrh
z<{5!FhvR>q!@!A@)At;<PSE=t_AGDgBlaHHm-cr(vN!FoXEReVnSmF6S6@F;=0w`y
z6F-=Ev6Jm)5QQoJ5DD&oArt`jCki-LQ?6nB_CcM4dZC_VoH}Wcq>~tjU&6W^!AU3!
zPC`v^khZwSzs7khf@PnTOfW(|v3)Jk>#-qD1UJaW>k@=+xXn%kw&z3thlK}c<{mgZ
zXo*gtoB_e$P`&*(ZpOgz(j;q;>_39PK)azu)@ecFFi`xzMFQAE$$zc1E2%}YW42A#
z8Qw7Zuj&7Fl~biCFs@zZhh+bQ2sb%{TNuByIrt)*Drv96VcUC17@Qpfs12z8be3em
z*}N&?Dve_o)M|{^e{OgmO2ydD{%8%TIbelYkLS&RHKVg7_`Z~~kaa4s(}n+%<~JC?
z04y&7^BzDx2wPXnBScfg7)860EUC@WvR$@4-1q4cn-<Zdw<HtD@>c}7wMgt&q3?#Y
z7=+>M5`d?PH5%~11&?}t1()xWts!2qT(YH4Va>er-ut_~Pj>wta9ho2M_lV55&I-Z
z5W<b+&L_^UEZ9&NJZAvaz=LNFJZ^%)6N)gn!`HwQN+$)_7kGXY7&FceT4w{FJd2s8
zg!RtAqedOf+zYJdi|q1!{}SiYjr0-=d?Ma)tcGm0-7$dKRqpLX?tcQ$f6c1XBJY2|
zc`^pum_j>U``fg|o@ga>Z3T!I|LL>#T{E|lejQh<V?Pl6)%lxy15I7|Dwbu{nWZ~4
zK1VY;QWHvZJA`~?{Fl#C3BFP)J@380JQ;BX@&vv2Z1nrAa{d#jT8f4XxRwrc{ubz#
zwpIFD2F<m#O|5wwe9q9PeRdZ_`@yyzZoch)w{6k%5BtG(#vegetXXbElz95Cfk159
z>FE{*ku5CYrK!vAKKXdJ`zN$PC)u-C4vLdDw~Od3&xQct#6Qz)68Bs~KE*kW9JFKA
zt>gEV%2a~2Lu+-PsTgXhlZ{#C&RBl6Yr~0;8;|J_l~=lx7QI^(7Z4LCrqqc!2%6!w
zPH)bi_i3mr(tU4hoEs$pndnUd_7htBKLk!EN#9DN9+Eq^yhtRpWB;)l8lc5cef)MN
zq2TLVI|j8vgK=G|q}mNX@1S9~(I>l=q3kEfp~v6e9c!z8k0sUj)2RTwjXC;#MytO|
z(*w`Jb6%(H?bhf=voolqP7Y*C0Lt_Pdg5I6n&*!?SDrW~j)A;J{PCU)2S3L69zU0*
zH^^Tc4rEpA4xZ3b7hC8u*IPFjUiNM^tL@B&UZ_Vv`V2O2yu%+T&LHi?$`b@C&>N*c
z$nHP#4(fV>_OY^`ooj&SY@+JP+Tzr+si~GOrwjuDiYs$oZi701ncF94|0~iDf3g=G
zXHc)f+x_j$8H0xVv0&Dl;?ZYkM~}%%zTlxvm+PmYKuzFP)>Q192PNoZ-Cyv@U+t91
z474TlsY{~M^R#Yw>JMM^?layv-4)Tb?)XZfr^wTF4&v4PmGk|TB>6Pw-=j;2|IVIm
zm*Hra9?9`l;rV;)Ede=Kta_H9=1$js&WhhQl?5mq9%NNH{;FJ64_bi-?eJaKzfXQ1
z(YymLO%^2;XZ=!X-UWT*v=ue{wp5=<JcG3j6JM*Np4)?gqRW?WnkmlF`=oD?zNsy4
z9}y+{oUl`_wLlGG|Gz&cw;u5BrU(vv{7NQ7RrY*Ft(T&HCoEht9|~%Cr!ZI7u7={?
zVF5l6O^Hn!QP>Z0G<b3Ez5#sGm`Q3@Lkade76f{6yDA@TP4${qCGqzv7qU&G3N+q+
znEEuVb`G{!G#=sar7sAq5|+@p=P-Y2UHF;m)vn$}-Sk>daSnb3_(sii>0UrfblEk=
zvcH-FeD(fDuME+i-_j9ZU|>B;!@rq@bSfD$sw|iMI3Iw2Qw#YRM<lSz(XJBAj=NZa
z_YUY4kTkAVTTcCX?&?<_@AVn1Hnnn3H}ue?mgAp4o!8g8R=g=txyE{SLEFY(zX{A+
zoA3(xSn=jzaI{N3-3jFby%oLtk6dha^c>`L-gz`26u#HoEFICrohQ0_SDhUz06w|x
z-_A15e!Z+P45+Fb;Iv;{Fo*cUEai+<dk~Va5o;Fur<9~FbxJw4cWjF1h#c`}8yHyZ
z{%9UMlSQ0K(>KsGK?Gk*%N@D@CJKL9XQ(DY+jalF5vF6Gx)gc54XzE%WPdv=>e4TM
zxT7%;L}=|yU2B$k)YmgK=-S2uYtjFQZO~(**Ohv<_q4(3>GgXfpcxbVEz0!bxI9!-
zYu7R9Z1@Z@7uezVXGqjdq7^$NJ!=cuQGWYTe?8_dePvyMqW7HZ;*I1n8@w~qUb&O)
z(2BrTh?E$dVBDy;$K&|s__@!;<wez%v$u=4$JzN%t+xf?N~(gQ$CRxL|3>0v`pVlh
z_jfXt{2yBI{$U|fBItpr5&2<KKSUgdNhyUWAt;n6qe7shP~p&T`p`tK4Hf{Or6(&W
zIlkXsR)H5Mn$-sCon8-{fYb7&GboTikCimb64~>f=vP6xYaCd=$nA!x_&4$%!|yGw
zkwE!t9FN(D_d6J9l1m@{4MYDY(vf;+;?Eqos_Y<;VqllMM)!d><iBBZJ^m-ZeKO@7
zL*UQt21K-I$p@b|3>5RHJ@OgNsm8HSm8tq_G@w_EK8a`->o=h2Lzj!}bo^YXBAV}m
z-is^>Tx%!AM=;Fnh5A%H2zy5Gz2lE8cl?FQR5WNN%?l{<pga{R3oJY8rvmFg3r6LE
z_Lv=|CHap&uKP^S-})d7`M2m#N{`w|sC^#zJhHY=xi7!>EZnULN;*EQyGL%H{<ez+
zS6*bAu0H(i)ts{El5%{|d(<47ektCMCA&U&>$&2i+ZXdQ(l`z6qHK8=03_x3lkuFk
z?vY^V@jJYCw?pgkTfEOfX-=X1*B)q+A;lY@%{&U@UI$2j+5q~KEJpA&)DSPIE1D!e
zz<Gg7f`qySzj+^nkl4_W`nj6+dP%y=H}bgjM&0i}Dkp-<#~ngXT~t0`9w=~CgYusG
zc@$d*#9m_CfEyD5AxHMh>2S$e*TPOMavyf!+}}XPgA&na>yF}B!9bqt&eqru^kmwl
z2X4aiG0nNh0|fXFMRXFs=xamO^BD)#R_phyD^$H`g8LyT&Aq%%5l8mPnJwCfc-9nu
z&FyOR&!8(b*ZCGo{GJ6a3qe^&c4W=)ZEM0Z)+^~$+9yg|R3JX*j&>m6rb;AaQq~m|
z1(E7}i{8EURD}Y5wq~3Wn72K42eYNV-S98%jd(#adh8K05a87O8NCe>_7bha`oQT~
zXyJ;LgXJFi>3sVM7f?M&z$*}6brd-tdk3$VEv`rLDtChd{_0)?!u#QfCwMwXDA2Zf
z%UxcAZ+Jip<)2Hn&2l-wiS_CXy@OJk?NOTjI}n67qrCU?qfsQ(faF?>F|2w}^(}n=
z&Y}3EOBdtWdOa&=cJ^LS@@WX`nP|ix8YuNSi$9~e$IUP4$o{#^9ukwZc?4&u-}TxM
z$oN#@7k>?o)7EUT-`lMPwBF^DZ#uYwtlE-NFv15Y^K7k`Ktl5n|N7F^c>d|NmB;oD
zwQ%%ICTtj;s7~I3?{B-K)TF1cvfU-@aTaH}l$z}BhV_nf{0T>+8J&0w4eq=jT@u%X
zl_*j#FB_uI3OkaZM9syOOK{(I9V)^BUxdG=-ei6To`RUmL3n|nKi3P-79c5bEauO^
zL7U)zx6D9^MI(AF?bh5omZN=ELCGQWVHcSA8z{*5WWXQc%iJb#f=0xyFe#55yX0{2
zhVMf%{-#;Gl%qbHY;oKrcE8YK-&L?Z=Jvit?ZdvgGzJfB=<*kR_#J&pv6a|WzU?1-
zDM<cmP1qHRTyg`7yA&+w-co$pH5QhPZsr7RqevO@yK%LDlBjRTcXG-3eClLOH6vS$
zy-;KEIQwpEmS$V8ccz9-W;qm|_(<pN&3>%G>rGm8Dc&z|rGaD|*?}{PuvLQr{61pc
z3tUwsTnUvH@WIuCw6=t3URprIxT}#&aPdi3ko?(40z6ru1M#bCU7%h#cw$~<_}$w4
zKZlRoH9CO#+l<p6?P^u{M|ahj?}kN>pBRmtPmvI+*%N-ga;>o)sqYL_mwJ<y#^?U)
zRmpojr6(S(d_<eD^zAq*rKeVFc8YtUysvRG%Kxtaw%2YS1&|Xe&QTA^^|?JLEIqj@
zaFxI`?VS4f;}`{h2)82@x^@2kZK`UhztUE5cJ0dWaIePd#q?o6)%ez=o?WT&%Z0y#
z{ma1ZL(Ez8AGR%|VuGME?plj+eAjkPv#(=kM<%bgJMV|HtIqeqAtPJN>a6)*Mhr%2
z77o+wu2IL8kwW}IKq8TP?|`7Mzs*c;5iHzMpM|STpWKEBRt-oBIHUa*07T0i&}CQ9
zK&wzPGnp@Bp~ytrneeD(RHRiibsLnV4ZJ>ydJz4zX`YA75#v?t5UgJOXx(Ad_lU>P
zbSw`c^u+5OBuNDIcX#)0@Sj|oStG6VW~q}D<)&}#5}x!&=?8-&>FIiF#{`$dPX#(0
zvDTQUxc`2H4rtZM1jgbSM~_Coq$j4su_lkU1h=l72U{0pXDJ1JGp8Qstiw3HGaa=Y
z*Vy;qj!Ll!Z3WKEya`-4bzSIk3v>kAE(=dSRvb5nx04i<$gR3iTd)YO26frI3v=pI
zAktFA>D(U>JOG%VS0KBcZ1I9^s2yK9NQIV=>^{7f29C|1<3k9hZ(JJB6?nAEe)$5$
z{x}|+`5sa1vEt&9xYjh+vWUutaqy)Bm)rxZ?7}H=a7}c*^s9I4FWd;HQWR%$lfDSF
zoN%ffbQN6TYXxQ7uZTKq_cHZC1Tgynps{v)-V+TWu0=NLJ?he#*Ghj;kE>hzOlv8?
zgvu|qR8f-<#STXJ%^|;M8yo-h*#m^EsSBs>-@A}#8|vs<LHMe@w4p%_B3U*h&oNfm
z|I^o5;GE3a(g9W&8t}BYRg}yGTOaKa;RewO*@<OHE-ACZ6XK4?cpQ$0EF{;<cWP86
z8;ia5G_nP^gu6lA6u$<n)XZAO(cN22!8f~+&auCuJpsBuh$ybBV&lHQ)H|E#x_rU^
zj$X0OY6Eyp*)VjsQ}f5M8ovHJ!Vu^*JJi`#DXeP+Ym%2cC-sNsxQP_=P^Yi(Z?6eM
zp3vc!9}T}!%}^b|KVyhN;*$_S2;>Ii9`X&9JAHrnmz`B5nOe_})W@t-?L=c2*ApzS
z+~6?@_mH>4lhTW6u(r)dO0gxJ-E!a0(<uR&;DunHZFP@z2oo?%WMnLw!<L9kcjBs0
z>>+A}z@ez7k)#m`kg5BYqS=D$Gaq^Aq~RhOYB;JyuU0kDm^fR&Bqa*{T|OE!`H;hm
zQ#fHVAR!uR@cy-GRt?xm)E><>AXr6djYP;!7pWEDnp!*63`NUtEbzNXSYTMqWkeH-
z=`xCGa`hx#q)7;h>0j>VREUAbB?qh|SFd0F8UdcL(Y8K}7Y^s6X%IU7%*9#E>{%h<
zU%Ub|*2D$rx*%6q=jSj#Ok_-J!kP3GjK{d&`n<=>44$aQA{v>w%&+bYi1K}^nby$}
z_&<@`p2UJ>`M!P~ls_4j+Z6RxIy#W}Suh4q*(VhQ+bq$bLL4Ur%YcKa?;woa!(%M&
zT*~vCyZ3kRV0`{WdJyRs0XU?udI#>wY3{1l#!QPGmfKA`KQ307h2n!g=O^rj8XePU
zE~3+7`6L)I)r{~%ym9lJD981<!-api87f%UqfHLk(?plk=HV*oIZbd(`z<CpEmP%R
zZ01&5^1gYI44L&V9on69M?mAnSQidY--VBb;j_;SXtX5eQP!kJb+7-u={|tfb^AIF
z&jG~y`D!1twiNZw(^k1nDFpq@L_l;@Xo=N`&{2Y$^MhP(T*QSS%BSKg<WY<^deout
zX7)z2_rUq6bPf6eTiR&5y@`+>2vaP#grOotZx7$22mQASrH$TIFbDrM(;Ws1?(Ywt
zI3-1*V$Iym`;_cmPyR8nyFK<tw>7n3t<`mz3by<H4vb=#pCC5V*Y^}drruikIiI9J
zrN_RL`{@ke8w-u&L=Rt?N_yGinai_YR%V$amwu}?)*)^3O8TiBe`s0U_^(UgacA{c
zcWBdA<l%mHUff8FzkinPw<2ja#%!)W4Oy#J>|h2L2!J&~76j2wABHyb0A&!QNtFs<
zv3!9La4!FPS?%EoA~Bcm@}O|!S@TWdM8Mtl0jdGi02OerG}kMO)2-ncY`?fGtXhh%
zAjbRQ8s^;Pat<<eCn^KolPY<Nk!Y4oAW<-x&m7jydy5u~D-o@jB6T-s85r#hw1DFZ
z%nZg<KaE#cXGeKVzeqad+giu%%pMMgOcXWJSFS*L4zV>(us@@@YFjNI!pf#H+FQ|{
zO}Nlz#wjr<4(0UE2|-i${MaMiTpc(lav96i#VA>>eMk1hDn`?~Rz>|Or<mM~Aocow
zIt%?HZR?MO6_to_Q4WkMw14YRcT9?xkxT$uaZ`hr6@rZTX#RB|U8Aif;ONVsbnClp
zM8YH7AJ>|G$7y@sC2c0h3ngBI#}x=(0}c%J$gcJhdBQQyOlT<#v0Q~e&y=(;=gWBV
zeQ0`hwoJ@M2`%XcM#FpPB&&$y*qD^WPz((g0T;G?Y!v5s#qyp`*z&3vIJveniO_cW
zd`RYs3U$=iUE$QQ*J5Zn5g6CaVVY1D74S7Qv~{au@e40k=zwtEt{K&`!F)Z5WOXEs
zz%HsUnzG;C0eBS#)=A(6=pw~6Di`oR>DA8$tP9jCu}tx!SA9(V+^aYQF+l&K{U@z-
z*7s+=Ey_gi>RAD`qTz#kBkPM2zi^j{<i6wm(WJG8m~V@KW6el851rZV)|SB1rk#mR
z6u&7j&_pAZ{;E;4A5kpM7SD&wgo&3h+gplC^!7uBA$2Y=sR-AfjNhm*STR|?P`EEb
zRbTh@ng!JoV%a%RES{-uG?l@=!rv?)y~@rL5elV$;W+r`uBzTS26XA3OK(4)*4A7{
z!oTG}4yQvcKbOicFTNZPd~c|%AKkwsR!HvgroO=C?r&o+ccB^&MJ*YvF5)zZeKa)d
zGH(^wIJ_|?-EJh+hJI9Q{F(;YsGTmHuG?Z=_?tKRZGCPf+IY#Bs-CmB^5MdCVc_b2
ztGBn$Grk=x#0;(=de;A5>`G>qz8I6xXs$Rq)tar)ee7KW4_inNof(C+R#@CK9?|~g
z_q%bL_@_8F2fH~U!YaKT1UyNa@AvwepK02=7Niljql8eA8601WM;o<)YAiy`!ZC2Q
zGzmFA_P3tT>-!LyQ0o}fe~3*KmHbZP#K?1U5^#qj_)uXmMGVZeurZjPb$Oz5+E9zc
z2Z&H&QF4F8U>-mfLH$KPBTbMgd7(sw5B#T)nJUWkh&%JQ%=2p^#VD1uR-y6cT1E=p
z3pWZXx_B=uwwIrkx!cP+<y5>5&m;yzG3n2=Wn_q0dck^U?X9+1TLuR^dh6z`pQqPE
z9_GjXpG};tHU7=AU*7(KrY1BF@mcHZOF=e{?Sh~#m?hvVh#~dMV+KeQ-{(^h(q&4=
zttsLTw9@+O`{vJB)olJLNN!9a`-Q+FXLWb#7fy5XvN;I!UjC=)ef(g8ihnRyJ&?0a
zipfu3=+-RLTyFn%#QFyS(x<L^l0&x0ZtX%GMUSZ4*Aj+auwiJNveqA&uizh7(5Gpv
zruhAxNIpID*g!tl(HO`0bs<y@uD1tq;VL18L^@6yn(H?oe&G7a^Qst3;q>KOr+_!Q
z^X9|&(|9W<&zd|&r>3fYc;d)Lv##()*XFE^;iDkC9<sq8@naW+pP?DZqWnws!XbX+
z+at(8ufaG;KIZ<laFGCtpXn<`;?X&f$o1=>5+h6eq-Ew8R=!BPAjp8Rk)7P7yC4D2
zuNJ9I8X8oSh2P_;W9?Q&srM1)o`n}onXUwNR-cMOnpTcnC);z+zM~}JVDe4LT$qmk
zvQ3!!x8htbomhc1pME8-{Y)A_Sb=3O%^9bh;?6rNitU<!M5bs&VW@@U)@c5OG=!A+
zd$Z|tYw0-_v5$b(^y$VhRC~*o@AJ%!5S0A-apSMZpa(KNa#|V3z4vttKJV_{S!?St
zsg^?C)M?eA)g%**YTmnVZmr&W)L+`_VHaIZswKS+3;w2<+N}H-+I05xhjiGNeB-`z
zI{xmPqgMc%gno!*!4waWh(*#<Svr6F_P+b9r_5^jRlsH?ECkalDCI0|{B~xlKjVCv
zsj=l3>r`m|e(;bhQV9aqP1+ozjp|$m2mgS^Y8W3Zw4~#kU3&P#WRrRG%0N*`>cipS
z>q|1i>p;KE_pOq(9=Gv=CJSyZBJORI<3tlTEMvH~{4yV3Hs!SYsV@=#&gN9C`tgb6
z+=n3tP74Q&f8BgPz8UBoBiPnZrn`Xu)WXd=ueIQ0*dgAj1ROa37R03IWi#IFZlsys
zP{jT>%jb<%;~=~hluvJmW0l``-PX^}<=?Nd_pN7y+*m*#vz}xUqgSr(hlJnco`faY
zsI{qvHKnO`N=PjqY&^5$TZvCKs<|yzLAquV^T4wi(4BXZi*8VreUK$ePD5glX4TG!
z{L(=rzNu3<btCX300{g#BK}v<iJ@pCh4pJzQ#oX)+xWLur)Cm4>rkNS&2O<>kAHa=
zwS1Lx7On`H5q{X%tguF>pLZBv-`DGl)e^5}ZV^+%HQ|J*z0$jb_esgNH-Hi7bYpEl
z+Euu=+7afMoEYm0EAUeeatR4sVWBd~*2tI|T3CcO@$a<+q)F-4`Onx1F=tFq*kT~_
z9V672O?cELhKVDELQw~cBTwG#@0Gn&VVmn$JyB1TGNs)1KNo*$OjM9jX$;OYrdMdh
z#SkfFe=I4F9m7^7E0~*O>r^_s>7iUC*YhMKF4t)ezDh~-)OOyo*OiZ^w`xw}#bdlg
zA!-aYbD5I4nG!3DrA9s(AsPlVd*LkV@iw>Bf$!*bR@ZelgLUuvWN}g?w-p&3>;sf(
zD1&2ffLP`?RcJ@*VovLI=LYnI<xx8TepJ|{$J_R_BM9CJ8fbbI(DUqU0rq){ouIg5
zq|Y>j*mnhi!JKu?-*Dbp$}}n5P4i}p0~_8skxKbz%5%KVe`4HqzV_S)dR8oRYf7H+
z6c<B)`ojTxXW=?KLh!UjV~NEvMQTPoRF&SdWqY{8^cdBKeu5lPxQLw$Iq}lXxOfs@
z>9QW3u6Vd^>M#<lv{Fo4CgpWcx6I00?J}31IV`DbDsONlnh#vRDS#w2(DJ8RwUJEG
zn+H5ME8|U<O>g_~;o#O#0J%m6=fpnrVUWr%sJOe$9^wpPpCnsB!A-+8W?sIGM&%51
z&*0^FC#Kh8E_Al_!uYYn1+o)9WZwtm2x;&Hum;bM&gGNy=96ORh>Zv>JIGe;_k`(H
z|Lq1}KDqrvJe;boV5t9@M!c)K3K^7ul}dLkb>wK-0?F(1Xg!V~MTbgp?5Qe{55KyT
zw5-*e3`HIFJe;;R?w&WZ=Q2tMk;(a^FSha9M)qnN<Z)uKOyMJ+gjo`C$}fyxMNEF4
z&^T#Up4nR#S}@-i8=ff%P{wgL^yO?QsYfa)TR{7xhjo}4X`KZMrKOpxMa*}o81U8I
zAUVr!wxXoIHXLT|+Kmu{*{+R9LVEg35&9jZD{(nGp3BYIWw%X9*%xQC1mY_g)VO>5
zoYEsgfmxP)bm`MP<_1+4jNsju637utnKm5B;1u-j)@bU<TPM6kUO0z+q+#}iJy}Vg
zK7w1MhTUERg91ALE)n9cUWMqBQr+1mJ;F;f7VQSc1w3sUjUUHB&v>S44f!S|CB@6>
z8r#cudfJXii8;o-ZoJH_^!bA3_N>%q!%9lS*#m|)3N~;5#v)d)L%We->`9gz{mk9e
zM_*0NBEp~{LeoT!*Z&MWs(BU}2V@y7SPo0=!?q`o(*{KL4rX=|Btb&8#^EB@J&!Zr
z6w|T8{#A0adS&0_|DKv=EMoucScU!DrXGftp#W%pMhfFdlJ_kkDEyP4-I=wx(ODgO
zfRY&GghCBtqC;R7lX3sthp_adB_J+=d|VjX2w<AuB>d}=EnTg{H{jRcso*`S{cp1|
zv@mB__dD?@9H+sjUX-e8jL&;qH?Gs++#j@Yw32KU$&Z)z_{<@YA*{l9)5pnO18pR4
z%M09o60vggEe%4<{kOCuih6_y5|5C?OOkuKG?<X_>tUpQtAAN?a^%<12>w)1V%`E*
z@b{Pl0K%AUjASv5^}_t2mt-Fw1O!EO*@71#3F6qCL-Du$%-#D|t36eJ0=jvr1@8cU
zK2rL=fiMzIEz31&8K2c|myu}n^&%-ugwxy!rfGPfKLllyX)vOPA@4LLZezI4i}7(o
zcRRWZT6xAF=0J0_DayyL^G*aGJxMnvKTU(RS&7>H4#(jjf|a_4rp*MBc<?KNihN9@
z4kMK72Vf()al~5lyss$``YSb+2(Ac%;hnO~+;sKCk_!GRBbhXrJH+oVUl9+CN&m<~
z>SI8DgTRHo0rCAhob2?zvNq@a_WAHhH<h)NzLd^-^pBgBf(;a;0WBU@%7XfH`|m6v
z)gLO18z+pZq}aR}LA!pZS{|cTBq6+$zWU58mkvmtsVrRXk$@Q51WK%7`Z>!Lbm{Dh
zcFBAtWqjS+&<X}|x-hDGEBnF}56rfo9N!?xUd4O5HF~?^Wa77ebtppLDL(}#{uGEI
z?wL@I8l8$GmM&%?=Ki68#E$4I?~9aBCB}l*7#(hE(__8o$RVzNWEAkzCoD(78CITy
zfq+joem-s!^0!x-^C{(Dc8j~T09$gRwe&Cv%LncJiFqE$B=)OQIcijS#8h*h@BQ+e
zYtrC<o!`+M)Or_Pb20`s7Sgb}R`l9)LuEV3!0hfXNdr2?FlV$vzCp7dT##Amx#DX?
z2)|<5lx3s6!UmR=6y~!38~;w;fO~jQ_@n){o0}S&ak1>jvvuPP`_rZP<f_0MS#%6R
zPp)qf)j(Yaf@Y<#9!fdT5$DLhth!oZDXp=2UNh7t&^g}`%aWRnHINo(+JS79`%S~}
z%bA>wREJ~%heMTqqTt8sM0t7c0AWA)ud?{ggZPf$tm(PJ4%L6{p<^&wjflAYF)<<>
zY3^U%E<KU0H;tR|PtFkH7y(GjSYod_cEm{a6v=8Hq>|5_2y2MIENu5jffNEoJpEXw
z0yb*pJPS2cU}}eg5DM4P9H@n4t;0JF(F=QnzTDGDqIztj)*v+Rl;o;hJSwdR!N9a(
ze>bElf6m6et?dv|HN>gXxJq$<(rHVL>Fld9)}r$wZkP}jo~{tGtRJkl5({P$)^wLa
z)p^bR8!nPwA<klNT%!}IElQ6@-*xZP2(ciUcM)vTPVI^$gMrm}zyo!j=lC(lOsJG#
z;Hi$+Afx{>@J}l#mQ4bR-5B9}d;oP@?kzNaAkIfWeHSYF!+=uRnXQQtP|F%c9T0%d
zYX<@Ael0K087U=?>t@e7GQmg{eR#z_1AmMuxxtxLQP{?JTKiF$A_n*&;k-m7lsOY2
znK2Nvd05Cq`hz~KwzoD(mgOavq=D!3wod8~;0Im)(mX0#?c2(^mJ3EgdvGAd48Qe=
zg&38zvQfBJ#y)%yFU06@gug@l=}X=Bl!mkMjCg#Pd<Dfk*GrxJv#jvK@A|SBZpCQu
z&h#U3vf337i|7~5l=()qK<Q}|X?{z+@Y881GUUx-^HTg)0iFgx`Q#$A+o4IPW)M6k
z(Svx1sIQCCwB^6<tWy45(Z%Lcr}D31Bh2i-t%NM;y9;zQAm?%DA*Q!3V5bKLqsg?*
zO1pt=`9%yTX@A>|Dw0L#LSQF?7QWVHFP%NO>MUcat8dKiE>zhi*_{2Fx2U%Hgo$na
zt^>D1)3R)y2{#cHSUB$%X)Z9-T0>Xm)=<@xtv8Lf`BFRn_Rr`RibVEdSq`uAg8mrU
z;<5va#!|M1HGGz{J`S+{DpgDCRgH4eX<r}4$C_oY?bmwW3RLIxO<hyTsU=SJ{xgkX
zQPk(*6xyzpke!aaRC%J;whk=d_ATL^&8f9-=8(;kG$@K`e)7S1@Qy7aHko@Bo2~jX
z`}YsbIX-dCCDcZo&E8W5I$00>>aBVKTRL0qA6kg7i7O&b3)L8ZI^}6ZWU#e@^3EsS
zwPn|lldwN#!Z6a>pK0rUVKh@Ev}0WSa`Vin)Y9@-6fP|KO4v)hpcxI9(CcZJd`tiB
zMkQ*@p*zG)m7wHuonR7Xrt<MaVu(0^ArA^7KXok~lqDRVeT}fO$y{(JNKxWGSa|)C
zbJ%oHS^0A0zdD%(;wBJW-J_<3Wgjk8P4O1=G=p&n{o1OME>jbcG5MU9COWAnp*(8G
zE7?%yk=Z(OM~7BOJYqJuN<iRowYgEl8gD06?9Ne~*q6SMr`DhhSB#Y8x`$owF{YuF
zN?yC;v^anOD8;p({z8EBO@#xKDk6r12ye5DnO9^5aavi@JlCDSrk~1QG2^TNFRsdj
zZ3$=aFNP<Rh)Q-<x5%}LYGs_}3wVn?@m!!Sai5z=T2RnVttUscf50)Znns#BLAlU3
zBc(KX+1*uqUZRCcJ}C!M`v7t)!nIw?R{rL*R%L*C6*PjZO?4nupl2%mmzFH5WO17s
z2i$P@jSPg@V&UM(x>x#MT{44HRE;_td_41tSe+s&fVmTc3C<ha+^<&B)ug)&v@Qa%
zl0W-!Noss4bOF6Z&|?pB34W9#mDa02zj{yT<zbbl4%oM=kNNG6xZ96Z>MY_xHS4Sd
zV3j9LCaeMu+_SA$txqHg&#_z}kJM}bm`cj?7MND=A(<E^eA#JYp-M2aWjd$+z1}m7
zOp7?;zCT11$MsDUH^z@`m<W+1BC|<<6^?w$eFl#krm#D>byR}hv8SF(YO!*sOq#`)
z2%#uM7Td6IMJz2mOq^R!y-h|Fpq9$fG2K*>+@^Z25@qwxi;YOVoCg7AqBu8XG~;j%
z03~l)e(~yZ;p8zSk5Msdy*TPzs@OSNVCEBr<1nJ^rL_{XA(a*IconE237>5C(+~kC
z1W-(VIcp@UBRPE6!)nvCIlw^=u#tD=CmXh3Y2gx0qBc`j7yYw~*4XLb0L0*aF=Hh;
z!t$n+e%pmhhtgEquiaKxc=qvma?QiQB(Np)2@a{Jh_<Lj^ZUCjORBd^>2U*n{nc<x
z!wTTy+uWi&_KWJA>B7kr_MF@OHw*HkD}*UiZHQ`n_#LWf<jpfas_f%5o@l}qsg%kW
zu{>sYF(X1x-Q(O1GdZY%#jjgyV>9s4EF=4N-&YP2@L=SV)0YdeH$Q80z|T-k{HoO9
zRI=Jp5IQr?f~3KgXeE87W@SPKZ{L--fKM3xI{D{&Y>bUh>0E=1^1Lclo5eTKII@ka
zH05ZsNEMQZ>>S=PIf_r#q-~Q{aHR`fvv~SsHX0>Jwa=#Z;9S7CsHQr8Rx?Bn%Q#OU
zF`jXhas&S&V4C44@pl%_kz~CG+c{L`?^d(&_IyJp&#=-*KBVQsCD^;u5x7KQSfY@m
z_j_vDqSlN$`(9Wu-Qdl>e$pYco=Vqqh~iM28YA(Y1+z$&JJ#eVi&BsxCki{RXGGT;
zw`YS_ch3pF?TBHLh>@&}@4k~Ai2Wn;aT?XlMqRF8%%){W%1uX;%BXt1dVi2u1-QLK
zsN+O}jh7LLH)_LLkf$Y%JNKLpUi6L8fkOIPBkWFR-mI^1gu%>9-#`!8RL?n)wgecT
z?cSqfv(d63n_~7gw+=>HV$8Y$e$6AFr{bpTs&-*o*LDkJV7Ll!?|wO+5d%O3oD{0N
zjNBjPZ49O2M6#_CwdR$Ti$vSfK$L=)&q2kR;L5PU*bhs_6MjWc7zqU5Zw-+3$W5LA
zPHLI)RbWZs#aT%IDxm@cJB6Z2Aj+r%mvm>8b)n~GkcDC_eOiXGb(Q#Z`4TaL7L^=?
z{{FP9P1dU8wJF=b*tM4Xmv&+Kmusl^9K(-`Qn@^6-};)JOJ$6dh}^Er{B1I@jl-=2
z_F4{QldHjRafX2H)Kz&=(bML*61K7PPmhH;sHF6xYt9G4tnEoqr}0g2O`~D^W2^G$
z5i~hDwfBMY&!$57e*lR<cE9iJ-2@m}Psb*z-mtB+|HdklU-!1Z0#c>l+Fa#vS6$5Z
z!iUD}6PK^Q^5Hi7@C%>iAPYL%?6sNWEyi>~JU!5ak7u|Z&}JZ*ut75cqE|fVHZ<Eq
zs7fhLHh}g=Qfv945CzR<-=e(<Vpef?iVggBlMb457<4@D9>(!)5PI#y_Av8JqNJPY
z?GDGyG%}?vk+jD~ib9prZ@+>uD=VW;*Jk)lRFT1ft^M7-n+AKf^lrLYHOukJbFKD`
z+pctfi53<NqHQA`vc$Hy_P*5&w|fX{s?#;pI2~%?*GbOZQhlmF7y1uu9<Xquctk=y
z(Rx}>fXw{;kLIT4wsY@)oQ+?x<9|9&fK2!PN6Ydy$NQfxZEfe?|2iwbKgjzZ7KYU;
z49kB4WZPDg{3;1Pao~6bgc68{K<Hb%1v1dp+0z5w#-UgO-;)?Q)z?9o{o&wz#yI~W
zlRwD&q5a!>pnL5vPOwbHPk!)rfc6B$LezpK%+MHbuj6s%U7=V!G{m=MjgE!naWvYO
zOT*x&qTx(@JP;dBMBi-IE~-tBM4>5s0gQkXtP2K+LxifXzGly8TrTNrs+t>`s;*hP
zBT%+_J*MpE2d@?9v!+S{TvW?F9+Vg22CE=f8Cz*G-?>Jzp~#c2sp{^%zO#4h76@ou
z8z@`oRRGptM<765p*@0P6&NT3A#B*htB=jqwX$nguU>7|EUKjvgEi(}GXs+IqFO*s
zV02d3;-e|S03GghLJpK=Vj#F;ih!_6IwOyAit4uKKRSZ{UmGm)`I6~g>wJ};=3H{_
zY0f{2U$N`|>8JpvvHxA(wu0<`SG2BZZf$B=f$M*B(>eM7to){x{}p-lPyx_ZK{SPX
zjb_Ls;%nt0-E;<*8Vjj}+7R!zNb^2SCIYmep`bbet!+2;Y+1jqXTZ9#!t@r^=}4XU
zu8%~=fB=hYSw=${4T7b@E5my&@s(?jtTA%Lv1F{y#?B2r>-yKxP4*f@?apB6(6HFf
z5n2tdD?YtRk<oNI$-2WZ=|1=*d@`&{&@AzyG_C=~|HR9qs{%@x=0&nPy}D1qPNpJw
znjG(`%<iZ%s{&#rHLF6YFh1-}I#_XyC6L!-__o2(k=r3)2W9{vocg3r<>QhC*gDkB
zQjKona0GgDok*mh1*N`4Bx3C}DO5x|ZI87!uH}n{l^#Qj!zjfLn4-nDCa<%gqZLMK
zSBMW`MnoSpx`F@*jS9qC(mu$B+gol`zJ6^gy$b=0q^zQFy*zp}IvR3A1PC7_faVfR
zqHq8`Za7e;fy4j2Pym~=IXVh-d%E7xF=7l4VTY61L`Lr_*%2lesM^e4E05R3*^th2
z4vbPAsquQ-3pX88sD`^BKR2A2LxI;+OH@@2-ZrE)ysRw0%~pF6wV^14a}yPbUpJG2
zj=VZh>JCS5sOA9Dh<Aoh1=}sHpBi@w(54Wy@R&P58@mFS2`qm!?lwLYVJ;y@jSUBS
zj}Akl{|iv39)f~uOF2Fo66(VSjNe!?O@=8V$c~Y+Ale|85OCNif(A|`;i2O=get@F
z`(`i9O-Fa)U0$`eei4DNRz!EIeu3{oBwGQB?)2)G)dK>qonVXRVm=XpSw)d};y)QK
zfLfwQG}@R-g~kgZoO=!j8qx}6FW&_dO^u-1piBg`79a(jz5#-k6<d2PdHxKtm+M|}
z7XY?e$A{uY)SB?T=t^6u9d+hOWh>Xc*7*=7^3beN;F(C_iuZW=n=4HqnoY52bc37=
zdDJu@Ujg#Kfa!nFd9Y3c?_geZ(tx$r$%k5{iHHn`Mq}}D8O^KA;HaJSV}>f7;Stnd
zR8Mj^_)I)d4GJZSBH}fQLLpQk8A%>mDntNRNC8(hQXx98Wc8)aZUl?qgY@1zH9lx{
zqXvShPhL$F6X1c-7Ph<N7ow3DdYMb`e}z#LRDiM)DE!YIpH}x`enMM|<Ixu$Hzt^|
z19*c}GVY3Rv`HrlWzcn|@X`fbgcAVxCJtntSl|%`AzTrMp8|_8fRTu!7>6V}NbzRv
z8gu>igRO(jL7f7mKz>3ymWYMM626Ggdqxb812DpE9&B<UWpZtDBTjN@6LpU6^q^0Q
z@YsRKF@|13EaS{n14nipv274+q$Lm3s(eU|o}eZim0sP5=%b<)F_TMyz0Jj~#xq@u
zi&%+0tKl{Zg_mR}s3KP98LiFJtju1;yEUZ1672+2#A-d0wR*ah+N*fCR=6$M0dFMq
zB&R@c0EQb;RT?~V349V^)xZ;hr1GoWDNWm8la11ZM?#66Q6(_N<5@uuYt@N7I`@SC
z1))H^ISX?rsi=%^&4P$wK|arjXhaJ^9$06Wb7wmG+qF}L0lr(0(&85y8ygIdM#xlB
ztyUP|&HX5lqY+Q>6{@&FnEvmMMKU9D&5A_FEKg!=EMa41%qnImWT2~RXEb7yq0k5f
zD?-K8ne<Jih;J-Jd@^vw3E|2{;X>I%-`EWv_$m9VP+tNkIW}hTkr3eBj|v|Ad3NOL
zbz@_3amTG0vcAO{NqCEPgDJZz1Xlf>ux5^Fp}2`N2(20&(`7ObjHfDv#(*P0-VZ&Z
zj5?Ei5_p}da&sOmg$i*)3W*ao#I++80f9kKXrCt?xWLGl4zyk?L~?KlyoS?Ug8!?r
zq@cosNohPra&^DOyLxM|aNMY)ZM<EWXpSCMWvL0cOr_0YuYw^1bQpOo)0#D|Jd+#M
zYFvVx_7b5UZ*FL|9$QUOOruVy@=y-CHxnKaZI&FUg@8e>timB#dznkITCQBA#zt!W
zI|_tS_T<j7u<jQ|YDf&I|1jz%tv}I!vz12X7D=SnZqBAK(#iK(2<f)sDq0|hYi!JO
zsr7)A$Mb0jH_aLW-L=bm;T_J#ZL9CFni9jJp9=CMI^q|@m*k~)=_y;u`D>#WgbDFf
zfo8d>*=^daHr;p*fS?;n^F9h+8);TmnRO_7R?$_9W$Lk84@1i`QvBgKtr7b%H#y~(
zn~}vx>?#u=?4uwjfX0XDv?-cO)iy(Yv<-^nuw{N?MnahoU4j*j*vMyDmEGE52O1mf
zKzj$0u&bm=g4ps>HVHhPWGWg$O0f~kL43>N17pI>afjS<gCN?2Q!A06Xl1UppkC8n
zX0k9~b+>BhD9)o~IwJ{yqAZRju#7k^nOa}eC_-P<%E(5dV6pa<O=D}3akW+qYt}Kp
zr>%B~N~;@f0J&!y^{!RJ(r6nOMG)wbJb*3A^)x#DLvP)lk)O-{e>yx8jhrFVPx{T+
z{<C=ne4n%bJR83v#{YCoKj}B!`Hz-Xr~M~<p0ocvE5Ea0|EVlH9hlV+woF3V9<IG1
zyoSsZbsJA*;Hj)TPtlh1tk`BkA?fV!aBPp6%EqJVK(*O!R+F*Zj12wC*mBd=n(&ki
z{5m#5$J$Lqp@di6&M$P%&hL-<R|Ngfo3%47V;cJ3v}_rk|7%;;w!EnoNI%g3*4E}_
z=ji|0_<7j>h47ZjEZMtNG1&%UmeNe|3tQ_=BP|v<V&^UTc}tigj{{FNVUNY2t4IOv
zc_2`VB<oRY!uIfunPeseRJc0Z2x1S*JS23mW`)w=T{0?!(*H@x3@-B@{~y$JL8mPM
z%>4XUQ!^w#$N!&&Uoqo<x&pv7^uM*aZH1Hnhn3(Q{XZMOvw!{znPo(=`c$53KUxng
zX+7#+>d71;ux0)02Uy)FPmw3_GPLw~IujjjNS<j)A<)&iZewTvz#2POZR38)D&AEZ
z(4>Najq4%ObR<eghJ*u2+=P*a;sg@g@h{6Gk5G+{r{5eO6gMW-*N5@gcRH9Ssd%Nw
z8X{jV8j7FXoM66e1H-}6jXLD-o-t42sRyZIwLH^glXM^wgo9{!Bx&NFj+so*<sUUt
zm_gL3F_U;&2^x{k4gu#<UALpEp|N^LRo$XD1%ds*H0^mv24GQqf+o_{yjVn_OoZUh
z4?x^D(1{(NFx$-fh}oiQQV%T7js)HdH=K;ele_U4z-VZkl)>guG#tX3#xn9Ylr)*&
zq|N5#4Gqm#w^tP^rq^tS5rL%j%~#WzH{c`Iph~0x>yJ6}CQE%*hp|GzXlbf%fe)TL
zW=o3(cgS3}e8tL!s{;Y|nbz(x17T?O`Bu~sHD`VEM}lNZoZCendgH@K?KUivlTznA
z0|6_A{DN%qRo-;N&YjQ(b+v@K+GJ^3Vyr`&29cR0Y4{X`!9}PRva~vSCs6+MLCsuN
zhnc%1zm!))S|4x|*JUEr^V*XFf9PfI+|T1z4F6}FKc`T}H2hy%^NQty|8Hx>vw(Ob
z!18nY-?Q=)k1v{4P8wYm5I01ZRpn_1#O=*=d5e}MT@{dGy!~kg;3gNrvb?bVjQs-X
zC|$j7R&9zWf$P$%p<Q8Gbw<k+cS4e>NtKz^jZUfp;zIv5Ra@y2Z4r*=8<97VXDV$S
zEHBv}<EvK#ssLgFs?4FXDrv&ufKS2Zt;r#?d!rdjr<36rT?I%N(>lsAYb~o2eB3?g
zQt+!XG6R`VB7&>bt34iM!&YS`%Bn`k#@J(Z*9P^NI$f+78cSsp$Tq1;RgtzzzUolb
zTve7oAY>U=Wk#q3_Doko#IuAZtpq+_iZ>*PdaEMQ0I$mIEu$jIGhK<jr&;1?bhOGe
zN4Z2ihF3clP1W~xn*9R<bkP%Dp1*m0aN3HDo?=B7n)>Mv3u{yj$+El3+*4MiC#>Dn
zw=D>Q$r!$_Q58@H2rO5H)A<odiw2TAwQ7jApw5mijYxysx=q5X({6(WG9#Blx-x^W
zX|7L4v8pir0%dXFiBMX(EjnJ$k1UUcVySeUxF-We2D}>=*jajvnam;RSatgvQ$JWk
zZ_}IMEc9+76CbB}YwoCmdhMvfqDAGfNCbg{<pd$lR>F{t4vpg_m9^p;YNQd!<z*AG
zOj=li42Kv15cX&zT4%-OeU^Na<G8%6Un?O{)|;e@D)A;px=kc~o0TIL)&`I|5vbs3
z7R`VZ&IHN;XLe(L{Q?qbEZ-2xLPy}t(hNnNw#XqxIfurrR^2;1#uux~=kD45CHU$P
zFA}q=%(6I7bn@v)=|07Jtlni)!jQOy6xW>eIH1^`hCta??%O&O1b%3@3(A@jW1}&;
zJ3b8~7>@?Z7+2CLXOE1J!RTsPWLyZOO1=pl1gRk$D1kGCzVN+<u6L{N?FMOXV5>DV
zRmYwa3;y80V)%b0(4Ar#h57##ZLQ6c|6f5De>Sx=uV_8D|94h?xc_Hfi}#}A6L#+R
zsoUS|YYXV_`zemoQj7_k^ma}sK;R>n$hk(MyQ1;r7}1ttEndDBD((m0gE#u(vjDWb
z?8FO_W2^+xfr?jSaBqT$<4{z-?=zMiio+5VNsfxOc6XE(H+)YutQ6kicqlfS#sw#q
zX$XjWJzm8}{p>+7^_*cyJ~2!^op){b_Q7qv+Xgx}s>krGpxW(0ldt18@e^<RB=h9m
zp;Q6@@eSJ91YM6Tul8<qsh)UXk;`?vwzbH&gv>SO3}nSs)5GPrhydy~{K(fMnm29f
zqx)BGZfIyI2B7xP8iFk@ONK0d#fwAYwR>Qc>qqK@D%o`WT%G`|stc$yw)OQArk>l%
zggeSO2-t1710bJ@<0Au+2(Vf^63#O1KlKK^v;6w&vC?X@J+IQvR|YxXjzmEzyIn!g
z)(Iu|8oo+aLAL~Dk+f(jS%My_Q`&JmY$YOVkWWuO7HxKsU#*rVusVE3tGjor){jyI
zHLaHdZ)vUx%8b*Cqq2lZ9bw9tTL~1bm(Vm_2@#hPda(pLO=d5^6cXo*3g~qyU=#}g
zLBt8)EG%HosN7MPa`#}lB&ysbkmY7B*HdiNi<EMrRWeSu)0Pei$P5HTF~?6|Mx=sI
zQ%qDaPDN6r%%>|X5|AR~#T^Mbe`hY?S<^dSC->3T=rRmX4U*Ax)Hrn+jk2Grm`0gT
zK~fW4WSUI@Ja(g<Yzp907_`!fBNoJ6(DRj^kMK3!O!imPt(<r=rvQ}Z;-EAahSE|T
zl$OF!T8o3yS{O=OaZuU{Ls?cFl;wq?EH4hq%EC}q6bI$%!cbNgInvV%_0>g!nP!xl
zi$ve%Wz)bZ7J*kx1E)w7Zl0!>n~Ox^mg(jtp>fkHe2^H=0T%TxCv{c;7buir$lCKQ
z>-skh_HF?vbtZoDvaGzoAOt3yPqF~iK?I^`lz7V~9u1|$W4}eeVUrI4V`uONj-ZnG
z!c5-tLHJ@RtTXPfTGSQ)a-I9Hn0Kk_Y>&}c&-w8~wT0_1U%qK6_URk<hi{6m0w)Mq
zg+Qs1?3ihL`79kg{ZXfNOL^<pYHw%*N}H?fv9@{Poy`7-wo3w%YCL0ETJek~(V<Ws
z#|lLw)9xLM*Qt0$@})W6kdf!HMNha5&rIeNjgF<v;P+GHbj$Bi@SA*8*DaQ8Ra~Qm
zskT>cj`q&Z1H##xUE{p-@@R41Cf7Lcyfl>VSt-gpCtWJ)nh|5X^Dzc+&iuJ)?VdSD
zhw~L^389YX0C2P;yA!*JEmfI{M=*K^*3QueX(@A31yaNm$Ho?>8$|<Z&I2SVOVJ=&
z@<7mBC^~;zUVfepMFUu#2Y{zU(EzS?>CLg30BFwdMw%YQ18B_yz}HR{n}1neetG{`
zu@F||H-Y9v@pzDrlnh}smT;pV92eRn<8Zc-^jGLzPg9cx`KXMEuTYu%5EU~oI5FeV
zM6G<ru5VrcKxaQV8?IZosdEsmz=P&BW`St4-6J+Aig3hU-`PLVy`|Udq8gf#C`-$C
zNlAHIxS7a!m?quz$*n1%@eZ16@sTmCm^?nTBO#vVDwM65B1ri?(*igXfw$6E6;+<5
z8+R!c%k(T&&$_BGHW5;5sDvzYOd?|bpwVKpdj$8VTq{lZAG<lft{yez^h=lCJY79)
zGwcjj*gkV-P;*Ddf=!sogQnCd8*^>v!*s^Z9%EmW?Xg~cJJaWOxy$kDc;Uz1dW$J!
zB@V1#E8lsS!=lGo@4re8PLH*}0q>n6G&&U9nT=(}-HKo=;Fm@%5aQOLLXGt39{r(R
z>VRUp3JMNL8eAZ8aFlMhsTB|R6-pfB@%J7<`6RN5C5r=(9Ofj)0lHI#6~%+}L@r2s
zEFx=TyhM(#=wE)NmPA1gSdzB7pSrhk$r7_;C>4u}r@*l|(yH$YrDCKUUeXxICXlX+
z_(6{6B4{or2#Fb}kUhDcR;VVIid}+1J<Y@_TTMoi^oVnGbS#+)rN;H=jFrAwzLsk}
z#VqTGnqKM=w`z+jaaE9KUKP+E23G*gC3u^fsu?|h%asGU+zgRM+ziRr!9x-Gye{Pp
zIA61XJbb~TYe`Gc;f2+sCcF7+-!-N?fS}1ZX$qutO~%M5VexHo0FesE82W$!nZlkL
z`mMnlF^NQ0ybwQ{2ygQdR}PyEgRT4j5RG6}RazgUu9gR32gQhL8Dgs>)<f!`KuE-q
zT3%gXky(vUSD8zf#)4u3$eF=&!(N@mwG0Xu6*XhL5>lmXDJKHcfNjFWXk<XsQohwK
zHh<7|LYuOffdMbk`ddX)=_20hdPOp=-p|ui(c`HzSlR+EO){ZPF4_g|ilS0e{fkPj
zv=1{$UOae915sRhOY2ZvdZlV8F1b`N6qi~l9A=W-0p4|HnzklbnyWf<aefSz?;2s1
zg#QXLNlu!vI~B{|4Pu$(AetuCUfbQ%35cG}pwF%*1>AM$O^I}90la$LbaTGGjAMe5
zj_71W@RO=YU@XO5Zp2?wsdA30`vkpm$}B<GDawsi<KWq9i%8}*U+KyVwOrB6z;tBN
z+5M^tR2udZ$e{2)kUtPNn_%=+Px3{FPO=lpj*dB-GXv&YlNiKMT_wOc*c~ZWr!x^?
zCaLe+&2T7@NM=l|gW?+K(JWvRM`>t01ZGLtU9Y=fif>^~zKVRBzO2HOP<RvkGj$v+
zpiZ9-q|Gd<5Nw+cembZkaUpn?%6>Xvbo8grRs9UW-C%19|LK79v7b73^|S|gZNp$X
z@O;do&Rv7;0bbj1n11xeoZzkH#sj>yVKE(aIssGXss{Ift+qR+1C8fi>fH3?9)Pv2
zlIcM6ft5OUb-4uZNkJ7HujNb}9&kyZ!>4Hu^v8Y|jUo&&FS3Va52i!AqPC4W4CU><
z8Ex}u#B)_1O%xH}A5M-%6SY<uRl7Ci)hUoo1V{G4v}RyW1DDV)l$IWpv(A*|7fVB}
z`zdToM*jFQ;}VXev8;^d_Spu*jrV2Nc_um?58m0qxoCoGDmoSqg`@lq)v(E1KhiWU
zhL;*3zb}j;f#WZ7BbRGKo>VlALW5xUX-tL8FWS!uM30EHq$#iz7p%cd2hF8svzneV
zu}a-!11kfCxEvlE7dZfngW6n>#ZTmKR%fgk8bZ5-<ZaPJt%GwDP1OP+*($B|B&yPz
zae^62WP#KS4@T4B5L)xa>dad$bSH>fD|oU1NW4lBS}~HM(X2CT>S|a#)}g7{1tJ@n
zzNfP(x}-vkB&VW*R2oZwZJ%5upki=+a}ayCX2*^iDU9mkD3vlC<JUe-63+4B4uUOz
z_KJgVX|q*z`CGcw)(9(VLVVFZ!ZL6On4sGn+>B~Noc5|~xx0fHZr0;eREVQMaZ1K4
z!ALJfHC}+D>erZ#N-@Sr+|*SVkX{fhiRHJ-57Ebh^IW{xti6?aSLoaPUKhzkpJQ@7
zs)wNf8fs_W>>7sY>$D*Pu6R|zqGC7Qx^yWT^bmCT)cOu%sWKQ}v!kg-r%Qyo+N{t}
z++%JGA`%oiK~53j|Fm$sXav5BpzYl&5Rxj6^=o$N8{we>^#P*goHW;JWLS6@OorKH
zI2=vadf4&U^i(37;5ckiqp`e@(4IU24|C%N5f0cNW3`lw#`i4ga++QUO<3GeT?;}4
z3=Zy};x1V}PRT@&K_+s=Dw%c-T2t={YBBb70t1z)wANL2B$PmH)xK(tY132&v44V&
zT=@m!&z-ZXPK(aBRfMdyhoWY6#M%ya{IsFe`oE6qv&toh9FOMw$cccLk8~M%ov_-a
z5AuM*pxQF6&C688D5dbVqryXUD&0C1&FqdsovI^bO779i1$wxSYVYhxk1n2YC0aUi
zd(z1eGBzQaXfVy~jdvNAJX~~k>8W&<s>95?=}h!cp*hbzG@2~3gVs)ubEnurRE3%L
z&#7Y&|8Ff=lnocGe7W=V%xIaMi&E@>TZnlGlQ_0Z6Us<21j}`^rvea{{4;OdJA)1I
zL`I1wnqcH<hwRZzX)4@giPV<dq|3JUHO*BJwTrZb3474!zy@ZO8hcj}E5D~(PP3bt
zH4xSSbz*;4?*FsjzT@xKzXiud?r&3**wdhyAIQ&DR;<kc!SZM+@{ADd0eLC&j1fd^
zz8%8pV9Vk^esr<=?6fS`ok)<UD-*5vvYW5f=a@x1n=~^jJ7DY9=|ApneRWR+xG~-i
zi*T1i1hj_IJYbUwg#wyMg&3}B1r4pHUMkjnyUCbqC`uSrHR8-*jage$AFl~k3F*MT
zO7cH@EBG=bUZq2$Np(4w)*N@OJW3f?d7L09Mo(HJ&KOgbBQ-(EHgJXLZUhlhLy&8P
z>Qw8xykWxaHD^$}Syg2j8#uUziToyNM;T7Pg$Rrq*!#!RK<hNd>9-GHQS(OsV9j&k
z>>MgoanT^3_h%v7Zkd!Fi-hn1a!A&g;%E^vd0QXVFVf6wEJ+I(ZiVuVA(S8~plDff
zCuGpd@5o2iClu=}O$?4p9&tcAIjr&4Y(N!D=U06!72P#MSfO|-8j6gYX^{9dNNfT|
zXRGBobG|S^6${+cFo2BXk!R9$?hd7;-T-+Wtyal&IyMAUQ5dIj)i_KySaSk*1tx9c
ztO`o&APxbZ&J5$;gLPP*Ppjiz*jOel?t2lt@EYKdH~>f)>BF;|ot#kzbe=m4=VYRE
zT#<}if!s2UmMxLw<#_i<lul>E*pfeFbtz&&ESXJnBelGa<;jA9t)y!N95#bmUVL&E
z^HKL$C*@<}HQM|P>5*hA<B@~n)KJ6F*VNY_%}OK_QQMd}DT8X`a5hC%=U}O>3W+*M
zlR~->kY_3K5@h9yyNWR76w7bT**w9?rq^4d%~XfVZ;3VU>P#mP&A!CJy4a`1O*iM2
z?`8|ifbvqhA7)iZotMFqP84oB1RA#-0{J+l1DgugUSf~vb%h%RGr2Qy(rh+4Hb|d>
zm2(*LKrr=`-2WX;W)l%>cWjAn!sX-yDUjK}ykZEB-M+>-t6+)M=rC$>HT*!uU#*jV
zKpGvpf-ZVqYc^fA24{<<r*U7XZ>g`ZM%M6_`s%Q?P;(VV@%HV^2(Epw0*E!*IS~31
zT4!40=RI_%vb5{l69pS`#u{)2YAu+LW;Rd_#p1XhBy?-Uo#@D9o3{dlz#M()cc1|C
zX>u+)_hQ5;1%*@`ucKLT>C#pCrO0%8kd@Fuk2o<;B`2w(S#UGYklnFNc%)Vm8gb|e
z7hrMJo-QAy0w8nHTvAI~mnA`-WxSD<NyaI<&Rnc!oU|y>_N^3HKG5%gET@ajI#cbM
ziUL<@bLuL&mSX4YxwOe8P6g<ZmR+)1Xf4BJ&bGu+hkUDrUG#JlV~nkh0$<wgp%XS?
z6gfF!Oo8ahqhwU}%m#yytkxoN+zpE0@y2X0=oAU;^D~wJNU?~j5@Cokda+MJtI^X<
zBJVD-^#;^76;-@-3^h#51s2g8y<6G~6eGGQOUPld2tgZB7Wfm?^yC^lp)WzyD&6)~
zg3!|Is&~zHy?Cs1mhOsmS<BGK(w0x?`@2n4)RyZI@Pt!v;vPrYF*V9JnuUh&=B7N>
zk*-kG4yMpi6OEn36BNLIrszJlTA3~u9}kK$oX1x@*gCs00DviQaAX<=Pv_zs>_kC6
zC7+RdH95CeiLFwziLZOI-Z5eP;;hu!Y;xh`xj2eX0$B<OZVQkSo^f)fxGYt!=qJ`w
ztyq(v-*YjKlElTTh-X@k6*&URXrtNAcFB`SW-Xlw(pL^mq!2Pi5Ra}<IywMER{ko0
ztow22M;jI$*|hf6IoH}cyTohba}<uqO(pv0#;fJAG{K;Q5NUM?5}mPySQ~DI=(ZRx
zJ4ozn_x;JDhE)TyDlcy_qwETxDEpXV=pj*LQfP$IX?b^At#zlS5?BSXmeVSzlQTjg
zI%0y^-3b`cFCjQvr^~GKjMl+8kE3y!(J^kDz;?<IaE=P3f)14j-K??H4z$67i1E&Y
zZu!fXYbrZfPQs_w+_P)Px9}%U-f<jL8+$HjfNg=B$R9JM{L5Rj@Oox6+67q2QRlGc
zjhM>{<f>sPTE*b4VbH0C+gZ&sy~uc?pyPauH>TwYDH3lalG=F{#ihfn6V{D-x7mfI
zrCNW0L}ALKoWedbD2xzY`9C*)*i=%9CYA^>#j1`YNs)CzV;*4MG}_`S#ZVO_zG#(@
z-J(~+k!+@&qu0g8^V%t(5>>S$Q2KDzqYZ-@GuP6|sQP-BbE9s=M16RSj@Jm`FK-Nj
zH=!x2)_OE2g;JU7$kWz(vkDA=EWy#eh0D;6W;=^<^sS?RsgLtI#m4fRZY5h(@pOoy
z5JiaVi)LKZ=we03DQwrclVU<wAXsv@*MPbYCtIK9ji`-7@Sc{=Q`|hw3gZghzG!O=
z-q;{;B#$+4jFu<TsCGE<#@Uhrv)kDki;zu&nvM%4S<b6<w9$3-^=PI<T6?HWOl`H{
zsJHBEQS|9sX9LNNYO<HDmhyskB8sKyj9N}HZlC_Oh-%>suPTFSw*gXx!WrI=V@@NL
zR+a_)3V_!F%P)QoifJ$JG0U@VD9~mYk5Jt53rhiA|1TlhjfnD%n(U!Ur>%^ZY#XsS
z7UFVV_@|jNIWJdXsG2QP{tYXv%y$?wxnH$nCDpB&JT6^Ptcz6?uWYXO-sna#0gp6`
z9lgy-VL(FGbp<%=a$|vf&<|Q2b!#h;0QUmcm^Ihb6cAxG<8^=@-J>=x7^i8+Twsad
zfTi$=8W;5d0sr12&*7GVw24SpXV#3=m=kq+3dgZpD`}G>p*kt6LA%}wjZeUqHLDa^
zX9{tQRZBADOd^h=YDp6LwH|XMi8zz+nNTf7uQLT5?bH&CIunQknq@62$)3DY_>83n
zCGJQ9y+vn>sYfM=RAU-uX&5Iv?aJEh?W#tu&$bV>C)y+Jz3rpzd)nLM?NxQQA+IwR
z>-Z8}rOWeSROI0K6<!;Vigls|7zs<aC=9Fth{-WU!3foeHNZE&T@(SdvH~WxBS4qu
zEEtkSiwj)!Wm_;^jo4h~qLo9lXA|alRtLy>Ua!~F`GQ4JRn;$ld$ntu<=U#pFYFb}
z|HxBsnU;P_vl>#@nB;G}d0b#p=MusEqjS++T<d6bgPvHB%{nm$EQZVpCU0}wvea#I
zsN(P^58FeJIivDbXSM`8lW2m$iZB;3Cy(jpff?d~k?)C|p>eTpfSG1sw7XyG++vCy
z8e8gDZEWOS1<-3}WpENaIP!IsGc-8rcHij+N4sdJ&Ml_c!Lg;*2dApJO5KEhx?>|R
zdVo$lD`OK;V?&Gf85$dPd-8N+qg`rL=N41!*w|9r#-{1ajg7n|13K-jj7_f^8|0SG
z(9p=ssizwm>-s3?4NKO&AJdPEp7_-o7ID1@bk|uKl~FY+1)f1VLxUym1D|fJ)ZJZm
z`QgQml$Q3@8YsT31v>DojME+-C(mc2&d?B9H=#{4NZK7}b#5`m4wNmmZK%{W?`M)(
zkk{Zqf1Z_Li(5nwg*T5;vJFnY&B9_!qmwr(E=0;YG%^FOOI_dtReqIv#@;ne)G<Al
zbuhmRl}Os{fAdbI;>`x5AkEoy5xQ7<bA!0B$F6Oliwm0UdRGq_%yHwg<D}`#WibzF
z%!;0HN@mwE+H?!^DP8Sn<$?saXySa0veu_oMDcvKQ2C8U(|8-V6M%i-RD{&>k_eLB
z*)E3yud|Skqedf6VD1K*B5O8rTOzqT(Qa0Ur@8Pbd`hQ42up_$ZS>KapdAd>E=c1s
z3}R&jDj*=tZJ*Yi)vzvUBv4)#HF}=lqFe=LtD;xhEY*~`EWf+MHkYm`1R1-eK(xAj
zUq7v8olC_k$yvjlzclX*r*=+HJBueYx-JGLnjwYP?NIKDQLLb-h5cS7$3eq@?RpY{
z<hU=REGf`J&mybMro3Qt2KDhQed64E;xqou<o)lFQ!JzK``<0iZOfbU-~Vns_x|_U
z_|f%mbU~gPWi-o?b~#(c1>w<@b&DRK9ilhBP0S&e8=kt9+xCw5rrvGlCeOFTA@xWm
zGuGbNxO?~RhMkFQLo&6qF`XRF><*=(jcTW?fs$_AV)ky?YK|r&vEgw(Bqi=+jp4~T
zSntIXpU9Eob$=N?LT7e_vKioT9F>9E^(0~7Z%>-*qnS)B5u*i4zP>G&%P#f!wiWU!
zKNU`<d{AG$Ig#sxFb4<2WASVn{{qoHxXEf(ZK#5kf<a+6Es1Mu?2EsC0U+&pinqII
zd$5+?4##>BpMhpryx|q3W;!~>4j5z!$8+6!dka;GUV?Gf3os5Cg^Xtf)&7J)up(;6
zFV4)on&QzJub$J)|2@hV0L-dI{n1@9Ox%voPdA&3tg<!WH?U2%`?O2LdLLyw;fVb?
z0p|{L-rZ~J!78+-T(Vjg=d$0!KJA$o$%opFs+|vEdBH8>g>$v}HKQu%PiVOn?%W*m
z21?*exv7F1YZk1$ts6ZxnE|;ccb<q~HZYX24XazT)U#gpHd%nDX@=T^uvg5v(_u@U
z-wv7ll$&9_E@p3s0_@cc96F_{w^vF%x31pru4%3bI$C6D?>#b~YKyGr&g?BxfD5%q
zDY)}0#m~E2Eo#bZk)`wY$b70Tvff|IYY_u3)FP=D&Z`r>CSRyMN>l8f^;BD<-h#_(
z41q1w7NIuHt52bK>kG9|>Q`q3@KoC^pRmhocA>(iAtFjiX>Wn2ZdB_HsZ`AC6ic63
zEYMT!5$jPtd!KlhRJc2&!Ivwx>q<v^a(5KRs2R1)W{9rtrO8Kfx{M}`7u&@~v!n8k
z33V%>8<@}@Z!n=fu3&64PCZ@d4#u-voz>A^;k9?z4dR9b0)!V_P$Ji!aeLC&9hgmP
zISo|%3&8I1B%@InlJ-=vI~Ytb4GjH3VRs;uVj3v=yTk5Kcs-l0zsIJ5!fQxjDB9IO
z?m&ok7lvWouHy~@>0n_9`h&>sFet@zZPE079*v<{;eZmDFzs)Op&V|h^VkLvgrO9@
z1%d}+*roduhqH;W^K~&-D_Ld!s!Tj>Q&zjqM45$(vs2@m)*A#TWzv{dL?4y@YAw|)
zM7>JTEK9XpVz~BroXyQ*R+ZJPmE|xq^(v1KZD&S+z@h|_cZ=9=6uCg6+XbzT6e`Iw
zQY~B*%BV_WlNbER+jMXVo_VwNF1A^ty4FOuK}?S%vvKiAYTkwgRW$@;L-vNV`A*~{
z;g?@Fm1pXbc99q|31x-8Rup(4b(Ir7J#2c~&<TnSHV!_&e3!rrQbV^Pe0~-W7J_6p
zGnUQFpa6QD^XB48kZX>uDD9l0T@oglRS3$#8Pv%BZs^U{wOd=A`UFdVXI;M(cHC0s
zrhu^L*yX9Hu%{EqjEOCD=hNrvw7PYoeRm9caY*Vp2$I}rz|Dy$(5Lat2=>};x)%$I
ztuuSK^m4Sq@Jup!n@Nvc(8)oX#YJ5{8z^YHzr?m(Qn-K3TE!dS&Z|37ug8TFI4o45
z2LEJ6#Bqt%sjMFw9BoCI!8$QIt|`2n*Mli4f2s??nL#|W!x~-E9_L>-TpNUzrZdn*
z9*F9BQ*N=CTIX2%i@GiMj9~Yl8bXUBMeZ|IZir~a^Hm{Ch3=r9$&eRy>9jS>1EF0~
z^PI@g)e~avwM{%kl_WMs&T4vgiMB5Iow^2{K8VMj#IQiDb?J!y%AMy}+KU#sUd5hw
zdH(MdmV+7fB;LNVCljQ-tp=WMVy8>)Z(%><_LYD*OxIY(t!Wq=wr|3zdg`-JzH=q6
z(2Pf|k-&GqiXQ=$`qdeMX<i~bh0);c0LkZ3xnzCfd60!)<6f1Y-_FGq$G%Ndo{eSm
zELafQX<&&|5{d-Vt>}DOF{pIZ6R%)!Ck_?~MF4z%X)I+wqnyqsn|A7JTfx1nKGSF7
zt%j|`bFFlhL}VP0BNm3%jTg}-93HD?)DG;I7~M`v{ZgpK@);hSjP!VW!~xKwiB<&y
z(+TV5lIb6dcOPHv&J4Htp{h_Ln~DHxL-BNy4x-zeYm;M%YZ^hq7Lv(=MJ&O^ApsxM
zEOugxc5mc683+MHp>%f$522vA1$%)SKLnCkBpKz$q#_EYqaA=8(fDwYEw-R|ws<B_
z{8>Z2fo~C494waX&c!NqLvb}G-p_WEvA8mt88+I`v<u=?2gyg#&OB<RkPeXzhIR7}
z>nXIU<3;&rdcnU#8*N?j5B=q4LNVcOH(^C6qYn)JLmTXz;a@TKUm#PZ$3YkwZ3v%I
z8HMe?Ahc;Y+J7}KTh`LDq6zK4TF?yaoc-5X_~lz3*^Q9^Kk~w0x;ete(~Y}Bu?&XN
zELIp}XV^wFb!J0D1Kzt(wkyQ;prNr<fijB6sh_xM{kCfdy5D#+1k!^XVVJm>mac)S
z4UN^A00m}qSqjfp?@A}c1(=k%PTZ`+z96NogAy=3z<>!|OCErYuWp3-eU37QB`Zu>
zF^zmGBW$i-11#$`_|<OSfJUbSTi0#fHUJRB>+gT)Q9SiuG4r3lqxG=5)Z-e(k6aW_
z45pd?tt*<Eg#Bk*YikR{x3n!=w*1`uKO4VB;vxEZgo%G3E<|ieGfT@eBcL0!4Q}e)
zHaM_lTmObmJg}gaY2*%JeNM+BYzHY#9tp_?5y{Bx(Gfm6o_=#2BcxKh2pY0VP?}II
z;jR`TDzo{<fj9J+OGbbj$A_oO1Q$Wg18xZ5p<z0C6~*CKc`1xP!^spy)7D8Uy(=*o
zi%22@jg4kwk=h{A5nyZf)HdTODiqfxR}>D3=MGS8&n8eBPbHI?271}LjH`z4Go?b~
zP*frj4QB?^ks&-&K}E<Ee65VF90V;6h+sBkw+1$@HzT1;2zoZkC8#FILO5V_ESuq`
zbF3&wmTKi%%|uca%Bs5h<ZwO5$3cG2j?s~pXatKa6EwN1S6Av`aYQ*dTf12uqcbV0
zAgU~(!XUknZB-}$I2I@aJZ)|;-<*x6K%m<NrG=1{q;bN?#z2`hnBoRXD*y*-kg-jQ
z#CVXUObxXOSrQ;lC9@zka~3N%)j_tZI%~}lJZ)2zhcKcFYs_X?HL9xc*i5=g?^p&3
zoho6<%bB4DndBf#EqeKwnJ&hJao7V=vHegJPC-BeQndgI2zpH$$+G8K8iZsj<O4<B
z>sSL#Z;!A=f*FFDzdI34iCz^L5B<aaC&1T+F)^S%VI`hmZ)t67K+JD|f1w6kMyl$d
z!ev?~FR_MOo{ocmRzvxY71>p?WG4ppcMf#+U*Ab&2TBpB$13OQG)os$G@oCCK23!Z
z=~z_mkV?Qo93B-%<1`jcjfN8FBauN3(7@KtKC`*q6h%k$E;JWb$LS?SNgS~L7Wt}5
z$D`3Pyq+~Qh8O&p^{HLE%=&&4a@q2z!D?+VH=s^0HHsGP*|gj-Oh?m*x%F|j$nWdk
zh+`FsjK&hsr^(x5Q8{E`HCkZ?B%54k)<X|6iLPx5s-6z#but;Q_isLpP7Y7ozp8t8
zB&dU1+D%bx^wsU*wgy=9_=r&~BRUQHL1H*XS!g2qW_Ku+*i&0|!@B-nyl)Pu8t$Sa
z0hZ^LoOQO>*wNZnM5mt4bpz9*lj27057V9giKk-tKc|8GOvS)y_`l_?O)b{>pA{{@
z|FuHcIsWf#{FwjSiW6M&E6BsqT7hIo8}{_8!U3vdM>FOApit7M;fxV_sG88&=+0C<
za7E#0yhb$$Un3%OL+iF+T}#Bt2Qf8WuU6I2&?v(6S)jwNS{0DWTG1}D7Gpix64G69
z5L=d#uFBRB3?K&88w#730n0{uLnO&UxaAlk-3%<892@6HJMgAS0<GfI!}Rb`iwS|U
z*HtMQn4&Glau^uLcx{z@KtgK*DGJS+>U2#NAGIYAbPKJy3g4fn|E!|;sx&ML4}X`5
z+T0e!ib8yyd=Ur_mXD-rDX3;m4M8O$ja0f1JCgS%lF$je5g`FYBU{Lhg<>h>{P2gV
z`Gv6dHMVN=UF?Fr0Un$f@hZIF0AJS<w!WwlHK{~040OVPuq5!Dv4lA~G>)3(TCrP=
zYazb0kWIug$j*s+42KxTXy~?R<0!B-ajA!94eYpF&$U9R!LSIG$%jC+QE?87)dhS}
zyBHPlaQ9)?E?sK!V<8km1=X+RoBz%A>jt3%h-(}M<QfWvZxcqx^xhZqkumM%IOZg&
zYFF`10JTsDaa)8OVMd0$l6trFcCIq@;MEwsdEItf=+dRsQd=4H`b;7;%JTwK-K6EX
zImkK6#6<@pvtH6wvaOs_bfY*HZ!`U-<f0$>4T7?=3F}R7tWm;hHViPNYn2VB$gy!*
zG{nZ@;{->|SzA<a8t{C_i2^V3G8PVw4#`QDjdKr)%N%u}ZwctlRalOgBU=fdoFjn$
z=yd@k52RXSR)flwPsC0qXcth=<CmN0oj^PBP()pqIvp)^q}HYJI-w{O++9<HIS@+V
zm7@H_s>a~i_ZMt;F)k*uhDgJVu#&U1^X&@7(m>Cxb)(MQKDgyNEpIAilh0Aa!hWZ1
z70RYBtS}lHEUOr(j_qC%8dj^<c$pyqK9mBvJ}(0k>IUlyg;in_D;OG$QGCCoGRk5f
z)wCqNPUscWuaJ=x#$L0$1yIY!wbAAJ4X)B@zC0tPPXZ>-=YgC7+_+sHr3C8a8wsK&
z-{jDs&XS2#>LixzF>#K`mPs54A{iaqS!Y^pE+9sozN|W}7^O+*I1Nr&W)09?M?HN5
zW(HO+p#MqS>x+#>t%p@<LmXKs(k>Jh!3qjsE&oGA+|!Y8PhW|^qz3oV1%Hk#Pb3-C
z2%=sDL}9xbq>a2eC1KTt*K(J+(zU$0y&3}WpYYzxAH&a0{!fl2<H?=lgLsKBEa5=m
zfrO7Yo^lx|<gZw^%ya*@N$vlvXj`^?Io|)VY(=wSE<1zm^!)t~m;Yxl{+nS@K%2W6
zmobh0r=_*IjrRZBnwBqvgsA^%X>K{E|2Z4Kg|G~dji+KeM>2R9xY^Rwbaj18Q**Pq
z0fs%5GzWIYZofSWACVZWC-E?#hB*+%#SUM0PMWnF1~v=?0}IWbSU8$Mo1bhV5@n<4
zO?^GPS{fh@FWMLgY+cvCsdMXq*qZ^I%BK0vqv)OtKD;0|qmrpeEI}q*Y{NJNOh;hL
z`qvNODh!!ntxk*Vb`)($LRo$NTQ>Et+dL51utB5`hXb(4(>Jgi^}B`x16`dxJ>YE|
ziX|Fh0*nMO<GQU~1A*?|4I8`rA+DjJ5&j81RiLMPy&jCy24+m4t8?AP&i;Wl=Azo&
zv3MjLN=4w4VFVkOG>ilT$WTFUOu|5dGIv7=i@mmI-6p6kIRL-DTje+FZ@>-N`Wx6z
zc47TkG!jbSTUR|B0TiW<NB4|{G6<)CTMzVcLtodH-W!1#1_ml2Ot=C7r5%bF{T~#;
zWett?aR>w`qTO>m0<qyl^v!1NqT2LG6l%76Bo-bq_hf)a2osBDwybT8M0Yjf8}z|C
zU=%t~DX_77Gb&G7%+^(AB)J3Ghww<!tU|3L@_jXnR@ac;aM7Z-K-vjFwNXGNR5=1W
zwQ2`TEqMQ6ecD`I+t9dZH6%r-dx1wy0WXvY?qF6OW3f?GX@MT;$WX%=i1e5QurfhO
zqkBLErE1hixh63F1Nc0!R*{XsgiyBO7&?(;B8o0$gF2vG*tu~P-y~As9N-N)tX@D~
zAY)z|OQq3-kgl%5IiX%gNoC_86s|_=w6*OfyA9!*Ky|xW-4GTIj2a?-5L|&r(5Z`R
zH*CNV#u@l}6XF)31mFx*ts@Yy(zn}jQ{v!~hA_eq2ux5nue+`j0BnHZNGzol84Lt6
zp`B?Pq$N~2PiSGxfHhl7)gg3j2Ygsm+rK#onUm&{w*u>oJAr1X0hMT!0V04Ffs=~f
zs#05+3qh%Hnt%lY5s*t6E@S&SPVf|dGg$w<sKT_{LDSm*G%thoziq|x=H)BeTAE?~
zU(wWb&i?PL{2H}W1vm@obyXC)6A}KF7Suo$qoME!2a4m$3O4|TJQOxe1TQ4s)I>Eh
zrldv2qU#NOphZ7^6-!G?mP{y?U>jvo%=&c$-96^^%{>jG96H7e)Tjy8l)YwaU?#Rk
zR1<x-Nu6TBWsvQO-FOSJePh#6sz`H5zCDtrB;A$2)uUyFPu#A_2Txs1H)>eQQCl@*
zXcdykj+!>Z7Ipaq^H~m~7fXrB1}b~^%G-TwL%VKjz8PftmZ~7WDdz}ntqR(WoKUIB
zr~Kq#Cx^|NmKxk|t7#=&r`SOlcaFc<fJ*F=b~?Z>Xk9I@a1)vvN@ufvqVk13z1aV=
zIuxy+G;5?lStdrCPA+2#U$PKPRKUV2g2(+vapQ9<HAO>RYd38P^_5BQw71l-zP53R
zxqa)h(Z=na%SVMJU7F8KB$Co@i%j$dvTwI~i8q8YvHG=p@Lm?aFqQ+XG;7SQTl%*R
z47O}U4Xw4Opx32U)Wtn#tCsUEwgJ|54%8Tn?E^$=p%SKjAHvl&I12S^hxwLV(u}Sm
zg2Dfzg%#R=(R+woBW+WFRun*Y)axZ;kroWA73mQgD)url(qY@?0_V%1;<L<l6Ah$V
zCAX~@PACNg3^#>(*9~spYFB$hk=0GhvSw{{Itb&FHtnqskHj)jJ{nP-u9Avj_&I7Q
zymS{fZCydmo2=4rruxhM%^QFl<--(2RG9+SZ0oD3BR#7w35|xxevn><Omc3-Rf#pa
z)n9{cFOkb;l`S=WU~pR>H62N9ERl_>PP!_V9wgoWpc;?bM3{QLKwl9Z=Pd(142wI5
zgy%(d6K`4KU<ve~a$AH6Tq3MCEd)8TbuffEs<sjGN5mZwvYJc_+EKNs4)u)Er8sgZ
z%be8%!1<NTNYo_6v-Qs^&6clvc@~Q@t6L-Tk8S&0%D}=i;f6f`B-}7g|KHB@inrt#
zG}*^NCkuLYCXIn+!2j8W#Tgg@O~?PWE^Bu1f6E~B9RGJVevRVToLt@H;M;gJv{=q2
z5_DZ>!tCz7zOxsWjvM8^$)5=SHFHjB8kWt2CHwywV<X9$vU=RA8@Cfd-Lwq|&<$Jq
zuLC+K4024fs+%uk#`~JBLe$M0N+<weEgO+%Y~8pcvBOd5hB)4BRUJWs8V(J`0R_q6
z8aWx^uM><-BAVHqOp!Sg$O{`_fgc>`TsN?FT|aKo4Mal&Fwtp)Zc`L5#H+G8sD5oK
zxmy@)1LG@zI|6_wNXIx#x7@~^%XqY2MvH@T+>SEEjTJ5<A09Rqx{Fw9XuW{mK<kKB
z8}Bk)Kth}Z$G~-XP!-L_Tr+itT-9`P7QRKk>U1?{rPWK6%5bbUT88f`85UCK=v|j&
z`7lWG)Qr|<K48Rb%2pAgx0~3aJ@P-jImB&qMaQkr+wn7KXR-_9ovqZeDin~#6a!or
zw>D%`G}ZEJgO#ymX_+=lF%6(J&C@o{v*Od&BCp~uZAF@JUGJ6~dd*s%jKP`o|JmY=
z{wMrq!vA^iyA<XH3-f=?t;?1>`M(uy%a)zv|IWsb`M-4;eDaFUj|nX=FV6KsffKf|
zgY>vCE9JTs3JBGl;u1o%Xsk!%GFuyv;*iEKjW-2Gc@K}sCMp`~$^c|WFodrP5WiG3
z9)o3myusWY8XqFX<8YRw@C_-Dh*3@-#VrI?J-lQmv@_jsiae>fQvq4GokW-h1(#u(
z^`^!qGu4tDjMX=`E+9rJOxGf%lgw<zXvZrXOOF60Z3N25MPs&WySH!dY^SHV#0|va
zLWg1EjX3310nLWph5yX2c&#g!N(^xp=zD9+AjV}v@j<a*A`QjcLQ)xI9j{uWPGs3q
z0pke^-G;7p0|VUygMFR-gB#YZ@9yo~iq~hT^|&6F-lh6-jL_xN(;5>;OC`O+1z>uh
z_Xaog0O+Q6hXSmbtd_TL-;N6ruP|(z454C7NYPe{I=7&eXjdo|3nemXQjM_sWNE$n
zEp_>_f;jnJbA#ZT@KbccUIBii+ORbDo+5PdYgV89)eOt3*@pkE#K66t$y==CD~m|p
zs**RicqVUApKW&N&C3fXUv7u)p1f6kuC$UbFCzKXcIYi_Zpqu^XLE}kx_k0vD)};%
zyrs!A`Er%qs(VYDGx^MrKx*vlq5(bvCitRpF$wGwaf#WsN^(xOixevz2Ie}*7GYM-
z$3JXSdX3q}mhyUO1WQU8`8k&7=V)$GStu?~<`gHITk55hBc?73Ps09ENj*YEXQw~G
zyf!y4FVN`LLXBQ2BJGVJaBf9dW#ogp+7(nwKB$&99n@P2_EYO=_tS)T&$JclndX*a
zyJjZsX?r!>v#elymbta3rRntAv+UJu&+>xpaYbEG_fu}q@-x|<w*(55<N;qSw4wlA
zKS7Y?fuDSSp}E~Hw8b^FwcSk}X0MoJ{=z~HW!F4mh2*oH_I)!xU}R$wrBo$OQb;is
z6^0Xv?{pKcfQi}|O(A2;_V`YgliBf@lMx3IyHl{Bg84D?RelAvaaZxpAbbf<G>vz_
z#pt#`y2UI~M<)~UF8wT;&Z9&gY|yW<J57x)%Jayjp6bRpa<?l7Q6szud12f=ts@WW
z!IZ}&U1eAsUDHO27cZ{Gp+$;&uoibM#VM{891`4}Qi{8@#obffokGzNT!VW^zC7>u
z=bW9{d*;lX?6tdBW@pC6!O&GStYzx0Z-*h*`%7Z$&9WcABMqOw&exOd>nM)#gfeL6
zxo!S<SX!O=N^c)FGE;Q&(wT&kvsXSqgtzj!E&uczZkNl+QX4b!u2er|NJUsLq4=>2
zJBC2MjWy;_&1@>GO=UWl4ZA}i^v+OpU#gg_lNo44ieIIq@QT%NwYR>>yK*kBiDV{4
zHFu?$T*}c)zj)JG&NUpJdeq)gIdigOv-%AQK6|{F`>8=0?y&cvvqocT6BQ_7+Qpm5
z{cv@4ZN;rwiX!(nh8^vDHj?G=^55n&-UUWqyR#zYU)g3w`6gsltIIDCm^@hVQV#XS
zrpr`a-~-A@9IU^otjkocIez>}EXc}*Y7Kom$*f~1U!I_G1=C2r@6!<`8t}02DQ@eY
z>nsEjbW@IdXcHyOX^J<hekZ-XF#X<uYwu$~13V^5U`;JA)@^4rX6>sd7g1Q1Yr|O;
zrJcQa&}2V&w{IfA#LuXvTxYK{?Hx*|DHfdu&{8Mq+N;*}ekm~f{ZmY7z|@?t`uOy4
zejH2ab(4)!KQP|4x?<tE2<B{Js83ARJnhP4&$S*-u4sU~u;RIEX*@J8F`!hq6atuB
zD^L<Nf7<wYQCquEb|i2T|2|t7Qa9ohu|%WHnqBV1rrOU}HNVXHm%8`sv$8ao4?F$i
zE?99wLXPj$fkLLSPVX)+Y*t8%CaRWA6dMaC&0=n^uQx%wY?EZ+bU!z}dOtF#Ii+#^
zaHo%rvVtzzHfO2)oJ+vCIJm)&Q(a%1&4Tb&-LK6y*B4qZ#qJn=acQ|(^ngg}0IbRs
zx13@<+?>b=MJ7t{<W2WVbSV_rUOYT+HyntU7u)=o&BGd8XXom!NAtm4@K<2-p>;Q9
zx^_)0Z@z7(%S!U%^Hhwt^A*Fo`Ab~ZZ$4{&N~DTY`P?ZyXd%FpTIB=XyC&q8!u&CG
zh%BT)Bto9wfGYdMg*wq3X&>{Oo9%cLpYoe`%P974K$C;oEy&2iCO(lxZ*8L?+hXxU
zR+j_wVg}}vx8r$?8$oBZ#LGR_Ow*!Yz9pu<R0M=)Y;Bp#r(mCRiOQ5!3Sv`cv4mhh
z|J&jFbQ=4;Y+>hBMpKnZovYnkY#!r1Up=OpkB97wif3@=Bi@2*d!bQ#0?ib^><<jh
z2k8|}((2=s!i{FJq7?OHepB8K<aJ*;QhoH@KT#bPwSFXyE7S8sj|nspaP+fQ9Ql|7
zgYKj-SX~BD3$FCN{u-hpZLu;v9c&zh#ww}CIX3%%APdE?5Y04Sx9{kTj%U^Fv{=W!
z5Lp-P3zdAUuozzl<^7vk;`qWzP6w*s6849eeNe|&p<VM9?Y(hUzYE@j4K{1Fi2PuK
zQSb*_@0*&VwfPu(M)m%;5)0E<6I}sI7^)>hbP`u3nmzSh0d<0^o2~aB85S@vJqv$4
z=h5XL<2c*j(rO$^A5o$s6<x0--(Fb5Kf?TsAQ>S@Wq#}{6z_H@GYWZOx|%wQgH`Vs
zL=a6VrN~X4u4}eYo9A+lP3}Tp!oezQ@Y~<qyFFfA)eB2NoVj<JLcwJbU$J0*w$unq
zul{S{a!OK|smtqBUm?<jcXw7G9v&vAL%E9k>)uxkZ?u6YN+~*@o?B<e6If>8$~UAf
z2}hU1NXWMox$1ny-m?{$_rN2`k2q7UE)DIJeiTy`Ac_mG-2TCjshj=%=x)QhJ{ypJ
za}%g@2w<-c;rjbbaQ;Jfa&V=&%L2x<|L+a~DobX~8uZ~IKyTfQPN!T$CX?P%-y((6
zVj5q}P=l*ye1PKg0UlehmwlCgW@`0h4>B}o$s#OeKX1CRX*hoK70fHI>17j@^Ca2t
zag(4Y9{n6SYKAa@H!45SoT;B@#Z8pS^oi0PHlI{{BO54q`9t-)m0p0eIh(w_^$j*!
z6t!A{w&m>y2eTmjZi;)AU$pCT)lJ(A8$6drYVm6HW06G5x^X{Wu2ror*dOsQelVTu
z3i2}+*jZ{mbbEWP_8{t}Hoq9vYX+K~!i(+xcrZ({?yqFB|0`wdEo-qmU#Qu_2=Nmf
z?e~sdE|z>vwTC-~MnAzdJ~VHb6GxLsj%H#|WWlVVG*V9q-dJkY4!1cj{8g-4dIJ~@
zc8$D2-tYa2<Fd_q1T1HnexG?_a;th}nw@PLg0x&;{R~4)HC%k~HMAAYEear!lTE^*
z+<c46egCy?iJ=FJ0KDIo;s2#$$#ESQ<gx9>+1UFF(*5@n0ypIKVPa!8j7z^|%+=jd
z<otWgVA)Q>gr@x${eYF&oA19h-{9Nt<-NJ?;!36<9*Y$Ey-#=W!n4Wg#?1^u_<}VP
zYqpc?$(|V$$D+u329gvCE%|8O-8JBZEC^a@sqUjAragm&IZ7sQ=Iiu5I8&6z(!{?V
zJu7-xwzskyTc{k^d6m`cQ|&nx?r~WSok&w5{P;Y{ZcHiCJ|%=G`)%-$t7FKs17E={
zl@R&ieT7&GTnt|?W&GvGjTgM%LK_8dge%aXtvz)gv(~k~VnEI9Y1E5UU(1^^OyXGH
zysp?y8*%Fjl{OXCbdYv-skasJ5=q&HJriGgwnjm0*;tJ3E*WQ(Kyz0l^@@uDKPrIQ
z&@hBVIFIRT05I8*K$@EUFu3LYK(9$&>to(%fm^?PCwZerN%14If_0-=c;i<~hE}%R
zdB;CEpEPFqN<Tka`+921?&`9{xB8Qy)Y(|nn$2B+sH`ZZr_h%xaE1BR+Sr-DSm!Yz
z85?VuO?Jk)W<j?9GW@T?#rk!lBI{&sl;A?93cYU2W%2{fFzw%c+Ppa5%jbx8zce<6
z(z)-A+}NG+-fRmba-ry*eTRRGw=g~Ayw>C#wD%&b>>j@MF{m)a9I-hs%m`N$ALvDq
z6YAr?{*~6M&-sQ!$J-70eWFZ6IHaDgUGh<iVPBE%J-!2m-I2domOW#UE$x09njzgO
z4b8u4$XW{pKltG2n3x$WoK24AXfal3u*q=4rF@TKnZ4A4HTvJLl#h}P*wI2~Iv>gC
z7GyEX#!h+dL0sPxy;JX=U)C?>2&DXDnTZIkgFiGeizY_s=@yNW#C*DTIp3ske9x<P
z$ZVoK@r%$m&c6to%^^|HML<!rqYc+(m54=h8rT`bPEs*-c25Ls?ZoGqT}fD*>#(!s
zTz?a8sCYe*aCZA{l*MFY=F^uM6&}fWuVUxt%IJI6&vBF$oZ^{@eq_ow{POqfe{QUN
z_#1Uqwo9f~ByKb9h1Xdq2|)CUZPh1~aw&_q2>XBdi+_V@bPE0AROqa?qa9lo!Zn+l
zi{Hap;|oiUqo~5k1js04pr7fywQ-8!B)N;SSDSpAT%L+RF2cS@3K^qO`XMbpsDDv+
zak&%>L{^pVtUiGpcElRa0Zp&a%9oH%7Bh{#en`j##oi<+KOM3JMkbwXP#2yJi<}kG
z4q}Pc6hCE)l$<!@=ARHeX`L%{hi1TYN8N(pT_m@E$n72DIk+<|N26*E_cg0@Vm;C^
zXRz<yc3N{{x%dW}t#6l4?RZLOMvKZVj^9y&Ra|}lRca??qrNAU|AApq&Zx#T5RfGw
znwD{a)$_OK_$?{$`Jh7!cL^$l%UYudW}B)?T=0sJ?;O(#wBgzu`u&w*{QwfO*ZFvE
z*oMvT%Td#vP4mPr@QL7?uIH17o14q~g+mjhais|mhG=QH2>9SI)x<8Ff004$LU_dh
zt__1Tu)w=jUuj0w&2ohysptq2BsTJ^N{<92U14}Bn(d`5Yp?Gv`yiuaH#M&hYTGj6
zzRq4d!CXHOpa69~w4#dwJchEBN1sT6@Z2C#pB{Rm(`=V1W0niv)@((p)isANr{okJ
zQ!}#QyA_`07Q;sM_A(U*=Y#7&niPK4S#_Tfm6ks#HK#b4xIl=+BtmLP$`y8rk@wZE
zTTHH7;@vr5N!D!rdyB{*Yb(ab+xgS?ZJj{B4~mjY(+ah9x3%h`*Y3qv^-OQvn>}&r
zF>~*t&Sfow{cC9vM!ifI2scJA#|SXR=A+Hs2V9UklsJlr`{m~N=eyq_VEen@RM$T#
z{rJMbZ_kXeK=OpJw`VQS5T+X*Z{S^n8^CTdgtPhKo3Ub`Y!osuis`XXBhZq0l^`PS
zDK5y18$Kb_nm-CX#V19Y;#4m7Kn$~(9&e;40G5y4&k%<jH9c0HiVvpfpn+&FiF`w2
z_!HC2@CrSY^BHh$0~f5=YLdsu_gFZe0?hOL&2RvDIcmP8!apRskBh8Uf$Rr2sLX#E
zPeFwBx_u5urhEE`_aZG?^S_3JjlXrQk|Oj;yBBnsROf(4Do8_Ucgk$O1AXPOz!ij-
zNIV12Ck}H`rdQ@b1;D;az;Hku>kphJ?N(!cYGHr|^*(7U-w}sP*|#PI^rrxSQfJ7>
z-g;k4^qx||YH_;LGd5O{OCz(lS2Pc&Yl&CJF*oZcxKtk@z8$P^XWgk5{f=E4%e5JK
zgc>RGRbL+Bs5hvb)ok|sE#n=$CGm_O@0OED94EYEJq2L{^_J*uu%QQ>=)x3bec(AK
zGx?x{iX^jlRMA%qr%mOS&|v=C6u=&C#?_Hm;#ra^%ZJTGVq)<_A+A-<=Pna}LCB3@
z2;ZtY1N1ibGk6yrhsK%g!js!owAlkjLmzD~x%CyTERKk(jO*Kab3?b@Vp)-HW?-KR
zZcBL4&xAq^6?+U(>F<RvO1vEUWC_s#1s|Umxf^3Ff48UJhCI9elS7x9Qt|xXAL6a+
z;1@$lB6MLj5RrdOYj_yg!Aiwfj?2{at9bQ{?lL8UVPXIJ1w=jC>ZD$b#SgH&lT2et
z`KQ8Z66Ib}O!I;;s=jYfKz6{k;4{FzAlX@|-j;We>qLOYcBIa+m%0omKcViQ$^5^F
zJuX67Plx9Op3|Z{SdjNZk*qn~ne7RE3N(sY#I(gTx?C%0%sOt}LpCI*8bWsnmEa!}
z;V*gOvw6c5!wm5FfWAIF*fPUY0rF(^%=F)U>S)4zr#kB5;y*G|2FQQK`^8g07^AWJ
zCrDf+M(TC4J?`zE0NyipmO%~@3bE(S!i~RbzDL@{_p>)1W<-0?uz-V_No3Z`+irw&
z#>#i(6W=ED;)8l>_y0!J{kx|Iy~T^Nv!r{a=qtj~$C@S$O7ThQq(TtIowY^go=Ox&
zWH>@4c+K|m6J4M{UU)dA`_?1GF5(F8@{>~5n4o69SH0;B%hWfM(%l)_{?+_<#8IG8
z8l^>b1nQKRY^@?eetCqC6O_W#H=oktXN1RMua~4(93`-jQno0<vLqtO2}<Px4QM=5
zZ#<J~JX>^x`nW&|8qfaYG@S)BdFHlsX%m1o2taZKU^4=c4gpw!0Hj0!%B2MA5QsIM
zIiMglvLzAx^S#6_(747kR#V?5%DlN=l-i-DGur<dohDya>1iIx{kIq8PrB)y9&ca>
zrA19dwrMLL(>%&@V3{g`$3m~MGnCiVH;Quo*MCMoAhh!-yUn!0rzTGBXk4l=%X2Wl
zS(JP}_tVvg9bj4CDW_5v8wtS#r~Tb$>Upi4loM`weLIuHcC`QU0z~kn@+*;%4QV``
z#CEtJwvD8JND*e}N`h+_F{#eZ1PmjoA5va0bcMl@vA{~$%$o}&>qE*FeOCnhwwOtE
zaz^$7MDnEKoamj5oI)wxbtJ!sL;%@%l{GKGm`^HtiAJeN?<-br3Fl-0ivLnM2^@;}
zFk`8D!igk#*wcR;LB6}Plsn-B0Ur2EZqks~`{3whZ3JrifttLpEEQ1a2p%jIQ37OF
z`~^2@VEm^HJ(Qg375F;}LNE=YY08JAUs-CQka!O@C<wBpqX)!};sTVBN2pql%E+W?
zkN;$Xt}Nf86tztMZ!(?+C!;h>6@o}nSnVQk7K$30{@===w;)0k6B&&#bq3s@Duez*
z5*LB+P)txFqI4d30Va6z`ftK~x<*ZqL<BYpslV@0asv*$D9PcU0KoF#7uoF`U`W$x
z#8%LUj(iEn;O*J#aq{$S>B~QF8Q#IvlcA*nu%5QRA|j7LOKQbO)nS>bX;F^OYsE;I
z$>qc+xnFLms@uZ`-L1G6^si5{a)REx0A9RGZbLPdEaT~bJ*eG8lsh1WO0I)$=KA{j
zZqFbxEA2N`eh#$EsM6|;h1`U8=}Z-!9aH;x@GFPKTzJitLW+sp;!4sQ&Usq?GQr7F
zRv_-4Ax!>9Z(8n98Aswy_qMV??odbGL1)dCy{pz9CROd@#=x=cg{e2OQQe~}Z%0(t
zKbh4@7y}D(?P#g1JYy|@8wlzD#2PTOwE}%a(<Axi&4;}P+#tdoGsndJCxS(hd026h
z)hn6n`-ucYOT)`d*00vZ`AFZVc1p=|O8?eN(@y*ANkIEI1>;#{@=?*-wX}Tm+hMS6
zN*W*=ezHA>BTEP|Ti&xQ6V#HU(KJCbI1EpDvRa()KR0tuteA5E@IDa!r7=&-AMH|L
zh<aAwf-FzwlQzP8lSJOHjJ_%4W6kWJ-*&`)r{K@2w`&@5x9a5yhgUnmBwv$tnJkK2
zqes4@>9|QU^w!jU({=-~#yZ_(;l{a})=l?M4%Vg*?8J;a9g^=lvbrh4R5oDMT?Y6Q
zE(C%0<Q;dy#7nxxW^`7g8gK3P+i(geh`(yOb)Qm!EYCBvjo|k^Bm9f9Z|+?Ows6o*
z?}c*n*;w37bYKJMxnnx8?>aCnu3NNk9Rvz~<2pL9VBkMYFWG#wwFp$&de1%(zqq~Z
z#<rHOUobd2ZX&MbO>4(HfhQp!=(6Wa#Z{vG2I@`47gE<hKTz-AV91d&S$1y!aam`3
zB*}P=J9PSUQv25=Ae1*V&{0aWl$c~=<jF*1imj>C;WI$AaMQXnM|B4nob!Yro_-w^
zsc`!`xE))f2cea@zkT~EGywHCApPT1?8b1$tI!r{UD8#=d8`!#V)U$I-I(;)SBlw|
zcc+cOxWyrwGb4`(l(uuU46dqosjo5c157J>o%}vtM(U9J5GahUA+pmrnR@CC$u-#s
z^Chvl7C+r8N*L#W@C7y3B2+o>LTHXY(Z6D}7e=TbmUn0gAW|`964ExJbDq80ex7Oh
zPH0=Nf{bJQsUrMAl|6^^rG_5$U82dQHKcBIBqpPr!(un9O8S5-EG)Yt)dof?{0Z`=
zVG9;xn`E0o`erl&7{-npoBTdtsQ(RX!^OE81vqgtpKbHalJTo(6<$#nS&dNgwg7=1
zp<GVKA1{pz`*OlVVfw*ZInx9@M_J3>Hoq?aeT?BHe!d>AQkNXJ+`#O!UBYG&!1Kk}
z`jZJP6tb_`d|}R#`aL!<`D%!BHO>FUdGPpcva;-Zr7FYz4(ZftQdP~a=-JhKe?RN7
z2IgBvyiYsA>F%>H7k1bAE@|cr{7VfFYM$CXNt@O9Yae!MWG=8I4p_za+fFMglUCyp
z#KWs$jBR4ht?5oWc|X(JEWt%aHKvE%El0dHcTH&0LzuM$?*bo<-*N^`v;L%C;_FJ2
zRN{=u18&}q&V5_Ce}#Uo$w)$Vv?fw&t5Q`tt}x28_ZeqfW>q9p=2;qEYZWJzTvlI_
zhjD-BC41<nicM(Zi4Wtr8AqyH`9}}k5mZp1YCg59<m0K{2=IwOqxt^Kwv~+Zlf>B~
zNoK$)zj(mnJi{&7{n0qXnt?(;^P<S#reJfk!<n@(S9SMMIg8z*a+t3VQ<>EBO7+|A
z^Xd_ZG?0nUA#KVt#YfO_cwr{w#a>fyL+VeSfGiTr@2!82GXO0PHIfe=N+rxYuhTDZ
zeq3B8U+m}KD{Zs$T%Z~RY2aYeZ^e1YdAt4U11#OIcE{|+YaAHteM!rm>K3OmQDRE#
z*{YAY)g|C0(sl#he6rQK*#SJ6od=fohm)n<p`{~e4IlJW@?GlHhU|@3{gPb!26&*@
z8sf&<(Hh%c<Dw6BEN<E~k3WBFdk&1|ODa&TV7K0W<TfICK%Rdvz`_J9mgU$`19tcG
z0_s+85gO^QqC#aTi0R_B_D@_`zWVE=pZ%G7CTU*qCs)psfwGmapPfs>!C>{e4idK=
zXLvFCf%zVu@8sgE&;F)B8Hhi2gC8BVYRP?n0?VI?P1H@YjG|$Z&^k)le~9?QWVISH
z(K3wRjfxxhk<f<xp}Li(e49K>%ee2Wov(5&^y~}27wOl<?X)DTz_aP?x%q%crk-!p
z6GLC3l7xgjBVKd=R8x>RKUjh!*q)@YBG0FyJNi;3q=NFC>|MS8G7$*fY~?R!zL8(b
zJCMHlP!<|h9v{SMf6{(6^EB;{1m#=a0%2L=An<VNH(OQapvv!c0%1~bK9qr?)IcP2
zpwn`LCRX#u27YO^==qQacnxKd<K0DfOh&?a9OF;{=FmK@Io3Bj=go*AOL;Vpho6ZO
z#=B%X=q9DI@;%y20t$kQak6g;hJcm7hSN?rUd+k7#Tfy7f}g9U-^21v{3HPl9-Bf9
zuhwH8P4#p053()*c0^v(oJoJ;#StbES)?DBePC=ayst7e*b-eDTu4~KNhwiwC1Dn_
z?7#5l+rJ$?0XOOr5v=;c6|by3SciPR#ft2B%|e>M9#T!rrKV?%7&tQ;`+0_8nS=$*
zPu?nU8*ZRxZ-$1nVN9}rnZW(7ViM#xvpw2oobkUj8AL_w`a3Lzt}!U4$<eDX$a8Yq
ztT9lp;ujV6C-LYS{UuhFw9k-k1uiU$$@JGs76|5Kh7!q4Q@3fpBKhp)9NX1lZk;|{
z)V(D$FArq=gp-rUv~h4xqF{3)gKO@eWd4WQnzUmOX@K*ZV-@|+m7)Xu!BYhPjc|S<
z%@&n(=r64wreApcr7R=Y2TBEp6znI-sI6u(m!@`G`yCRUFcfc4Q6DQ@q}1!jphWWx
zaa#t^+X*JaA;8_{NLo=?Rz(;`gEB{p4Ctyj(yGoR>g@&97Y=iMP5s=dzM`xCl=tQh
zl1a=2K}SiR?>-*A7NtsKxXV49P%fdwH~d@~ak#wb*O{tcl*Ri<${4dK5iG{gD$Y_^
zV$svaTrBUJgZ1^h(okwp@zmpurnHBFhQ)7vfBj}WR38vLH1zk87@UdH@2dErSj=L7
zXK+MJ!VS~~a8^2QPJad-A4Z#`w!zPI`KMhj<&a^HZyLDG$zG>zi@r_$lAv67Bq-7v
z+rch5U7D4T=rRgS+ng-CGYEg7!Z}#wOwV$WEo5oEjyRWM=DGctIC!9`;+uD{Pp)un
z&7=SMGmhPatX#}1SYV8v?#YEdW9tXQ)8BGG6J-^Dtov=Bw!LcuZP7B+IbSh;H?0kv
zB^D8+I3^Ed6KYicTvIqJWE}nPDM%Kcb$mpQ-iiSzwd+#(xR#T4-A{U)Cw=t~{gD}X
z2mY!2ISGUnu-$_!V{Fq=@V`jp9}Qhqez@;ULClZEmaa|`%*@S$`ixVtvE>|+n0$4;
z<^agVG{D-7ODokIvC-o9jDKT`x{pAa83jZ9p%ts@fD*n<48DX>4F8JYV>~(z+s7|e
zfcZOVn=4LESft*m+{E5kg|;wbk9pAuXFCV+Q_)erj@VLfdYiGqVYjz8)4zULn2gy-
zK!K@`wCIZSM!yV+zW`Ocf>*B$&VwNX(;e^Ma|=Cj7)1Yp`d9d8&^acveQp#UFXbll
z*=wtBt^9j?H<M96f^dS;1gy@`<5w2*4gxK9{qin<uqA3pCre#nX72foUP=Q!^H>F^
zNGIod{20fBRCXj3{1P{YV%j>o-|eJ-<maSjUy9>e&EN&?@~vVqd!i!--x*26L%K99
zVci`buOzj7T>I|5TY6s@DI2WbQw_5f{&aGWGx$+>EF7_HVqh(^BGT6Vtz={5Iz1ec
zcBFeDgHb(#>fgmXGdpsv>BmkI#I8C*YQcTwp7>gUSeachx5FSeMmZZN=&RTf7AfZ?
z-mFkAKTRlpNl~NXr{Uq(#!A7v<7`Ju$?8DvmR-RJR%N2qYR~OS^4Ok++l8QiADa5b
z3929ld+L=+ogISwI5m73vhCz(fXIi(AtYHhk#w+7#ST^m3v-wbdDo<fmbZ3(CKIp!
zH|(vmIR+O(??pVH;=krCay~L>YhFzC<wankF(A3SRx5FtZ@ml)2c+lpyrgd}@=;|;
z?m=3~0$1jnl=siWX&hpDLx$xCmn;kR#Df7xqzVif2(k5-f32Y>*ZpLRSe2nX4#Tm~
ztp&>;mQUhbU>}Kzn{d=Eg6j)!UcRgC!)Qvvou9;;2mbLQ0f&T9*As8<-<ro7g>eZH
z{?s6dd^tCYkvUDkP&X~qU1MMs?DCc(w36S;yedh$rY1(1!5x_{%0Nr$IM-t;RuBGY
zhQ+e^C6_Xz;1Er0<a3B+tW(_yyB?jdcC473rF8S+dcHB-p-}aukmkQf^1t6KGFt(5
zgDt<}TdW2_nH*aN%Irju!*J2xzkWy<ay8JGUZ<Cf9%vC=mRz20tBQfjzsv?bu;=JR
zZA*TR=KMB2BJ*SAK}MpGXeL}ToQV@IT*#<Pewd~1*?K#kdkE(dVp&oqGV9h)d{|LY
z{<&-ERISGjc%8~5U2DdE`Z&-ycQyn7JURJd3~kn&I9~RhY6%f?&qbjQ&oMIlhpB!<
zrVnWLsbz2CEX*FuXPnDkNjFqR4Tv-Kx;r!<t&~*fh=-?%-mb^ZUrCo}PSvE@kaFAu
z_Z8_EbB)^jmT`mPDUzpRo#3wPDk2-g9Nvz7Q?9ID;mSOm?+)KDrW{0sG6syx3d%sB
zEHcWGf=?YmA1(Bm$&=#8?^InB!o+UZ_6Qta^qJ}lB+!~JeqVlPNw@xndOg6}_jKlN
zNx~a=Bil11%WQue#<VljO)O!Q_ZPsCz!a{s{;h1H1|0F_sUYVF$FI5}i|%eb#bc}u
zn7sQS54V!N6cU_r_-ZII9VME+C-iMk=&pyreXq;US2xl(<TKFyKwCRXxpR@iLpGH4
zo>yw!?8=RHr1Lqx2iIK7iSK2Xho3{%p$W4?V}SI;$2Rhue5HwftZE_Ci4El+HcWnc
z`6I&3m!Mzv7wT>0JWZK!b`ixf&}QhLSaoI8HHm!sw2qN26K~_a*zLeWW0@zRVe(Ze
zT=dia5OIel(XN<92Gh%o+5niXs(D=VkLP6^HDare+bye9QDg5~wRan`15TnB#QH_x
z7ekCE>-%Z&o1xO#`2MjN#kTy3V!qBF?X~_&-cW~^YjFX*>P0bqZ6_2wJ5bCcgHdqL
zw<i`m2~`B4(I_iUwJ~?Yk9wmLr8QT+mL>&4E0~;FX(#N511IKeceIKWSc&CiEt$8a
z-^HZAtXR%8<1PTUlag`~JcQP*$QBlIScm<K82gZvnoF2n@{WyoykV+7U_b7d5kdUM
zGxT;UC=Lh4oSHn8+>Q#X2QZD$wIa(V*fc(GDu3RfA2DbN#P}m?<aYX<01U<Wx4<j7
zWs`o6YY$&oEXB9kw1R2w{XUoW-I)DBUy8hBB~*qdUo4qrr(Y~#dhzUh0G2>ixS{47
zfrO{ky4;X66krtZH1V6u*cqL{Xcp5s(Y6pnx*CBO#&^c-QK$+m$Wq^wE8dQGK1S6d
zfEjJ%T0a_SlV#aI2mfNpyZR}VWZ{KIaWquz_9N|~YN)tGh?GXg$-2PbdqJK_Wt-Ln
z*1s)oXYvW=_)KhAMm;#Q162GG#B3P+M!h)BpL8OC7+&)2Pw{r70&OSnEo^iKN?!3k
zX|F|%t$f@&caXF!({m|r?s`7vk#TC3ahg1R#CWPOrjTrq4OV+{fgJ@d?d2ub9;ab#
z{s{D0<5Bkh0^3$ym1S1^HA()+p*2q7^v)oQx#XpxSHrKMD3IY^%(&qwWT*TuMg3Y;
z{SzGgW@YT%KrYi}^^~Es*`ZNWY?PFmv24ow0}A8wvxc0N3g0^tii=8~k4R8)DqW<<
z2lorB0aSn16`1r7r>kHCZ_Xnm5rcRk+7(>v6*c<9;f3n}`lTq%oXx<7DAu7`AM>|P
z>9+GA#Lw4MemSwLkR4mdnoPPIEO$!Uig!+$B_-q1huqYceqW~)co8MQ&54gzXh9`c
zFJ0=P+XjhktKt6EF)Bnhj9X34NgVU;@@itu{D37xJ8S~KS2a@Sg*S2FePdk!y4R5G
zUxpE;9BIERU__8~;>u_9TP?Rh9ILZ28W=dhFtRBbI`G`e?Y(G^pJX%oY-gFeQ{|&t
z`9k(^aJ87F8DFYv*Ia!gi7H<?TSmuj0}fx&pSR6g-|5uNKWnSHC!n60xGlTlE%ug^
z%GreT%=?{w$}!_7N79>pGI^_zK$=g~2dAXN(}w4F^2aSth}2kZYKnYulLSu30!a72
zfg66*OBh4tvHxyNS|iSaU$o?ootYvKQCB_U&;cZ@w1SK%>sYT)NPm1!!Z#RHcJ`8`
z#u%_<Sa*<&(F!8ASx8ycxBRwz6;j>8)UkM0Xn2{IR+Ezx$c2gYIYG`dY)E^@y(E;j
zydyJu;?q#zcm@=9Vy$U<4tS1v`!CH|B#5%J&F91Yv_DM_<~#acs-T$Vug{zV`-5XF
zXfndqpDbI?Z>Jfa(SEP<35vN^Pa2Stj2{BDVhbYssKocr`D-vOZ>hNZNGq4iYj2XE
zr0r&h1S*P2C)wiOf&o1SH~&J64Eu?}NvO6Ay5~S<y(uj>DUN!5zZRSBj=?`2r}}r`
zl(t}i8V_uhxGf1GQT2ML9TxO3x%4B>cm-699hBAW5qvu0@HwhBt70X6t;|F3(~ny5
z_M|W({lz|#6L8!wp|(%DPK(<+aqXPoRT)2cU3aUS(0-7moYab9GD(KKtG506i~AJN
z11U^Q&u5zml+y9aXG5ZKcIQRrqPM1uQXdBWL5R<<Zu#H1T9-$C<{rrBMT43S{!9|f
z_d_<84M+()OWP&%VlTdWYMk{_|DD1;lFVdziOip9AA8>}-(m@ta$$B;zU=g6#2&;u
z`plPo>$u!<sx-19I=iRV+`#%KU+pe#YK$yih9ZaUR?@JSz(9dLRo(1Tjk%v$IIt(T
zr!C;pVYnh*xRF>|*=jUIS6M*$!*Dog(-+p2-xn>j7q+JYW5{nP=NU3@c_mx!Y&QVU
znI<%e=^(OU;IJ4yQRv5@j@heiK(9Y#lkdj3eXQ`ae3Cvf{JynoL$%21x#32AxiUpc
zNsr!tyC?gIztcg+8(RTm3Jq-3#(LR?|6!1md5eKO|BWC4QF58f#4G<}Wm#ocX|?7k
zriS)5tx)EDk0<(tM_(9ps`ya>X1kwi7U)?2IaYK(KZ00zT&^^_G%h#iuHqk$fBU76
zPLK%>r>xq*Hb(wp`)exYXzbZBkC<dJz2T30>ik4nUO+v^4~d<G3%QD%FzLI6IPT>6
zD&=)=pS@F9Fo>V8SZ;Yt>OypX(D{N1=a)r-n6C7vq8IvFaG2ZDWv#4^$YDx8FcFso
zCXj9*y?QS-CTqm&(l!Mmav;;3d}6!WZlz(muVl8U2>&_$w6@pEVEEC@|C#Y!5T};?
z;b;99FUQ9caorDXOaqL(b_MsKR4b%tv;b{KY2#zRUcq$m<f!Si1f`jxuD+7~i-me~
z{J{veF`a9US8<`n3TO5B7>l}1I#JfSc(o@%t0^D3z46z$^#JsfB87tk_G~1NiAU>}
zyAOeGfcL=M>5@Lyw!a3Nbzv8X5;i%4=oy`QQ;01at>yiArWo*b8!Tybc<*vr>GoV4
z$XxY4US~5uFx%}p^OXq<0r{OL4cy3VJXWp5)A3xf`jeI2;me1&($6DZqvx{H?@$@V
zDg!c*p$}NLe^=UmyXPdjb7q$lDRBz_zA*9IVxhh5b@9Ao%|C~p7s)fW#sKcuvegC$
z2qWtIX+(cEu-qzFe7bB_qjNJyw<KF~mVE<7SIu@qDfktO^Y;qJLOh&LRFIQR4Bad%
z=M#{CC1XPSXfyhkJXgn;N_@iPIX52u26w}bkp~mi@q%b;)sB$~+i*4gfAtcv$T@{L
z_Ui<GxRD6I&jyj<3D1->l073UtyFfc?oWAE%81LmhuXeW6`d}Z3ycW7+)`<&yvJ*g
z#%|;X!?q5Ko5L(+rNuze;*`y5h@|Sd4;X44dERJEy}sRg*)QZ%5JSqHW0cw+4-W-F
z_+PkcYR}>~{ZnK1a=krMeMS?X)vQc&{78sUtvxliN)q^I2Fupow}RA!1%WXhffz@S
z>x!l_R)}QggcIjHVs|8Mxu>3>Da`IJCEM)uwBF#Oz*X83Rd{>=hIYwjOtfJx|8e&Z
zib&=na57I?vvN)8j=ncdCIBe!s?j^%Sks5*lltQ)J+Z6+AAjYcbp6q+GVv*T3hAkk
zFC6F0YeO8K(LuVbjlG?z)@-AY>3<mdo4AzpDOpqMv}9YepgNt5FQ313;AtN_UjRMV
zMrdN>x|Ad$UCMM!_ab;ChN@vz#>kqpqINN^R~8<-5MzXgbW#x*Eh;iFq0O6)3z$!i
z)iR=YG>OEqD^S{Bi1j&pV=PUNxo&PZzdcFi_~lsK>xuN(ALHI8-ZtsaUDv-~gS*4k
zephQzVg7-sIpjs&Lv7yVoS*rbr2S@%2;HT2UdW)iE&yVSz9z;d+G^TcVjz5Z9?6)5
z0|yd(aj(g8wMHx+_K!bNZ?4RfSmLg)R%05(v3|9$$XXu$`woj<BLox7$=E*TN5mzB
zles@m<@E3u^!-}`5ynf=fhsk6(RwPCw3uSC^kmuHT7Dm>{LeTEGZm^!iWDm^^W97;
zhc63#IAtS;dzEdVOg?LFj>^`{nm`k4GENIKHa%l$#Otf8?LyAPZ+ArZM6lb(o1H8<
z=M5g$SNNZAARkYTg!T$wO#5weIaT5g!fmos0y<mm15rKWm4HH8+ag=fjG;1b<tGu@
z$M<5I8SAo+%wKcL2l+e5{H;(gMCMea*nRl;(cVv|xHA@=Xbv(~B9T?dT(XS<O|V0g
z-;FfEZeZ_B68cJshQJT!qP&x2_GE1>Y5s**4D6Q23WRNqWPosv{og>)tfXkzhPCxT
z)+(}II2CKkl<)L?=qJ*jc2brWfA8VzIByRszJnIB2y1Dl^|U@7o4D_(2Ju!D+&>Py
zAD;4NEc?O{^;VJS!%;|%lEP_BhyH?w2nU!x(B@+4{j|O#S~B2$gQZae`5{KF<Jwt+
z0u7u$5Zbnvhskn#bc*S}ZC_VCISka!pCf3c<#hdvt9Ks&Zv)Gyvc7%t+^qVdq6J8?
zOj3N1O`aZ<i$a{Hea>+JS8bB6if09WK4{M1Z0QOnbIt<}q;eggX(Vj8s15jekP!T|
z{j85Y_S3R-7lW<cpRE71CNy3PtGv}JvH_#tn>9=sNW@P=g&G%osCX7a#;^ZGZ_wIE
zTLAd(i+p5xe0h%(y5_CEHSML0M;m8v*C)m7l*~I*J82kmF&{BesPYWZVrrbQJRDz0
z304_pPJ-r}13ht!S4DtihHs{cs(UAXT<s-EYFGVS6LXPsf^0f=nN=BkF;2Qtl)aoP
z;`Ki}>hzYK6Cv<zzy8I@<h#w5&$~@~+!oBUW4lZEM><;)tp4kFH@~ODuX(zQcUXRZ
zpEE;vUy>8|nz!c4cEH!|-1O{%4#?-yDU$8F?j$2T7DlEVb`DNQw5(o=JiTC*5IA3*
z2>#NOp*`C7LkTUi0_eOo=#%V6CS8N9eHXv)ITJHdPHa}r)Adbm7P0vWBZwQaBVh4q
zgECzuM@|x0_I3~4c95j@)&1@nH|NGJ!IL24-J%f9`>!Pqg56w;4d91M^;ZdbBjZI|
zGHC%+xu~e3;o^4UTLFsAIHEqQDl;*^`hW|UwsU0E&ICe$4MqekdyL6&aO*yb7*9uq
zM6=*mGUktY7uOny3Wp10I!^7p&RQ2pxq3&9Y~(`$LmXPoRvb@28A}}H)t`(dq4f6#
zD?&{^y84UVQ<d8Rk-op9_*o8v$y5S-K3%qa0)4WC<udypW6olREHQ2ED2dg{((_{#
z7ls#P19CgivmTxLuJ_1W$lZ_D{`tB2c<jAnm9pPR^NjQrpDb|LdVcPhQ<xVYM!$IN
z-d4Cc&FCq#e=i!!xvt%3e3II?5Jq*SQWSHm`;PUSmW;LCg147DaaQ6`<#?6Y&CJlB
z1+Nb+gu{C+qKagSouu{#{8L$f35;QHerIRT*<Y*Obx3OF1zQidn6c_s>~M8|S<n23
zrxoT)wCd>Q=<X<(MQ3_|q<jD`QAa*(bwCvh&)n{=j{cWm$5HkbK7W+5nSf8@UJXmy
zEGC}m*5N@_aZ1Ne*qRIfDIci(S<}#~eN^CQ3$`sOlrW-i#FeGE)Pyb=QHL&g{<6aa
z`yEU6rqV9VgiQz||6GNl@v}w{=ine2+)0>>`OEe0K39B*AuCaHM7A$47pB&bdlD++
zxvGaMkUjjJ56SNU1;v?GUW1!3nFWJ{IsDlE*^2}Le4U6kxq-9wk8Ho9vYP)i{XO`b
z@SR^SIp=+?CEaymx`_9)xb5F`i(4Bv%KueV5K3N<IJSSHi~pt|+F6wj!5JtIBeYdV
z4o(8UJSCLLU;#{v*x7W#xcLTPF1U1xw%0B@tT(?Wt{1lgPG5-APQ<gZ?wWpi+U-#y
z(+NDc(I>D>fd2Rn%X0f!Rg_UmM@t(z=>93g)vb1|Qp@(y*s{Sv*ge)}!2o^G*c|g@
zF*_;g+%6URF-JK`9qJK=)70QYo=irq3_bn-u7BSVbZ_<-wv!_^%id5fw~_A#zg!2X
za(Ki+FV(ndpRq7YE7lw8DE9A(+ZL;1Jm1a!>M~GUVNdhdiJU`An#Gb#_?+7%?=K0v
zKR$nJ!n=JMt%j_FIb}+(<j4DnW5l~w5JxYB6V62L-oVT(r<UAE-Sxp=DgiMWGKn~n
zN+A5;UvmP3JV*RjuON4juG#Ab;z+&L^+D<}VDXw7Q;f*<WYzVm>L}`J<7f@PxEZ(>
zc_bMi#^ax@l6A)sH$p{3eyth4dF&?E5VOz7Ha{KjDM$+Sd0UKE^m^t6$=d&V`G}kY
ztyQB?c}#DH_KED!UjXZL_bcAu_NW9N(C)Y3nSY-wsaFbo{_EN7kRuDrMGVj9C&|ah
zmMfQ|(HlzJM<d&@;F%^jeFsRpX71qi+S7*?`G$^#YY$=UX0wjJCM{kr$acaHg<%hp
z0j-vQRtU{ofNKkc=7Y_$wyZZj&Ffx^HAk@bE!{YhK4Z;QsCJd{A#RvKxye3jgA;3A
z@2GcAHu5NKr$%?e##ZJ#$;b3wMEMBQO`($Vb%78m8lpM#>0tWWE2s?>U7|d{-bb!9
z)qzMyhWOsqXuCo5!ew;70S$rpqk(?<gi_^{%9d<r`d@Qi9NQv33irVY@h;e4Tc>E8
z=C>82M6Y&f$8p#&%);7z1aimAdUc_ATWj;t3-e1G_|F6iZ~}1Jt-B@4qDkY1ilxx?
zk9+PSeV6~>ISsRuzEogZ{T^(`=!uh|4n+Kje$lO^MsD;WCRjCE`KPL`VZ6%FSutza
z!M;`}=IVx+#uiU5uGm}ga)+<<+sfaAZ)lcOo=l6JhUJ9O0J>oBRv>Tso6yFqUn2Hy
zC)I4<TG;-*=)dqRqLU-+Z6QodT&<36pkT!`6eJQY<E;Vm8g%H4z=MyUZ)|)iOr9Xz
zjz28!`Dyw4leNpmF9})(_J?>jaY>cmPi~6GFFzOhD<`$wW~ZAF3dyCr<y&{XDOXM!
zOd%BzmhI8${8reYX(6r^P2j0uYkU5qb<*td-B*s_4cIfF&vU>TuT^Jf;MeET$JILY
zfBrQH7|?KDtYESW1XN(XKf}p;`lfcO=6j7dnV`dBLxOZ5*>hl?iW2*S8D}4^%sSVH
z^@4`(3$MJ<_dhE~+ICA}wN?SRiwYv1kF*v{x$Y{hE%;)*t6jZaM@jR9602@82EsY3
znZfVL3PsYYyM`9WTM3%k&vsf-)qNQW&z-vpD28IGl0uCgNPiBc#(Ztm`tYG%dLq6G
zgKX1L_!)sr<$IfgU#zuRe~Oam_iDFYM8^iSKiqOjCQPX+r$S$oy4XH7Yo6>nI%>NL
zOR4>}{_zJKsjL$|cUVQKT8+CPp?Qt*;~#F|8ZY1zt=w=rhmDtthoG+ZtCwol{nlsX
zWc2D|eFc1;PH;XQNh{xh_9mYir$P`<<4Vl#3ijb5q3b@3(&~Z+D`aSx@1jV_<IS0K
z9I?~Z9C6`{w-93lg_I3RaXfD%=yRudv01@Oj7V&zD1qNwOWy7zSKs_UPfvrgLyWOl
zTyU{-&}lEi)|*K#)cXG=Ydjy)D?a;6+!aAQPxJbic1;f<szbt37}*^!d;eJ|io1rg
z@H<JE9YAXYA~>w3$U?_isI4m%yKTjbw(Mh>vzOFa&{qEai8$KxzM>ZpeHQh$HL%|8
zQ}R<jC6t{79;E&cnZs&^=!TIZWR&mJF(!#+W|^p<4fIej1@wY9Dp!38q!&KrU6L<0
z`X9|CS_=xn&G3*2Y>IjEDu#I)Np7sp_aa>AJ>8vcLHRg2+4P>v84Uoo-rN(-7|*5k
zhjfN_ZvbxT>4zju(Y86w8VF<n&vOB~anfGf$_8qp#A%Fzgnl&7@U!jfQvkakea__(
zM5*-)Q08TC?&VP&%c=yp8;Y9&Mqc3}Q&`O=Yx4a{kh!=%n=8j}h<qk%f^qYYR6svb
zM&%FRH|d0rwP<g{O`+%xVVH?ImNh<qQZA1;h{gMe#6JTE-nKS$ktg0T9@-;IaD65r
zNcP?x_>+>%Bj7j%VM%|mXg+}e-ButQ5+9HiU47!|`K3(ztKEeTD`=w+nC3?hQoIp3
zKXo1+S_*PHOCkG9_15M|bw3@({5?wq4tc(Ynvo=(IWxxSytq&&`e|~Q|7|<CvM!ui
zM(wo&<zgz(`&Y@Vt@fy-bk74#@vsZ$9F3x`D3$3DR`INi-S3R&q<D=+@htbt5|q11
zUq0633UUYhan4~Zg6<g$#xH6;-1CycO7bQC8^AB1C_0uEE|>Jf_<g`#fLH^ln>Em#
z|B*_(S4zn@9oJ_zO_G%FU}TFWr~&2dEzy5sHB|qJp*EyNazo|e-X4cu6#zl)78o<J
zBp(EwSqFb<ye;EB2I+)L9YD*KTGN2CJ1K~x#H&;w5K@c)c+FeRPFV)bgIzPa5G`hl
z)-N8|XT#*Ku=NX{C1bD!Rm`~J(#7n%15+l<x(1bqyT`C&l#74n&iKC0Ri(=;`K_?Q
zfkqo=AjUg_aW6kqQI0%{cZQF*ADS34cjNuAVu>#;V9M9!eEG*`snXC_@kRG}+kTB!
zUS+l&wUq4Fi;u*u0MsMVeY1~KyPIxR3_dZE#)P%#0~tx13JIWT9r|OA=PvPe@@Zos
z<k8DB;7{&tOES~y@17vBRuAmk;te3~>fWCxy#IiKfbD$3vz{tUAF@?O&wO+-qUw9n
zVPcrq1`t)QWkVJR*08~dxrp>hX2M2pf`Z{22qF{^{D`)Va~p<ycHgu1sEfi|hCI@@
zBGF-QJo(`c!^GqF>i;c%Jj4uWB|u_bgarUvPqA;|Ns2(vp+7-Ht4NGn>eebG@dc(J
zp$tmP$>TGWLb@b5;8&(F_=YT86{QpzM1K+X_!ARlJd~3epwT@fOL*VYwJn5Vtj?i^
zi75r3XEuNU#XPt~(TRJg^=^YdFrvY+T8M7V!MOK51DBznJee?V*ES|)|62(CR>D&|
zIN<r}9{BnF6ej&0{x*)!UyvNobLx=?b;$Iep=tlQe^Vn5^aTIslXf7(MMj@E2=gMW
zg9+un#~lX>DfS=oSt}FPEo|>4p!G{PCQ3ba3p{Wm`%e`nYMFpPT^AiFv*`YycK#H}
zxIL7Cfh0${gzlLX(sBF9sm+(=Kz)ksBhng#^txbU+)eQqzjr?s3k={1%1EE$?HU^L
zXn{f&SUI(41H6xR4H5502K@-y_fi%!J-)Ltb?Z0Ia$dTLTh21Xe;@=Y^T7mUty+gp
zgTcq_41?jyh_18S%f+wM$ek1>)K{Bs@=@mn%F^9UpBGCR;CZ>~(peP<%@AK<i$}J8
z$RR06Vm{#9Md~idwdF<;PG`9yf9lxL%(RK5-^}pu-&*^a&y$Q)Bted0+@F7DxvVED
zhWvga$!{O0lcimK+Ok_e<HzZIFb#qKeu;pON$n@tEu!>zk${SgA73I)`EL3ClP<+L
zzpI_{KxH)pqfgt4`b>8I0;9onn|o6ij(~8aM?>FT;eU8HW52CaqG%>mVK4u7lod6V
zFagBe?<bR*0hIAYA5ajlxUQi+zi2S}rc~!L@j0mNUk7~5x@%c~uE)c+qX{w<c&KS{
zaw`OW5KX%40`7kBxGW%Mx)4Ir-Z+EvcODvzAD>>=-1=Q1$AG&nv@)aMK+(&5z<Cj<
zA(J?$Tp6CY6nc6^i(DE90jGXHHDp5I5z25*hX%wEOVZt*gAnKrB(rImHv!);N_sFS
z%4>3wj7~&t**#%2GFBPEu+gJ<ugm{H%qU%m48?&HhwK$zGYpb302jO8|9T=pb|tJ|
zp8A~ymNkSyG<;kDzI^S?eg6(nzr>l8N6204;+-NK<Z&oK<i8C;KPUd`MSXf14Iwx;
z_6b-#RU`r6ozux|0wgl7g_y3L0sW^zUAJ(cAK&Gz2Z}s%fpkquArJMwvl9C?fxqpH
z$XJ~4+qGRd*W6^(2Ob!LwZGR(-!J~hydS&j1w}(v7Og8UQIgsDp?6glCsB9z9{U9?
z^X4nW=MdQGA4MD(iU*O0uXLfdI{-&AK0tbRk@ICY-2W8e$G8c>I;ZO#WZDFyooAyE
z@Cz4@{S6<hH552V%(n(C#f(7Ebq@LP_k-v*7ky^{(UzXJQ7KnNAQI%Hm^U>D_TQy&
z#o+_L-pt@9^Lq?NE<)x8qwJH3j~{p#9ikvdw~r8+N5WHHzo|;G@uRy2iN`DDTBwg6
zyc2%0cj;+KQXRPUU}cBXcHwqZ1G55un>*#U1_yR@)Wkp4cclYw)Wb%MX2kECZ!n;T
z9(r5a-3&mTPGY{tZ6e*;W96m5`*&W-eo?%gMt7x*3g3-)&%mEAK9a4G?|phm>*ask
zX+48*Z%!?E^sEI{Ljj~)7qOmu6Arg(P*Gf;D$jt3M-C`Q_@fcDTe+2q5;he6*y;)9
zT0O<T<?Ic5+j_}IvO~}!-|7;v@@iEKPweGt3;wN4?^CTOfMazi;_)pMXxs`Qg}v&{
z^(o}CY$fb9f3<1>?N)8YgLWIXo|3|%A|BsBW%*Y5@ozhNuOHGS2Y-7+&v{&X=AN%a
zbSSpA;EM6A>O$=!q@bR;;2;*b78FJWdyIHA?(MQ{MeYY**7SmOS^?zMfN>ZGw3)!?
z%fCD%B@}GfT5KI4AqdoHb%MgYN-iHp;6C_1vzUm0@JF!pCV*`f2Q_l6CK23PLuI*F
zr|@oHG2Rms+(xq^i0WzrKaRt)6qx*#v(|j@Zl8LeUO|CMt;D#umJyH9thsegeUaV`
z-ukD_1xpLltJ5~k!Xj0U&h8fW3q-DT`@#V>F~@luG^%3XW6OLy*nRot+B6(Q=9Vxg
z!Hk%GX}7egeaUd$(&!9$+O`-UIQl8o@)P0^*CIl@S5bx&(Jk>?=OJk-<3P728azd%
zVflf5u9y)Y)S*sL3UZFvETH`Iz-ejB6?BmxC^On~_gIsvZ~*?&ppo?X72;P=#4PeU
z+s@yKvleXikbU~?V5fW5u7RM`@>V{RaYq>5buE2+^_%<Y3y6P?iDHZY_a&1JTqahi
zT9C~nPCT5pv<Jo;cA&xboqD`%;AMSoq*Cl7dQILBoR>4>R$Q{*DeZ&b8Dd4913QJk
zg6mTi=N!kxD~Nv5D6#~K(<Qm|+;#LN-!;;a$>{KDwOjLSQS2yopciA0(rdkq$YqI&
z_()xbw47zV-HZPmtSXB0LOPN&Io{>fTON#Hu(#sozGaxn**(GYFir8d|3}qVK*bR(
zYvV4#g9mpHkf6cc-5r9vyA#~qf?II6#ogWA-5vJd<i7jf`Olf1?yCB}s_LHEnOW%R
zPQa)UXT|ze6bS%K*%8q6Y`S`(8v)V2oB@6m2?&XD_1n`J8$ph)F2~|;3qKlP#ct-B
zHfg}J!r*$<b#!|!(M1tO<!F2e)OUW}@T@`7-wn*E(~F$yqS))LWY*oYbvNAd1q{f;
zyz3w$h5mZC#r`~#{(i~Cenxr330SBe8>C}Q*uP}`hz2-&UR{hLuUEVb(;;W19UsaZ
zmFO6oWOMkGxWnuY_sQ;}|Csx&fVLi)U2=8j2ipmA-5yk<<(3<6>;VMdF2++F5@Ln=
zvmV8SNbG``Rl{6X0mf%*5dP_e$TFPtmjqZ`24r0*st>MQ_u+*+(iPrh%$7N|<ffrG
z`YngJpY1{r>+snP$8OzDT|<8f?Qgl-1*5NJMcUjp;P6D0y%e_;c<iVg$Ou|GeC`cn
zv6#nSwpIX>YJp%98BwU(`X*M5W@oJA<{se8`%yTKY~~)Zzen4I!)m?U?+oF~=iyHg
z!>wex$={!?R=&WZ_q^_UI}+d@@Yh1Xk*H0u6J+e=q|;3c*_h*77kYe(<V;WEf$|4U
zO~ctX&m)JAI0BdZt;Mf<U=xb^Cp}hRZjHDHRH*saM3(GtF{G>#nRzu(<|0uvBA|Jo
z5=WesTFc3!L`vJCP#;nz7`l-;^~~XQ%k@~BjNu4?Yv^-jPX~#)g1N>I^weutlZ<IG
zxTd3Ch&1qoM-Mt&zkn&@tTxy|uP|@EOe$u)DzNA8$6m%<ST+QR!M2bmu{PR(KW%14
zyAy6!)Y_XfQwyI_7O4hUuLtb7RzJ&{7$QUV8%vrYZ}E&dLz+qn-QI+hXW*yfCF6E-
z^J)Lue^h#w>jK7vo9NcUrkV>~OGh)s6F`Nh;n~6&{n2mvxdspQ(D<_q?IeI&lu=$Q
zKL};OI8sAzPz&0WSC;o+l5(ad3!W#46J1gu=XI8BSb0FXdug0PllA1G#Yd~?J1ItC
z*7;%FHCe;&+?nOP>iBK}7N)~BPfW1_Cs`UB-Y4kP?}aK`84&`pPG+R|L~B@h<cSPg
z;ar_F<5$EVN$>Gobbd_HNF&05X;ZRB0$ty150~^E+`bWxe+J(*Ah--3aPM_D__&R)
zZbtzvkZb^R76kYSw{xX96CYf7SeVBRJNsKl!PBi!fmD;(lg;MCf$W%H3sqoHlVuII
z(4={o@$`c$pB!<U4t!?K)cEtz>Po;zV`!nTJN}E>jvKU#fJ?)hYO-p!*Z_;@@f$S7
znqYklO^2WN$BnIFj9z_&SK&bBOwv=K2b(G+)_0&3MLd!OU+-mAcGHIkc~nR@@pwoK
zzh)%*8(4tJSUT0j-AmLh=XgAGpS=J8;FJArBZ8S@)C6<e3WiJ2#S0G62(Az<n+L|=
z>3Vg?*mM#@EZ}e}^lN>lWhM|@r83u`+VuH7;dbG%5}%w~I*_laNx$W(25U4372XK@
zS(wfzzX9UU2aI{}?dv9Np^eJbmg6W`gVAX&7rv&TuDhTU_Omgr&y4|YUDWOLQ0Qnb
zyE685xBIDQ1?g4-O4`d&LUelaRI%!L_Mh4nVtc=Wq@_y$@r+gd4;zBSVioVbUAA(H
zyFY>J)ZKX+aSKiD9<T2}Bwh}E=ZmhbM!u6Ze*vx&%^m}<t)y00{ZBFNE{2?4O*{`=
z8D36LNjKwY&wW$zsC0@JnZBVNq<v~n?=gb$KIKI3flhD9Wi$LkgSJvfTEr(l%gwgb
zseS3c6x<9qUid6Aw;Y9lt@zLnezx4i6AUCVFpYA^BD)0rJ!Q_Dm%y|3`}Yhi^{0BP
zQ13k|M8lv}fJP|j)ANo9t!9tv157#mvuO-ziA%_)&cSP1-2>*1y+6(DIBV*k=I@(h
zbc%i;{{C^92qcVezC?g9f=WMJ2{Dq^m`ne-%%PGSeE~|Llb?#01EB?yCwARmAPLk9
zY8PtcdVxYvsN~7wpke+2UWk591~_9h{gN;jspaEJ3e%Fn2<>U3ye)@zq*0(z7W#{?
z$Dp<Z{9>2d7KNu$pM>x%@b>mIiI4vsG`<11!-=m1L?NlqFyBv@WfHs(z4hOm+kH}H
z@V+4?x@r~~e?$|2u6gw-zGRZS@Nb^vSW8+?igUmbTQ`}qFC`(U+lllQ(A5Cw>ZD29
zYDJ{d`^lSewcYK<BzwS^+MzWvkbwBn_Snlk6-(8bSgqN?$BUY=M^eNbvH8mHQsX9F
z{t17poK>UfF~4TUx6`)stMlXJ?Ju-6Ta90@bi%p5d`*=zq0H5kk@z@I913Risgtws
zf_j7`qx-kegsnr5XeUIDojh@tPfTd{W&0M4R~=FM9Jqx~zg5=}N*-`(R&b!L>ab#x
zcWCBNfF_;KUefcvEoD48>i2pEDg>xiFIAX6CxU%Mz*AxImY?A?>5AWp{a?$ycw%wv
zDR7>vu_+MM1Pd<$G*x}5s}z~z**l+qvz#?)>isE+;Q>2CEyF9R>-wOmMZsz=z0^D5
zK2lQol%>LifNivL^eX=&`q*;2ycWpQK{NpQfvpEEQ?0s8fz&3`#rWrm(Ie&8fr{!I
zM8{}wQz?Y&<2#)fT%)nUrdJf7?zxZ!kL5OR=QFmm<s%70R>Rf%(hZBk;k@#&2IA$t
zec|Fu$b=Bkn=c;|0Q!$D<ez3@t!w*vgiwcqnN1qIL;*dz*#6Wi6ny*-87N|S(fvjT
z_@RL5J>sizwNUamEohX8=;b0V-ejURT*4>{!w{ecF@#|I!LE^)SsV~%x;Q05|C5gW
zj15jV<@{HMKwHf2hyzsoj|?wcS~JNlV(lF10c5@5%o(&2mwiAI_Y$t^l=4e&=d-ju
zDWAaBLByydlCSa(=i`;^CLl>F9@oEWWOcIXH&x)-?dA!_k`HJRB%z7i=F5`qBN|hA
zK`7;L+(gU|=p#A&50>pv(PEa5@gBX=Ed8h1PWMQq+g|~K;-XW<Nfzj)A|2e#xNY9*
zm+`?mpjqH+aB;@#oMaZvqh3MJpX30oaZmTH0e+eyT^d7iVe#${*XPu>O5;1<n65|Y
z$d5~`jiktrN4UF9zML22THtpTB$kzCecrj!IpAX_qQde45SI9bN!ow}^{MrUyMFZw
zsHex|CB*%y;V^N-p6^3Wv42n{L%o8ELA7ww7f=OZ0uL1PQp%HwmjH5ara-aeLk@-j
ziM^LtgvV#Nk%3~~#5scyax@k+^LJe4+L3sYIZD>Y<yXIJ$`cKzf2w^j;*Ov|Y6XyS
zo_dE;P2tB-e%W)u6xwwCxx+4G6jJ4I%5z7U#>_~R0KH^_u_<X03>D=9coOP(w`|5%
zohh$rGV8ePZo)+*g7{g%NZ&H`=*>We2eXT+H`QY7hAF&=$XLv^@wy<Thk?f?$tZX`
zqjA}G`DXy4)VG1!@6rR00)CH?WFUXO-?C=XTdSD*@fIV+FYMxrK&Q1hVl`*UIj6d{
zSa9W}%<O4f(n!MoTa;afS8e-e<m=a+0TVu)sA}8D+EFXT(`q#nxsfx`^skt5`OW8T
zND!Me9N2pyebVfh0Z>sv=xFVw6>uW`(CQCs8nW|!^lG3_sD)v&4m*pKnhadwG`$Qx
zPVjS*MUU_0qXfG>(N4geZCJBLhCRyI4d#p4_u!B3Kf9x14?co|d;lN?XTJQUnjrA3
z(KpTq{l%<GIHOWg*-BFom^s3V{c<=4hzg5P2my(RONXdi>?w|nFZ5L-Qz10;@8Re;
z5-rBHYw?Mrg$3G-6#EVt1==<eBZnOweJYBK?s_d0G$~RNEo+Q4DXZlFYxQpElTl=R
z*8gkOig);1{IyCc^4T8Kb4s-AeSqqWO%fyf=^%FriK7e>&+S0Y(Z8g_zowM`BY|{(
zfheFLlz+Y5xEK0>lG>a8i(3SF{U1Pbf!9XvrU)phyuayMgFO6CQlPMx82>nR`^g2`
z7qN+>?EmL2WqcwfQL6KAtNsSj24&;?U#r7$$g)km!>}tvNe|<5WonY>aeiF~`F{Lm
zio|fm9$=5Y5hURBX*1@#Yu*GZ#yd-neg)+g8WS(9EOFaG?yAobkya;FgJ`NmAtiO#
zEIvZP<RB9BD*)0BtZ;9T>zVqQ=D_&o8WUoLdl1y2&+fP+!oy6J`U`nupw_mN`zxXW
zjETPnKA(bqn9=0HQ$0+?$)v*S6#`iXNoZ2uWD_55Rz=Le`b+r$RSpbC#FOdW3qvEm
z=j7sb0#ke6uFqy%yB6C!Tiij-pw|n=Lj~BvK1v0Hj>y=xJuq378gzUsW9YOseuG+~
zr~q0rm}l!WV^neeY{u4b8<1@Y>YRy36>$(OTP{-kFHt${hc7hfhVy8dyiE6*lkv)1
z!Y=dcx_`&BZ=au@Znsm(9_gkc0yAA=fwsf15;X=jM}J!z8mSS-D<K9IEcx?OlI?cX
zq*kdBij-&C)JdimZI$fjw|KgWaIzddhr%l*9MXrxcG;lBh@#Xynj|c^_JWZ?NOv?f
zhqz0ju|1)}iO?$dh>;tL{dhb*Xo(SuhR&w27f_1iB^J4$W`OQ9hr0vH!-|{1s#wBi
z^AQ34sT?oZNy981#HZJPSlbG;XLh)ADAmi)&_mWK^;5EAnDUe8F~-Yzd@))<x0&NN
zk)z5h?+D4`^6Dd_;d#D-0csjG>J!zOi`SqWihTr7FH{ss=4p2)>7KqSq{Sv4Zl&Pu
zSmt6$ebL{T2WOIKDNx#?4(wF@5}{xKN-zVC4?WU-6aMcB0NMGMZiV%Yp35}j#0hj3
zw`@ET{?3^KH3wffM_2bZsAE<|sx|(WoZ(v{SiPddGM&UQspw<z@i%8PyGswim?lTm
z==|neH=rop{h6bwYCMC6<*=NFzEvd47b1yardXq4IC}IrvI=7@j)7XMx2{;b&k*2g
z<c<Agu-ddKwa6Tqr4T2T%)b@8bhdIC3CsjZD;?y!d5rqdviG09dA*-*R+)LIN~t_i
zX;GXzO*%a``OeO=y)Wy@<zz7OE4ru#i1AL)ZlO%Dk(m^4*4x%z;eF$@Zk5g?Nq1!3
z7af^M>iXkN*yLhRnG4R#+<h>tHuXj9$qU+g%Py-}Y-B<drCf4Xe7PmOm%$in+bTq|
zh>1thrO^&Jho>8)rQwRNVub$<7mr4*^>)_n*lZ*^F)O1b4mEhO1DA%I`)S6J*RtSj
z4QA5Ob8+04s_Bu-;-l@`lH|BACnu071{C02GnEL$UQ(yi!4bPT>W7F^LNo@)$}m8f
zXzQDFq@B$j`$V6QzkFrlo~5d`fZHkfgk#*ReJ_HHTup^x@@V~0sSuTiHx7nTkEJXc
z_`V8-`Cuo?&ObEJZjomOmI_XpvijVnwvSyBr(f5(sDI)cRUB=z!wf53rg!rwAl;$c
zRr*pCUJCe}pt;zBB^@vK{Q8-S_mnH4o*hR@l2bnwl@fXar+<>v*%E#(yKyr$QEP{0
zoEJOUPonM@z}0AF9Af3Z18$ZNQ?A@LZVH*QJMb+E10B*?Hu*Ff4ufbs>RM-pllloI
zRT!{;i9N3YGvb%>K1a^D`&3{{IfY7zCM(xJi7mZGrJvLZUDDTZL;_uCqr?nH*{}HF
zgP>Ga*ZEN*@tnY1XydusMbH`pu=nxJebljDF*%Pg$g}$a*W@R0P3rroKO$hVLVP5J
zLZ>mHBK7*En>kf^Q06&l4$tK@8eB>whmqR(F2J*^;7I@5Rk5T+mn(OLD}ivOel_Zt
zJq+d682{H1UB{L%PmAj~WV4x8X0dmEmaOQnK2dsD+=-ouFHCpQB`<`G-C8x^A08WM
zvS>OOc2fZS%TY{n1m-cclNOl3CIE(cL?#XG<v{*aTHd-tk==yeOpUto*npVkvnaF-
zrEj5Oqn0-BRFi3)xG}h+(t1-wbof(c*UkV##uaQT^jxUewKy*NJi(>dHQm+Xt1$9q
z$t-uHwPssk&g0xow!{LF&cL^gX~XlW;K)Nl$Gd1OEj343XM#<<fz#g8@2xUomcGbb
zo&K)%v)A5qi>1YUD=J$$ItqAMi^9>D_7SJA9Hkt!xW3cwA~67u^*q+o6@j=^q^jdo
zx<tZ7lsi<LPrpj7hN7%iT1Z&}^BbG{ks-??4<&ZUzg6Z}DJ)y}r@)|E>F|f~53PkB
zI-ekKVOMR;G#TddOS9mA=v=;Zt`ugV_008^UKROVwMq2vOriOvKMM~cm3~fm;1P%5
zWos>izW>7|Lyj$kKlLXu?_g(=81vpSnv2l)Ew==cfL)G!<@<?2G?x&0a+eFkd2D#z
z^n;?#X?UqzY3iTAqr0-3%b;D&<*K>c;#aYGl!PTosdrCBHjI-J<*1iX-{XWIkAY2;
z15b|yA{;lD5)o7-$YyFWzod)@UqC8+{;<J!Pw93hhb8JeJ8g}tIHYW!$lur`C+IOI
z5rOZECElSWCSg|gb$nDYN^N#^{ZaVJqt6uVV0tPql~4g5?Qe>1@zpQd09qWnR7n-h
zn)#DsS-tb+CdypO&LOs}Fb_ZgK%s1LV*ZBHL9Vy(oz$1ibpwO4f}5}j%d=|?F^R)N
z`b9%9W>{Z{VjiC<-8fqi{*vP@IQIY=kG4Ggyywr=S>-#i^UnK$ojUOg{_wH?H77zO
zKtca43S!q5Gw4I~W0Jm7fZt?DL*U33bLm6W|NKA2zAa|o`#(_MTh!Zh87rpB1srKO
z-sgW?AGO6N1!aI#a$$+Xw=il8eUMySA`qDe!xD}sj5)taH>#iv8bhcs_a(1HKv*I0
zvj{S2_z7{!K6Tj|hA<|3n8I8a$O0muiQL;dfY8tKAas}fKLSXNErM)_$xi+IzZkJK
zPzHQNAZj%z9LTSzLSR_lOI?Bqb(t3zD1}e?CH^x14YV0gFQl-4`!_OrIG*~yc%V!^
zruWptX;ER(M35!_dFd+pAOGWfN}$xd=1&LAk`|i-(b9g;rGQ#MZdsD$)$1gVCX!_9
z(3Cj8Sn6fjSJ=6nS8{Xy8xy&v<~08Dg8U%2mGP@UQUoa*St%&XPBB5T7X3>iqz>E1
zpqs$B#AEo&zR2XFQAOov1yxSC=j)$XDx{(c`&5)lI(@~Mit}F{Ncr(=%I&PoSCqjA
z8`F4?(ZgwhDN?WP5hRhFe*3`lBx8ng+<hD!&}+YkwEA-COA<+*aVupuBay#LY9ZFH
zyvTD?&UQpGRaC*37oCN|X(?j$H<4+|oLcI+XnCMza7k@cF2L#EmZdZEM(CSE@<s@*
zaP7&$X?dfio0YkVvnc>DDJ!BxKB8+2^C@i=AsJ3aG|h9U!+76h$rnM62nJg<#Z3v{
zWML1(Fl~Zdb?!LRD@n^1O-7ZJm%$iWzG%SpV?f4ZkZlk78Iy%Jz}MUlQAQ8jsZiRh
zc;T?6fM$O%M*fuvb6QUY^@9J-8UErHN$pH&=2uQrX61igF(|L>u2}M=pyb9lpCpFK
zJdVx4%Y@oLiai4Hz|n)&E0g{3j<!MN{RgA0!oK#<DdL%v5&DQH(c$@ltHBjgWp>VH
zXgAyUZ&bcyFp~kW4tI&f_c1qCsabneJJqfUhg}R3c_lXD3j6Mw<b2c#M^(MM--}9U
z))3@aK<VIGZhPo~j)<X6e&YS}ntBjE=-~TL5OYVK1nT03)20OCD=IIKTc;vBO8mSF
z%NwzUFbz6iDJr?qvYHLB1a(30bb@w9-iXw9kMuUpA%MT{LkU&&TU562V`8FScHT(R
zmSRjDLUW)avPjZ<k#|DyBG`eT&H+YQ=1H&5_i)-OD4X-s<_1JZXk<SkhVuXK@d4CD
zYeNPq=Ah1@MQMsf3tA(H7EQ|=VOZmD2*KG_U04^LUxEpB{`4N@Ma=Y`OD~ed=}1P5
zP~{2w5}<V6X)l7#R}MG-)i#?C?v{>4h1<>kZdK6pL(B7Hv$VoK=*gj|g#B@gUdf$*
zj?^gr6}-3M$d(+ni~s1n(^`-Z>=D`{HbACrgz);B|KWU>bz8e66%e-uS?OMz;<*jC
z7(qO@&-b%34<*VgdN0s#V48R3fL)LXY7A#kx>RtZwNJLh5U4=-?IVpRfgS?pEmyDI
z^c}264>J5J*b<MRwC6<C7BjZnX~NEPow6Zn-Z5nLB+YpLZW-?)ty+xGu^>)ScUE}w
z9xp*=@zAwd?duuywYDH{@AT{gGozAVTjG|WTEi!KpE5Ep*o~4f;hojZtJ_+1tr(qy
z1BR1SQd4=Bx14^DI(6bZMGBA8hD&8)ln#|&_gu3(;g-<a(F0Pd_2+wwss%4)!yy>c
zAy}P1=jzC!sOGp;x1Yklze&gcWI2Zq#+$|{E7kQBd@qXU{f%I0DGaZLamAaJF<cne
zP1uQ4kFuW8srBV`(RR5j;dzUTmy774a*$$QlC<pv&H94@+T(;%#DA33QQLOR|B-en
zduGW?Ur~WKjq&25v)=84Vienjsl`sChT2CVzu5o1ImP(bHELPdy!VS*o+z^gvJDUB
zioW3*jLdIj#Z;put~%0RxJA14WZnu@a<3!)4S4>=qLz2VEtv=se7|UNRpID`ic3Bv
z^h#vhD0F=<1uHdwAOQ^@VzXEJz?hrcAxY@P&A4GnI9YWm3re+`+)WJK!dK||$x-Z;
zsU#PD;54FVm)YzaUR&kH=Wo}jxxB6C#@d`F{GqaKbzG+5OOh$`DbxG43eK#hI;^os
zXhqX@dVv<lOnLZ)^=G*Q<%aND#616-6jAZ$E$-*Q#<H_gOBEPv4W&hC?G{j#^h>=8
ztMhoDn9`zR*44axe1@a)rE$H~HphwqA_IlglwvIu0|mRLg9jIPIO4R;je|8alIwIl
z`|=Aiic7xMJWv`Fpv`npe0FxB%xzrEXDa?K7u8qPZ!%WjEX_Qcj-4{BL>&h`)?`Ya
ziR~K0%TVcXP9rI6BisL3dSChxsp#f^H$mwnZe~->Otw)(mLxMH+Q|Xs?5I*6L?w57
z>3v-3{eEVZi=C<KFf*+3fYo;B^?q4)Z80x$X1nX1eH|P|t<KMg#Z!6D>$f#dggq2r
za15^3MReg5p)u-}s$g5-pq+5c@1^Iabdc00l*uZg#W}DUvVE`nZ<(+v;ls&oNWSM1
z(qNFs#-cpBIB7S_8;d(p5oYXvI>=%3yt5|?;W}VaUlR~}F;IunCR~X-oZ?LRWwEB+
z`2;c1J^*D;)1!$rc6pMgv-@qX#(3MSJuB>bg*q$Zrz>GvNuA?Itw`PmW;J!$TabF@
z&#9StbGop#pH4-0yx+Gg?y>1jhXS;=4Kw^mbnR!mt#w`M0(S0KGT_&AyAbVE4ZFUE
z(<14op>y78rif^ltM@bL(aIi;<tKf%wG+O`bSRl!-WDmDW&5{9{FY@B%=(VMG(aHQ
z0q5)i<LqHScib;uj3bn#8^q;EtePa0HPHd8{<Wf=+!>b4M#!60cF><VJ-GcxuKQnf
zsLA)aW1hYLc#rs}&JO14_<V9F`M;Ri(ciL%_Mo633ooLx2g}^CNC}&3mKKaNmz?MS
zcvSdre5N)N`QqDloZqrlP|hCM{~Nyn>BI@;FB;_7?6>TsXqGOLv&X{R$c8Z}h4%FR
zRlw~EJzK*~;a&<Tp12ECjCb$al$F`E#zVd2;h*E6@3Sms(<$}bQB}6y6Kz%G$YfUY
zsTj~jlqtQL6Q1M_iHH+BI-BmBoe5mT(kLv4mZ{S^rM0QE+vZ6VgNIYGygEDd7Oe9k
zUR-p1e`cN|CAVn_+UUp|*ydC%zN!R#C(Kqo_L}hfGCq+<JafecD!$VWEVdwgmcbo+
zezc%Fl8foil!bnJ(I(H0vx<-T9v*MK-hNIkY54wkPcg61O#}KfD!~I9b|#HLrY!bN
zaz1rVl-HO-;&}>ZbRMo?ru!~6*GNg%w{W5b;ksB<LXlN?`vYq+g_(Il*3;asu-X2o
zt7BBOLoR21J7y&!doJX^AdU8_Vs)8F<GKv$95>=kv-7X^%n6))dHG}fOJa!=ddSBa
zFtBVPHnIv@-R-PSvuWV&yJRoyjrsdzBmIZg#0m>r0Ka(C9=1=CaNfeD`g~c%J?*hJ
zeBoTtrn?|GV&}yE-4q_zrF&-6>8VFn_x!E5*L0a9CG6qT;8jW8RtaMv9q}z1Xno;h
z(~So-?`4YCv?g!iLzZ>&X}!>04YmyQrk=V8r&|InH2Pt0+;t~x$J?)bdC;N>EX32h
z8Yff%kB%lsMRf!>jWN{hdK_aEzo(7(@-7ccBvPKj;$;w@(TNqpvlE%F(F%{u-OtHh
zG-MNJWPaxb6zaOS9#;glU!(2&FxWnaIqXs*R{0lZAJgy;O4pej7dcYwMa3LZ*IiDD
zB_31i@Q|#I;iD>;X1MQ?j99VR@(8C{9|)l;ICNws#PA508d(&c+R{nc<l|WdTETHJ
zDRKwA`)E`juZ^91QlFL4KTcI$Q((KI5^fh&u0F}yoH)<jxY}1IX@1z)Jo7#e@qV@J
zGQGE*Pd;5kWryR)diUGLgUXlyf9lw{Sf$K_-E!6ol(Cvuv1x$GSjt~)gf;0ds)M~T
zr9V0gR;a{5J<s%BF+k>Kg~*r)tlm7->ONra`3`puj@-ST-PPJE8)h%m-1|rF^dTaf
zzbESlgH}QBV@wM&CN!xteOFSvNR?Jrzv<S(RTNxk2i+ok(}Vht$8YaFl<c{nSI_Xu
zCZuMWdkN*t3hAt$3OYxdgNd@LxjPK=@irHazn%!7VeG1_p2{HNxa!Aot>unAM8`<s
zmZ=lj&hYR%CE3f#lFi$hDRN_wKP8HGw%8JCdnVbLDU8P-(_Njj*V&qY@qCCjfIB|{
zE>OgB=<1b|A}Ho}Blc1SLM{UWPxV=af0X?KV3gfv_`zL!27Q7RhU{dwQ2|Px)t+6a
z-0F07*GW7vd|d1xWm={QV5^oO@{RrJBv!CJqj0+N{;^}o`o#SqT09HC#0%FPcVsOm
zaJ!!Y@i3Pe=9k=MEJBv*)a(P}v(bMCM@dI%D1s)<f&Ib}9$ut2{&l{{w7}U!*AKR>
z%h)J6IHNdQg>s7|h)@5J(Bs2wGDzlKP3KQQY7*O8uz*TomhxsaMA4_}43rB7?LYNB
z7bKyyxr`J}?5(=xVH-`bkN!0C^n9+6$(1WCXKf0OYuOUkw5UsB&n_|puHD`5i<Zb8
zJ^h>yDq?39$Igr-UctmX5R{g6(d$(*XsCnXZr@b<@~bjP`RHj3g{+mKoFT6;Q`^VW
z-JEF{cO_`^vqs$FM}E$NrpNq&;D&dr1TEtpUANS)F~w2_kKktC##bbzL(z;?rog$c
zb8R;pCr;>$W(O5XT_=A`OLlRe3a%T2yA?&&VC5HMu?wUAEjpYTqn#fjp+=+o&OoUv
zv<8AN66=cPARn>oky6t+^~ICX&#Pm8aL-b;`jtR!zvpcpD`Ak*=j`RU6$;$-h!R+Q
znn6<odFs~rh@|rqTK@p6m(wcerk}=Z<lC(2c_II=Y{e@o{6}6bH9_fhmTafskJDoC
z&l09A-49=-G9bMUB$f%bl=vYB;h9rx{CmsA7moeh>1OM#6gG?PbOG7cblfpSZArBW
zL<$;r(lh+J701O!K@nA{^|G^b3tWojdS!4~V4`^%n^&@A4S|2$9$jWux>eUYI?~XY
z0yoHiEm|-^jcGa4uWHTdOT8_YCT>bOTD7Fi<_-?&$U2A#wme~v$ybsUeN4w&Qhr4O
z`&?rh*U$fX`gG?(2WJI`m_^@vtuxSXIp14PbrVr8znwA560`z`ZY?wuSvSrfjsPHN
z&7e!U6B)Cj))Ou8NWr=@GREzqqzy&G4K2ajiLODK&jGkJqpjRvjtdJN3f6bo6NZP&
zhL?rg`DmUTzj(33`xnDj7S1z2g|iRv=8`*Ro9+)=M((}SKYx<in2aah*r;&WLL%~8
ztnY3We+={sF0rVuFT0$>O*A=8Ydjm&uVwJW8`f<c7#c{)TH)=6Pafwl=0nskNbhg)
zH6iEkjSp47VWWQzIUmn_*#rx{RM5L3(0_0v(u^*PqA^p5X4A6nC%Zec#Plq1vn~0&
zIb?zVG7(Y@RN)+L0*jQdOE1bX>9$Q?dK#Y^m<!sB=G=A#t5ZG5s*Q2vIknO{g8dpl
zL%7(9u3&~Ep7>s={taCJYg!UF6WdBB_4I>CflP#g&<I4<srkukappMjL=Cjc_uJa!
z{b)|}(GDC<6g^e^@Cx}3zKE~3Rlxy}qMsSDf^#56^#g?<P^-IOLZLn~p=F(zxiQyn
zDps2Jv?YBv7OjGnvI23XwzUebYUm<+9<73tcJ>me5PlPBU$Ymz`Jryw-C=Z2yE6Iu
z25?>D;v|1dMIeTw;A5Edk`2B?RoLETzWc5}vmLMcWUe&XIV;x_j!EbNXO$Bo)4ceb
zs*Iz|nQ8H5>cRBu2tufB)3`^OpI_31W9!w1X})VxJ}h~!+E+{G_X&RlMYa8pP%dST
zG9*)%uVtYR-70ZC#9aD`+j*K#&k7}1jj27Wk%E7Y#0tZG2xxmC>O-=Ahon9A>?A#j
zV_|hnf-CI->s%>*sA8J8-gJz3+(%cay}#r#O^66b+jOa3_Y6Qoffu_92n3tIY$@iZ
z*)aMOE@|`|ITx6@E=0i_QSg`{A+u*XPoX8+uaT|&`a#A+s;%QHwg8FjEQ%|nv;KAq
ztfK`j*A6+7han~QqhqA(5$J1bVxr=6ISPdAV0`kVaqxz1(fwA7>N4fHDlQV#OeSJm
zL>t>i{b?D!BRtK{H9QP`qM0s4l<B08<jGy~8oOTT6GR{&&cT5j-3>9OP``2a0r32s
z95>o(KrHL}st5EMEs-f#E3M~<DekIV1pp%$Xajg@Qj444FGn4(#B4&(Wf%rx#Y$sy
za$|8yFPe$lcFELEE}BiKCq509<L=<d_9h6iTa!iABNLW<pb_2dZ;j;|Pa>7&rTGJV
zOIR+)xN(_Jh?x-`We?n`-}>m0GRIcs;UEo_kGRvAa_tH<)a_VR95(r9>F+YfXyo1E
zG^jJjFk5n-34EB<7}ZACx369CoJB39)yFC6iHe3WI7#dJ0V)@Qz&sKY%!wW4#RNiQ
z$rDlMm&%NzJ2UB1^J1Djjq0t#Ugcd|pe0Yk@I>Nx+3^!2*M-o<+4i;kH0CtRlD=&-
z*ZuHN;zZfFtZt+#K{$ZM(5l+e8LZU;Uj~*jyDSagYQ^EMX7np2+qWfx+J|&!ZG%lK
zB0@(S)@YHI)@4I39%)WI<I8A>;l5%r{jt`~L3vFX$0zLN{L=y%(0C?8!RXmD=`K1G
zij4=kO9F7rgt27@j4XZf{)fq@a&Rhjjk@<?*8!?;YF*U372{w^!J{}Sp5h@tMsWz(
zl|(?-1BmdE$hm7N&Ak&W|6^hvSl;O*H3UAlD}RxipW%qv8C-T<-^=zyaEYy6nj|vT
zPxr~)jzic_)rRdIzj;pll%L`c0u1g`CqjP{eSrGR5Xu{xuZ`j&x-~&N7<%}?>i4nI
zrh4z>!l-h)o5YKPUtVEhsrB<TbcR;_GaG&m_;Za@L1<&kJg);pg?99!Zf1|r)mE<6
z%^450vqi#}W;oy5?z@#}=NrDvn^?SDGgG6G70kQEbaq^evQI>P%j~hc?OHOc#XOaw
z%Q-fqrFAQ6BP^ZwLRiD-v?m$>Mq0~JnvoUd;P7t5Kp@+*_wN}Q^R4gtCd-%0*bPgv
zp_SbCj4IH~O)ZKdE~kwIYrjwTtLw1v$ku%~8`o;1lgf<|hxHfl<XgNp$DB(`Z}qky
zZ$=2*f-=v~+AuY)1#f0#R}E3UpR`s+I8^wv8eZ(0-XZ$HB>IFRd<o9iVI<zv2)oA{
zKXE^GaU1=h`si?7h+D6)Pp(vs7y+tGP)9t;ln&$nW@q7ID_ml?OL>WX*VmV1NpiZV
zb}ekew=tjj9{c{(CVHAJ@#j7I3<g((fZ=?gjXdA4Wk$)OYCOf(p!zIXC5o&h?$y)4
zF4V6XHI0-f*21*l_m|77OT2@s;`2R(bJNs)Q+2Y=ji*!9QL1q`qG!MBfmPv<(#{Hv
z<pmDs&=#)Rb>n5572}ocPpf>0T^o+zbuJHO7o7Y!mP1G9DFqZWN+sq!Xm@>o2JM%q
z%qrtVfs18ESm<2XIi5#HA)dB1^$^Ldm)}(@TknMB6~itAoJIqq)5;lnpFOLIYhK=L
zag+hAgg;|A%VNF=XY&q8m+4ZDk*F{!)gbtO83G4YdU!c~r4Z`DP<CSSW(M9rZJ3t^
zCEg1<-Ub~Uz>kk(q-Ri(+URb7D1YaiBxEAB$!2bG@7X(mPspG`vdOM)asLbLAE%?Y
zgSER|Q#Ek-D`8-B%BipM82mF~z;Mbf{S$=y#xXelEBu9lEFhS|z&HlWzZx2d0E$WZ
zAN9mPKdEaxab5p-!fWk4IyMlHpkK&+NZ)?r+6l641@-%3us~ME?*Bsi)};3zb&qKm
zd;~ZZJxpd8u3fMQ&oA)94{?wQjcrSK%nt(_QnnANxu^H9euL2vCbRmpU61}hiXn{l
zHUZOLJrhVj^^fv50t}?AI?^|Vf&V4TGWmla^3j4o%>N%5`LF&0l+j<j0qcL$v(u#n
z=|9Rr`Q$}{Qz89}Nov}KwL`H})a>_vYGz!!9>cC*so*KzlnH%)j(Wp-aS-<4c&}M!
z%0N4$Z;t?nnr1&A)zlq`yS|~g<}O)j{nWZX=ptGR?UHQTiwqonP0nX!f66UBsI>KC
zs_Lz^z|TkAd*4uplh<6wLla*&WcBz94o2iOI5UO<9DNhW?ETxAorNo<wSVBNF!J^A
zY9KQ(R?k-S4BDuwZC`dB?k}@qXTH+7ztTqt#}oWQr0-F2{f>^48es!rGH;MVR~O65
zLdB1zH-e(y-C~Z7w*wJTqa!G#4Ys<7Dq|0h^bHBLzf+Bva1_oByz?O3^3s|x?P&=J
z4rvC{iJL;l$>l(5Xzt=5rw-B7*7vt~S&@%G%*^MKGWz@rktg!>i;$8w*e+g_!9s0D
z(l~rHEO8zurFa?~e-ftHUP79k4J0C&JvahiGrlxRbT6ejc<Yk^J%b-bu)GwazDQ4O
z0rVBCszDgpGMsiHwNQXwZ1^JzoP-2okfx?S9eW`OLoOL}AvASJE@xo@beTG)AUT6y
z^l>;xo#*XKcyw}3xD)~h?nG7}gtKQB9(!2rx?m&drLV+z?5>kw@Uf)zS#wuGuyTPl
zwk7m_97{w7bAL`A^4{R0da%fXA0db`=Kh|yL&QC~#nlbaS<GmBA~>JXDf!?U-16+c
zv1W2-d6Xn$cUA5LCD(UpN>=Edp~D-0b0q@tz|Xqpm~q6SY$On!wjoZdr8U7;>sja%
zu)B5)4ASbsuw#&t>bq$Dcm3j+?E{h8n&rq7w$~W3EzH63HUxz<{a!d5uN)fXj=<~G
zd}uG=;)m=mu;}MzAP|R)$r->fh>wU*J~V{d3`&Bb?EoBeJrK=I<<g)%KUZWBG|!oW
z0X@B+(d^z4wzix?lOY91En#>o)G;~`mzJd>btu%as)*4?=c_($4}{h+V_h&bilVJr
z7Vvd$)w1SxQ@Smo=Jv8+0>B&@4DABd7h2w264nDjfsXC>jt-AFrk10izEzu-E+#m5
z!1s34>-~9=?hy;)+3w<;KJq}%wHmcVx2CzvoQ7b6)ZX7f1aDGaTR&$7necKs&pyz5
zTbX5Ryzs<<g_uz#CTn$A#5hG65}SqUHS5!}UFitVU63*sZ?wZusFaE?RbO$|L2seO
zhhkFwUf2UWT$QBO#7=J^UXBXM#H4ejv!c+;yz5>fN+;5-7?$nE^;VZ5p>Lp7BL!Gs
zCl=+^wxW^4m6W<gg_tcm?UsGSF-z5xUFo%YofQhGqzrWnoX`0|)QR^5I3Oyj#Hz=9
z>`s79Pi_jr6i#BJ1~)ZZuA}>uOqJ%a8Q8EM(>rN0VW|Nof3i2#==Q(@El)`V?swC3
z4JEHj4JpqGmFwoC|ARUkHB}oXd=1=vWCBbX5<R5{aON2%z(0U~lej<5V}i=%Hl2b@
zEu}d|<B9DO;pLvqN{>p2eCsBLc>83&4C5cY?&rS7nmWi3__v^XwXWWIX4`g2O$>-R
z90fYQm-fP8B`YpLQ7eae+H6BMldHVbcztpE;Q2hi19}&kEYZ#L%SmsHYDu+YSGcr~
zb`Sz0q?+-~LYJ^N1mZnB{Tx-bd`_3>KLH6ODEt)y;O^nA{ww^2{k6#F<Raa}8~&?(
z0TJ-;;idnhCiv%vSs~{O_&;8lJh45l+z;=kl%&M9jL4TOBN`fwxaNyEUo44XEM4@I
zUO-vF9_+6YRQ`|n_*pji5>%i?e0-BBx;$tEIt2y4w4nEsT1uk!5vs8F6H9X9+6C2b
zzPF?vrhWMdy+Sen1a2qwxzNG*__xKEX|#O4H)jvOtramJeJ%^KR7>J#Ut+-_VP4~>
zj2yI+-RCK&Y*c}RA~OtmVxLct-k}6IJkNJqm+6!A9~*ZSKBk5G=ezmCC8)&>F>g&N
zBq&Bz-h0Fr`6K;e%{YI&_d=%-RknA<^7k|9y3N!(AL3?L#6FgFZ;EX6Klrf@K*F=_
z^?hDR1s8qf0v{i^WDw^*OfKJAY~miHlu_H~`1m!WrOuE7@=0c-rP#zEPBqEPs?5VB
znYcH7WVpwMJ?ptp{P=hteJ2ql2?^Z-!T7{I63gzOw-V_=sS3YSVg<!`HCAo-G71WJ
zEUn)Ne?da_Q3K4D%k?~Ye0*MEy1DxAyPH3jn22ERx8n-PZRfgi2%8rIcdHE+Mnb22
z$f<`z72)9}Zw!>tFcbI4KI;z9BH5FB#>o>;0zMgot-{g|CoU==O5RLcL9rK#dOy)9
zK%t%V4jz3D2g1k>)DEg+La@)fIY_J3r!|O4yz2UYD;rJzaU)StVpFA*52MhiHPoob
z1JzcnaNk2}41HKUdl9sgVRwh58YT4wSwO;vvW;Ddk$QMiv9+i%mz-Y=x)h2+WlCvD
zVj5li2~$U39ijzu4`$(4?{B^}^u#^3lJeAm_%{d>J$T6lN(TSEqFnMEIo?#fgPoFT
zqq$JM9t(T;0t)J39QllufOszwtW^+PkroCN>N`GB;%~lee$@~{8@PA{;!0==ch3B6
z29R4RXW#&~?gR&vM;DKV55goP<!?T1P_yYun=Az+h<@|A0fkFY*ZI*-0v+Oc1ij^D
z<Hbr)hb3<K4vb=E9OAtQqh#VT$@Tu{GFlfA_Tu)TTGDq5I(q==txwL=euWCeKD_I0
z$(iH;|7`;5z6qfB$$7)lvu)A(@Y1uZ;$h4l`7^4rJ~dTZh)QfGNZ>_!NXjG!vW$k1
zQw{s}gf}BNWTD4rmc_SsH|sW#NQ1rt=M{pJfG5H;n=%}k;-{=LYrKh1_uG%PhY?@1
z?-^V{@1P2WIjZ@J{an-6`!*lb<zL@|z^bFC)rr8mE?=%D5S^zIdprVh7vu!Gmj1*t
zZuL-JrX6w`KQ0rl3Xn$aDOYR+A)AGQOH>7lLdQye%ngPz^}ex0l*FLIc9~!+5KmTt
zvIWY=`Mng_V;OtUyRgGT5~UOkwhMaC>{>>F+!y(h&lMklcUi%70|US8$2r}FX_g~o
zTWCpM<3VKt57Gv#8+FCMq0n<e>pEP3Cq3Gj64G;?CcHXgRTVW5slkuL3r6J0HnB$*
zT-p>_cUX~zCdq~`5K7a5YYR7+$?mdKkmaO5RK$@Fy{m>kgQi%DPk>!4_p)dz`1<`h
z^d|E=mT~n4ZhRMn$$d;x>{=`KxoKDVZ900?(U8!OEQi>QnGms*F+P~5`BJeUJ@DC&
z=54j3Gbo>X<en<iV!16fs;hF0r+N6d6OaxF2EdE|;JTu78zP|aDTIi+D#yb1DI6oT
ze6#4k9IQ&5G>X=vDc8gCDa`b}y{5xDsOhibied376np~=REZV>zxWjHd?S66T-OCD
ztp=awd+LMr?w6nDpO1~g^-6OArB6u~sr)W)k_}fieN|ilm^Vq4ou5~9^ZfuQpF%F*
z$iL{MlQ*g>x+p#8kD6_@tU&|-9*RWor_Pfi@*ja|FgCh{=i%t!>QPYo^R_7;8f4vS
z$kx@I#@{MmZ0BM$IF#AtfTV*tKLb=|EUVyE&6(uJy3ge>y2?cHE6RBN3})Z(6q%w-
zUS2sT`~N6OD8$s{5gCc_UVw?NgFWKn%b{_etVXhzwp9SeulZ}in^vUgO84g~I<*9#
ztq^86*l@O8qN8`T>B1fVP)|S_DPaClX!}!9ziN&8B76nLJ7KV*L}v}u^~{VS!pR$4
zKB__HomdYfA_S_L@KLJJ5Kk*XuWOIhKX=r7>$IiWs#tZZ)JRbpz|@Hr=C^$zB=tJZ
z(KF#ZuyHDDjlNkNuFn|-1h2xY>{a!asfMcjsa#K$v=Zm_K2==dm|-}n&NkPi>I0_G
zww;)%h;c%S*8DQR#G73!%U}9A?~$)^T0YT3U?@aXWUDuiEw){4%@U4C`$?4h=?k7h
z{^RU~y!tdTAgww;F>0tNXKwVxmU+_k`t2wySjA>wR&TbLU(;H@J!)bqDERGcK<!$p
zmqT9c<JX#NWpE11;U9>gjD+1WsuU$p`SED%-T9@BxKY_BCT&>jo87y`T=>l8XKbE3
zBbhB}pq4%JT8h{U_1M_thN97#d4)D~D(5)F4>!Qug!Rp)6pteK0~F0*N-3Bp#X{s|
zZ6iXpn#BG-ZI><<;pbe4GnLsm2Q2-n5R}AnFwelFA88=F)c#+TSpQ!XB;n^0h%@$?
z->g{rw#-TRv|StW5A}Z_&XCO}6G2XW1(9(<Oe&=~kS?o-Pq81w<;*^Jh*$CJpnQ;<
z00VhX<Fy1~ife)NYTyqO2C<ku%HR)0Yz+r%^574q@(L9YXF5MbYCyz<a6?cW?A0@M
zP#`N6OD*W7jM0rS6+b;cHMFBbNEJVR@j&Ki9;}c3a{rSv%!PLWtSzx>R4v*r_2deO
zwJ>i_KLk6ncP@i6Fi%vhY(^#UhkAtSrdM+*=q1lPTqWqG)j&cpWj{c%vfnn}Shq;P
ziPHNXK79!oaDx6%Vj$}J!ZNSWlEQF9L1fO4Tr`B$mgfjjO{wQ1I?8wl%DXowb5e8D
zvh>38Mn~S&G=!X;oUgjF-1gCXJGNbJdTo*oD#lk7yv&!s8F)?worWRy3w#ye81FZF
z5bn?A-4juZ6*FL$-(L1Iet6mr;)#>2bomS!MPT`cXvO|0dqd-SJh^T|>}7o3X|2_w
zC@mE`y``JWuVShF;L^A}x>z{KP|2A$)%$Q<WBfJ7+3niZj#QsjHY0qXl2(S2RR6md
z{>V-u*g~$OEzctKtDB4f?~(3GP&C+Rf|yhSo4xUqXuCRe5#&o^<D-Z_Lr<5g@afJH
zDN^1l%{hr>dCCK$9>St5pJ57}=gP!_d-HoU-jzR5=1HANYc1_Pd{L-Q<9fvkf)4vo
zMVsRN^@mECp=FKX8*Q0-X@AU&jX>DXbImiVy$(Bgo8tK)>V+ZM1|xtZ7Dt_?$~9MO
zJ=WaCq7pQna>nci0@esJX1J0^mkZtH<wkTl!2{yHI(NH&BX@TV0pWr;#_LocK?4)M
z6|s7Jm74l>ammi-WhJiKB$U@em-FW<Fq(PO?W}6E1~1g(Q_DryoUcy-9$h~d0eR;D
zd=j3RBJAT&*@K@}rj8>N_%|~9Ps=HPeML5_DnqOoUtsDwKN4u|ZLj?v!BYP627AFE
z@Fa%ouw=PZAdfXPn-RPUc&d1-H+?^OxJVjR6BP3hd!Bw+{fX!<Bb3y<`(^zI%kqwM
zW1d0zO?ARv)z!*^^PEuep+0NX{FQtukmG3x;nkJPsydIsEAl~YuA+!(^T%p9JIbpv
zw=2v#%*Osuz>d**M#{rOMX=3e8iGKZPg-NJ=CcP1NpqxOi57!iMJk6nKVwXafpryu
z%j%-b;~&31-g52*e`=ChQwyg4b3yY;0i9m~jiIE$R+;Gp_4B28Fiv(7I%ZW~)*7@`
zmotUy%&YWeam@3+COVkzLpu|$m1o+45Bu|}6)>C>G$rV-kp@WFEL`!HOugq>3c~rr
z$77mFs~4DFSSPl@I!Udd+S23YMu<XmRJt$`L2-o`da#FU@GP8W%aq`$N1l2Si;4E2
zyx8U4rC80Ixv^$HYnGvgVkk|$S&6vvf|WWDd;F+@gjeEQv8>CN%;`(2?*I}k3sC@B
z{B0l$oyQdd6(Wq|VrnonRa+<6F2$3R3p6uU8Z}j-!^hO>+T!YbyQ*$}^uE+<`Xs<@
zA=7+(bRQ|t@~xmK`Jl4$+H#>%YkqCMsk5crbv?bvUF@c?^WH+0&3U6*LIy+5mJ361
zI`g3Vydog@X1qNh3sd=ERsb@Wku1w8c2drtqT(h$hIk!Y!$bAs?5$$t7J+7Y`<dD-
zIg_2cq6faCese2lFEBa>L+h?kM1zrGL7eSLr(WV<eTj#O%f01VAJWWZs&(>nc@XOI
zwgW&TJFYyD;)<b~3SUXMh^lwS98Cnit*1d2MRYU)-@t?-Yv8(ms(M&J0^E9LdghC}
zGI4oi%y0_y3(=s_yo|$RJH&vagMq3EG+mAA^IrM3LhO`7pk~GT3Z+Tmo0M|{Gu}-J
zpPhvehE#3N5@@j7bUV5>XJfI}_k68q-{m&!E9!8b%~_0sFQ}B#=nzoiZnPtN2Exh^
zXgG3AN8mf|;Osx(BrpXThmG}Ju@>R!Jj=WKI4h;S&dSQKYc{i6QL>NLAKkk<xNX%q
znqOL;;L0lxOTqUS+IGuYPZxDmX6NBYHZHt3kqurJ0lxE9<)`bs@0X+wj4|<!?)90Y
zX%38LBq??K+7oe(?va_JBsvHCBWawf6YbTR0`7*of&w%+#maqScjF^f^4@<O#|4r<
zJ!^|CCwYFY<;7UerZDqt%sl@t9;)TvLP4JLgHckyLL0)M1?r|RJc~*745Gy_=5}OW
zutj@dbf1+ZLDWFNyl@<8DOX&rFQMYL7&iTE9kZyqNPY~(&c1o(#7mMkYmRX&J?CER
z(xt~Kg@R0rF@*rc&eaxlXxSUtXH~6aFsMnr=`$Qji5RA3C$QA!TTe?eNy0$n)=hcF
z*mKA{(t#3jxR$*sLrc(Lk-4_9>UZE~C{1)J&I^~uFpRM?k|W;EWJlyP3QF*{ay2b9
z&QhV#kaM=&F&_({{kr-+7;WcQHsj2f$=HPbknF&8eEZu$A|ffNpQeewDjL&K@{2{i
zd7$s@$t~!w8LJc=N~4n_m@uBR3d^yUU4(|NiR=c9@m$_OOK}}V^DBbpB%S3SN%PD+
zv!BE!qG)=UcCegG#CK0#8>*Wga@-T;6SdhRX^A@wkMgQm8tQc*o5a9(=fiXNUmqGQ
zZMZt!n*!c1r=}`E50%)vh+=s>D(>!Uatc&B-~1>V$m;U|3<LGRsN}(fOaQFUNrOnv
z!nL);6kko$_phkFRoz~U3potzjTG~#F8(S@FtUFdO>wLP`_4JxsZO6Lwsn0I%MF^w
zAe%a7R-{!ynLk6ktu@nBhe^&bxhthU)TI2an($$9ZDtLvw25)%vN~Yi4VwEmomFpc
z%F_59`fBSRz7el;$ydrBcB1+#@hcP*rhb1+_(#@ocDxL72M7!tMcgy-2}^#dCMD0S
z`vc?GotXI^BsIttSJo5(B(0U*oq5IZ=~@~q>Uw&u%O0#QvYz!F1`{w3`9`Jpy9ESm
zmc>pCZE+?QtICgy(;<xvE*DpWWSs1x=}Y#j+zWp!)#Yk-6}x=O>aE8L1M<FzXldeq
zPBResX>(j{epoCg{>R+1zVM(bio^COb#|?`YXmx-B=p8TWDiO_vqJI#=^>6!5hIhp
zJs>fj_IAx~gvZruq@6jFIVGqKHXhg1&c3gmBGz^SHKBGT@`s&Iknr$`a(rUp18R{j
z2|HGY_ub@Ib3RQeuFt==_bTL6uZvqMidE9J@cKb>z+umhC+Eg86s0qq?L9^xQsrQY
z$JO%1A4yr2(_B)RObg4H(5@^l>c|5AFrhsZDxG=SP4xtlw*xZz8v#L|1HIQ(i_}jU
z@y?=W7l#rtDxxN4_39@Y{vT&=0@l>Ey$@Gg>wt<QI6|#eR3;}Tf#fPx>I6=JfDlnJ
z0zw3oAt6bt0#yX81E@e!wTKcSG7p&qgop@HD1<Q)5F&&mhLD8J|9wz<tI&SG`#ldi
z&pA6e`|Q2;+H1Y*T`MH=-QW?)YJ1W{i{I04M`XVIiG{`Pu$Njo<={guZTG$tuzqR5
z`8HVX+FSR3xSg;4$s{KUZ{3i-a5eXb?LVNcF3Hjo!fS8;uw!_!@2%bIcADMVYqV1T
z8_VyPSA^H18y7EIu9%*9Wd5om@>QJV&-IT4L#tO6F5#BA<uXxAf1{<<5BL?BCI7Ib
zcH6w9d0xkEPI)`+Qf{I4r>IpsuU_aMKEE-aoIGUw4z<~}o^<b+2%*2B;HtqpW{dKh
zg1CdvOvujNA;GfcDAA6druuiPOL6r%L0blwSVwLyEPC8_lsyIaxZfat^IMna1*zA9
z;xdEc{bG~0FTa3T@{6zgwu1Yr`bXVY{<Hn^p2TZ+?p!(ges<Q&Q@?1^wr=?;+I;Pb
zuM5xW|5mwXK)>d>@@PM~e|E%g0kp#PrfY8b5$0MW``l(4tvI-|_F(_ZCw3;p9?pT`
zjSBm4Uzpn((jw|5uYitZkD0iN%GW~6{aYEQrW?M(dhLAv;+wXeGdeB9aX7<E#+zAs
zP1(QwP~V|cnSOg8d{M-1&x=QyY%|vVJDRujsPw^Wle{jOuQlEB=<c1(%EBj}M%Jts
zE5BZF?ZQIm^2h(_+hgx{yRzTTuV5W3J!R@#?>C|A-j|j<|NUICS@X!6J(q55TbJxM
z?CIy7^Lp2{Gbi^9?XlYalg;kAk5-uu9DeP3EHuz|&#XfZd-Qv?rkhMFBRbgui&}m^
z{N1j`3#MClt4^(OaCP4jFr$CRy+4LpghHV=1+(raxAfC4x0=FhQJ&o+){XNXt#ZCT
z6vOMgJ@?LD+Si-qn3qkJCy<L(go8T{6mn*!=-2E#{p!-v%z)g0J#&>FPAOMc_5E=+
z&CZ@0m=)~>tLLoAF(@Q|=ks=5xbNsu{K)(7)?zcxeY2|nJQXJKo^SM>9-4Cf4|^AW
z_eySOuWY${mUdH6M0i<5L@zreZ}is{BNr!?bia3*yF2uogaFB@@2xIrFCVgO+r2e%
z<E_Oj*jKO7e#o=ol*+ZE@|SH2$CCj#{YQf<YNz)4ToOL>q`ls~bK&h58{bsbY(3C*
zcs`$$H;+-ZOWllEB|coY^|V%aDjjEVxcNI<>obL^BX<(?=G6}0{{#NpF2cDR7w2ns
znr^rBTDxo3?eFV<UMv5#Crz7pSDE{BBlyWAla;LeJ;|_ZnGtzbui|x)Q_;z(lFVeO
z*7JAb**oQC)1prNGGH@WQ2rG>V#`uvhel`BuS?oluPlCf{Bpf%b@xaG9KC~}Fv<8W
zim>JIf<>JtrGI8VTe9uK20!Yu?OVQu^W0PFPA@gdT<CIRNpVxN^RCf%`;Tqvh;G}Q
zN1#<&JhB}avRp;Hvq>V@`%80RR$ly}L>7Z{R`+Aw&Zx#sw3x>JdoC5}ug-d;|E@Hg
zVw7;t@Q`L`+r6v@J(qq6=-m3^dd1MRwbAJftC#NFyUyx;O!k${vwu8z;nABE-t*`R
zvv~somsRLp2Q?RL(wbKPVEXO1{YC>n#9x1Ff5Q9Tx?9FO&z;+W$HjWyMxduGE$V-3
z@_f>!Yh2ZjIniE55qf4TJr|qk{j^hGaDZ|kd}{HkQ@?GWPeJ@KI&JxK)AB9X-|j!)
z>$X^6M!PkXlik{J+hG}E_d3$L1DKug<wd-B`HuPm$+cRX_0{<Lfwn&~^N5L?^~#mk
zPQOcJ9CL3JX)e~A{^ZbFY;tRNOV+vr;;M`jTM;*A7Zk+@oigvlXKc0?Zg%O(_}M`2
z@^otH{PnAUPWy`S)@IV`RuAP4LPbi4JuY2%dbWS!50uFf6${&ze$B2bk!U30+qhPi
z&tX$!r+x`$SiA|=olXzNd(7AU=Wa0mz;SMk<xZ1RLpu|@MAX@hy3#*sHBPB*NS7yq
zn@d!?LWo&;k>RR=B`S&2Gx(Mt)jgMrv1K(J>(>Y!bG}BZKXWde^1RscRclZw&HXt3
z$#dPPZsSFbe7ly%xQN(5%xcvPR+|)Vs=65wvfd}PO=M9lh<8k?8}i<Ln&N>)%8CvK
zt9+X}V9{~Gc-1$J^+uPO)jbQ6dpD{Ugvi&qo@AWwAX&-mk?1TvS$)B2RnQXEJi3c!
zUE*?nE2_CQ?^{igk!EJW@x`O)y)ln_IJtSp7s@h?`YMMG&!8O&k9iE&=G7!+(ov1A
zc~6V)+QHllw27->DM?0uw&ooOC)6Z0Uo(E)ns-<{W3TebPw#KuQCENel=12ra~^fp
zo!;Xao$>{mfzG79(@0aLVYMJ`=JLnilZss#vh6#{`7jHqE&L>NMpEJu`xL*<H=($f
zNmjGv8UHCq?<h~`S>*s`dbqy9wCiQk!U1~dWTyEKLQ<o#<duH}bq@bWWuAKvy>bw(
z?{hL<bk76+Z0P;Y7`Nl`C2vk9E~jI8r-shFXv)-gIV%qwI)gHGAN&PDKW*HHHcS`Y
zts8nj_we1k-W58Hku<qY{1}_J8b-*_CDkNlwMRx5qE0fac)p67p{R}O$%)I8BM|rU
zdVd=#TVijNC%a?0-kyM$t&23pm(q4!$4614uGb_v<0w|jsPi5<Y_)tF8`U6=`fd5J
z1653Q{APRB)3b~_-Hw?-iln^V%q{)A6Nk0e>iLg%D`ZJlOTA}}rezsF#Ze~BI$aij
znEd1kN9#xPsm9I2?{&l%62$}UCa%{r;`-J^ZLH=GrdS}T$F>J|#ik<AlQ7M0J{ioN
zZ!*l(*eOoS4alZv^%=Q$i|+5-bJ0KFVKnn?t0im?+Z9Q7ammW;|KnzNt-H|<7xY`A
z$6YU9Czpl0;1)2d)9>>sizYRkDVy_NFq`RH`W^l##p%^#y7#NeMnQu`NA>Lej(t=5
zv~Sx}XKdk7)sa_|wfbB2M&q~A3{JztDxXbzztX~RGOof|uNZsY;5}E*>nT}vs(Vvs
z7`3LmvFIqp{ng}<xrIN=M^9CxEGYb0WLWq!zVH4NZ^rLa)InSIZ0EcRva>eXbLFdK
zXbg5RuO}I%rzUw!_j<~YDLfi9r|{>rZ_M9hfrn#uyqb)20Rw0S55t3g;PVTAW~vIE
zTUSpL%#FJC%IKF=#i@#7Fh)9faR%;B>97D}bq6o3v3V;D^aY<+_;VF3cxkc2UE)_p
zCL{Bo>Vc<tuQ@fz61)=$ZchQ9LV=DEc9%3Bxyc5nkH?;!#M2v?tgQr(Jr3rW3SQ(*
zBVJEv-4`#-eN%G8;+q)ePe2}#TY#*hO6p>NKK>&)mUTvgyIhqSRN-!RYLYAHk^KJk
zc>x(r#tle}-UNE1fC~(G)x)qWPJ3UZ&l&C5hL}cumPPzoFb@pScO4j@J{Xkj36R#q
z-={b<WpS#kcR!RrEr><yr@wzBe{xPwYBU;gm-Dr;7<3Ol6SD_MeA!+0`JCG6^H)A)
zmmirnx|k@N)={tx^Nq`N!Q9a&u9%C05|>+xCTU-Rg(WAp1XV;lg+4sh$Ko66y4~a{
z9XFx3bHl-sWG^!|51}+pcPg?SyDnP+rpmK}ruqw*D(^~jP=!G#(1bH(;F6`!FFNY|
z77|P~n5K5UE9gv=zJ9}Gbsl)z@JdrqMe(cYPQ0&{S%!h$L%_laA^n)<^>pd#cST1#
z-a+@er|L}t|6m3@iE#!}vPsZ&@Nd3)#aS6pUz?!ollkD<2^uJl4d!9F_jM3Zyim*|
zNb1Ev48GlAE8ZZ%0%L#{x&W1GimBP$`atjalJ{nQd%ni8o%M*_{T`Lw4mF$Uf{4mj
z8xoI$pG#ieWQ*&7lo#U+b^*`_<mk)2?-vX0VEtzXyq-vBcs+H6q?&U5RxW?Jg^ylw
z4m1e*{#TQ8B|8mIlVZ!3ouREy{n|qM2=V0R^m7YG_uS1TD5l=3TRsdHDHtfEBX|W8
zqgR|hS4o`{m9yURNLobVjoc|6c+fj@ius#MuyDJWIB!Nnq-}C&8p;riJ^?gek9I9=
z$Xe#-ZGL0r8QQ+2r`}*^P`19BoCJ1W9Jk26WXi>`ZyM&jm>ZSqYFC*ur~mEKlIh`L
zZ_*C}jZK8s>HT3KJWI*z^TB06TfmFT1iJVKIEVJ+9iZ}4flgikTE2%Ew5avuboP3n
zZb6IQYyoTK4jv-~Z|TS6#vi44lp0MI&drVgc@q{~*3AQF%xP?=r*b9Ohd_9ASMY)z
zB>Vr^UUGWqa6w9lefv=G{HxMiVWhbMZGXO=V~2lT9p36~AfVOVOEtItes55F+Lh<!
z=H`d0I1z=5f2&D3^$k4qvi-R~TUws@&C+|mpC30n;?_xJR|O7#`7sjpOLAdwJ}x_9
z^XBFply^bi-P{15Gz#*^L3`_>qNcCg(}D&&9$z50wH|TRFMU*{%r6SgC=zU56n^#?
zsx$F%(BY;2me>=Pm4VEVptI(x?R9Oa5Kl(!^zw*cZzmI(^^@3Zw*3Qnk&mzQ{H&X^
z{6{<#PZBQv>=q_N5<KfmWVqkv!G0eqp!lqe9W{-LamYIK_#$hctnTLMatGX0lNs^9
z!nsFHLrcYx;?Yt{qP-jSpw+MUs&+8;tiY|ge+MPd1RA|Y3zFv;4=BT{jDDDdA9xr@
zFOqLqS?OF4mpMEW_cCo(9QHVA#cfyR#HHg7wza(0>CB(OYkG})#dR}9BfV9)ffsW|
zT<Wv?y%i44>aQ7|mk{;69+FX;6Q$w;haT?prfU1puU#5d+rn&P{v2kwT*6;v!k_oX
zpXZ2Q?+y0o!OzR>_r~aG32#RO<;6~y^m!uo=!K{$PJigEokH+6Zp|@4sGeP4o4NPs
z{&R_2jrgUlsG5U6_y|8J#y@_B&<LP0qZ)2hk)*sV2Ux!1dRoeG4vpNOx&)s2%%)hv
zmkO#9bl2v=pY_HUW8yvJ2<xGIC-pWX##N@MdzL|8!st(I(hD(B!#O~g!ePZzZ;Yy<
z)K~|t&#w5pHTd0^7)sS{MGFUic&+Z(Do0VM`7=vS1vj03L1fDG=*1uXLmqHU?Y~QP
zunPC!65|dx?xA}lxn-&>sKi4}@D>DjPrL0=)gRacufuQi9KMQI@4Pd*xY=5DA_=c<
zvJ&IbkLqFB=}(r(PVZGWtA`oLUuCLUdt)5lX*%ch$qny}vWi-VWMZv2eAZoo9&vkz
z;Hv4(X@N11L<?ZmCG9KfM!(v4L7TjEpqI6#UYrUGWFl?~@G%V6t8q0kyZZMc2|q}-
zBDE_eiyNpr`kieh3va`#Z^u7Z|AfS?kUYC>^IW|K3HwfRKO@w)e^Z9oyT2ek)U&@J
zL+suko*sIrpIO>_|5<!`sB3?ChImgub4%}ym+@U<=N?IV==Od|hIm`QUV7+%`k8hb
z%1gs7n&Owqk;>z@iC1gd)IZK5H}q7vHom`ZrCu-#@wj{UX7!<2@V!;8?Dv}*ZChZQ
zuDB+XVp8HP|9l^A6>~F=@l#cS;?YjNo$ts6mU&BLl9+ISC2D-<aP&#|ZgPpMXAGK#
zt>|gu3s~ly9FKP_&XY5DUuXYW6&a-@-5fFQRnMA1-i_b!)J3;pl}0wY)cV(J4YdZu
zAS(X)W6-Rk_D>la8Cj!vWNK+mMT<jc>1>v0BXCEWS;IyRB<;=QQ)dr2PzTwg>v(SX
znn?{L+!e$bUn$~;2lXUe!hAcj(v;8;-Rv5=g6F1tR%c6A&a%G4;Vs?dX_vGin_OJQ
z3xhSBI|P5)^}q*3dTl&>IYI5A(HiwDOCA?T@2T>AtIQ^H>+YPDrA1I4(xV@prGI_I
zS<1gaA0$U!$;!&B=-*&|oW~r+H5|unOq0~L)D19xmN0EusEtcJ!nBP$(wK#-UMQx5
zhTV(oZFTky$=zzFbo_&zE<F108TyVf{+Y)bbO&F-5Y4)wSzrQ4V=w_5`xnv8EVEmF
zS-gRuU;@|u%nKi>iRp{?7R<$qBchs<AE!&Qb*;w|8&)mB6~#GY_i6Ilt-2X?P4|_4
zTb-qh^2B%#O|Gz)I*5`k;JLv`hG6a<G|($~a4(SDAyH66&{go6hd5T9VbHbTIr)Z)
z4Mw}sziWvbDqHqz5*zxdgUGXx-0pdSVL3fVHXFZNl47(8OJ|34#i&O{onMr;_{X{u
zMS_-#B7gGPOs1Qrm<VoTDD@3QINWnZ<U;KtRHSoR(jZcWNcYg(-$DGI-}$s;dsb_=
z8~$+^==YKvXun|TKJwbKS^Pd5uiUZ9VX(^D2*`k2%bDTIXWeyA<2JD5xP&ZCtGo|4
zv~#;|2hNuzey_TL6B&mdsU|B^K_3;|ZmJR%&$`ijY)9IL1O42dfwjv5_z!WJ2W`pp
z<h$o3gR02}Yn4&MCl-T$P`Al9^CGvtB=`o)N-uG>Kd01fdUBR4WuewNMl}bXtK`HJ
z&v4#IwjALzo<%o{Mv2btedpetETeMXb~nonV*dC8X|np+kNw?quR5Q`I^EXabD*-f
zxXEsYzD~W$fQX|}(_04)X}0qhEI%?3+%quHJz#fJ!tL3TjX<Z~D@Z_z`nTw+IJ#$n
z5htVRsU{wkubU^Y_$7By2LI(?aX}MIr6Zcz+xxh9xJB4!L<b`yZ<dpsx_>=xzolw!
zXz!1<oA~Zd$w!}lHSqn(nLk%7{Q9EsqUoZFudW_F|IN&!-+n!3-nKoK^X3g{YNj*W
zkkK>+%v9FSYQKpfQ5s>P$C#E4(Ojt=i^I|i3|(DfLeq7lgSIxJgId>+WxT<&y+Jzu
zQez$Bh^9=(11Hx>M7|c`!DK}1R62PLj}diF!$aV;3LDl^bt(dZ(<Ut64v*3(mPLur
zFm(gX{I@hxZzt7K*2^Z-88#ddK3b}<5mjk=YKW?1Nt*r|rtWn!QN^WhX{d+&tdlh}
zQ8F{N+*c<QsqpCDUfl(fPQ&%>)0Nl^>x@HUv<#Y(tLtbcVkBCwZfLOAM&%#Uh1QC7
zmCY(~wOqs47`e?=sN;pClSij|5b+YNs)P*F!l_>H*&fE^5Jwi6g&JPArNK29raQ^1
zLL9-WPg*`_lq*N;eG?H}FWho17_^#A25;*>#q(--2p@we0i3Q>r^$VEu3!*RQn9W+
zM}b+vlBkWzN#PQ$NLSb0YstDmvz?{JGn7ho$5Nbzfl`-Q3fHJ2wH!$=cZfV{sipPt
z7`DdBnr0q%$SR5tM0jTK{hHs}m?Q_wTpR634NoV^)yj#8<+_uIS_WOL&`E#<$X%nO
z_<=#Z?miIb&_acXGpa^<g!jYu59=&ZG3x40E!;;#(+XxeYsq*K1;NuLFJ(sOS*Uc>
zQKbhf9H-Z6><Djg)q2<Us_pTG4h@iCBcWT-Wvcr;u%vixyoWKPjXUCDlO%!D(T-~6
zpadn`=tvPOEc;*Ld!eg*wKIMshC{b?RrR2I@j9sOmO5GcPQg|T)lij0AFQ_C_l~Z)
zo4=attWzzuLm@OGoh13K6-hzVaT8f!y>(g&9<LA^vboyVT)bAUEJ46o5^$@UiPuq*
zbrOcoLB~Tm=+uT-CvulUt?gjhlu{!eUZp7IksLdQx~q?tz)ba+63#Q!r`y6EFlUs_
zB!lw`e80w&MO=|CoB5>dceOBMs(uLG4vrHloM!Q+4!}yWt#dbFYx<gyh?ot_l`?@o
zOjT`C0#~+W5xeM)8n&yUPU;F9mBIUYGhwAs=ulBcu30(Th^1@VLkttW7Jwi9Q^sr=
z@zcE&*t2k|c8QO*oPgCxMZW4u#yE9TDibd@#>;9|aNYY3-5uQ%L!EZ`VI;#bdtm1&
z1h))cG8e(O$7|23EG-H2m}#StXJhc<w>>@CSh4uJbEKt(Wfu3W;6jtA)DioOaTiTo
zp*Y2c4ZmpsGtI@n8Sbg8D!v<Jyu~3_(vCM47BEy&M{%BgvN4=7UH4uuS+obyr$}BP
zXb<Q13Mk;WAgF7WL}^=lO$<{FUFl_|nhvt@%bU}}DQ`ub03}_SYcJ#m#NDH!`@8A`
zc`VD>C42;jAgaO(IMrQ>iX@hkgRTe16W2tHHXu$VY*WS3RYJs?G$jRpcb#{iVNE|c
zF}exAKX933In-@en@V=1N&3`hE6A1vob(+hC|>@j#PDVHb+&~OM<wHm8q%K8HDr-P
z1ia7^6De}U#HcvcHu^-zTfS=~n>@2nUH4gVBK=MLwdW}N5XOBMnSHxE<G#(v;BApd
zGA2XI7v7IYbmE7l)ifUCY}9!Lt!ia=mX01Z+NX&(wAAuDN}ea^$Eaqi_^#I&?&qVW
zw>YAhDQEX8x)`qvo266noTw09q{B}K9ioP1uDZcqIbK_!|Dcx@E>RR5)^~kL)9{Qn
zJ+55cJDH_+hy~NUFX$lBbXmsOD2yV2#p~_fYo)>R7;Wkl1{%YqJ0_>{h~XH*YPh17
zh#x6{Yu-diU8F9mcEe<X8m;(^WZ4*$5Wl}ce**&79to?LR2vqlWqJ;b<06_Y6%L-0
zW#|fz(JoRkO3}f~=iyba`sxz0B)*j_A~tkox7t&(s1$AI%A{k(>i?vY!6uWI46B*y
zFx(`XHTJ#Yr9wGqwF%qMw>g}gv_X^OoAkP>G+LV<Ef8y%hIMc)zbZ9Uk*8SY;}OqQ
zSC8Z~tbavap{8CUsM|(N$v${r$&K#rE_>HhXTsakn6uJ;jMl(gtL(OM)OID>hF9|N
zoaRW(<hNxvSjHmEw6mjkmh~%+G@--iFl5z!&pD%wuA;YfON}ub91t<C@X<h}Ad=|o
zpcvR}P1iJVu%jatiHtTs1i$CEM#)@QI%5_QhX2hmMp&_eGh$B3=e=J?5#PB5m((#M
ziTKDYQO%KoFx5a%sy>5%opFoqn%YwrCFe7H&cP>1-)Ki1Z<h>m=%WHn8aJJXDn#MJ
zd9_ts&M5fzvmR{3ou1kk@njuMm70w3hMiIN1--p65G*^JKSEH~AZP`^#EK=uglOdO
z<|C+>%p%KHhXuaZowZ3**Rw2#c=-E<OH;|T+}T`aM+uyWV@k-(@aePRkCtiEt3nA4
z_}@r7N7}oPu7o{^>pW3myj-+=gsz;dsZZE&ju)n!u?O|gUXD^@apA~2XPbU5!^%l2
zePKOZ*Ey=PAf`Lg&Og|nufFkh0ISIte|A(Nf1a>-xIN*~cl2RP{NZx+Yu(_mg0ERW
zz5Jy0lXCI-QRU$82nU>Oq-L~qRM4a_4_8IgUKM0DA?xu>rW|#pbs$0aw&eSl&3}}u
ze8LBW>I<Aiazi9Fo+oO|Fo|T`@qHW5eyuK1N&*~Mc!fTF`7Hz^;;VP|IGsSj`(wIt
z?bGi1?@yF!O9hJRv(a)=ZF%bPFy);iIQGmm#U13e(cO&EJjssRt=;jzkQsyr4JBl%
zNwU!qYE-HNI?68cv=1-~n!WHS?RoVn<u;sph|dChLR)K70xug4Lr>a^s+uYZ3bhC;
zw`bk!AaZ?UGWi2bqeA5sk+Xgtju3}^)k!Z#-!_<%Kn!CXpIZYbt67`x_gfAt{2KP4
zNLfwFkpYKagq>;lX{w&u`(y-t8jGCbeo42W3GUOhN7s>Zu8XDfkp+j2j*hsxxk-ck
z+iqZ+t+(4@soDGhTC}@b>%>sPS^b6`ZEZma7qSrHN)8kdDXC2+IB~VXZ^de!dTL1n
z?u<;LLU0)v*$642fMrQ9m(**VS%h>)WzSH%X?J~hP64-**s_&HAaq3!_Z3vBq!|Np
z0$0g#+oWw<$|gF#ugUSg{s<{C?XOoLgq@o1yF0~0Q{knOdhZf#l{Xo&2mYRD{?Y*>
zW*|iw)Aw;>_^u%|ShQqF*hJz=1^nTmsZ9)D4WZ9973HfrDaP>1B4rm8Iu}Nm_CkRx
z63gQ4jEeZryP<}@HU~=<OxGm^^|W#<)@zRsjHaxMF_kk$o08!)jYO^+CTA^cLI|Z*
zc)U6x8LsBDx%QY|4`ba(6<pQEG}ewJ8q-IM5SrolrYs|d`jsnQ%!0#W-gEAA9jLO~
z#ta>cqib_!!8P3(sMSJ*w#S4q+KgoBu9!5*`+NE<W0WLUyrO$5aXD^axJ`ZVMN=wc
zRO<??YBEF9I~!k=hVO@KMyo9m8r9MgU!4v9d!0<eKCLt-c8=EJ!`@ozY6V2Bx-Fi#
z3@1L5(E6iFgR=0|4io9RwyF5gQSb{4Z|V^Hj(4FoT0<nt$oE@O23Ac<hM|L$=JU%?
zDe!bHPT%*5JtG_N_b2-r`iTS!Ax+#8bA<<=N>|jhwY4$fs(}$ai=$4^*A3?=5HM}E
zD?Gg=&A35_leIy!w<O|ZS|md`GBijex)O&ijT@ZNSY;c3sJ<#_h^Am+R7?a_(_6(V
z3MX|F)0g{3B0U%|9czTtlx&OIi=eKRjfAO3UGZmS+P=Pm0o8Ed2%Rb!8A2NC3c!~U
zh75`7=C*F<Hw6mhs{xfp);%Rk_OAN^ce!sGSG!PKiiwahxl3^v8Ph*HTE?sz(J||)
z@aj4qx1UYccG@*-FmMNzP?yfpi+9EK4ze)4Lmga|+Fz%Z%+_hyEIhk=idbDGVQ8gl
z>8MQ;mtP%-(ON90x4T*pV3^_eLsblQ8?uQcy`T?Mk2Jx{q^OWSqf7XGx<3?nP3i;V
zFk@e8xT0C4+jR1+s<>HA#C`P+F$&jgQgNHNU($K2B{aN9Em7&SvJ5LcO6ZY%43gMj
z#gynzp1-<Dg(aj)@U(Q8rNMN#10}taVN21C3~7JttE!qT?cvxr?WgPdX)4$L+EAIs
zhF5S@UkF!A+d2^GSVj~-N0ORl2#2R%peFIT1fK@Hx<mi=ByWvmxkyLkDx}qh*DsMY
ziy1tgE<c1P$9F_-=iZQ{uc0U&!$i7SS_P+VYO;||SgWyQpwY7au9E)mC1OGLnmXX$
zO9rkxBZ;_h9jZV4JyI?tV$bj@x}5bNBzI+4Ztl|cXuF(|DBSyjsYJTf-lSw9mlkEK
zd!`=h-aLKw!CqiCU)wa>m%tQ?k%JA_$ysyH@LOE*vH_$ddB4a<vkZ4md<^zH2ke!%
z5OladfTV~RiXI6Eja160l0I%{xpaW69m#^L-Zl;F6Hzz8bAxczz8ZcXjm6OP;YT=a
z-T7Q;_@885oj2JPPF$Wcl}q>!oT7Y}IN#jEID<RG!=zUqZtII36=nqVJB}tH4iolM
ztgwzoM_wX%vlD(bCe*|A8{*S(=@h~UpP5X~IrhNQPLo20<IjW&F1Q*y66kLcTH&|V
zB_Y^z;y2>t5SsRS0tMl?MJ0wmd{9&oQ?7MxG8FMdx+Z<@mNeX;rh}qyA#_l84D7Zw
zXU^8a@MpC?;lBs$0=vhBh`;$R2X04^zw6*?WmAz{omRb;tE=T|1u-Y(kJIs*?&=W=
zL)UKt#|J52l0>cEB_TAm+&E*o_IGbGOGILnsp>b&@nJGqhbyr&I?4_qi4qN)=mt&L
z^okO2XWvL<^m^T9T&OHrJIdm0(bbMvWiTE#p(vvQzhDK&#5e{m>$z|FHIWwcqN6P#
z?;O8nWUB5Rx|D5vgh120Ar*)i38pqWMUp~SJrWzEYUr(Ex+)byxhDKAj}g!*cay7m
z423etr=@wbPW9l?EuiM%QVF-&6^{q@$m@zLsw&!3%^$!M7&Ck=ZtD}Rl4X`gdxHkK
zFG)>`8E@NA!XqxdQU!`0DXZ7mH)T0uBE)AnCTC<aHj*c|pxZHG&?p#oMA(L6;LfzB
zh3a~S)pZH{USY;SDku8A4T5V&u`+LEaj^0#gt{#!QYRjHZODt`VD86cMfDo%Mq4QO
zEzYX9gUr_j16j|rl2>OCbjoB@22ItI+E1E|luVYk^<hv)ayD^oF}VFYnvGV2z>d};
z>WO%nkmdWwEggTTD>-XO#7ez8Tc=PnSh+3CCgK6B>ZS4F+eNg{l4uIm+b2RlEBU42
zbRnEpYHuE#g;3P7IJRkq72pH5nc|ui%n^j-QK}8WSU%K+ptlBx30x`K?gCPlCsiJ2
ztCi4xMD1BAf8B-PYLz5Lv?y_{hqy+OcmD=yS*BOt8tFRc0C&4xlo$KU-I5x~VK@A4
z-oL`94!Wbos~KpnBlzv9Rtg+`y332sBjATl)8nL^rHorO&$`Q2rOI{vJu)&JPHpgo
zJwKsj9UHn_P5dEQA#|2lDXN@vVS+A6^tDq?nG2k@t78|ho4GJ<hEw0j-2<2Ie$P2I
zb`^U!Ou6q-^}e~Nx!}FnyOsOwLLV0P)fpekE=izN`-dvm-b82`!|zdTpGz+=I^Dtq
z=%?M52O@pW2~?xQ_D^0MY5%USwki06nF;Ipto(F`0GsDw!&DAvJctL49eG<*u1|qJ
zlN7vxzik$0&viDUKQ)@1q`*{i9Ddp)>yk1mB=;W|$F0fkZ(nkZ#W9r5sln_vuoRhl
zQmG{;#KW9x)pg?=njY&}MQz&4Im}`C*~sWQ6ckMqA;5M#6Hom4&wAYaVJJ2!^6=JR
z8u_v!nL3PXEt6xv6Ezi#MiU9uEY*k^j+2EHOJI%(MEQWHn(E5x;bGARaM7#+i$CAg
zZ;rkn%|!{YK2!%HyvLE#JN9QEU!gMNq-sXP7`}`kQ5(6$*FeS(<~ypSEp?SRS|Sa}
z;3UcTZdG+JE<~_71#~1<lo2a+<g36BT!@O+KNb}{7O8}NeJV7!lO4QFILouM)aFEJ
zFXw7?BF4$UW_*MB$MIYUg~xWKsEQv~cgam6lVf}6!k(c9Ai^FXk_1SELLk%9aSb;0
z<@+w!G`&r%)+~#VP(<FLat(}zjAjjU?Dgs}De|?RhdDuhhdD@3PHJ#h0!1Vrz#<m<
z{mbfnlJ;L+_kHNv`9s%OmJ)Q$$Fx9Qb3xZ!q$k(o-0RCw*N&0BIjOM}aj}#@5d)!h
zMJ>g!x}xmMmy(Tx#_s2ZP^I~2Y&bQ1jIhSrTraXWm_`SUU7d*@?oMlkbS8$He0_>Y
z3|plKtqUoVo7^MBl*X%UJ>M7!59U90?5v`p<=B-%y5W21!m*PCz0*|r6iX;jY;V3q
z5s8gNy(cO#RM*<`@~C9FvP*uURD~mQ*j<Yt{bWSYfQAUr10S?Bh`pQD!_G}5RFi!y
z9TN}~Uo`lLW7XfZw1lprn?LK4t40XL8Pfb$#RWO?Lc!zI*y4;tT2?U9IoST(>j@v6
zSgJAtlW{SiGJ%$W!9<a{SOoYe5Y|V%3WoFw?b%8$$&(MUJ6Jdx-4Z<OjJ`Y+nr+sJ
zU%>P+QlZMd6Kff#u_)!8h9M=if-}~5Z=qcpt4-}z^{fY0)uz5T2-F9RvCpv%BQi4+
zd2+H&_(hXr{i2C{p=y68mf<Rjwg%6NvgQ<XB&WerygyA%r19FKRMFPHZiSg+Vwza7
zN#*ZA?_dU@`EMLS$3VVJjAm?9pj$g4OA#Y7OJ{LgFb%wL7`}OZ9+j(&!FkRq`ln7e
zbzu=eZMuzwz0e$8YbQgKo*|dk1=5Z+($fnKEfXA1AzF?v-HthEU?SM-nO<n)AKJ*d
zQN43~L&wMU+ShJe8PZwQv^gO+Llef0^j|db*7<BN?ftk9kB7qJyUK&I1O`6#V~geg
zQH{J7QCYp%j#&x0%fYm%>`yW<pUFXr>FeT-y)L={3CHW>LX%@FR1O^WxsrG7Z8`EL
z#EC0Cw6zKB{+Qhc<a$MlnQxi|Y-nqqcYahbw9nhqQMElqcHC>#LE-d(me?iK+pAG)
zMEZU<Fe3a={k<S0lVcc4NJQ55^wU-(q{-vVLO4tFOQTWGzihaD?(Vm!2enIIs*XvJ
zz0Lkb@7l-K${cJc>@EW;yYdazKoo!P@L9o7s;YQ)xEzeko1L`c!@*FVViw|K5BeW>
zNbxibe1Gx|HLsLd1w?!h2t%CFdO_A~YrX$pR_4>%m3%Z-vobX+hFW~=bKbRsWe2-m
z*bR0&w3GQh_B0@{O!*NCubqZ5$n8Q6I<kdj5sZ*q`#8K-61$<~>&~(H{M|@^+cLpz
zkK~;ghL?em#%;w2X$W&YIR_<hlygM=994EMW^9FwO+HEiT0}~O*XyE!-H@q22ATRn
zuqEAJ0#iS>B?&Tqv%h(HM8tq2WbTB(+!fGWYb%w$dR0nTof5ZZ7r5J1NXea1MO&2s
z$<4PDtY;`#$?EY9pBB|j$a}IhQn2}6kaKB)o}ewA;cH;cLs3hS;o3mY)^vunhfuGe
zw54;cGr*)){Y@4x{*px}Z=ikgkL(5u8t(+mMAB7oW+T3%&(TC;cbgFiM1{1yjVQ*h
z_~*d`mECn#05&ocvXR7Vz~UN-q`4VeCu(#7n$ie~bOR*PK_F7{tyqkxL`uMjBF~Dj
zo~emrtLE<+@5xQew~a=<=y?m2>bm@anG0uJL&r&<hZSi7!*RpfIT8Ha3^0(P>ngfw
z)tQMU_nB<(eAMm)mEBOym=4V?FcQ25f4=qSWv7S+-wFy&ajhXWS|uN9_THd}ky`f=
zXlT=ueq>H@XMqRG6(hYIvB1ZkGC~)UY~9Mix?!Ydo>bYOXMU8Rxd7}&64+-&9xSjM
z$%Iu2Y+pyPZE5HSANBt(6@Iac8k9BG2LS*8j2r*}PcE>wSYU0jM1SC8oq+1R|J)Iw
z*$rzZXmqihsL>Q!JZ#(M&=4jVQTcK(7=DBU7)OT~jP)NHH+|Y-eIK?M*!y2>G1E>K
zqC3dPzWEPYl;_Y^Rr%8RHSKBnrH-Qjr3fm3UGcT)_5AyFh&~;F7|eTMb~-MIJ|PyT
z0Q@vcDBFb#rYWm~R3a5pi9Vnb?7B*td}%m~Cbut_(-*~UXE)&f36mVG`S9+<u)^Q-
zgAm2Obsx2~Xnhos2?h=9Get_^L5j$ORFVZLCyNHW4*|ot&*P*zbwFE}@5mOF;Y5U3
zzPuwBoUx=Tx2lW7Fs4*h9x#!O@6qh@7;_w~K8ytygX|vG6nBOz2z7E&+@)%bELB8t
zKP&iE^iBJH$M*ZQJ5vO&VdgX2k+10jU|<S%3+w0!2mc6~NP8w!vrp=LS>@DPPl?zu
zk&%MT%x;lg25TTjZkFD_{S)KD3*Un8T7Q?yYJ#kA1{Tdd=e7vI?oL@ha0-HYuwWf&
zz%>-xaIaPeqv&AiI5AEJB{T)F>ln$WMLZr2+#NLt2^1N?21t=b;}m&zFi=m~z^4_q
z@#Th#G=ZL_avWd!(YWS^Kz7s>?0^EgyrEJ_kODu`BKKO<BKK^OJNW=V@dbcuDQdcu
zyS#h$7g8J?GitChBuWbaQ5wTsK2(pe)Wb+@`Uy0Jsx1v+a2yylFd7Kkasak103fe&
z7+B<C7@)**ACB3?EN@h`*;&Y*cY$qx*Bj+68ffX*U*hYQ^1)E0vv@Idj6nkrMWORz
zU_6P(tobigW<K_i$ppRgBz8mq?Ins1G=Mb?Jo1v&DRR&9h;Juao-Jc@=efUG!vWSm
z8rZNtVEt1ecQC#o?W66h8P@JQ!2tVbCD77ZXqy-Ff%OSyL)Ir3vOYeL^`Su4CjzoQ
z0yOBa12R*^{M(-lXb&);y}22_KqEbqWv9R&`Q%nip{w2nnP(wNwI3MPYna@wZ~@0r
zk1e$6BLo3&g0R`MMi_pCVp($lyx;WUeS7deu;*j%TkSCul`tX@5!)v+VJ>5a$uU(_
z76bNGojeUawpGYWSKp8?wg903;xdGh0A2^y?;wPedTE@ZO%pXXjiIT}KG|N>bU8P}
zCyXVCGCkQHLET%%i=ZwnIAae?cep(#DTEnR7ZNJRhI+ylkQ02)u!L%9{u6tEgxfNJ
zBaPspUOG}F@&bE?S+*aDI3_eaynv2?v}2+2v9K@b<2Y#s---&N5DS9=UImcdNK^#D
z^|hYwef2KLqp9Qs(A~HjB<U;>6O0FX&PNcR13<jDrJhP^EhA{q_U&YkS@k_aRTrAo
zB?nUys`k=&LKT>jTvN;mmur%Qbdf3$^bG-^ppQWg7>uys3^#>Vs`8C7l|6Oz5S5{U
zxJmM~Fjg}rmjlGTKyH`IF>GEVq;s?6=jJj&WK%+daG{3?1fIDNI`p9dLqI_TLl7Z_
z%=;i@!<#X6e+zAF7%KXLNUo>}K%B=STEP<#)e^78N@)ghQfw|o1j05jYi#;QVF1ba
zGE_L|jz&x|Kj&7g01Y%Cg0`^Wi4WLkZMh})1!#Y`3<PaoC;?86H3ehMl)KqS?56@C
zKMa6;Ed=C60NIak_zNzM`LakZ8+7@F89~c6-%St)8VKv|S%46VGD7H50KXrpeZVQO
z$5)#TH2?<@!4sW|#I;%9oG3Ko{z?%=el!3CU;x0}#tguib~o@m&Uub~0P7R!v;YiS
zO^8h+REs=IK`hw<c07(%xtG-=H|qoe=4Yo?FFyq8v!KcJM{@OnN?<?q4V+Aj7N`P@
zC{X|>(}#`Y%bQ<~g@{<28c**3VpMRmDbg-E6m$Yi?P37Lb;AtARA<N>EF6Sd)laxz
z%l$oV_LtiDiw7HvPK+Hwd%!}FcC)WdaQH;GERe@PNdu89=mdy|0>R`ET5zg-s0HXc
zMQZKKDey))Q>5(=3?jK8ZvPBcj`0%XwFuI?@gIeF1`B96U4}y9N6>EC0Cp3g6f|fI
zw41=5z@>7UiVKbzv_};CaKV``IJ5SJ&WObsbQBZ?zoiRn)&n>`n$ZB*pU5q6#{}+x
z8Z1ke1n@-T8)yAJ0!&dU{fm4z1Rz}66H=#q;iO*EgU29EnW%i-Oek1tMuSZU9U%B$
z>c(F@!NK#B-R}WQbdK%fVK#>Oz63VK<Ox9u)@>#T0jEx2mC_~*Jb9A%KoH)I+1`|o
zWJ-|VdswpkT-!v5s#<mT6nOBMrucsh#l04lvceU(m@<3DMTmfa{vv08E%z)XWA^8m
z(C1TL(bu)FZwKlb0DN91<nv^}M~!dz%jY#hb1&ko$(G7$9TPAHkO%K90D*sH5AfhU
zFqS**p6T`?qc7|YxrOwp(FB9X4XrWRjU8EIvJ?Xj{3vjsM1VuW*^xC_-eKzXCJbYi
zd;_DdJg6pbIF0MJq43_i;4BGdS<g!pfIy1i=RMX8U?E$Az_RhJK$G+iFpKDVfRtz_
z0cP?o__SI3H47pn*@XuVWeOTq`H$1qj1Ol1J-Pum?}u(4Gp=2q8`l&m2%KmScU67C
zc)11SJlu8RzkzW_V+w-w>ti>pRgzOL)61&m-n`Sq$5uYe<E#Kz*8&j1VE`JA)dQmN
z?+sqnUincu$39~=O6Q%PbMt8B@<L|}__NY$zmN4=RLHuA?J$5k?PU9;hqr(=1bX~W
zVJsAkeTsv|cmUt&?w58wp`qd54v*(s$5FDJ^&k1$JMv!H&7+AAzv;-5J9HnXnSq11
zD7umf*r`fp4M%b@S>w=se_fo*q37LS*pnG$yT&y(&nPROvoP!neu}m&1T?Bf=;X`m
z$AhtPmh~@99fmYD_K3;4w}4vwJ3AFv<hwFk`bDGHm`*=?%#D?e5f_mVaY6bJtOwpY
z+>XM7U(VURKc@tc1$2FgEa>PxI@7b$XzKCjV^qQ4E@ifhfJSC9fDv)j)NlMKEd#)I
z*OR1y`*Y&uQJ--bB&pX#jcDj;0HOhGCNeuh2X4=T3wATa%CQdQcq9Zme&L;cb386j
z&}cKh(JP0Y5J<y@aT9$YYOS*vGI+u4O(5jRMS|_)!vY(LLI7JT!VyeG09i)>T>oxg
z%e}}8o6(Ssj)QFUzv&vJjeji*WIWB~T~8jikX8$->n4wj`)60*^FlE@4MWj@Ui-h3
z?}*b~P-s!~u%&}6cQ8Q$Z8SjwZFFe>+UU{%v@tzh<6x3CbonHZE8-sl9XHR2F}Z$D
z5Awu-Wf-$xj-0<^P0104arrUOowtZ&-9Vd|D*4R16#t!m03uv$(2NE_a3H{5`*KPI
zN^mC4Pao2NBvrbZ^!5GpkRfqZx<i)lveh?&^0(y<l0ug)fwSYvy$?MXq*$fonH>iv
z>5Bj2v}>{cj}7AdPp9~w_OVKsVF%fI_9yb$d}b%Io4#~=Z2G9n&{mA>k35rlM?FlV
zbu+;+U}&c|2qy%ahIR(U9{}B6SLHq$<8eOdF|%UPht>V>WvS!r8URH@*8{akzL59P
z)P6`{%qa63x3Q%&%G^Q6!2XI}{O|U6Mwu^k^FP>MH+j*+s~wNzq1HS=fkYc4!|7`Q
zw>ti^<D>ijR)SOQx_#Gldu%75-cCZ1$tKPNPWUE(R!SEG{9Cpd<!sY?^W3J(Gs=9>
ztd$6lmz66KB|coqb2ebxpR*I$a);q_@Z^}~gT9COf@>g-H4}klpdzCI?Ct}wyAPYv
zhxI7}ik#Z{3ANq;s1;BhU5bt{5b-P*G>G(o5AJNcATtwsc2bajp{yRjp(bF=XC{Du
zT&s$B_+yb#XBB|$z^AW;f{Jcma!U|s9Oqm4KNfLdG-Kv^Z^_<&P1`|<p})Mz7;)f*
zngcX&gq};;a2e=th-(cn0+@|p>$7vB05JORb;orvuy=$jz&^-MIDCpf_Hh7J@0-K^
z6_^t=;u*vp{@>$gdTR_8G23TVw)6{@4VY-}4?RXYL7a%Xfd`U-_C$~x6vf~`Zbza3
zIl=Lbai7?2Dqy#La%_GLbo?UR0G994xJLIe!?}SUdDk(y*3(iixwg$6V{{#0QgFzV
zIjly)lWPOwy$<m!96Om%jsvi*LXh;32GZOQ@xSzKfi$2N;~V!+)Hvbo;6dYWKQOMb
zIgkeW{9;I;5mL(Qm~4>p+*Sl)?t?iX3z{vp?gz$2bBJH$mba#3tpzE*=iJ>;-2fS;
z>5Dgx=?~B<UK`f0bGB=1yYFo0`5YYDilMVi=<GUlb{-s}<BT0&)HcaSNg<hrs9_Qt
z@18-na({6z*C5kqXZWhdjrW%HI-r_vR0k~fd(l&PWxao>*N$8Hw&pAMc&4Y!1#EKN
zw1Wm&{ukFKG;OT3{^%V4+}HzXkiA=jo6L4OPL5{HJHP6i-M@C3ugJb9FrZEPWyh`_
zyN4k|_smx8@ucSdcsM`G=h72Esm}AO?K!uq67Z_KE&`h6<}^UFn9c<b$K(4wAH9{6
z`HI<oCBj*2NZV0gLHWqPTX4V@%-){&Zq`#M6ZZG)ijj!qcX;I6Z-xoGFCO{+?YwUH
z@W7(o7qMLPki94Gt>1rZ&7K<`Wy|j^C~^9gVL3ncQss|+_tMcOKbi|KPTP2((V!HH
z(Okms<lAQSZ}tz}QXlZ5=j<IImx0Y$VCn%bKnP9gKpa_iAar&KZ~sIHZK;o*Mp0zU
z$tj&b`OCE9gFh2GZk&%|t#xu)9QNW&WY$bl`Qz%0Ia_z^T4Ho=Zk9v!jp`ueA3zcf
zKLJT({01apzY=7rB6a{IDY#Myn5o49fSKCV_$ey<a~_bq#D2xDP?K-IFf5Gu#}-6x
z8){M&Uy@vz@+qkdGF|_NqJR=9U4#iuYXn5#gqiFTw^hOw3!v;TlSfEAaQem!Yx)@6
z4|A~heu?=B)%m2XPEmiK1ftaYBx|2s@w@DW1@GTw2d4%(lm{Y`a|fTzN&|*Vm~HEE
z8~_FMDG(1p2dWFpE5B6wYRLo#uTMgT(iW1X)>A{P`dw(KL0b-BS|-TD{XH?~{a^nv
zgQ5uUTM}v`h0(~A4Ob?n2|q)of5C0YFyU;@tr9+{`66bBJeV9?nubp094w_CGnkQT
zBPCS!0a#E<&5<v#5IsCoCYlN!X;FrxKyh_$iC~x@K=b|mD4d!)z~cyz6kqS=o{+!|
zL%=eDzh=mB1o*vy>B;@~mrO|JSOPNV5ma{pWJd)&lLRnK^-!6^_=Zn>^Mz<G5=8G4
z#{HmlVFHQ(unD3+RTldoJo#@)Pe|DQAJeAmMU<Ql>5D+Kv1~X!psEuK5>o#{%8p6<
z6BAzt5=$);RvlDgCHY?9SAZ-{0QNj)+zv`S1YqukVD3#|?t7uRk8k*E?g4P_bVe}S
z#T!aY)X@Z!UjCAA8wy7D45Z5DL-?-~z`f5KKH-iyi0K8XGDT2K&O|%_D(gN%G*OzD
zbZpT?j6yHA3(AiJQu6v4fKaZV0SaT%oqH?Rq&tM~=vvwJ`udr=ySd<2iu99v_w9gE
z*2QBfYZjEUCV;TN$Pr3eW9YzY$e`S|K=Dai$C6~Ns`7Q96P1+E0o8*Dq5z>Th~oPL
zW*3wlL{K%INj3|r0JQ<>T+v6c;;T2tGSs9}Bz2DAV{|74**&Ov&>1czkmUTh>DAac
zl6>#lZ0WPe@i9O#1mjKQSdX~we=nXHs{r_9%05O|{}fD(54(0u6n0H``-D;3u5q_}
zkffl`lHEdn`O<r5fL^fE0c0WD0p<$Q4qQD1Tkb&Yl@$bNf*@XtosPQoF(C?Nka9>;
z5co}%p8+8vK!3Xc0SJJ6wsai?7C~YFg#8``zdN>P#coxZwU-2s4`tCsz9!=k6}pJ?
zUA7ctBR!M-j4lMd=qa*-Ry_z>^%iK=bD)X_?=ki02b(Fl9#W4xs83t!#fPEQY#WC{
z00wfIsBy<cjk^FeX;hU9YoIV-Q~;IjlpsLG4i^X2^Qfea1rs#R{N%}_p?qbLujfRt
z4q!bfp#9UYp&vlde@NheN<)7Zje;J{ff_y@BUC;YuIV_Q00`F-a9;yhn+U)ExdLkp
zuYpJ=q#E#{B|RXyRni0KcW<cJ%3B<9*3{SSN+!_!O!*-wR2WnTv!Sz8a0X(ILp)K|
z7Djg3gy8aDab#G&oX1QG%m33kIV?X9IwV1dAn5QDIFzqq>W2@j#4*Ma%45pOWu^>;
z<BiORyE?${6eZa9b^tOU1~|w>h<odUh_*0@Xe)z=wj&^1@XiJ7fjRpVJpc;f@UGUm
zyOwTuYloGINVfkwJ%BBGxND3aIA+*hP>KZKeT%JjOaMf<nXqWRnXtf2*m2kMP$q!B
zRkW0kwI@LzJl%$pq9deTfCC8LAN#)+BPP;Bm;LtNjQg?*$c*FR?`D?WDC19@R$MIW
zXJbo%yvXv?4ItT>CINZ4dIj*!;o2HbfgK(WzKMM|$iOXNf`X(j_k`?1&BS>GjSSGh
z4-Suk{4ZyE&dO{|JG*`BJ_aO3>(3BlRdhfp^w|G0E|nM$R77x16}Sw<_W$az#ynWs
z-%Ju<t$<1TKbp_+Mv*;sd&zFmqjt{?dT#b<XxHY3%bpM0+gdQ$5wb%?o6hC{rO%R|
zW&zoCK(Z4+vco{KbHu|tBz5IEpV;FM`8+6vmD2)vW{yRfB|k5*@}J%BU;a4wQ$?0g
z->)RMW$D<>$Px%k<&><2vLG8k7Noe3&;>GnIc%GIUaqADQw5%Y-Hv4C*Hw>kGD9PP
zlQA0thts{_klPNiGfNv7%d3C+;Ws^I_9^AbMd}{{;$8kM7F@dq*|{6HKn~Bq8szZI
zPJ<ktXDc91@}Gb`<nIS|Q(YyXP*P(71hLTj1Te>df~388E9m1-Oz|&yvtHWibvy;R
z$l#-=zxLj=XT7L|E9<%$`!M(m-kDz%d~>443llYFOw^b-u8}m>=ij*Ou^eYn@VOO%
z9ba;&!QQ5U9TOQ+Ctx$b_%LutP!eGP>wW0|k0Axo@PGOID96}p5CX0>OBt(G`gl2)
z1Uy>uVUQt1Kg$MgZl(Jt>ca_gB++AL|DP(u=-Da|TML?057t#h{d+yYIOy5mjHW$o
z`EqTV(I2!NyAX*7UiTl03&uKw$`AgkH52ikczf`h?|JEekq>m|-~4xi2}L@&zxd#h
z7X51eWC)yf`_@C2Vk|F>FgRDTrUTS9rq}aMLv<AWpaKUeu}d2((?i;(A&Tpttm&Ab
z>X3BL)W9EtgTo+gzmLGkk>7w^)UX<q(K_JaeF`_9h=hEod>{k;KWBRS_H9lWzM*MA
zz1YvL+h#Xd%(wb@Rs)^M4<>oJH@VJd*BzAcv0aIf=7f5af&0l}&-6S@&hzc4LkFXy
zQ50W({IUIUd0cUq6nje?T-#$s2gtXk1n5B+NV5c$odm%52uQkk<${_G;JiGkDM6ob
z?^seKCOT)Sf&U^^A0dzrCK^mLmml&56Ak1imSwf0;ms|<&6hXM6z8;lEx+OS_*U%3
zD|^ebW*)CLyWw}b&Jo<XuIk<P#WfZ*a>we1Hh{58>OH`2Pa^{w%^Cp_?(~g-aF6i^
zZdCCCI8@0^U>5pKKd08>Hh_nGN%Y6<pQ!P5!p19Dv};=*+H(!1vjy|Q0?4qfuZD9l
zqKued7q-T0KLONWeU^KuI~cg1kPqr7i40Ib$pcEDl7MJJs|NLxcx{r5CwE%=0+hG>
zw^*dZVk{>C6=C(jn0=~{1^!Uh@$ZGsMW?go``b?@fZC2~j0Fc|$9td>EHNQrtON@i
zmmuHn`0JRC{Jq4)tGLPF-f55`{jAnHRd8sc#+ef}p1pUPcS3|$Cg8PsS))8>#75Gx
zz7zh*MtvuYp+j2|I9!Jgd!fTraCkc%2QrT=PA|FqrZ2UY?5o>(j^4iSQO%Cc=qJA(
zojcb=FJYR&Tu>?hBXoJ!8hz;A>*K`EG$X#z8jwf>Hs)XO&0o71su)GA9MXIJF3(ly
z*;P;uimV0+%>duKgCZ+;xu?54xH(~m{)VB*3P%#g9m=bMPl3GZAjqo@!s-U;W`p}T
zqd~C`#5RK>kH53cxjT{%Y({@s3c4fNYwQN2=Xrq1e4dvhcen~=>8>6>E{gt+i8Fe>
zzqxPC5=T%i#TMzFlDXmBO>#$G2Db-&+B34=);)LDEZV74tvq!|+KQLz;uB(TawhWs
z^RW0MQBnhC;Kb6_z|-5KU%+T0<`#%431J@x-Y%C3=oz16E8rH5f`D6eYyoZ&{TS%K
z^e}LX{Jg*Fl*gD{!U13v0J=AD8%tNEKqWGDAXYEwgQ{VMLF66`u{XsKhv5xzIp%*A
zIesVycgS4OxAqGyja5BRxT`)@sr#H_mBlpl0bM<!Ep89DAcjMg9GL)t4R;iKg9Pv`
zOh~4$xn9WuP@YtBfSeNR{ivj$;Qc5v-1MC1*{XZ%W{uD7qks{*S$zCM;U5nw{A>MC
z_*cKag348&#kpN+2AOT<hcBX}fB8ieQu$(l8C6m`zxU96K;ZqK3)3LME7W&|015&_
zod(72^-$bi55?^#p}74dh}*Atw73<F<;%v3=^~+Gx(^5!6d)hwpzts}AWe%nIKbiQ
zrqZc!c*Fui%<$#!mOIXHg(-x6cU0G12k#vQ!ikKD^hxgmdFK$1vE{9SmNyYv-X+lT
zT0zSj2rX|jw7jJME-zGue&lA$<?PMkjQ@;}ZY{GLzr+o0?Q60dzsMJ$-MrRjH$u^E
z%U-CaXM$3s&x)0T{~l3ByLp*IH#?TNL%|pc)T4w4HKRRCivoM4pi#hU$<7Yh0Ty!i
zSk^F-^<{Zv-#+=+@_%NxJTi;MH@atn^`146BmdI7yYbIK2=u}@9N)1O!tsB_kT3p<
zA^SeYke(y4V{rNqAAFG~5EQ~^d7)@#hxoxmjv&>O-9nlug#_|SAcZtmND01{0>gWc
zeH{e)TFNlY`BOHA1<FEck)SN}F{lcBLT6V#p|imEL>^8BoX0_t3YG-zOKAK33zhn5
zV^>g#V~L|nfwT-XOR3ezVE=Du;dp~6jbD^6yc7CZ-7nu~4fYSQml7PezPAJT^%wx4
z5>bH9=)3bzuKrVE;DcpYmopD+vA2XSk0JVSL#2=uT!xe{q>$Zb%}-DXA`w=gykmX=
z4*aI~xpe3&0sl}S10MUKgyt`cFMx1cX5#qe_Q<>mjhQ0{-~cN4yABofnn-|Ebb%^=
zGhPBCW`7$h_}j?68fzX<M|S&)-H+<+`OXcI*!#-1yk&L~RC5vZ__!w%jr3byx5avu
z9{;fUOn$2wzcqW2_x|Z1g#FCTn^x@FKfM@y#|Kp(9R$APQ^En?@$nAlw0mK#E)SA2
z%4_P~NdDdZ*@-^A{n^d=XTqCn_=iJs+gbH){yx%s${AmHWp_xsgR?r5L+@Ca{|{+;
zc!=hQgz|?Ur~#!drP0h&ZFFKT(gQ3P<V<n;`M(yJ0)!^oY7iQc>6t(3`271<xtkB(
z+H>6oBKU&KM3n7rURD8?z6MaWY_crSpwL-Wxc(GzyDDN4WH0;Z0crnpO@i2+7Iry`
zHjxz<y$IaD0))Sxe>Dgq1)9y){Xgp7JszsPiywc^rPDd5q8loqo=)k8E+izUd#A#Y
zB!q;NYwp(>=j4<S>O_igN;qK%8Q0+{p%Ri9*O_u>Fo`i3!*A{FlG%HAo~Q5k_50)d
zdi9!Dv+UdKy+5DNTJQB<@6`%Z5tahxv0fCVvACtou04X0(s>1$PH7gN2savWou@)x
z#UTiJd3907H8z=O<RF-gf>NJ2Jz@~R5TGmI?xWn&NZ^)2av9~8u2?6^Z7`;>CwX23
zHwpr3Tyhx#YJ75elK3FN9)&bC(Uyd}L!6C2CJS!EZd$w*ur!={ty!vuDlGGRRu-o0
zuKGGndim?eM9~gG2&BVWKs-s708R-xP--_!ffNt8DJOwf@kqzcw6889Glt?}t_4|O
ztXDmi#byROBRS`PDt{%&qo*vX_E6#EZ)|phk^8^bcWvl_XfYHQEhbibkXC;N#Ko+A
zkShnc7qd$KBgY93beaMVk2wMWo$zX5&ILw*MDa!l@9{>$-Qh&IEfF99V}$F+Lk3{P
z1G3#sZa`&fPIF&RFdtA?fCmI_-^G;Kl*sffxo}|Z;70T=My3rU9w2ZR9DY#=k!QzO
z7$@TYBFSbY06GWB`WC<;VTy!`SA?Wl0nXPl)cHcDqAz8wrE;rlkzvgu)m{5yY{Y=g
z185Xs1z-U~9pitPE0!lSxG<Ewq+RY2(K<_;^C?JXL$fbHx%|HrG!x}y2vI>xB2W?1
zFP~+-JyI-4COi^)X~YGr6&LyWD_|DHz?YDzfiq#!EMSCd!WvT;KncUuxm4y;fWHH&
zp3*d-fu*N`0^bLICy=sHDz+cOi5E1?6t^~_j2T^Ud<;E|UHJ=(VA~WVwMj?v>Q;!8
zpMj(n^pW=l0vSLikSMd%bGqpM6X%9t3L~1=;9S@(bCe1;gUk0%EbTxaG!@}&T$GJe
zu33?g3(H3?4B@j=kPAcj@<uFhVp2HB^^Y>9Q*j}{H3!rRTv)s4#(@WSfj9Nwu1$8w
zFG>)A{GtSvWj7s$z@A`-_?_AD*02(U^dhhcvJzNZnD%XGsMD;|Py+5E%F7|38YAx{
zFfuWZ3(#hGhJZHLg#)5NMPMxh1ktEL5DlyGC~AAA&Hw!z2GPP)ovHc^&mE~6X{aL8
zELY-}JW35LdDsP)f-a+&Rrq~A!U=3cO8X$o2Ni0GdaosWtDgQv0{TueOg6n?o>Bh5
z=z$Lz?k*8<?cu|;&wV3<9ObVo3aJyKc?!vw;OD6TZgq$Q;Y>&;o(Ts=_Gv@f0cLBg
z&y#~pwYc;K)Z!t8N&+(C@&oiBAS3Se%y9vp61|*SUzGubs~nddHBrfts?NP3Xi8b2
z0>ZWTEmToQGaR=?-yC7+?YsjX)ZJGdX5$GaXWgS?t>Q)gp$JaI6fJ@yRqgoVIdvd8
zQXj3eLdxQqD)Ty!9L2!M-3nu*&MMsry;h8a?Am(`rTYuSgf6{<_+JGj^jGnPbRCuX
ziTmk7n74yne*ugbo({Xd3u@*ddSmcFn1PMdB(eDrzjDc@>})Ri5SJm=vI;GVLyDjQ
zSb`cq0G6hUpz{+N4ic3N<BN-{lOuB$qE+>?1q3pzEWn5XS9<$+P@0A-X}go-Vp*B-
z4JNyd7I8!Ey+Fv4>*7IFsz+E}3AKy(-Qh%dBNSp{jJjyaCiarD<bd;l7T97(VRB?D
zybP#@#q88$MFuYoDZs!Dffkak3xVW7M>aP<35rnQVG2^xwmbI{R*Jx_8=d?~f^o<(
zLlwhR0)2p}2zG=Cdas2+(qOWTDCP3+yv>btPHEG~S|pEy{G&NSl}LJy+<}a!VGYcW
z742<Eo%En)!@D$;`VBvH_FDi(F?0eDG}<jCFe1!x{O*(Uhv4oh_}w$~kz-<Bw+!^`
z7^97|F)VxpE4PF=(Xye8a}r=J`3`3UC!zcQo&|y7cV<Me8{nXLE+>FJ7-L2?3H&&s
zESo9bJD#K~V4Um8)EmxdMC6+zZa98R!UYuX?}NjR4=gOv#2+TwsJEqoN3MICDoj)o
zP%|0&3DS$)sof&#0AJ$VbxAe?*y0zvsv@2(k+TjJRn<gmT8G#8uP0&g!it4JHGr%?
zy2J|(+;lgla3LDFVlGsc$!#LCXMJwW<@~=p<^p*@@%h0OTo%xCsNM{~!v&Mqy90)7
zn((9@ppD}lQzMkMjS0>5DReQogP0{2^)K;81)?o=@J1b0AONM9%r*qx;PzsTxt#qO
zQMWlb7=>z!_v%tmR}Zs9ryzU-JPtyLm>`}aZg(ppV<O&&vLf(~CUSO+E3v8JA-rZp
zj;3U9hg^RK-fQcqOVB-bsKAIYaxR359^6m_%?@)dqOk?~551G~Jq{x~ACCSi!Lc>q
zW=uyW0c?szFHb4~CJu-JaEn|KTo61*U!ZlvF9#kS9g&wxiv)7LA6EMToGs)AWA4!Z
zJ5TZgyq*{53YJn0oM$@29mC`rbP>c1J7!LzBLPoXz~fRZba>#mj6@%P@GR9Y*fJ1j
zgwX}Z#EdShY7cY3x-ct(ew_xaGPq@KSG_B6D=<fZbH(7!1xDb~;V*SEKEhlo;qIR!
zabS)KKMIbCxg;>c*)c||MBX`A<<to)k|lD{H&9SOV8FV1Q-BF!gyyP~ivBztR+W}a
zG_qPqs`H5!Go@&?L1cBpjx3N%$c2l=gJg=#Ok^Zt*}iwO#0*t%yfbiG!HdT_*Pj<j
z!T9DN??mXu69xf<;Q~G~(6!4?0k<17)?4zI7^>i^YTssThtLdDLR6ozy<ooW{}wTE
zk|Xi46@*c#_%MowhEZ6z*eV{b3uDZ%ieH2axB_`Z5+ZQ_;a=hIKRyil+^xpnK-iLS
z_y6&sK)eOV#9optQxVAQ&1s%^7&qQ3z7GN`nBfIsT*u!0^F#sEfE&}L2#vil0wA&s
zF%prIT3m7@VX3_~U4yXH{-ISoVWihgQag$2<Oo(?c4T|0<uyhcKb?%XR2x-%f9LIL
z%tYZ<0!InOO9ZqM!6(Ak@P4M+m#{9FaCayH#-f7Yk>O9IkJcJv1_2mhON_CY1eu6G
zRPb(+B)*gpz%Rg0F{msATpNjuD{#ioMqv;zVPY#nf;yDR0YM!TT7eO~SBz1i56PjO
zL$NpkwJ6FVq9~FOp9Vw{|4RXX#v2KD2Y4I5r3cOk_XLiK!9&3aXU7=nFI{pDWEd$C
zBvF6i!E%H1r*;`|{O=J5Praum13y7P&?3c1Po8MKfl_Ls;0aWCq{4|8I#6IFSm+=p
znn0ZAaMpth0&hZ)7#(jN5BLObfpUM$hP#WsJ&7E`=kU452nmY_>JKdlI{*R1{~_Nq
z!wvveXM`?F;EX0xp+#>8VZkV#{SR;LB(*y&sql3E=Mxaq8HpKDo+3nGj>vhjGFO2h
zXh|PZD&7u-2(*z1OOWOQeIP||gWkG`f*9I}jWJqqQ4#AY+XSSkHweM6E9&SZ!S5D2
zknrP>uqK4CdIZKHW_S3RRH#+Ptc5hRfEi;1GzhY>7&Hisi*QDYwFQ7lXKN58xLHuU
zWG@d0#+ZqKV@V?{%q1+nz(1zX?T)YOg)Q-q9)3O?6LU#mgtKFe8P)vHsw=1+IE`*E
zm>Y!bcv=#%)2ORJG>9M8Z(!M?^N%1F339230s}b67_W3-r100z#DU^OCw3elH4u&B
z%ZTP5QA#&QiWtAly^jYA0jiulCkAUIL?wwpP2%10P$j&acq1WH2}U?O=8e6~ONAF6
z=ak+j>7F18>=f{U-DCzXg`xZVC;e=kp398ni{iA9Lpv#hgw~7Ukv}Lm!y5?;_+fs)
zx)=IiT#*kTk|2&`s72%+5qK@&$`JWC;0YmV*IUj@khJS(WXoo_f-^bWDU}1fa|B)(
z(P<Fp2!~<8xv?G-09*LqLp892rB)=2zpp<H&xurru&+Ue>s$r3ks(E_8YKH_fFxsW
zX^@-E;Xn?Vz&M?lT?&a=al7hG^^<EEcKBuv(1-OA_(>3cK}7YSzzILFfdG*+IVs`^
z5F3dGb_>anC=3MwW+}q>_jJA#f(K!R^#(4g!ObM1j3Ona7%1WY6)MY*TxY;Ri2Ub=
zc>nqHQXqKnQ2aBDPyChy+)zLi5u$@OJ7U`x^E<y>f&(|;ZM5+N1PxOVy^*)5I>?tY
zwO~r;5lW?k(^?sfP!<^UeRmN?!xy@98ALNM0+!%_>rlKAKsYdB;JTA64&Hc?DFT!y
zhc`xfn_cdalunvFhJpVE#{E&43*>rXD!?0NS^>mDD}WYDT0V?g@EZjE01yh;`WdGk
zL8lnDG`Z3oKSTxnGqTW7Y&R(!JyUFl$e%M36=goaO@}&k+3tTMGBf0Aq!2m*3ZYRR
z3HrZb9i${W5h;n*jYO)Zm6_8*W7BMsGI%zblkRd9mMdR+k3u}(G%ZCZXhB6Mi+=Lo
z2p7{fJ5%*HrC&u0O1btqF2F^VWtMc+r<(QWdP&7wl>iO(Bk69k#0>KW;qH`@puowJ
z*5ptD1stGoev+F6O${F903AA~A}xmvp#w<k;e+55B7~U`*Jr_&xC{V5_(0c*R|XBt
z6G)v-HOOM#QwE~;!*ZsUGg-$K%F8gwBtW2qXeS_(P;RNNa^*`4WT7}6A1y(I39%?P
zg!*j7Itg3|s3*!qbe|2_AZTmA26_$!VXjg@ZJLw68T8_yxbHKnN_9aHnm~ngb?udG
zk&1s*A4Qk(X@!}occ@IxN1PytK;i{V5CL8y#0z-iVw{nppCXH&peZ#l?LvJjbjARQ
zDMbcIA1ERcQ{r|8ec9r6hg@5n;o+dv@=RxmgP$L|nPy6Qya7{C;pqYa9>dxeUnO@9
z+RkH}tAP~mD)~%3gK7t2RvLO%S9T^rSy?L7Wfw!0wm2e=INLz~cMZ_E7FkW#^k3rv
z`&7h%_V;z{dn4iyLu8C4G7|1iA=(mzx@MgyGg7AD<RZhwQOlLS#T6O%WPuY7zoGP>
ze?!AIC?&C3ZZ+K@0r?FzIu~M~_*$B4C}kl6y4?s(|DOu8rhh-8KTmB8hKdYII5eu5
zhm%y)l}CisGz%^{Z9_`uw;`qT+mO=vZAj_-Hl%cZ+uB3HPcN4a7be@jv~*YZXyflR
zuag-~A5!TV-Kw$RF|gw5i{Y-@n7(*Ae9nt)=2aobF52@FQMPor%QRHjJbHa-S3$(j
z11g|`Fs4z_tXKfEVq`>`S--NPIj9@iz66J^F$4~jy#ZW3NA2k<#mc;}x&3|nEd38i
zEyg-*kS_~vx-a|#7S-3wlk>f&H@F&NYe9z%lHkTNqu0T=7sPQuR&<l7sun~hDs54{
zHP)FzJMx<{4d`W$I{9_%;kb6-pS>Nk$<>dU`d=RMKQ#q!QyCBMXRc7cFXp`HA75^h
zMY&`8bAGiM-(lOJ!xW)(X4b8>s;;GKo!V}0f3_^7$WH+r^%_u*nr(<bkOV49|E`%H
z-jjaq){Uh`Cw5_g1J030S|={Sgd%&~@kYJsJh6w0oTz}erym%B>>Ydi=?BR#<Ccb=
z!H)FBNn%7s5h9}~&X}<{367cD3cP+$FZLXdYlKqvqRrM7%v+vRP%ri*H(k5GR|vD*
z>pWMuzgGh(J68&Wax{>NFJ+$aEN^p^MWo^%S6q}W*rbdQWczYc=Y46%3c#BPbAQ#B
zi58AfA|p(_vqmuT(!mk~xxard-Up5+b~&RURY*Ymw98RO^mqTMgc7G#dYVEP#XChh
zEbzWGUyZ7s*Syi^uebh9(+N+1C*4<jZ}d^?wIAfh{JSZ+lV}BobrpMcDU;Fyw3Gmk
zt7y-cA|%KPKrU1aiL_)9gwGJ+=;k1`;m*i1%4(h|*+zT5po+Ip<Y1AGGzLn_gjx?o
zXE_4V6fg7O)g|dxw@mZimFm#>U9TJF_qtm6&?`aymx2860em$533L7k-z`6Q#IVM2
zdr)Z0NS#S1QbW_>N`H2Ti3k|~4U;|cVZ;Ij*-Xn5P-chwn64~7g>>yoIbwy(xU~2m
zNg10@+_URZ)#*$$zxN}_iv4geQ(a{AHB5{)VU4<+g>lQPL02p{1LhzXE4UNAh`?+E
z1?~jZ0PciMCX|NO+d`3bMpOOlKojbppwtOl?$(>1;BRGkq3w6i{v4>C-o-U5+6*7s
zeeQiPK#U=TI;P)dp}w0LGbwgiGn$iA^Mj~=Mhmk`CkUNz9kzW9vo1rg_(btm=lq)e
z8xrHPPpT}neDG6(zOX%&2J&$H2i7l|IXBbbk~Lq#)v9fP3!P=8iox%&k(-`NQ%^2*
ztPRY}9<p(zxSTIx=$aZF=!ft+<=hr%CslUFLo{Wc2~;O3_<-MP`5j;=)$`-PTU~w(
zyw&aJrv-dal^nT0oC`DRSEIC{bg0Qi`Yb_@A)pOW!mTp)!93%ZVj<9zNb4lsGQp+i
zQ=^Lrj|z@wG7}Z#S4w7P9<ZmxK?N4HcbO$<58Mae_xkE0NNmg!5S=&HiS48;Wd4ZI
zZhb)`7;>G66!cS)G5vnUH<&pmtoQMKYarDsl5Urc#BH{%_!h(}JlBCl#b=_Q8SxyX
zzSq4@L+&N6NlIP$dIF=yCL*H@kr8UnF)))Qdd#7hq$O98=5UP=E3^P|C51GH%YvAp
z1ZfV}2M@*`VuxVQ>DFK88%urM?C6R6>w<nm6{=>7ATa5H>l-l5RaoXKk*My007C!(
zahx$xm(AN7f@{E&7O4>eJsEh^<bM{9n5BaqFYXDN*A)-NeEr($$85&RecW!nl)r!C
zJNiW#4bjtu86lgqU{vt;<ea1)So>1@;sScJ*%O7+M_*jBDov@DdSKOZ>gu1@BhzY%
zvU|?IAol(cchD$I0-3&4!l$+DTJjsn5n3`w_C9-rzAIU}=}5malqSk`-gIOy7E1Z$
zu-uWom`CIB%k%XF#={$kj0C&8AKCO$8+*)$$7GI}V~mz{8i*LdOHxzOd#L$N*(VEw
z)AzIo+LmujxfRr-oS#F5I_o;<N)<zCxHxyF+%yZyP4n`kt{i}(0(&_lF(jQV%(X;_
zv5zNpbJl~b1!5Jf70OA0>OU&*60lGiS0|lVAH#BdO-d+{aC_OQ$#-4Q!vdP&&ZPlq
zk$ey4p#7IhN|~}FY)_%@prAV!z%^}vli{*pQ1kANsKN78FQV19`w$iOC5Q@p3!=iF
zj;OHvfC{^`wgY^uxd?jf!E9vlQN5TSe4}oG&i0&|n}PSEj(IKMZr$puT>eZRD(+W7
z)FE>nWGzGw!zwiICs=#t{0YV0U%vof{dY4c%>OxUT8}*r^w?P*X~*B*MAH^e&juFN
z8;M#embjvl13`%y2}uW<-onaeP^|O>9AFnue>ha{1~_tN{IayHrl_+}8jCkhM+AuY
zU%Rt5<?o26f(nZ)XKfmhF@wk$OJt0}857ZCUh))Zfd)o6JJzVno>v*V?j17SRH(LX
z2#NnzoY~j5fu!ZvmuMKoPLO=LzNG<VQ@sF03AED|jjs*|57!bL;0qb&KuJ{}++lsn
zowS{l8tduub$igU>9?e86t6@?s|4xuZ<L9Q-w_#C5E(b(jJmmRM)x|!V%+QM<i~rQ
zV&}Xc9}Wyd)Hco{LK`cCB`@b*F}C+|q<=`f_HZC$-H|G%`$1i;ejle5XGJYUw74;*
zzkE~Z_x{;M{meIK$c3^(#}&=1=SPJdKB9Ga4a0f)TuHS<TN9S)<nojf<Sey9o}MyE
z+H!K)rj(P<U;FR<uKh>H%h^v)yJv_Oc-ptPKFI#ljbX8VWs2nX$lDsn;+-pauk32o
zD|jb#RQ(N%HKW{aP+z_^ToUhB3ym^nJ}x9)gT8(~b3p?}a@QGf8{U0&RP>Yqnez3c
z9p2(%b04B1YGuYb>mr$`tSeTKHd>J!YxX3gjovN%Cc|N+rTwNg$?x^=ZiYW@(7R|e
zk}+E7MCm$eUn+?)`#9NHAF{Vvk@N(%e)Jt|eFoXu$Bc5p!`{19Rxjqv<x3*m%}bN4
zU#^(5X=zdt+9c-3>Uvvy*Na;TC$401QP*CS$uCfk(4D{1|3Jid_{fkMSbe|kJA7oA
zl5k(o2??GAr;=c<I&`{7fvFfbZ`IBZ(AYvikA9eYKz9Z9#OZEoC(G{dgOEp%9<7&U
z_tLVGfO23l&|u`m^0S*WS*8$<=)6q|nX?X}+e=B8zG#e=QhE8}S45QfnIwGdlvMpu
zc(<S0l$h*kO6a}~Id5E)3RubPm{%~YGUs~0Y^D0T^m@^SY#3KVKg+>%g)u%poj$t@
zTf5<fj81Hm2bSRl3Xw6k^9Tx0QIQ5Y{W+2VNI0t^zzZ)FoYA38i2tmM_|GZuzEO>l
zj!^~MFlUP)Ixr1qOhhkX7RhiDQ_-8mRDv_&k`74M%IcZwuE>4UnEq&!jdo3KXmRpm
zXH>e!(4u#h@{@0O;a0+Ax&6|$8rdL=+X#9D_E66}xt78Vj5kcHmVMVk{xP>7Ub>By
zNUglUT%^*vg2<>$WQ5%z*U+%_i*pT4t064hCxn68U7s(&8(p3|qc5UBahVHC;l2SD
ze|G^;-avEyRP&Tso-{oSzp*Q*bQ##iGqfY2g=NA36!mwHRj2(LS&)HfH^wO!^c0_T
z!qOTzwlsLlF>@*y^@)tfiHuq}V>NnA3@reRaP}v(=MG5wmH)o*O9!(d^dx<dR6F)D
zQE~WDVnmwdcO{e~=9%k76J>LKn+;L}^yp&dw_^k0y?%p$To+6s1bhtWxqdx_U@|s{
zsMd~KoBsyi6$N-;dgoN{6bh8GA}MVC@$s1jHD}aIzqW#TP~{Azr@>Nd<?mK7h3bRL
zt8lvEcPkwQZ|7u@pJ29qAAV~N9o*ibclrh*0ssCNvOyIx4Zp9KJiW=^#J*4^`TdII
z3wp54MVq<Fp=Gr)wml4rmAOuh)@fKd9bg@OfdEN#0Fq`K;#Fv1=^IR&Jp_vtU4+Y;
zi){(&7P}IQ0-JDd;!~2&(EWy=y*42pmw@y#=xdbE#qi`9m$d?9v7%m#MQ{9r7Gjdd
zjL75ueIctDQU$#Zp)nu+6{`EVwNuOOx0jnVb9?)i?>4r1u(W8s<)&pDt}Q+I<C>zz
zWv>H@{{{cMeB<fiR7Oanf5e}6)*oBC?#CrX9!yi2K2Nhr@$#iCijGfK#*w@VP%=mU
z-1azEv3qYL?Fs0ZBl70$3z`FKSCt<&d9da9vJLI3e?ynpm($;DY(s0?i>JQT*w~t{
zre<%2&EO*Z-6I-Sefs7cVIU3P5WV!J>-^!{n%UnP)>E_nT)&@|PtPi&ENy6<x}c)G
zGKxq;<Mc0sZ(;)9L<PRVexJ6qne{CP-EaDs@C}Z6=~#%^+-&ki{=KiAjdkex!Ftg?
zrZ1ZZSFQxNtK;46$xiCCzVB3>TLQ}MN;^T}x#CIc^sB;Id3RrJV6>x(e1PiTZO>?K
zhDcf9tZvq#fYTXRFGNEtfii?@l1|>{nhVIHivEi%d`FUG^Ab)qQv9%?1_+Jl7|Phj
zGALZ+ZN_(5o1rVf>y3&Z%`m)>PG!Xm#llCON;rjkQFiG_C;jMQ+`?YMi?b44um{B{
zX6u(U^J`BAj3z_gYpPY8b|18ggBoj=O{r=F$K|nM$_4)enUGUUX_q>E>Eaivj8bwr
z`jwD56Pa3%ZW%ftAxcQLXWGfBS)-{oKq%WVt@sx!4SkT4e!z)KvVRkPvj&xujuOh&
za>Df)63RaA^c-Xy?@K=YVi`vXhm6kI84DEelyHa<Fiy61O3t=Bx~U{z%<uxwDUkK{
zbo@~HQ|E3Q!xm))`a|eUS<0JFe;AQ0Vu&cf&ZZq@Y@&!7UVxuGyTB5RONoq23@>~w
zi34>Mg$Ne2=b@JcE_3SILy3%mIAfxOgXwBzMA~L{6LybG#;P0~{U_5%8fUc3G+L7b
z9l}WTAQODYd%#TvB_w8&(?yV+X4%q?z&Ztu0cJJYm|W5PObX`pDcz}ffwU=q1xUI^
zyW=*bO?g&DB~YYovFsWKkd$q)JRC3v5E$u%TM`+O6Je)mpz+|0AR-G}Vy6Cj^z0bp
zN$PfQKfnx@F5xyie}&)@7gNq5J={P=-CdRuMgpl$oD!!bJJ*CL^w+K{U)t|chp#IR
z?S0&YiY5xu;_tk42?w1}9KvzTT*Wzq;Ji>ml>q0(6+nYp!`96Nki$dkcoB1zW&_Oc
z%6{qMfu2>sQ!I5kNMt-hWF*{OpJ>ZT0B81zbW2^}m`JbRgrt}+o~h8Q#8fcA5tX&5
zYmoRJrhDD=yRT5e;Q2@sr(=j<a8L`P6>L&o5ZQTePeQjzrxjEA_MW#-dXE1jE9dB5
zr#dezOwsT+2lqRrr7)qjk!~s9PF?7{1v6pLgyI$x7|#)IxrAWLxJ5W)26{})Bt#L4
z+q_?5KBi8F;?s3N;@YyIsY!YoKI>>E)ddqNa)uBY-x3+G5g7-GjFWgZ{lFqbK1~iu
z$ovCrHcj0?avqto4G$00VM~8x8aCjK4HVt{q&G6_`Vvcr?h&l(yaYWbM2cewQJA73
z3R6ai!jvweFqN1YL-WvSl7ShJMbzcE^kaSxl;$IxehjFn%T7!_1;?d(5-Lq>cVcCj
zXLcevI6;1`Bs&0NcO;4sjNL29v~x+KO$duYu+FlX511n|ISH1kK$EbpD+e<1bi30u
zIV*H=&V4r{C+RRFCz(AdCrQ_(bYe^1SWO-@^nwN8^d^rOB4aF(k#Ki75jGLo<dK0h
zCN_D%F=qwHsCR~*9d8WmtXYCIq!D!BcnM3Ipv_%WJE$GMz6<TF<q@^x_Aw9v23_~E
z5px}3hy*cFrwc3NGVWsFR)i86eTa;aL`Jwm%&&$Y2**T!OPg6=MX*Gh+208MHVwkH
znbjadx7Xpr=}3@Ndn>nPWOwl-)Jks2?E*-WTGempVqcsIAnWiBGbMK~DU#wgx8-@B
z%k!!t%#_>$68a*F`*Tj!`PXdhr#8HJXY=&}24rA-6jcKnD#`#9h<^dluf|P)RU+3x
zI{+1RAfUK}zXHim`8(4MUF7@P_T(8PdQ|Gq!H9ntBp$5Pzl=c)3=)qJ87+y7guBCu
z@W<508FdX3;h5+p0hXj4wb%W~qkh^RE1eDci5im3=<)6<WAgA5i>U2m!<n9;F?V+<
z0<w8u`doFNMfHfjX)IuD(%MRaBK_v21KO+6K&Gx~e^;@k77ytNg+x**Z#HYm2b}w1
zDk^giji5vS>j7mnMl*Sk_mZDl{U}TA7DQ}Fuw`WOKz?6()|}v(@9(z{RX58@&FnA!
zL(W<IHD68d|J11M+^HU&nXXi|yA50edehOU?y}#<jVcyLQyou^k*4Cnwr7#)8MR8K
z({Wy8It9K>M|P3vG=Xn1wMyb<5At?C=lDo<UH)5)`8=;$9?+|q59#3g1!-;ym4IX6
zKBk+T7bk2BFSKoxXU?b8*5qrV=nfp?AL?Wf>g7ysHV-sn8|Br8GO{|p#SAp&!jt~u
zPceRKcT;#wmcjP#xjo;XJ+x5e5_N@kV(<5s-A0X+7gN8wCRbsTeP*0n^i*W}($Cdu
zN3*Ru1j0xwudPf<onNlP@~NoWE3<u7UyT_W313^vY6^E={pst#0+YzB)z_w;)+_E^
z`R1sbtJbs??a^o??T`O9axp`%GsNw{t{u<!#y?AR$U3l0d2O20kq~uVKTi#4<~7sn
zteCp6p?*4A&!Lb6zR^S*>WftVK>Lhs(b8zFVffqhPCetZj2cFOO)m-oT)qL{cEjI3
zeKQjHCb%VXl$kA+3<4t4r@u8R8Tdy|zeQ)8lvHMYGaQ-^TNcS{Jqrt*zM=-D1`#k$
zzji;W#e|TM_iXCTO4)Ke=bm2A-39W;jaIgfJ_%Osd}NtITC-g(f(|o9T^>v%73~Tz
z0ll@I{oUSkM+!_$>F%J98dWyK`$`nE5z4iZ)7J@VI%}YU#hRf6^l^1_)DkQ<4K(wW
z&^3PFGJ+jW6vFez??B$1TSR$_y5zGp1fP<$<pZL=!7ZT55o-R67lSDL9Bx5J|6qgz
z@I$_gkLGH)VZ|kAu1p{TUeTT3W2ME!PHbr4?907JLj;-=_8DsdXXKu;{P?S$D+$X<
ztQQ_yn$(ILb=TBy-#~i5?FVX^{=X5``x^*AcNj+E(Vqa7Ui&BDc#B^Ej^}4Kn<CfF
zG0XT{8nf`QIA*a##u%J2kz+>tTN=CEa?B`G#>-_jDbH6#P|^e0sHpGCigN1Zzatj=
zm`Szd25Tc6xE=ZM@X-~@5@|e>fi#}cM;gyiV9lY_!0L0->QkbZotYAmu)k?3#&b=H
zummHPRvXccH?oh2??>_%0-C4nBUVJlM7**0NFpq&n0y@M&LH*no#5IBMo7}(jGQCj
zYF(ME7N*TkdrYQDAdZ2bHq{skefDC{cmpwVw}Bla*xl#plHEr3A0@r3`Awv!B_oz=
zV#PhQm_^fb{guD8<iDMU?Xv%PAhPzS>A~Np^!I^^=@sU-U92pOf$6`|3>=rbT5X$x
z^f*@@88t2xHM<XMgkQv*(YX2P=C9x#D!mPF)2*%WUdfok8)i_9WIL`Q*^cGVb)kM6
z(tpAHl{{;_&vN)<BA1tWq0Bg^Xis-sQn*QKrFGlO!Lz3t|IjhqAzZ}0Bvn6hFH)+0
z!Z!F}HTgU{@vMFH+$qp7?`ejALb9mAi*Eu$o<#>X#0fh}{s-t-yMBZ7yZ`H`=s}qn
z7ip_kBW<*uhtU)>+3HoAU3gJG|L1`SsUPlWzk1ljKYU8ZCbKrP;?rbRpQhi_AS{1;
zvF;XATGrV|eH(8+d3NUP7E12A2<9WEeN)9inz7dAzV+YB_Y9BtZ7sT8-&CP#zI}dx
z%8y?omlB<GuC+t@yCpss<kZy4APwnF4o^%^X0jHZujy}DqV2ySBUdUqJXx=i^`B`^
z!6C?EsX=ZL(^ZU>nreG0cx%}0qn>)f$li2Ql8(Q6hi9a-OnQ)BNm4^al2vA;v$+oa
ztjd^%ui+D6m-#AJn#flSQ~QNo%$bkRvfZwml(=s#UTSLT-BFZ474t0dxfaq|bhfNQ
zbN;&Ts%**R@G#p5+d=D6{T@pu>ycxgYq&|tBhy-nYR_nyYev{vT^<}+drQwUm-)F(
z^K;L&7`VMWx2%ZFBMIf^^o(6;`c5xT-yWVU@&W#mxLLnt2{c^6eJD&prf84ghPesB
zBsp89y0u&#x{%gCXl4SS*@viV&H$Q7$ttx`(Qs4WT+EDO6((D{w=rqMOaT6g#{#V;
zbs1~9XkTwnq_6kO$-dsUw|{N4$Z|10>A3o3purFzu>xq9)d6Y!O&Hh0Blgv*jaXWW
zN8<4{`l0BSN_CzI+J9^Ou%n=K<ae@Z+s=Z3`-=}~@$NGB8IP2)W8_r)@85PgmfLzk
zvA{%8)_=u3t)&@2#;Xb9-kQA48Jm9gwefAL6~4dLB0fF&{alrE66v;T>B)7&b93WZ
z`wu+VurO5J#=7nNDyQjHPS=85n+#7qfAY8e$qhwGta&AK^I-jCT?&BLe-1)&TObO<
zPkb{xq#dUqD`WEr`li;Ofxf93Po7Sb^==J;u(sT5j*iUeA}r%aZSSVhMcCV*wpWh8
z_;fvik!bhmgEFJ?*ki^X+km^ZmWglH>xW`4$ujknoPQC<_#?Mz3&2k={eV^Y+Y|;f
z^$GxzbpU$a;t;gBw8%7l0`iP@okler3a(Eu<6Xi#hj4^&!88ba_W-ZxbBG)Cn9?;=
z*^kf(U;LMv!)Z1%QmmjT{N@^|;}o?@9=hq^{EYYc4--+;LQpRz{UUQg%=(5!1l&J8
zkyvQ>6N}JLw<0uD=uqYX)Z}yqm65Jzry0Q+O`H62v*BqP3f^HMDC>CW`GPZA5E<je
za9cum7ytw@XcHJG=~uIHaH!7Sj1c!Mgd4S~>1Rb>y6k8J_z{|-QqyI)z_LrUj$e#B
zK3wx6A|te<`E<>*DYs2CB^4Lx3Hd8Z?%B(PKHUHCm8_wu!HNbXb1q|}O|QLGOhq#1
zA9o*pc+B|f@#Po{*l6Q+yitE+mEZB@7`PXVuqDo@?SFhZu6Jui5ijBI0}tn8yb-z3
z3q-~)0wW4}VRuI%FPw2X`VtGcJu(elcyLUn;R4>M8o~4`C2h4gKGp~`500~8l}m&x
zCH0ls0^o%Mu7bHuzivj;3W7rrv|#QAk!c9vB8XzU=h=l5LTViYB{>ENq{j0YmC^bK
zpl}*WoJmHs@mCJD?+H(3?_w7xhQrlkg9c!<+(I(`1&xl%(N%~bZ6x4Bcgs7R5J6h|
z=z9h!>j8Yj#SV^~{0j#%qlQo7z@}WP7`pxJK|nLGTZ$rh0^`ykh0r*O^$BiCygQue
z(`Z1aMw&s22tZwf`BpjqC|O?mf`O85jsxkYt5{O))eFb&B9pV(v$#?f8L3GJd{^RG
zP8*JZziahKRC*r#?FdS;8egTiDMG{PlHXUk^J=m>pC35H-hA4r@s@(DvO}f)WOC<+
z9alBBU<OS64>e-<m(0fi0<M>Ih>Qw&qst}0=)T2<HIZk0h>VdqBXDm5JC9(T5XhTB
zh7D`981N)^=&cS-@t6c0dhq_S1e#n9xc+z}<V*2J!rg%|h2PQxXVgW*2}~&;7zqLo
z7nVrbeS?&oBq@J_(Jn_g+Iy-K69@+>gR7p3Vw-}nHgpQsfmX{%%299{DB)3;5y9H9
z60bzgj2)?f2Y{w7e%1gAJ#X^22@7cCg2zB1T18J#tI)+*P@Rdo-5=M>%!sN@Xq@bf
zH@oYZ5%KQGiLfgSwF-YseX2J8lAx2CUz)^M!kAfjW5e?O1nG~5^N5mW8;Oi7a7M^`
zy(DD8@A(Wh9@C|)IxX@bnoSf)_ohV}h4fxEK{?u=a-oo_g!CXL@kU2ed064NQWt#E
z54I#s4?-pozdK<b(gSja+U&%2Uq((#2`@v>jJa^#%nn`PeLFN8<;mh`Lrt%Va;X;1
z#zBNR-12s@LNrw-xGL)9r6eH9TDa$ei6J0$GCxB`uT?<bJi&TtC|<dM_?x(a71AqT
z1ScjSbtXXX@1uy{ivH^Wj(wMH9<bVo2%X|ElVN5ZdM*b)KY@{OCL^5l2zQ4Q;U_|v
z0x(9H^Wd16O9JCG@$}P#t;;U9+MgLkP+YPAayXc!08&8sY)_)aK<QraP^$jVJ(QKX
zE^*%bk$F$Rd1FHfGJ%n`KNIQoH461c(Cdp&HehFyD>%^55qf<cMVT9z&<yAtYjd!w
z8v2<=hF5X?1`kD}O*x!rkC=0~>QWZzQU6?ti(sQOl336pvB4I&JV1w>Vt;u1W&xxb
za`WD9kC2;wsvaR#_PfZpZ%V%!$y}l;e^^I#KB+g`GjHXg%5kdpjhwAPNcq9P0Tc9t
znuB#vfe<lmkog%9MA5(C&+~qru7W$m&@@rZH%O8@S~-qE>*x(kOCsYzBBKY9Q6Fd2
zMUROE`x=->D#x+LigvMpq0F;{6N90FOg$5sE(F>>!^BW-++sD8>E#vdL!7i9BU7W2
zbw~<9`&v5n6(73O_SqIBXZ#+e1?U^nAT7)aumPk2D$)*h#=trSuz_>Z%!6S!L{zLF
zV25n*H9;A>n4~pBQ9T`H5D6~cz%NTp7L{;-_Jzr#|L9Oo6`Bdum2nI$bW#bAT5|T3
z({p%13Nf9(Ikhs%t+*T0Wli3o-1;mN*8rE90pu?1@RV?%;J8pQW(iZ!gc<DrAunC=
z8&nqjJEwhOepcMZ1HO&B4XqMoufc;LOY}o6n{qRhe<P2Bz6cS)SOYa~#)rH-L<@q3
zA?Yg7e??Es^253Cyi_DB*0fsDUq}h-7_X)W;Ed!Q#!k<FaKo)Lh7DJ7+cH+u{e+Y-
z&Ie<)B5a8<)~{A1<Xj{4YOG@prOh}aGf>xWHgD?na;ODA;8aH2?y`9vLIkhr-l@|G
z<*l=23<PuysMny$q(QO%r-C-dXr(k~C;nG?)a1b1c(xX)ln|t7Qbph`ZOxZT{Nko@
z?!Kqx@H9E!#`Ofo)9@a;bd#!|^+V}e88k0dWW$_<bSxdf8@93bE?_{%yW8CAK%Wq2
z`nA&NQzzNiS;+fBOA&LmY)5pSO8*Q0Wd`1ZzIkOTt@?0BHQ9Ah1xBJ&q|;}^0KN3-
z5cF+^espANN0_<}yf@e<j8f>ZZx|8Tjyf6bR-1EvCGclRdf><7MkfgFC@{xeDG)<5
z9Oa-;I!nwBRwFSG6=)2|Oy$ByjA#ES62Rjif&gySM=~*XU{gti9YCR7<6BQ+?BL2-
z)CE9iwh5J1l)AMUp>EmsAgrx;qm}EjtkCw0!`VAr3h%=_n$-V&7PK|`KFdb0Wzj`w
zYqaR1xSXl`w~b`Uujs(EBdT;s@dq|4kpl$mn$;L@gN{Bhj9&sq52C7xgXp7mU-!yG
z&u!f+Dr4Z-ZMe2$x1rPPoh>}~b;qhj1!p$yZ8=1J!?B<wZ)J(TQ*7_sjBVEu!H{HF
zKC>ag)(#3H5wi`{AY`PK%#5^w{Km1mxUygUljX7R%>O(q)vwltnFbf9yw8^tC%PYm
z1tZ_#?uV{QYyDm#DzFCLXiGbYzFQ?Ppfh&#pO?#P+vpqbTa8Dimi_7{lpUFKQrQ0V
zcC&ugUZ^pqUyh0@DpS}eS|B?Jld)kZ%-1=uq2^eX_Y<ghq-)NfD$Mh7SF>B(K-~wG
zG`=e_dC1=D;`?C~Locj$q!(5@P{(e*f`NGRm1298^c{BRFYO0lB_k@xHQ$7;a4lvC
zHxyyV3UfWaa0VLcQjI`{eLK<)%MocqdAA^yYhBK)Xdg?EA$+%B+Q7brC?Wtowo0iF
zW88q}-TGf7p(32`eP<W<LL9A+|DRoJdU?BRAz%!5v&2#YR403h1MRz+4^lcvFL9Li
z4ZXxssE}xGQ96mTa&e)I91)r~;M9BKGvvC6{K;w@G=`cGa-V5b`>AmIhd%vk-Iks5
z<m6E8=P*kLfw0Za&yFQza5X3+N;VQRjD}adk*|`(U7st!>;j7?HjtBI*}@n%?sIQt
z!5D94Q5y%I(_xH1rw3+t+(_ZMw_0f{pzA)4($@pC!zW(-G-dNL&vrdvbeOl3eLWG3
z*f`?46_F8EBm9;`yMvqtK^P*AH}*pB41|3MQ4GA>8xT>0_eQ<YCJkpS5Z82K5%@Dr
zzk)-7S*{q+*mJg*fkwmR9R)z_!$F{S3ki%Vi-?SiamEaT6d8QL)r&@9xE}>k81|Ra
z#8Uv(C2~>jP2$LcxG)0O;(kC15wL4V2o_JGy_7F@&OHZKmDRci0kWP<gf}C<0drXy
zD29zq`4}MAiZN2G<CjAssEDAL21dNOE^%HiIfW=M*G$q8D*J?rOt?P(B(s9g`8ZLa
zdgbA(AVaH{UpjC>f+}`f^`rHnbt&AC;^eT)Z)&0Iw!91UG%U95QH@RIZ3vW!NHYj|
zi34JwP``5X5p&$^b(!apBpSO7?v9`y!Bf7Io7ToPGgUIvqZuJ!hk{}R#_JAy`IgA|
zn7{~4?5yWvcZVi+1V#c@_ANw31YjV6lFD1quMcO0GK9;3tzwzB$!U>Ch!8^n2Qi2t
z2UXa>8}YjXwwSYUcGhn)&tYwWH$wgq@`IR{5sZ*Y#2X>ei5sMF{gb)mDNh2r!Gc^%
zTI5Nh>j!WXfBl5JA1B&UYf@ljL*bW}G?n?tn=;*RK^H>YV<CKiMl^4|Sc7AYx?yem
zaB20;johL~D8%I}*kcEZ+eC;L-jqai&c}(%>Hva3#A<K2PvqcN^^KX#vj6jf9F(j;
z%Iv(}K(VR{OYm<7O@*6QP{o%FLJCOvck65e#T3XUHL2985c^)9+8<3>`;x67Zp05n
zQq@V~<#wef@p7BLjA*&tG`)y|9*mPX`vsC!dr^#(vMWO=yC<ck!fv9&nd>a0`~<0D
z2Zsx(VlVK#3SuWs`$D3M*pm0b7v(Ml-IHA3_27o8?gzKDZ#hWGxmG+fskyu&@6I|q
zRFjJREpS?Z`zQz!e!GdtC{AEJ!b0o%1$OTR87bWr*wyVw;sM=yMBZ!N7t4E*IYv&g
zKe)L(AsK_p%b!I7NRZnHMIN6iB%2O$(dO`W`E0;aW1;moqRL52f0Se43|@UTP4r4N
zDGFXpo%@>>U?w?NwD22ebjH*AU0FJKV_&({yh=)@F1z7p28a?3%*bg&z|et+60rpo
zx5cNBih~3BP5A}{?gls!;Fw@19RtJ%!5I1708A|@L@SO3WR1e`MGt^t;maeyNC;RF
z;#~y01HeCnoKHx693QLI(zrF+sim=F2tMfj!zLP0TRNCQgRlRs!`U*JVaID&U(X!c
zaJ=D!3Bp1}$fgMOR1P7cP8+P|SOPccYhaP-|30l#RE$VXIr9wUQ0Q}Z{A0;8*n~6I
z*Nu%F^`h`8NaQeYG66BWcHcE%ND<4Z>CkHmD|~d`8xq2^am*esUE1}WzJ{EX?+!iW
zZD{5&s1ipF#q1w|NY>5z>qY980OULbAO~-tPD|djBa$}^YMTBa6^CL#B1UnuM*}Xy
zS;TX&&OKl!j!U?>G3Cqdi{SVVkjZ3OR{8^mAehPA+^DFNdcFvNPRt~oK4Q2@zn=&g
z<<QXWp<oA=^R<xa0C_QrTg>?)Y_1$FuK8rd@d6ksrgk}#wY#SA$8>4{HHl9SRbNR5
z!Y;(r_Kra2MWSiN*#?M+fZZFEF>~`s8-$wyd<2{m12X_jiN~wJh~E-m26U<xNZW#u
z5IZFXBv^BCfCRt{#e^`!kuQN6)QztSWs`DFhM6v-!zefn<XDF?fc8MePj0rsN&<vL
zGJ<A^l)z4L84$|egFly0HXi4w7Ro+|qUHiwR-tUT)ZUvsYC_>f;HgtLA?#{hBsNW!
z2T>Hm3xEgZzJaIZ5(}tM?_R(v8!p>U3oRk?sWlP~w9e~9a}Wp0r19{2EFNC}OqI1D
zT!jn+n5|%5`}Dren=%3r5BifNk@z)V2#HS1uny*5z~>4f*$25o3?>K0NxFal2fMr-
zdJ`I?+$NrE<O%BRkjch|xR5WGkPj8W)lotU_OSH{ke3Cq9Rk7ww9`pE2g{NvJIpvf
zj{!O^?x5q+4w`5YTzIN@!;Fs00O+{bhuLZqVJ9G)y%{O-CuetPA)VZ{lmNYoai}SM
z8so5|J?c<fPp6iWGbEOY<`o5?R3PeCv*#6a6&gy+oT?7QO(7_?=u+<FBobp~sOW$E
z4k@QNPJTzMyM>8%JvBLXL!kPF#4CPn{Go_92@dNC7;vFPv^Zh{-9!BDGNBv5_{qVk
zy^;?(3bQhAagU~}AD3=D<NBn&_Axn0f>a$j{|!>@`vN|${RcjF<wGrA>k~v?E%;T;
zA(Pt6;w}T?F80@RHYA?=U>(%C-|-o+dEdK9Z$LTS2JST?9BO*<=7v3uI|`~y=z~D%
zmMFAUEVT6~v{mWJ$zRAl&UUVKo;0f!CO5d$>a_jpC;kgpx#u8k8f20gZK7zqt0&xJ
zp=)KYQ?1C%rnPRhp{CV-23K8_du|y^qZ^BP)T-On+5pFUNAtKJkh;m6QBwEVjuW76
z+3@R~s3N<4=8$|^5DGr!CnI~@5AiuQmrXe{#m%rjbNEX_v%DeECJ+NMbl>>%ju4=k
zuIh5O6pTn3%T+9>xo<CKq9^;<Y_NS)gi&+j-ljK8I$}08O5friJ_loD$iE4QTZAzh
z{m*0?va+`pRohThxQK<%B8njZ1D5^Zev+$G2cuw!MKZ`V`9jX2iz%CDNyOO*;8qVR
z7^3kYmdpWaHw1iTL|G6a4--rpT|nk252D%m?!?aoxVb`bYN*pdKo|nRVp3Ufitmx-
zD)bFkFG~(`3xtdhIsD{G|No8gf%NABK`F>}e@V$9Dii@-Ce*00(cF#_D6T<7zy&L8
z9MTXG@H7wscSr+GT8EEgKvk%cX7?BVRN!H%o;$K2cjw`z5S{JA3vyF89Z0R+ZDB~?
zI!)e(zjh^Ag=;<D*y~us5t*#(C`*F+l8Evg2kZT@*WB2BXKNO4o+p{sY-J5?W3?;x
z#HA<8DObj)R_W0{I7+}inbCT5YrdnQj?n#MXQB&(%D%Tg_@Vsixj%Hjz2LLi`k<T6
z+05??&;7A0V$mqTAz$ywQyaf2QM&ln+RaC6BD6cK0z(okH`*6$n<^;1ZRfZ0naqtn
z2ahyY#1y@D{P7kPzSNg)sD;bzkLBE3!u9EOtmT-#SM1sGyxI9j&^BW&O@eZR4G3>Q
z`w`sc_%jf%X*|(SbBnwLj=b`m8O?dbX=vyAC^VBFkvuBa>dIz&SiJv}MP)d?2tArS
zIgf{(E=gXTxxepEo~5-!824^z>*;kJ+jJ#dd=-1Bnfs}iv(`F9rzfvKDkI?Mhv^};
z6#2GQ{s#Xp%C!|vPnHe80u?f*HN9pv5{yUU%*Ay92LE<dVkM-$8?r;nDdf`oK3r)U
zz4ctHadCF3iEh~C7LL;7^w}qjOwkP{bb4%-ly!VzvRS;s<g}`5n^%>S5U$_&kXiwM
zmH}(YqlVH)@VFX;a?h<SQg!03bSe6e()657F0>^H8_M<FYv_2CxQU!*TD!H!WqS`i
zLuz4imx28^m9oBu)w(^qOyCbg+j@lTY6GR#>jdOVyC|``^V=p%KPInrxX<r+qbfXf
zU3>1glMh|j#*4e3t{Wgx)F#h*mU&hRszo}Kr>jLq|1eaNhri`YxactLpH5w?N}frk
zgiBMcjLTBCORbjOLE0Tp(e5&%MGqFF{c1Oq@XU766_51IoO3!l^8?g50OJbk2DFf>
z6asihaN$aaU!|#mj4cHuH)lsrh^SFS+^I&xEMvC&$}UJ;tn7m1<jyycxY+pyl9Toi
zcwj%ZAVAz2WU~Z0EudA}Bg(yv<m)k14@2XC@HzbyOqs{##;~^6{zMZ!5tR}+o00Ga
zmbrl&b1u(@1`-CGw2Rvr#SF*o46-dSzLTL!0oc&85>m~1d%j3#LQ(F2Cf&<4Af)?G
z-UT|ZevWicAYwdl8WjCOaQG9P*k;B?bpRfM@8M(`3^o5soaQZ0rW!3|oH(~Ib{o%$
zVpG2bNBo&>perMqg%UFZ7m-y3l-qpu5p&2Pu61CU-`j6%z}|2JM9tSah<j@KH3l7N
z4NqPYyA1{Z@AmL3wY{Z}A_BAGhzG<1f!Vv!focV%Xzv4=A1>_w<vGZ_YV9Bo@H$KT
zT3;YrRUm#+{nlQWZM`r~npZh%pDcT~vnM=m@q!;<bn$=PiWZ~M<|9*U*CTP(-bTs%
z3Oz-aAn}(_5~X~uDJR_)2A(o2++iWNxX-aB^~F{7gABb|vq67Jo9ma=52kGF5jCkb
z3o*Z`MbB(zoWj*yKpPyTLHGm4V<1<2Sl_q280&`24{MGx!EJg`j$qT<nM5ATukbQs
z_OF*e?jLu<(7~8mB`mw=|DZsw4fMC+Rho^I04UOJo@q2ypiv~5mC~@T+~=W;-k3&v
ziTf51cW1sB+bJ9eTJ9=$q&l9w1xAuIbm$q5d^Oz|ZUIt$SKQ=*khO7$f@oi98ik0I
z-9l3I4oq=o_11VMGaNY#)p@#X=)jQ3Jxw#7?7%=vdD+zo%Rg>f0V%Sn6qDdIl&<2P
z1aSxv&wD9kjG;-8Zk29B#iw)+k*a>(pNPKx@9^PHMoJlOa)(X+G^X8)YkEIx)i&u7
zEjpVU>6~P9sv*n5#HO%*-P(Cp;xhBCB95uAi8;UGQ>|60Y_`QBPrhlylwg@f%)t#S
z6kJ}caIH<geObj?(L7Ma8hYJGP#coZW*53>y4K#Zky)XDVOx5{9Na`?lp`_{?ha?g
z9kY1-3WcF=lGt-ZJ^jA`m7mnp8Lr}<YS(D%LotDyRVgnA<`DulGWrR8ZXv8!9*h`-
zHLTZ($T*+KNVq$4BFr&w^@c!h35wjRU#skH;S1uCHT?iey7a<_Xcv4?5h2Bou8%Lc
zy3ObgtFQ4nzoM=!DX}EY1RSp0&MJ4D!RY{(E9t0K`YMKV?rmqC!U}Yru|3xWv654b
zv${;*R%S<%IPdpKr^CDk>UC|rlQ%V-Y&?@&VWK-<M9@`8W)(GEhRLj~bND7wLF=*x
z>N0x|B6XR8AU~WT{sd-T#Y^B(*?tQiRnzz2QROZ=HpMM5wO8|P8tZ$i1*@tiyCmPt
zzOcDnN92rsdi7((bCk7Bh_fxC=%`4zoW75`9P&NX{QbH?Ix>Bm>N*~6nwwmSOrp8V
zryQM-*xC~X;L+6lZhIe4xm>?;&|g@myiYWx3i>?QDpmsa)3o0LG+mZN0yL!V1o(ks
zrJg|Ow**v;jeiha#>V}59TTG;tC;*&^D9<!v&T&qrEl(#q*+wWwNsne^FA@aSD5YX
z-fuQ4%255bV02hhZ7iMZ&8f7Idn2WGUZ*C&XLr^_S<%fd_Rx{JloA*F*Ck?_(Yr?5
zLxt+(+nTR)$2sY~#`$j+HJlq!Eb!#kHSvU{Xm7UiZ0~%05u-acexkZoCdjShV`2c4
zKN@=cB<Inwkzq#B&ADQl#$C621MjAc@gK%z_BUTGa36eEUMN?PGhjK_#<ysX9D5fj
zBgI@T%+PScQ_aI+;$wjBF0=H}%sh6DTKoHUUTCnAXv=7c^*mqGlUBmPaJ12ql>wp5
zTP?Lop3idD>w4SN9577cXWR`^O7ebH{GPr0giuM3L{RpqCr#EikD=!6(qHXU8ZVaO
z^g3<XNn!n4Ebl#~=SJr6yA!*ldyEU3nWT%uxxuAkonc|FlDlaGt<^&MBE2scsuj$0
zZ!7R54aA4r_3iR<u(uzvoGWAZJ(p9Yn=<6s;q=_LSFU4_WW4g@*-?l5H^Pz5h8Zkx
zp9FGmNYkTH*LZPVwbs{^iMS-YW5GH?^(RW(O54Q5suqv->87~6&T&5MVOp6}zNdks
z`OUGp5)yy9a7LFYDxTc7X{{ld8*j?UofsAoR!Ua=IfAUzPT#4!UE=LmEpgo8b)2Sd
z=7W1i{OgJ}hcx@tNKPXx-CeDZw*6w0Iu@rkI$D0S!npUUqNx2Qv%LN#&sQv`$I&Ra
zQle9$caMFnW6MhBH}8qaFb?SrlnUv~ub-Dj$*C^5p6G35d-tnTEmsvaLYeMEiYadY
zap7D|^fvA06l_h@Ocz%>N3nlq$;o^dSk$`lCu&)D%3tR=y1zQMhzxRyZZexZi}o?R
zeo!SlxldG=w`$Hc^mMPMDjJ7bJ+s#J<~b!+ky<u4wy0D{di7hX#ko{}OyP6*y1Q77
zO>S!Wqs*M#{+f}|zR0<*M!nMG$$Aq@FVK_u%52-yoYQN~Gw=Q<n(6Mqu4~HW8u$Oj
zd8u~Xh3}9>N&AU4`sDpzin6cCe3rDUQGdyQZ1YB<@;v+BRp;*`X(jU|yMbWG^As~C
zbNsj3CAk>!FTZ7;>h&T|Flf9mSL0swx4NZDc`qN{&)1)5A`fgF9rW;a>u^nRdmfSa
zw@7|(b$7(p?tG((l`h^bbDQHe`@F|{L<(4e`(D+&zbz!D#)}ZEH0GZ$QcI@Um?@jl
z##e{2@7a+Z`$rNrf;rsn)%#M%_pj*Ycz;~6!uKwJG=-abY9goOR8m3Z6O;7U1Go4Y
z!FnnAUtLg+E9Cw?G@P5F)>7iZr!6t1jqmoYG`Vsuu+ZwE{q5C4p3(<>qxTe_BLx>1
zzvtBL%-bzdE_<zav^Y@FRVB&(6=Su9-LNFhliYio$(Z+{Z=^yJF5^SjfTxg9^uTas
zVQD}#!`-8!B$J^!QKb^bmC74_7i>qPR;vm3dGDTh(cvn&D?g7v*!;1pZ6vZ}z~*Wu
zd$-%*hi0!pwV2!EcW!-GH9<8u<PLa;kqlPuCUYvR%<G1l2KCIfX*>9qIwC2N8CGNO
z3)s;XRmMXv>gueFcW1i|vjU{MJDO;BclQU#QLjF3Q!8NeCI(ar@`szpeFAhxwzu0|
zq|4WH3oh^qxdpblRJqbFdg6UnDZSi3WBkvO@sV#P#=Y4+K8J$6xvk@2#;U5G1APV-
zI$GE4bPwo7k+!HON(Yzk=C_YC$I6NZ<+y#(q>CL-nhpua1vk;u_|1H|J|@X&sC{lU
z`9qgMX1?oCB|lfJswc;FsOLQ`K-xmHAZI6Uz?eL!&M6<Xk}8-O5LTmc>Uhy(Bfim*
zG*=hChzqNOJ<-#0!5a4FMDm20*`6#h?@`U&s$<;9l3=#lSaYRHaQS!}g<c`4maVp+
zQ6-qo%)Pt&<5)rRf}Sv`yLL~a#;wLxeRET#)Uq78&8!XV-nm@4=-m^SbB&pv7bo5q
zjY(<e7?VE+s7<u;qrE*?KEmggkOvucVXkb(=%C#0vCfzgwQ{wBi4R^LVP4Lxcr{+S
zn#$NHjc4p-KQxqHVqsT68tLh8*-+Bf;3`Gpb7Fk$DoSX(R&w|i<IPnDYP?~e=<%ML
z^myh#xk^XXMDE0gJ|7FNtSxCUUCq$Ti!7yK_r!qhJy@z4Zye$Mo@cjv;z6d-ShI?2
zd&_v6g+;X2aHTKXd-Q#s8t=nRP4A(AV7oGQLsvlmyXNw?qB@qvfL-JRj}Py9jPKI;
zMT1h@J~emNSf-}2SFv$4-^cr+i#<2-P;(d~$C)FS*})yF7Hi?zk^4Je6h-D5t9$oK
ztL-Xq^Wa*Kt91lRs;L&JFeO|$p5@*<N!^_Fb~I;Jf24HP6EbgGQ>pd9(KacgsEI`H
z{uBm}C6(t&9+wX5nD`j!O8&rSM@LT-cHMm0<?8*R(l;|@Jh`G^l`H#LL4W&J?_s`T
zN_-TVIjA$59?X~+=o0rBj;1qJJb4yO?}3rbW=eYI#E4f&rnj;LpT;h5@2zlSbq`dG
zR}PBj@8U5A?}%we?ecIK8fppfbd3`mYci1HJB>zpDf49*<cs|cC9YBB-f!E|<?^#8
z8q%dJB#$wN)uN1i)s%THu6<Fe-l};6)tPeC6kFPEzP(hQ0$(+1LTR9YG#nO9`Z(Sz
zHo+fiN@o{%4h;<kj=Z24xEADO!EaQL9vCAH)KKV5rD$)?dx{*Jls`Po^>!E<6U%qq
zK}zg1I;Qr*YrJRN$J2r<8L1k{5DS)-V|(u=sj<n<9q+@$ETWBf@ym*e3e&q%$S$1F
z++BHB$WB9@1Kx1Tt~J6b6651y!gAw2>59f)O!7cAzbp2`n2gs+{_q&1NU=jWTGj1C
zjc~_LcIE6FndJ=9K=%^Hc!aMVZG1G>j_jGWq20iO$+IW1IGJtI%<PeDSH@^x1>D<+
zOk1z^vDKWL1-t=ork$2IeSo>%n^~A%pz7(-F+SLxU^iCGD)<;_9M`2S>Bk%xieZnn
zN-Gu|)6C~p360o!#fkD?xQ>&eSB{kKuFOBIne2VznXc=dPKtq;u&kUdZTx0e2ETVx
zU-NtUGj>hkj5sxAmArYjO`}6%9b)6hipSdTJiI-@wLG8Ck+6H@tq@#U7%@ai<W_UZ
z4kLpv_$9uXwT=$tK?!zsx?M-n$oshu8hpPRs~$}D+EX&P!@HmD8cZt*V|UpFPxP|+
zm35KQ#=9ea&++Ogm6K}n;Fkt;sgF8^CtI*J3LM813x;`R-dyG}HY?XPsxh~?P?(YJ
zIh;5##)ly^*g^X6F|0gJ?mfkTYn-%;?8M>})=v1c$6J_W{=1%t0O_zLk!+e)zII-J
zu=iMB^@>(Cw;h3(eyBR6DWzUtQJ_po%FlNjt&X{%KFFTvF4P?F4or;5^d9Pm(b-Z#
z-b#8mRx<Iyj=gKQSFCN5M@N`<e?ahr<><$i-@Tyy($q0A<dEeR(OE-@cg@c`Jw}Tw
zC>xw8&>!a~lT_l2Jy|_tb!|7rZb_0|BFUEgy0OBigA1}$q#Q=enK_(Zso|KB(t#Hh
z1*EcL%&UdI#s|ZVey>n#nLAOP?!Aj98Fl(@lY2*Hdb~w+*{YR6!#=K(>T1&J69cWy
z#@@UxIpfEf`TPJQepvzkF5BZy>v%T12$qMAi4OL4UEX$bMa6`3-7VkfiHNj^4I?c&
zT4G~k1uxV()Uuq1eA11})sEAKf9JtWp_#vX?360Md4$3?q*azP+_p+Hv&K`!Qo0mv
zo0KP>)L8^eYO0Bo9Q)sm1h-gd#wk{gt?BprD}cG1=JxSJPsAZEn|A7(M)!ff!9reD
zx<yroP-b)#|6Mu1AzF>s>}yeVo$SKxsvCIE>&@HAdoq}!md6=o4OGPWGT$mxvBSgW
zsNEHn4C5Af?c|?QT0pJn2-GX^WNGHJD!e`>c%L&=FR&WDr`9G+8b0FP>B?W7*)GQ(
z8~1V?5BuRpuG(Gir=AW&K2pafw$|~a7#{rYcg#N>JV!%I8+_Rx$Lzd6_F6O!w-`rH
z40q|uN$%#2cGHs|yYaidCnkDm^Uj<;<h;eJcjuGDpK9F~&i~dS=G0JfQfBlG>qBXe
zQ}>(c9d_8Vv(F>e@E7qEmoU<EhqE_+Te3oD+3_FcV?*SY{`g+&@Tx!Ft-Cd_YKKhh
zmE#wy#{Sy-n}WuZ*GuRB(Ww@EC)<(D$kkWT_?|*HDl30=XZv=G^1Djg9nXE|bmr&f
zX+<OVPVCm&6I~&l@cs4qmp@!8S(x@pO#BZe>2%Gjk4GOzC;r}+vMub)qHo$mS+A6r
zF}*5NPl*n;1?9iakdcTq@VNA&?<<SQx{TQL?PnO*UUxk%$bY$6>f6|P-_NzYm?pLK
z(I4VrVW!z7>TmZSdpdWd=2zaoX&0$Zv4=&vnG*NEFR<RNV*T58sh=29gBEF9L^Sqo
zP+lDKe&?2NzIr;gN-}%3meKXBUux1Al%Jos+O|Ep=xP5`>vEX|Z<M!g3@0sIN)9j>
zyU$-F6maz7{jFIWidWivwe#5PZ#K_)YI#)e_haX`NJ=)ZSZ}gA(J!*lNcVl4(BcIP
zivF^eEK8`F|D)sK+}6L=AGZ{J{I%9qmG>t-R=O<tZTFok)O+%Ok>!80eVG26;-62p
zJX_N1b^k`G-C?IEA;LPH&lYa;+*M3(HCt8kx1W{B>+^~~+5Vi&y|XGc)3(X~Y2D3N
z&L?;LXHNbTC;5&4+VU;!`46?7C$2lmM|@LvNFu}asFl^D%*=<@W9Bg*?(%DQr+0Zf
zEb6=b_isfUwO={6QjU)@9ZpZ!{q6fGeUy2$--K^>bcblLx#?fA?;RFx@W~?Qo_IWd
z{N}B1^mLxb#$5kaJMvDb>Cde+Cu?0b&Yhj-BQ~C;*nF2|6!5&K{B84T1?^?mj@b&A
z6(77mv~XGW;y)HCG-$XpPdU7Pwp{7e;w3(*_fCiJ^V@%_@vzgvr7OPvA;Y#Whick#
zE9zE$P?_QC(~E~ftC}~2{B~2jViWf(@<4qMCF0V#oxTHo&sNE-Z}yOUz2?^MCtnEN
zyq$dDDQkJ~m8#S&vTXC?mA}`%vkl>$$e+L7=jXSFH%myIiF_05bdND~E9y){%H6%S
zOD_5C5TQ0M+5L8T&&QGeH`La@n9G-MNi-RI`sZ<_2gL`Le*N7%;dkoxb7C)a_4Re;
zxTOE}=iz5J{ng*6)_#Bf{$GJZ-}wCejak(tgR07lf9#B7)iW%$OzCmgI~6mm7jQUE
z-+Gctf8buZxK6+6O!3WqKbQ1;`2Or%&hyiDHLHcpZr1*GyrG3pR$%>W{qZ#O(<?$^
zlvjT3y1?|Su|LfD%0FGM2s5H4{jwnF!pcRZf3tN8{t_ONeEnC${VeLTmr;wnc1CPb
zKXG8&Zes;X;N>MZEZb#Q%_+J)-g;(7_|Ijh*Vc&ynE7t?t3KBB<XX|wuWyE~TC8q=
zDI{;#!r_&RUFK#uJS)C`qwo9*cEdM^8v_-$|E2o%X@!mY|FV8_E-%#VkG_v<^8PsX
z?d|^B!<*-Ac_;NFmwlyDsOt4|U)Oa*zu)~&_D^GfJQ$Vnf82QBL2;MJVy}jeS9-tS
ze{}!(IcG*o6(0|}INj2AR#>}z)gNoc794F1e_nrnfA_zW-`?}`JNWIcZ94pfg(u~|
zsb&WXjU;&9XmijqkdM1^cyIVGwO^f&3ilJTxoW<Bp|~QwMn}$AIP#{8<LHfL*SGF{
z_c3PupW7$o?+xn2FIRk!y#1a($9(UKMUU?Wt*SouuiwX)YnV0v8vXU?*NmTp)~<@%
zV!r*Lg6PjLJl@{w|Nj6@K(fEInJ2b-Hh$Ud&?6?h)KzT>Vg$QwJ9JIdqRUZDrMZvZ
zzJYR_X_s$!GrL>+%0bh$daFc2m4p5)q@D;4)Y|>q>b3_!hp#gmDUa=<s}q?D=D{Hn
z>p-wbG#gEips<po3dk{v1ZvSZjKgpkngduJ$0<bDBg!XP2l*}+&%)fIKbg*&FgDnS
zDB8{9L6ynlULHTLTM$JFikb%J=-cY+8dhwtGAmQ~#j^1n4}6@l@B`(p?dS{w^)V$D
zK~3&R^7}o8#wzpL(B|s`=31Wb4~DbhxW7D+bQ@@wR%DjKl+^<hO+-}gP^`fu%!s`6
zEl-!rxvQ4Sn!$>q48W8%kp#j5vQkPQ#(_Z~aK^95Sfv<9h6Jiim93bwV^RDmC^__$
zf#I~Cdg)b51Azc)47`znVik7}uvu}{1ye1(p>8S8-%%J58kN2Y`g|r89*FuYQ-LOv
z@Wf#ZMXg~M_6Uj;xKcQ1iPAu3iGh)nKUGzr3>1c-dJsSg)^T+(m*^M>g@LTu)7~r5
zGY_TAWv&RY0+NP;-=JpemYRF^n3lhs{8ptPJb-P+=xNl|=zw8OkeU`zG)kZ!La!u*
zKo&+IQrk%C>j#x%C;l4e`7lhBGEs|f)Jlddp$8LLJ-#ASUPwX3kKAPtnS1|`_=Hwf
zky+>DOw~$68=j+!?m9xU1KTzZ{e$A6F@(23E5o=3Sulyivw~7A@!C#CtWQW0RbG*)
zrHXUS3iHymB}#H8RVI*jI9F(oq+$H<4+H!2P)zm%XCw!@&Nyo_>fW1Rz?Yp~%Q~Pz
z^6bS0(hkPTo(`s|e>z|atzC3DvV-Y_KmmuN2qFY-hM@!CMli(m(_fOYz|C0_EW4th
z1MLFs7`*l}_rl~EwVCALASz`<!gMBUs<N9n7u;<rKoHZF8?h^icHkJPWwqX8Cg~S~
zHLMwGI0HFU3r$a-smf8ZKQ}lC`C+BPu>^{{fm}L`dXL2GAIU+{`=R#fXfy&s8H5J%
zHjkKU;l6M&TaA}k$H54ihtyZ`6vbK)mn;50Ym~&{l~&1xAoLiHssDfg?Sf8~h+;2?
znM!;&E}+tg-dH%(8wu|=hr^?2>j6VmDwiIDXcW4dND=w!Qbn$uC(|_U+9qy7s)$sy
zIy{sF<s_1XL50_cNy_rf3}-^-vSsX9>oEgs0(CwfjixwF<w{%*662rPmLN+5$P!%|
ziHN#1n5*J%4)wHc?&^FC6$$GHFesL)>&4iEO&&|+q9n7ewc0^M3c$G0z&U6{kW`Su
zj0!^XN+*E6^2;b{pnqZS(8oT3sv?8xc620_-R(eQBV;k8Yh9&?TQ=DwDA~iH1(uQ(
zC66beOW?MSBU2)~Pi^Ih5hKCaymns-=qCpQ#Z#CB`hG<`0womK?1jU@)#r;O1wt$u
zPWGwA)OLrxtc+&d6iPz^+mnpla2*Fr80_fk>#>TSz%VBc%1s~SeLTVsMkt}^-}KYI
z6mAho^zU)E?I|pe=p*Yq`bEI%h1&L&rgT~@io|LxlWcz_alNC&vXqhstXU0|>O~k;
zi!4K!8-rcJo|eu=b9QVwF<_vq&vf7bMT<$=)Y|N5lYq1yL)*X-PTTn?fJ=pkh9ulT
zGPyGbywba|gThhhv!PpWM_a(2j@Lx0&7L3`_k=1r1c5w09fJyZOK-5Js}uVsj+8NI
zOG>=7sz`ELHc7y26$f(?>e!cHgu<ib;dBHzqkngi^e`MGN24$vpk$_5RC)x5&h+jY
zu303M6}#FIxwx{0sii6{4?CbHfeRfl&;m4)sSe?W6pkSa$40^-(D+1A-iA|q)H9M9
z+&v$_p*Jn8jc|d5J3~*f7wAgU|BR+K;7U+9k<4XjMGDuK<IszuQ4z{(Pu_gD`PyrH
zgMGcF@SV_YOw(Feb5U?Kt*b!A!F8V8Nb5)hc}1u+*x)LSppBIi>1G+1Y&50KmW@y5
z(k)YJ`7kzbV)S)v3eHLjCWcTFkrm+=WrmH6>=aI=bBRKj-dz6(DvFW9sMg_~sVpWZ
z6%*=+>qAh<OpX+i!CaiJ{Zh%8zMMx*kxcYKB5r)pfwGK4uyq!umS$#GEDOoe;3DPV
z9E9zRjo1S-AReOK>6k1J`cW%{c}fyg-QA**3aGcWFSN0%!%QaPyJw}LZ6VD}^@UOF
ztf|J8x30dv>RciQ>@U{UavAE)W+aWbgJf)LctT3?ie`0YcP1MhF<n!~WBnO>u8C}R
zV>+CGw2-Aa1V3|eT<P9z`s=HgH#C`b>tJkzxPU!NV6LvLeWel+SSq>(JDeMe_H}OP
z?gj8oJ$>Y-2Uv*5P5*Tpyk}KphP=FHY&MEwjwvN<i2J%p;&gYk@hk#DOn)6JP9P<R
zu`NM*LLe&p0ej;Ua2F--(vmOD;66YsL942E2F~p2kSu7iFA4%1`#~1YWSeu@64HST
zI}>AY;fky@SfN={9N5UrrIZpzA_du|a8zw({h$#w-*i??)1>A$NXx`#voIiWBu8x}
z6Q$Ks=%CgVEMrp7#vQ(-y$EWmB^nRX<r0w^ZT;7pBUU<V=4DuAx76bGOdS``{t<jQ
zf?GF!dw4)9NIoH_g={8;5mHpI7t`M+p7+I?<P0drv!57fpB5<0dLd}pGHDWGRVT*w
zSXKcjcd0cl6Zr<zLYzoYy`J<U)FMR%L2b2kI|QL5qS(vU6|O5Z*`gI*F?a#@R=O=R
zc&FmQ$V#OzIj}k`%E$WgxEr&g4p*F43pKlvcoh*hQrFj$zjVAxW?Ds9RT(n=FXU8e
zRGez1{<!&6U!Xw?H5Ngm`ihjb8Pfs-Q^vU+SR%)wtyXvz5lbC}nh^$nnCD8fI+(h&
zVCe$d;#XZYFE&%b!gwf|^8~OejhV$#s~Jv2Ou1jA7Qcu=D?LbX3PSi)ncB*ma<rz`
zHg1=^U1cWh>|I5R8ueA)z2e2^e@l7B*}wlv9+CI#F}-BDSVfT)24<jD7tvt;1Y}kp
zuo##r6VTRVIHBv3jJ2eo7G;oMSh|&ql^C`D!K*NYZ(E|ee!Ho4B@AuSqL(e(&L$9?
z>gCv@sib<BhQxeCG!n8IOJbL%c`q54SUNDna_6@7<dVAbXb3wqH}-n&vzcfd<}+37
z;T-9dBj=-+N%chPP#y&@r6Xb~POB?Adz?ZF-EJU}mG&=s=X+W7IvR7ZfXmj&`qKif
zXBR|>m+hzo71233mI;LuyXo9>XdoVjO0xn*b7_?8;Vka(X3!vjU#P};QGXOCNH}dI
z-UL#Hh=EHOl0>0VnR0HSs5J_yMP<{usCyZP!x`S45M`znsjdx;)ZfDKp(I;l;Q|=0
zzvCfWG9JWkf=8{O__<hSn32)L*>WLFo^M@>D_%=!Fr%MUl+#ii*>KL;0CP@SuYyS%
zOsK;Vm7a#UbY?U`;~aK0TvPC4NPrc+{$w)lMtEoMQ)I0&3t5`FpuWOW$vQ4@x-B%|
zS7?CU70d84Pe2TOH*$B2MaIPT6D}L_FFehb5rSFu*pYB%r>(!<$hzQ-+$$Yi3E|6@
zRdMrjA2o4LAJ3;$nnAQCQpfCtq7Ip+={l1Lndv3TvLzig{jJ?w0>oX^ay5*iV~~U%
zZCPRa(@JB1EK8+Ka%BBd82Yd=Syd6{OVE)R1s?54kXnNXL|RBe;#)EtFQ4j6hiPkt
z<e-vQuZQm1F@|gDNKB9rA{Yq@f#8S%m5#3vZ6P?`@2A4zz|30+qPJoX;BgtS;k^cW
zE);=?wTOfmR0*9Gj5czcAgGDAw5%r8H;f#xSTIB*3hXEmZEYYTGIOxx3t9W0$&CSx
zo6n$yqDd4a6Gix;?KbnWd3O+68aK6&|Fl#<UY}U@k#J}Qmk}qd2=;{+k+=@mceq4&
zVMH{Yu^qi{AZA|PNK3)ERcD%88@i*L!YN<103g%e1jyvAeacF&7NE$eF`Uv;B8OWk
zm%FCq6feAP*1DhMGsu=YTnf7%bnpzXNBX%I^m{2JS{iU&qJkEo{S6$ng2G4$>qCKq
z>EuY^@N%ZdE41^a4QDdOOhpH*sk<oMA4akllq!)4p-qW32_o5lT0&e1J-`@_L}(QA
zFA?agG+D&b%xT*biLg+`c_>t-T-#U_IL;pW`D6IZcK*lZqMcJd3RL|3PyOnK6)PR*
zfAAr|v*&+aichVO#MtE3@z~C&<JflDALyu$%Sm&W^Es(q5pg<)O3BPR93699`a~lO
z*Go;Um8u$xqFTuW$YgiNqc6_*LTACf3it<{HZ%b_2T5sIZ#IZEw1PRr*@?p$ojAy&
zx<ZLHD{@R~bfK1J#Z`5y#;Q<XgP&t~Hk&3CK0O<I5Tq6XetB0wz%dtaBw&3paHeM_
zWD|WA-|2HY8i`ICP;x6k6>+B!k;S~R$}Av?MiSGL8c^}8_?n?{hxh<Qp$M(kXkvo{
zGRPz`NHrRZ<ihcnLM_$AhE=dr7Z<v@N@axB<NUTT4`HETG<iYY_XW=*e2$ZAX9nY`
zo?WxjlzMp0N>SwWnpTvOkFQy2PT~BT?^F)3`A+u)8zj_<SIQYSnWES!He0;wF*cw1
zLe8-jl)>{P8%y$H;tE%Eg|LF&BUh;{VFmHqDk9q(&5G)p`sEqCyR9NZ@<|xa7VHCE
zgNUeunT@8Pd=-ZgD+a8AdF?tJnXTNaQ4X5bUG%q?{sPI!iG#zV1z{o<6<Bpnv>;Tw
z2q>!^EEqv~5}72}Y0R@Sz`>2IP8~&*^&uc-on25EN6IcOr47uEfI9LLvu<onU7Z6%
zCR1*ScPpX_nq<=k_zPn@^J@j)dqN1gk8yM9rwl2Q&iQE+0`l;ZymRot-8*?ifkzJT
z55=P{LxtKmDsHu)1_`mWC9XN8$XH^uQ5kw7=aWUNx@ekUfO#90O`_<n5-S0S>r4Ph
zT`pwO1x{t!@n>xq5>@otXDS?kh^A)IdyYu5K>4Z^K2fT|!3#S~;ozk<gWN|hE~~8s
zxR`HN(%*IZihuv^P%Pr-8(M5TL+V@?r|~@YGzbcUZMU8qF6OxHMvS7SegSzVnM)5u
z8^wSF`1)A}R^!zN*@0o;RflZOfRYG<NdIouT@lD+^FW!9?Ak;^GH3}8WMiN#XV6YF
zlOY_Cbyzoi!=!CFKC2(k&~^)G>2$Gx$|i*B6`Jf(kT~@d$7`>`gg?fElhAAc3@^$}
zrn96v3(4GzyEghoOpUVCxLs53uzIjIjM$JF7Z|z5HzxZjmO2ui+#xjDr$0y64Y7%!
z7|G?%l2yX!Sabkqy+f)LsOv*9fTyEj7~R8>NDPYt;~dNv5(80z5s!5^)8hIvt&BCw
zJ9lvULE}FaHHUk)xWS7J9f?Jd5~L7dA~7N%w~3CRohK=_$63qO?xC$rvn2(8uf+va
z`n1sDY}9JH%!s*(l?rYHaUN4h%9e_iIkRF-Jf7pWZmg)TufwB-rdhEf0=26)D^}%9
zYfnv0QS4oY!;}tg(h(E<6R?>OFc@~L>h+4Z_{>LmgwIrSB(wu9EmH3mzvzs$BXc#s
zs6v9+6*^&YE{;Q~eBDqBwF^+7wr1&JA|(@*ugT(JQIP)yQXfg{P5cV}k1%wDNJZw4
znfQRpp_yPJ1Vz=C`h!1_N+kkFZ9NTZHa>e6wDTZzW;?hT-3i{_n7G-lAy?I~6pR&h
zpscC>Tvd+xnxMFs4Tg3+NgqKF4|f<wLO5z`-S&|k7+jND45*#sa@jFJ4wXwT(hAb`
zkPJcKAsB)X(;5@}X_#;TpQ4-vGWgZ`7*wC};EXs_36#e(Gn^5h8fj#aCw#K4!~|@M
zf<`e$nd0Q0?4)_J!_%**UQo60BKr$;H`cw$%CG`DP8FZz;h${QfeQyz643sr%alp_
zndQPA66*`Htel(*gUb*J9Hio;FrubXR&wVm3WWu=&k?xun%wEs<0Ez$Cv%!^+^L|4
zN-2n_b>V!aGd5zsV9qo*(5TzX#}FXrbtX$8(=J9xG-`7sK51;qgOme^-M@s>;BOed
zM^XVtsxH;L#kif}MImtpC=t!>N~U+pV?eCYmpSAS&fc`n$go<a^raPN$HUSjGS+hh
zYCo_L&}+VC)A49MTC6)a${9$Z@YE(-r+_DcR}kdnBMANknFOkdkgiUwHyf;3liP~4
z8y%FHfrZ5}?<*)`qht*zS~eNtdtXGs_yA={iYg3sOseFqjE)6-JfcTdb2j}@(<2=d
ztl8J%teKQaw_sbkx|tS_^4P3#5oeT4k!)6crdnvCUaZZpU{=*gNg~l9Jhld90Uk`{
z<kF(G*!RLNspUG9a7pc%F#l7=<U%F2O!+i*Rq{p`Sr^j`$3BCxT}#RKr@WIaPL(U>
z5_*+cpjv7Am8*g!(4I-N#M7`#WTW4rW{1K$-agTG^h9whXRHbc{IWAq)aNX?kg!hN
z-K!c@i4av=>)ujbc8C>tG2Nw+nMW8(CJ@b%nw$L(X^vW3shyj|X(CFCUBl5tbd;{5
z;E5<qA?e(vF3i-{(v>g3i2M|I$#${y%ZsPZnvuMSpIPsJ)!(K#%{*qg|G$3K3cmk!
z`KpyGS0G$N{p#f_&f0&x6rWUhU?-l_E!SJUyeySVr$E8;m9x@SaPxwuYn7MD8{x|r
z_e4iy__NWhuUTa-#yeV1VFND%8WiEV7N<Amr<69C;-qxGqg+ny%rH#vdWVy{+>TQK
z6y3KCVo4<@UDQgRkjX{3&n1cd3h#0=iD}R#llK@vuG75+Fhkqi(-!P0FT|j0LpT{t
z%Uo>{dful;*6wf`W|CQGt<$G1l&=MfXC0ZeR9^dHLU|d^FKMF5>BI4|NOUlqi)YuB
z*Oiyit-RUvI=Xk3j?<KviCy+}<()M77YZ_J*PMsF0VxxWhFG{BkFwcM&z)M?XjCn!
zaL!y*>BM}g+#Hp^)h<gp?S05%;H~iL_Xw0HmWoHs7jXcbP5!IbJzhW_#pQoPecj3x
zO8&20v6}DyJ}duUk`GL@&1SysO}$Nodne8P=~#5o>>G}XJM8!pFu5KlRaZ<Qn3wMj
zr(H{YZzP(g#h(7%=Eklq=0^3Vl^iS@Y8o19>Ir%{n@u&=*6!N1t7a&Xt4XGZYBR~f
z>@JuX)~c6F=y;IXw%P33+-Hs?Be6kY=a1#WD>ZRD2Kps$o0)j+jkvLn>l4mphoSKH
z!a(UdlhE)tCCv@dY&MpNk$w7AvB3n$1G9BgTW3dCFx0MYlD0zd>gbL^a~EB`p)T#N
zG1-i95R<pJTptuygCYvewh^y0CEG^v(wTNMc{UQm8^>|i0g25QsdAYRXDBp~isv%;
zm9NZ{x0b^sg;8NL4aBuJ-7P)6LA>9)^}3dg!7}0iZf$JYR602X!vvdzXm@D4r*`TM
zR)t*iln+*V<SBW>J6nOkP1TTDYq;M$j29Pr@GCDoWU|RLZ%24Pl~Z<enQ6OKcsLrz
zy^wgS3Fuw9R%X{tx)O$ywIRNp!amRg2GTLgl^Mj2jz^YgBs42toUb9H(9<6AxH3z_
z?1p2JNHifUS9NMd5R~`KT(-=tNNI^qz24H~(@}XTVrbIAgD>bd0k`5Y#=^pRVk}lX
z>v<NJ;<3^;Yk?StM}ZheQ3@44`{F2zw*pxLwd~kkN?8PZv0@jzo#R44+tS{wMYUn{
zAg2wZSMhX_+qt=GBmF5et(^|Vnb@!G<(^%cwJK7ls=>?iB8M}Jm;{+ANW{4D>A_P*
zj)`KZNwU2gJy?X>FS<1J>TWKrQ9_O{ta;Q+OS9M1aUI5N?Q@1P+Z%|Hm)1fK?5Ep8
zoMmZE<jAr}2d|W7SA~>v%vw~L_`Nxmu8;_hgr`(vwQV7WAj)DSmax~^k;BrA2_W=b
zNOFLTdj|ORK`(sav03qn=SQ6Q93v>9D{K=d1-u%-SwPYTm_Bxrf?5MQO_~wy3TK#z
zu0*q|7R8WB$m$ZXN^PMSKB+Mj$HW7BF+6;eNpUPZ8W+RDgWW7;u@9R?3uObH1+Q(4
zD~8oJ(9KS_S;;ngxn@DaI}Ee(g7q4nH!sk3u>(`txY=aW*KxLK+1TNPQmj|;vs4-D
zc`BHW<`M%;DNA>^za<%>^cm9LX^jPnk&Z>8_TITmlo5{_2>|(E`fg}vym5H9vfz_5
z2RSI<9;ecdK<H#+;W%EI?3IHBLbDPvaaOmLyS~$@@~YSJqPsmpXigP_>5FH6d<95$
zIL!Oz5?<AVE_R36*Juo{k}Z-`c%(lzlmi(yOIA4WsOgm;p0z#(%I2w8TPHDZX3;%H
z8TXP1JT&NMhLgE?MA{9A>#WQXygz-2moma;e^^q(S$!vS(nOV)#l`bA^2nH+zFIHb
z>H~vI<w62alM{cAO7=pr6BZk8Y<$EOr<Zs~!aJoYFA00Glv#QPm}=Yh(sGdFiY+U@
zHI21TTWNwRX0?G+D3NWP!=gzGl0N6;-&&<=Ciy@Q=Q{3qa$7C1rx^8IRj|VeW~P&g
zirsh7vV`C|Z70Rt6$2{v%S8!caKiVu;hs5e8t>+dO?RO*8l>l7x;A%lwBqnAUJOUG
zVzMfsUTl+$X2FZWg241}a8;Qy)_3cemFj0;Rx+D@r3S>Z!yd97A3p*CO9zJne>4rl
znADJQI>U&DM;jPUVYnFCtu0bn$n1l|X^#hIhX2fFt95Cq)G<gy2lofG8<^bqwc!jg
zY?VV3sB(<kRW4)enPKujHoj)n$)m%{O#n@lz9?b}dE*SWd@&^v-;+XDYwccnq+C6N
zBL-w!H{|E+@N9|b<Md1et=a=XG(1wHON(5V-@xS7fmqQrc%@lxBsFW3+Orm^Uc?4K
z^nTug5M@7|!QqrBY5MV6>;P@+)Nrp8n>=!BhhF>?C!TmKg`?HjHfY~}QsvY;A*P@$
z4nDmUk6JB}f#R0l0t))YX#p%+2=nmkDv&Jw>9htf=u73#vO9J53t<=>Px13cO@-<0
zs$FS1-7NXp^f=Fqg}j{wmw=L3B$^H-?lVejh1p3wf=7tmg@YwY>1r?HgcvTF@O;5~
ztimom?Lyc#o_iOal7<k{e~JRDMV!9Qn33JU9I*j9rZ#(>=q#K?6tcSpy2P>>Nta?Z
zmUqfwXOss1HiRBkw7t<)d<IX4K3#~!`4wwl9;GsP-;7)TP(@fOtBMHxbmoz5we{-S
zCby(R(XAt42^J_}Brc5!5XAJC+Ob8w+oEB`03uWDi=qjI#4Sn%syrT*j3lEO@kXJ#
zBG_IJV2)^fu*4c$SUhWt2LVd2v8-|{YHArvpT(Trby~FsQCf*f1=Kq<%)vuyxS$`7
z**TR#s#A2~oqkf=lyiY73x2)pcu9U$k<!Rg<yenaMMiqLSr3eRn+#YH%H9Ek|IoH7
zXFq5DGpqfVxQFY7<S~o=@9Kt?%a#4ts#Or)P`7Gz!&&>Um*T_szx9aQ-qiX!jp1V9
zR2-SIAFtByYU86w$U*k|+AyS+-!;{5Y12-qT8GZeYBNK(1MtB%aivV*Rc+3@IyDS8
zTbGpK#Z^so{9<Qxw{?dP-^4&C>l0Zz861j5#&FNVtgbglqG1>=_}$A`q*lze%lWja
z!c2Q2jN7?WET0}i-65b-;nM|lK|UuUAdxe_Qfc}eS<s!l49)la%3Hl*GNY>Nv+4mL
zA-Xe$4o_PIQB!t#VwNDq$>|ax@!)kyfP9p?$ph5%Z`zOvXyp}Uoei^g#Jd<6mvS~J
zS;X1PXuEV(W3NiQ&X?*a7!~i0%M9R9iHr4V@q};!w<~e`0ES($c<qHRg%_{-Jz^{Q
z9#-qRe^PR1#fXoB<WgQpi{Yfpod5$JAT5T#N<x|(B%v23J<FsiZ1M~(U8`;b<Odh<
z{SrsN_deBG<k}qZ>WRB3$*2c><P#@+`DAh07Ad;>NlmTV?(h1VwDK(<0he#wvn3nV
zhk!t>Ohbd<)t6e$tXF2_d)ncCH0nww6e4l?OhNrvJ@j~L*fE#-gT1|Me|lMze6|;u
z3;FDi{_!*0_SXx41I?8}f7CB-l|Z1<AKr3j{QpxAJ!xI-OuYBhbY@>E5m#$!vfq->
zB<!iieD4|GbKB^Ax`#|&o9U7zjrIPQK2suE^US<fQYbu5^WuJSe?rOkqk7+tS~z5v
zdUyXUHT+|H6-w*2XXJ6H!nP)Xjn#){PGsWrl)K~NQQE-aWk;p~L^T#b@ztIlwjH*s
z7iBT&_Qj^N%TF)Hr?mV3N%Wuk#sA_n%l-e$SFLJr-2Y#{x~}f*`p--8;q{+Qu@SQ6
z$A&kn!;bZxQ@xAdY!7yJ2YY(8K7IzSu_lpZwk8v!Q4pxj`n?(`cZQR>bfy+GpB!$0
zh|Le{0#n=4xVq^mJQVedx0>~f6mP~g08WMp017WI+E%}6yX_+U7~Km+2>krcVzrLO
ztTk8BE%}**xTQoOs#2F~upjTM^qJ)qk>QHEhR8^H6``(ORmVTf3c#biuOd=46<j|1
z@_hK9@CI3{jG`ES(B}_M?-c{jyS`q6YwMlYf)&lOZmgni#n=nXQ@=!6dzC=F3!oko
zkavw0-i)j-Ap9@A8CfnyoV^+O1>KB%_TJmG_uihp_x9|)w=~$7bnmT~WcWXNA#O|R
z)b7LmW4{gekM4e3+lyQFag8pN@6zvqB;1?%)GX))jpELEU{LIu*cQ~xkwS4OacTXL
zg1V{b1@B|L$an49E3IY9SR2h$UwDXEZXlW58H=KlS!H(h^$n%NiCQsJfcRPj8O{ww
zyI~^KdlQ6gDkuk{(h9){^=o#s*^PkJe(`Qs*1;W^Nnhs%5DV+qoAq@CG&_UW2G{S|
z<2sw@wl8m==DoqQU^(4egA3<zr+I0xy?kfn%HXgz(QschEn%J^XS(*AdBZRCW?r@7
z^s>2_*X;_GqSx|T_svTzeaURtb$z-~T-{n$g5FRUkk`mcv|@>eHoCM6fVbe#CA3O9
z+KmJFP!#^MX4%T}>&3YE_f)Qc^__tknp9xWj(Ha-kf~jWWGI?V?aIX>*2t0LW&7gu
zB&BQDH=v@;^Bs_?D<k|}joNng%8Yo&N{uNcs?%<oIF|Ps6aIo&-o-|=pvd_Xxp+L_
z9I-4N+9dT=ytO0{*_Q3xYRGWq^xzF@ytqv_65h>^*!1H{=J3u`wj7nT@?GH={L*!0
z`1A6+{I>K9m1zgtb|Z3&rp#*hDJ!*5wytaAraZ1$E=2lbT(f*S_iJBC71QrSrhs+p
zOi3D_eVyj)Wyq!O|Im2*BJKY))UQ~@_P;Avty~2kzW=xW?EcS7@p0S#Ntm_pC+WEh
z<YvgLndy<{rTeoae2J6rTt+U-Hbo+{0*hWfsczHj<JbuH$td&{p=ePky?Kd$*uryb
zAIkWx4J6;u`T_cWEECG5I9l0az(Ca2#pA)2ZXOMsm|@}*_2C~*Dy)96Y$FieXl^j~
zm>s6w+E+98<%iMGE&yk2lreoI{2B!JvF%1Il4-Ip?675#Oi0G5;@P-XRM_QVT(%PS
zTkntt$}E#&eEQOtAP`;$YJYR1$w_Ky;V79<PaXSwJRnjmuKLn2+A}H>;7LC=eAU-+
zZHZuxPL=46t3&{g<}29f30*bx*@~EwHqW5c=~g?O5ipK;E;Xa*CrBZd6bA%F${g0z
zDcUc?TUiMCQ14Bs@u%pb4Ln)zUvV8WK*Oeh&sUg`7Y#6F%V@mOzFB|(peK)lP!IP5
zmq;=s2AR;U@C%b<Sv4kT^>&?rW^GthoPIaj;5nX3HqJYsba|hL?J-hFyR$5y?Njgx
zqZF><CzL%LO=~K{Ri^Ikrzr?47(Tz>t82paBfuIO^ht9)RHhhgzXfaCxWt<n)5#HG
zLLSNt<FtdVY(`SKEWMA*flxmN(sy6fW~?hBzlx?t!Z^*=MJzc|C~1L?w^Nv(Q@G?r
zuaw>_rc)A?Ll?sr(dDhBZ5`K#dNyzAY71@Y_9v<$0n_jICjy|f_#?PWT#uW>etMxn
zB(|mS+q1?q?^am@RaR{RO@%XGmcJt&5?#%DeZ}_HaQn>Xy>liv#(Z^<)s1MlD|Ym9
zFzaF%pRXR`20_WEIX$px74s<{V^Ex0od_^^LoskFgSC6j3=e6Ofr8n2;&f)`&UtuL
z*Z^oLxFPUwll_E%jnBnkln(E1@;FD}Lo0o@!^dBEE=4yg;KguSab2emLnIS~Z9bHk
zO$PYhGjRr<^llExrF35`fzlTPtTZIYH5lS3>4_wFC916TgDb5vxz9Wl%{-U*prvUW
zh$rXmZ-yZSq@799SnLvw#>qvQX^Iu02v6|I#+4@-g-ctrqZXJMu_s=>JJ=`M#%HbD
zx!L<$2L3*)!GW9uq#gJzdMQf}jWKA61IeZ*wf0P->9oJT%2b=C>Hw|<j;s6SRT`^M
zNpKHNbyN8uHy+hR`^fOXPVO~TFofex+00`75WXv%j<6a5{fUN7n#d8qufr)(_4+{}
zyP0iqil?zDNV6vL^Is7ODWYRg|5$Z3&+RO;ndnFe$9kMuX?2QiuMy*ePhpVbg%gmp
zJqblxUjYbddkTi^Zjs6xx_Uvt7J`$OC!tB(D*#b?gwNJ7RWMO3;E|3<RhW+ZvqwVg
z*M)g;f0t<rX-Gqd&sBD$adFoxFQM?+Q%c>-o)}ZJ<if^QXO5Iyv$dXjwp%3;yiQ!<
zEWOl&$ry)Kb-qSJEG)`1Uv>(!j$|c~dz^fonp+a3tLmv(TjfOV84+a_JPHwI%1-dw
zx~N`f-a5tuf9uWadQo$)szVO-O)dZ!=_7hGN}6LJ94-<meX~|bIMMU3+dzAN%%Nzc
zj=Fdv##gXlq@WbXY}!zSzNVJ9NL6Xp*EOsF-NK`%5Ubb;A<q?R>(CAeid}COq@V?y
zVtlJo@T2sVrVS;NRE=3S^qfHMOL*p?Q$QuN@XngnrK55KnzI#47@XBDS)v=AooYd^
zQl&hVbUGnrTWH%u?tR3quE-~ah<x^5a|Vj0x`Y^MAgTN8z)Mc^Y`~=~FtKxw;tB>l
z_?^oN3}25Ax5-kiH9e{tn1v4d0P50pJUdn?T+W)JJxM7XF4B2%cp;AzKwGN^C9wpH
z?oztED|;4zoeswoVF(tPo{S`08n5PH*}Zk^R?@*D(f-`f5RP~l!qQkOi`Hx#d1?8I
z706C?+?wh!<k58%5m0mSFBE+p?%(6MKtFhFAb?;7oZ1RJzMD!=B$03(@ApIb;1}EJ
z_(0X{3HCO+L2?dim&mK0RUR!^<s4Bw&;Xg8LyS(}LguOx4OwMt=qd-lXntNE6X={G
zrOHM0IKsrr1SStu(NSE#n=}yP=32MJ?rMR9Up>2&Yp-CjTj|=LJya<7A)YiI&<?8C
zgVG>3&Fl&eW#uW;%sqS55{n~EsYAWeLY#7&!z&$l1GOGSaSpkv&#yD<i^xqq+A)2z
z*L1zJATm+OE!N};@68FGjcH$oD(F;ZrL$Mz#;doiqO5F^xoDk=kiR6Y!I@;dOv}Hp
z1yXiiveUlQA*9JeqIgh#+^N2u0#7(j5S&UGEk(}8sC2JJL3c;awe_$9FeTKAD+aSQ
z(5xDQ3lWQ{k)|un$1SPqDjrb#@!6+=)=6!UUuvEwb|r)`I|T)W=B6H{(11LtWfL-m
z*{<_Y(5zWlK}YtQ+lcx*89<CTMKd|`q67O1HC}D4rO@!g%$2+_)3*7|T>&}$l<S(y
zmZ=T?;`FqZDhpe~QmJ|A6Orm1%Q3W2%2DHGGBB6IJ$jeLCEAYn;pKub*vI%PAGgyB
z^#QtHC?St;<IG#GwE5nKcgbiK(^vIe7Z0nnO&q(`h0O9Av(9YP(6Gsc%`fdOY%f}c
zM|jPLnj}QGx^LSKcyFdX(kk8|9VeS6IuwU%727tn^mcTHdi#1h#G5{tqP*O)$#5YY
z(W9+PTcV_s^TWwq;@N8~R~8y0>e2FQsf3!<I$g(?94H$N1b||rrHjx{O*c!I!c~?6
z3teS3h2~N#GG;{snn~5y)Z@>b^<AF34#L;utnZ4PFlH=Rx@KH8MX;>^Y!$0>)>lU+
z#ir{DD%;x0V+@6Br`r`kL5sVzmmL}xM;%ZYB!FBdfG!c%#GuH^oJ*WqSn`Uqrtcmz
zHP2Z@p)|u#W+LwCEDmsQ#1ukUZM{^heU~N)M;j_$aD+s9czZ_rP&zef_K&S%8x&*Z
z{u<ir+Za@u?pLhIRIGOXC+mJ2M8xr5=T8M#fTsY}YHw7`oe~mR4h0BP0e|IE^fwbl
z2dj)Okqmzzd8o&_v2dMaVH;_!ov*4gn>er1=<2!I#XC9T9xJhoO?IQ?w}db4w*K$B
zhoIE=e^1x;3qQr}zgDeUxvI{2{=cs7?EAkj$p`Jf%xm%fb9}<q{oeF~H+!}F)#=Ft
zx<We%TJ|ow+KX+k8ySvKl!2#@t_&ZI#*-;DTuKgVIiMyI!%cm@je895&&AvS=~~^P
zB&+M7$Ll!dV={OddPW$Ti5G@ax&C-;z>FkE!m$JvWLFg5q=~ZjpW2;{4Gjwe5VC)Y
zWoydNy6VM!)7KsZJ<l122Oa1R1q-iu{1~n7w9MpF%@A7RQ$whhv5|&+(_Edx)eIO=
zaVzO{Aw;(Ye>9r9xG=YE?(XZ@+||3irlzJ8WZE-$QgJ-Rr+!dFEu&*=)x%Y0H9ucx
z`?x&ITV7SBPP$cB+n>I(CpncDcW+3TMLy2FtFOo6%C6-suJgfNPIW`#T0EZFsBnZ4
z2k)5bAJ2Dj?Ju$_Gzmn*`gj2v5$v8YN%`Af!^4C{9+>sOltxTx{RKR$2=-u?^fx5B
z)$=44aatsC))CK>5hi>}c*fMqV@KUmRmM&_fhnX8UUj4%HnS&J1?UlVk|p4ym!&+?
zd8!4lCszd+6)c7m!)@z2y85;gQ61MA5V&d9k%ggltw9;yL(eZqlK#WDEYf>=p09${
zZKuO2vvu3L?M=n7)R%^(zBrbK(y%lX$FjUMEX#{ySy3966~(cvEDg)b;#gLdhGkW8
zEUQbyvbs2yHKk!$Qyk0HrD3^x7OpFidFyB4yZTbucNU(hCkDo~#_`#QMGIqDc6Ta@
zx(b=H-`vu(G1Rpg@~JZM=e3<J8<htng8GfCH}op=xbr0f$Rva~m2?nKIK)AYiFZFs
ze!8I$0mh!f(+`$8b6LTw8F<xHI6ljhQKdf^<^Cz<RhBCIrFQBSEq=PPcs1m^4-KV0
z{NVoVL&-&8fHEr+$TgBnnYIUqGJzM~_%v`RZ}ocZNn+q~v&kOo_HwZcnEfeY7XbZ*
zVV7uL1n^Gn_9n0kT)Oa$UyrgB#ZfHU{?IN1jJq|;sdOl3n=rNT4iJ_+F0}H9bWq9z
z%0?$IOeub<-75-T5I5O~(3OJjNz>|)cK5W}%jd-bY$%*9KpK-uRC*)O!-RHaE70W9
zU1)|<nu1llLzRrEz5tQzawVf^C_uq%Rx*MW1qjrlUTFwc7M8%yt|enyRe;IH(k0_r
z?Mjo<(_igYj#EKU@7l2H3HwqN7%v<nv4l$}RI%YPOS7S=MzI&%Ol{)9>*A)rydop7
ziI8s;b+`2N27AD1y{=_rups>UU{7zy=B|JyHTBEes8{eAN6wCx6^a)N2St;%T%n`K
zqX~bJ1(*Qd>>D7h!a7VXYEUd$DOu2x%&BI;i3q&~UtUsv+NzC9u9&B1u6oj@;;qlG
z3R+E1ms&`sDr_{8m%D~>Dbuyk#IdpMg=O_9DZ?*Ywtcp8+WN#9EU>+=okGbS35!<Y
zRGylnO4$@YT{Aar>W4kXJ}lZ}y=Z%?4<RzifpMiDsdckZ<2kH)y?UEjw2B_5y|zpy
z`1V-GqspET;gSB>P>!x@D9l2CzurPI&Jw9u9X+U{zko>fFNIR(I?8nxss={*Fq7C$
zF9sao*7t5d^;D&LJb^=;qPUDM_u+N$5ItN7vk;npRBG*8>1bPiJFt@R<EJuX(OzB+
zB;Tp0oeRtkZy|@j=@=f}#RdLc0%f|04{<zaLH#&E4BTYf2f*nv{UI=AQa}mt2Yy8=
z+Fi9l>5;T!fU#n>#T1}OKJ}~0Kb)ioIin+~WICMQtv^jUl8a|!sknL=T02)SYeml<
zV5QtDOWDaHpqATYdtFu`5HBIglJTv<R5+Mrnql6m4$CKI+293#i#HeSH>W}pGAr=A
zW>kp|kHj7Y*~JGl*O~4J0w%MuDfrkGjQLmM;*)EDkut{^`hk1-0>TarWUH|nOq)Ac
zAhs#Pxa~c0@j5A{v>v01Wz4S4ee?urHVG3wdP;!~PYvc01L_5~f}SHefcuVR>_Yq)
zO;pMA-svH^GEzwm9S}UBfybIeZv)_f=m*vXFKkl;D4Wb>%VGi1?_`JME@|t>{w01t
zXN3cq>Z#*pWmXTyoSHO0!*fWbAR+Z?>eIH`7c8ORBOWC`D9ZCM7umDKA9m>HR3s|X
zY~a>>Q-ilRkg3DlruS8>M$mOPu2a-NmAS+%%F54kYKvNTS+RlKN!(rOSQd{tXOr|A
zwc|Z-+cFc7*s<<pu^LONZQCtDuCta<YZO+{Z`R#_N=`YcIfPg;h%!O;N-2{B6t@)C
zM>U0K#Z<2;tReRu@5+hm*JkT7PRgpRDHo)Vc@5SY`i$8KYIc<n_uNP>YlklHwU_ul
zxKm*jO?0I|;}~(}EG(0S$#a@$-iXh$)0@HCL-r;=2oepC6q%_hX&`ii;MR`5(6ucc
zom+Z>auByTrf~Ie7Fwcx5t<v-;bSU+rb0|ED~+?EwCxxd!);rhoDHi0E{0EA&72Jz
zffmE1Eq%_0i$IH(%VnK()&knn6vJM)x;h(fJ;iMFES;R?Rj!MXcyFP5sY^vppmyxm
z_#x4I$}NS*dpIc<{&H6!D;Mf$AOWZk_HJoy4fghGYJ1E}@L>L-On5YE+cm&Y-itT|
zA!HL@O$Aa(M1X%VnTjU-RvzWMwAG8#5L}3kjEP8UBeh{{Z=7wFN#<Xw6vJyGT!eOE
z8?x3g5J%%38O`;xHHMr1b1JW?m$@u+DgDWz7HIR3bX2>JWzgh3qbgj^7#LQMp=HB~
zNH|TGo|m}G#HArmnqR21^l55VG4~TBU>HFZ0ZubUYx>HoSQ)%~r(X3^39O*&Y-nYm
zm;eo=c8e6i$oec+W<OrInU=LWY8baqlRKjcze6+9YYPBXSZA7Wi0fQQ`9Nd$<X|Y8
z83?DMbUWdV7LyYat>9u)0<WCf38cWX52vXatIVZUOL?B7BUIh0wvr9xGC90BAS;`Y
zp{NDbS{2~7_bQr{V=&%r05n~C!wpO2oIsxyP%iWc1l>1=vn;e3Bi!<1FF5#?)mufE
zpJmHzm9V15#V<A6#!o-!A3Gh2gsMZF_R{=J@c@RK)ifKE>|#ya36l9q=&s9f1*eRo
zs@Iv0LNT96j@0L(052q!oYLGVe+U{2rEkd+e3*vRMxbpW)diTKIe??lH3kA^D4e<7
zEeN5jv<3oow5iMzUfXV5whZkK2sixHD2K6B6pUYbL)}tc91-u9*b_q7__K6}7sBO#
zTnt?@*Ir53_UsW{NCg)M#x(NO>8(N;>g_^<H?35vW?^MLlpNqkoCef`Oa&$dwj~0Q
zOlTOksF7GfNO-JZB891OKL8u-<yb5=C!z&g+q_86gnB9Ncj42G&^Wlcg_|OUJSBnf
z76<|4%2g5;nik*!T8te|aG)|G)JSD#LW$H?>?_wHK>`l2!W{79t~ga>1hs8B%a@$R
z#Q=rK=p_x=b{fIyxig!fmmk>x^@Wk+WnMmY9@19pwU~(m3pAPyW_?LyPlrL5DNgg#
zA)9;_wjx5tzaCU9LcuC3y^~TNBpi2@LOQp43O@B#E<fRGtGqkei<9(ph^5MK>SQ_-
z6e`w<nL;DU5(!iS&z&ZLs0gP@pVQ}{ac%UWBslsMP8rXLl^%;|k(nKT11SUpG@%;R
zrd6~U07G!S=OP*o<1d;C;VEooCbDXRp-pc*jUGK}D-9Cq&NVfDSrZ_Ao@yVlTql+G
z=_#?r815k3&@>e=f;Lksq<F)bAvcfyVuQCz?ApmKUH02Iaoy-#w0-2RWtrU3t<m<A
z<<BN7*0!T;c{GTEU1fVf0c64MvWP9*SbiZqv*eF}X)L0>ATwDf4-~9@iMo3k>H77q
zW(iZi;ZThYcHC-J=w(gU7uWvSL$`)OlNg<Mv{e&pJKCZ-W}PZ%1@T@MbvImd56788
z`BHV9W~u34T0OcnP%iX*eRwRaNhF$zCL%mEheyx^K&z6=c<Go%r@{j}@q9bIp2D=U
z@4V+ykubh{9G1t?D2}?Z$$O1BLzTu4sU(R2+@0a`mIRO#ShS?LFdVYdJJQjfEX6u=
z6FQAc8te9StFhQXAeJdCdMX_qJw;sMI82-)yUh&f&>AK-aiEv;(nC2~w&bVuRJJe(
z4Vw5W4IUE36`)<=jCH~)pqDC{$;A4B?S%0K7mdSogA6BfS7hRxSI8=$2oXhwq0t<`
z#aN=aQ1lY<-S{wHG?mSW)3Rb?P(vIRr#2`d{qQ8Sb8Kk_;<*oj&S6OB6*&|ZNiAWt
zWQimTvt7ecC@8fp`NI}TkqctUT!t&DrFHaw3kTK`{)e45F<M$Ozrj<)NS=Wta8D+c
z<oopFR{zuzWQLRJtVarpQ%wy`zqEQO%Be&$5w*36LkwDV9L%Mu=o~C(^kIjs2g+;F
zOHmdJBLV7iYFFCovjqyMZRw*@>#2@akxHxI72weJyJeP+{;+MkUH02Jybz!JStG^1
z1qm!YPH~|_^>a%h*o+|^d{bB^t@M&lL}e8h6iwtl+)3lEWGX~Ig$k!91RCL0K1eCK
z^);BxB_cF97Nn#o`+mK%ODtKU3sGFD(i#^RKK#zk4=_4w1YFj!tgdk7I%IWg+Uh<<
z-BDd#k%0koM|H)((tzwIT!fKi8buTiTp+&1{WcEL>cd}^X|;%VWB3bt+BJop58sou
zY>{lMuSM%|rpkW?3N7@8`X*Y54#(oS+AS6u#>*sE=wq^@dC`BR>N?JKWMNRN7>46@
z77E~fSp~UBI6cS$bjS+^X3lqt65{af%>BD!*@0odlncT>A4f)ctg(?0Rr+Z*Ss9?r
z1Az2OM-UTFEwC#b9bJ8lf^P5%#{(|7Kqby5;}i|^B-JD39=#h{d9bpvi0!DJqs@*~
z%i10hC|(rH;Isg>6J=l)M9Mf-RZU3rMtkVEO@l}6jFe3fZ}PwdF=p|06m8%_JgqLo
zw0=t*B2R0=k@uzo0q3^Ay)(xWfC+|ASx(|OwcT&;vPi$ux|}ziY$SrxrlU%Uj^SjS
z)x8o()U~<Mo%7xE6*L3fKqgB$2-qaL7#;)Knxm#j=tB}!Q#ZhqC^UF|8B??66p>6g
z3|5LA+fnK(r1snk%?7AEnjNY9JFahj)lpWPuS3M+&M6Z&0Z#Jd>n-7ae4DI5Y2^2=
zER$<t$E*}dG!6#M6WcRs`XyW}zB?fDa2jTND0KF1Qb1G0CVCdtPSf9<>V=0w_tNTc
zFRFq?bmV>JD(NGOb*tlLfZO8MR8Y>s#g;l39>fUuHrXj$hCNqK6N@ccF^Gd*EkdMV
zu}bL)%dsLwncBRrx2tE;o*qOyTDCW7eO#M2Nb6#>$|g86V<)3x6+ouMxLU5YgU(kr
zGCSC(%JgIy#S?NJp8mV(bOpLkAm9)=S`h*yQEVaBHbpU!t&Grhlt^m#U7eCzRYTHL
zkhYXYc12K<nLsI$k7RGwI($HK#c$nnq$Z_8EX!^Z&;=K95E-@8v*xa%iX@eo99u^u
zECX=sbce_Jj_gQ^>n50;5|lYx1)zYS^0bE4mfEQd82T{Yd0L}zpmK$>qh%02H{qUK
zJGg)!aeRd1`0R{56;!}Bl#Lg*nOfj07@g=HH*6+xO2pQz1uf<>*0`=Bl)Q}Ky^5d<
z3%Bz|XL?ZaWI>xzelbofnD0owkxXjWO61wJ0Q5Rx*`<@sKJQwl(IcQ9_*J%H0bzpW
zs`|OjQLKrMuFvyq#VU>iq{K4e^a9j0O}3oUWX3F*#n=n!h_V&eBUy<Bx6hEQDqN+>
zOf?NED3VJ_PNx92eAVR1rjy&{wrF~FRzzn&d@B-<j;yC6B-Pa}hplw+rvg3r$cvZo
zQ@|~OZD=y6RU6IzAUBqe@<gOwmD6AqnX^a37cYbD9Hm=h+pxbdq^c!dTGP>@)T(7x
z{`#!TEo*Dkcp-y^n3R#08_l|GB?yfNMVx)tf!2WZwHn1Oo^0gKL~Nv3z(+O>%3EwX
zk7{xsF3zEJWtXisc#vD1)h%obTO*w^r>g3CF{`tpryvBKm3NA=QoD;ALF~tkEvndi
zioJHTRLV_yx>doW<fmO=TQ9V=rtFgTP}%fUIa^QkUhA;Nb!UyN!P=^khLY<d&nsBj
zE1m9-`qna~MGeq%K&=239?0?rEUTUAjM}T`OLM)nE%{hMNJu$##KUe7U>2)2zUdLS
z{H(oc+dvYZ-R`E)3dkO+_CbV|k8R%_TY|2xgecAO$9cH~W7JH43U8R81!AY|kNZ^<
z7N~Cg<8kSP(qXNvSYdFz_CvQHDf56?;-E8wLQ{mTYqqe-<wg|wfDT$2b>k!ffO|*l
z%%#^XEwYrM8FK^nsJt;q@-vx72>uXLA-Rfxi)`v<VE)`J&w={t*gB`mTspke9Iw(L
z9LG8>&^ia8I;p8)yWWXS9Kh5-!cGuzu2KU@6ku_jlGI?6PFVY?M-4LVRyKgBR}d{O
zxEvroP7rmb(AS`VMjb#DY$K$UjH)RIcr^rjH;oN8tF-dr>%)m;x}~wFakR0qyvo+b
zRpt_%K>|(sO3r|Il0}RMD6)cR9YxdR9g5|a0>)%Yi7VnQQhES`+Y%5d4Zu2$h|uL~
zX~tv;-Xd2U*@i|}1231(XQj|=y@a(|K$`$?Ij`5t>AX|Aq@wDVUA@?)4RdL|Zm$-N
zq#^ZO)+{(Ojdu;wYNqKD$x5$t@Lb||)ThP<Eo2}cF1GGgbz&(nRj@2n>nWi+s14P3
zCN5k7wE+wz(OQPrGn;g}xJ}7Gv{=+7!E<`_v~GcPp{IVPe0F-;pwAdR<zaQG@QX^%
zq@btwUFWCCu!T2ix^b@CiTe^**frUi<7MZWexTBe+6ZZlsrBLS^hjTDJ*f-fT`nV-
zo$PLj&KS`hmmL62UR08|Fv(}XMt{~eFg*n}-*Mxd&3N3$<08{fNu;|qNO@hk{aWl(
zXqGz1t{Hyh{SiRD7qxMEyc@^<M!;z{j=ExZme$d(KPv1$r_eq&=ohtt<Q*J9!WXrL
zM!AI?Z!p*tP)BPN8U`%U=g+ef(Ps7TDtJ1&`U(hQT^RypKEouj)1;0l(rKC9^0PvT
zq}{N#-~b(-svb%4LX(>J=`(`uHR5Uz`^+w&9!b~BT*~F1jze*$t_FK3!dCRSQz^cN
z`K3dcE9uKCRw@A4q0OO|Dl>Sh1?th_aDh1z&EUnaP6QqeCKuJP1pT?8M%fg2lwQ8s
z6pc8Mxm!<)q`Ab-L~>W6(X1Gl<!Y6Is?#xvB<{o)xNWU9PRrsvv6IGH7{v2HAqm|!
zM_r|$?It!t=6xGmk@F{%vS??;88d^)UTRx9<0+IEetfvJD0U=IQ2?85vGed8mP+;@
zZir<CN6oauMB0HNv5w|q;h|YFc-=|kz95AK6s53V<>ELCRc5;;K`=aSU?@`t0J=a$
zzx8-43s=54>eR~Pnfv(5?(x9c-v6W557kvaM3Pf}3$Xb6fA#e%R@FP+|69I%)#|hF
z|Gg9+eE+Y~*M$~is+Akfk#GVZc`G!IRI4)S91JLlY&sTYb4L9oJiOhS-loI*qFQlF
z0T*eZXIgibigjB%cbS!mREV!_%E9=tydeY-e6z97D?xM@o^Rzu_1cTc(Om)c7&8I!
zCIh}0JQ&V~<Nk82PIA!kMmXBhV6}FW>I|8i6*(B8<YUW#sd_zzuEU@v5de}Q6pP^c
zbag$1iM|g}`em<BwRkeQGnX=}am8^b3^sgIu2mb(xPLc?$M{qo-k{saMm|_vM-pX=
z3dLzOIkuw>4N*lN7}>)G;?p9W2~-Zr!Dp1j8Izha?Z#l2mf_<2JpizrRc5K}HjqoJ
z$E)EtkPg!~nHX+z0Q}{R?|^s&nge3=#yg?+`6R6gozHeXkd5;6E-(<DUF+OnE)jcM
zE-J}J3pic`F0CB|sW~6_3<#6yuY*pu8~hQRFGqRk3UCw9c_mTkV&tBcz&<|tBwzj(
zqj~96x02GU=#sImr^M}4#_OM}=@MvH#zv;}pr*7yq&=(nZa(Nqw}%p!QZyk6xG;Ni
zP|HB{EM#yfh}9$+)VniU{|1giJn~*B=&e;I9mrEhOi^qQ>}(waGV*GsQ|}g4QmBbr
zu88#sjg(<@YKs|w`@?!dOE0=}QYA)NZux}V64zQ&n?oQY^_hILH9m4}O1pla&Lfq@
znZ&hk5)McA^!diEt5XyTy;g`|t%aR1N>m;-v)4@}<tQx^QAPNO87LNq-t{gN*JfK(
zAF^lyAYX^}<N|6|8f{XdDb#z?f>xNP2no=W;#TJgr&9EC0~!U}QD;o(Qn)?l>{Hs<
zOno983-#v)p<7tJN??VF+u&d{t%)b<PX$6Oqeg|Km@F1${32x*_jDoGuL&V2m5nF_
zsX8E4dn7F)h9k&PetUR;Q#5&x`}|0^>LxyMWnyx0FoXBdk>rb2nJL+W7@@8P*e{dw
zSvNIT2`sqbPvyfU#a<*T-{EWpcI8GOXxTDua8U&)g-JCd+FMnlddYGu6L}C*fd0@%
z%M1tt9@8Wv6g^Z+C@_=S!2pDWh|>uTi{Poa$w+LWZb%4#UU&219Di3hzSA+RAqxn3
zkm8H1Q%nT0<C7cdCL9pHY#C5IF6mKRXA^RBiD*eo$o-v);zJYWbe2@NE7!?;NUU%K
zhWdAmIf~Lc63H+i-yA5TvayjU6;qcdv=$lK6^mqtiCTbgn?k(3h9a8mz>#Rm9hf2<
zz=>2UVY&g+k^Syyx{DqYu%!{|Xki+TaEeiz{<)Udv=lx|FI9N7+;BmfOQ)!WLH6>*
zB6bLR!BztqNT%t{V7m4~g_caxlF50hq&x*O-ds?dWvVt77i!eh$QdXEQ)G3})df`1
z*GXU(SVdBIn!2P=v~?%0OA1BgqEvQCsHp8h?tmO_=_{lwP+82@lz=2SE85wyQLf4~
znsh9ZiY1ekN)v}X1gp`XrBIXCn4B94iFdGJG;VdwBGl%u%e0}CVVO04*Zhi%q_d8U
z#Rh1B$8CzBW*!0)Pkfa!Bf~sr1Ln1K-%WpRXh@vt6h^h7H@9^4g<3+JS~|Ky-94K(
z_OxsY^=%II1$+CHl0_@WFKE(p3iBKFmez8d=bIJrc+RZGn-^;<U?ziiFPha|^mjyD
zpl3=&x2jC)m(+`^OzX83WttnHc<lTl9d%VJC5ammMSW7Dhkhu2TK&iVMjP9b!W1%=
zV7pOeH5wi*jXR{?Q!ozK*9DpiM`sN60Cow!+BI~|DZnmw4Q+4@T`5ADcQh9D=34t5
zU(&D`Hw21tDKhgPdcp1te`Z_%O@U65il#|`obtl2_!M9Nt!r4de1&8Ex1p|K`Puc~
zm*OJ^JH4637&ns2W%WC@x`P=2OY@|Wj3{-30`+)RhK4;7Z3<(f&qBjG79#SC`=|nh
zl9GYA(-LKq*>GGf(2l}jPj9uULcoZ-X#=H8N8d);b|X&f-j)M4203giu)j{+!$$j=
z0hNm6t}R)17<W!hR-gQ>-CGz-cM!zldb3`ogmT%^0Z?^!(7Lm(2gd~zy1k&6^r9iS
zDlZHGe)YH){86ibIXsxI4ns(4q@sZu?I5SMnxwaywPbFmy|(zIQx``e-nvuKl89CI
zATcu8!T=MEGOi`TQPx%jvb`M%kHtoEBaVyGXmY8r6jmH?>7_Pnov1<MsFg#bxJMD%
zxv_Bct}@M$R4Tfz0&P63dRdhg^{R3sKMs^ohx!^mA(4h*H`|Ez3D)gDgYjfC?N3ng
zYFWd!b0F4~Z+jGZBR7#2QjB&`W0gRZqM=AB@EnFm50LrS!8}pc8c%+y-)n8H=u$EX
zDax_jUXBc?AJGCq!51H9a7oE3m*~<uMIZ?1;X-<vpE+3ARw@U72xPR1KnMDrDGICU
ztfw;x&Mz!3t15^mo^l5A{Gzs2DQ=CZLT2%583&Yn64RC=Uf%Rl$bXmdALlM1-KMbi
z1?GW+{OXk}y~cn1(D=V{)yh@N>sP_}U$?wrm0_+tBQ>1m^M8E&FRA@Eg?C2r`JvOy
zV>bN{G(c+qx)m!|uUG|e4fU(RIjjG@1fQ!w(@X76$A*Tpc;LlssH?lWx}mPV-fYFa
zucX;K8oTMHDEx@T%+_#xAQ^|Sffz)hQE3u1q~6xvK-pDhXKWyvzypA}M1=0(jG7y}
zJ4YL8Ag-%rQ?R$Jucc>Wun(6b$+S*qu`--)(fS@vX=?_`Hg9-iFZs(LxtNCambJE`
z#?rXX92yuX>)C{!W$Vo9(X!t5U}q;r)b_^`wZPKDl&+<(y|=8RtF^782T;}2)WUyZ
zs8H6~u|W^UjsSh23=Q}pYMr^*&kz9_RTD;>Xrc`TA;$-KcP#dE1_87B&Ea@lL{Yhh
z%&Q=ft&*x!(MULf_i%K!0peb?7Klg3QsFFO&Ey6LV`FAI7msGjDr%A@j21Nm(&ngU
z*evrc_P4eMpj3<>e+9Sp^|XYfzk0x2yw<Evnv0vu_$ie}fMJ@$C|6Uvcx_o36tIyk
zW6R1^B*G#65hv0eT|tI`OTK@Cq@g+lM0UPryeuU}Idl6VN<o%d5-=g6Ak;&if2BIg
z-^FWcubBc=i5snpoN9)Kl4U-1ToeX-thH4gEZ44BJTidKl36WWSu;R2-PCej5b|ra
zqR=p;tgNQHeRJ1a8tKi{1oVVVmVbfhC@af`hcY(2fX$9uBSvKn=!LbpOj`0GNnV(9
z;|gfaaG7ra(E*X6rkF(|5O=Y^XHx*MCe6yXo7a$nXohRhex(mNjS@gC-o{*6laAh~
zlBgC2va3RiS|}iwHu&GR$Z_`L`6+4q=RSSfdCW5YuZDj-{;zIW1pqkyuc)s(JN~~E
zAF}@w=R;AV=xaK<`5!I#baY)G?CRUx^A?B6pDy9c%dCjCncbOMyf*~G*K5JJ8*2|4
zN~i1rsa+9!0F0s-fEx^nWTHA9MfD`i*Gy%zyQ@j3plh(i6Y1K5iGHm?h~Yz620#7@
z(L<FZDY0yB-MUYaFtBZFSh;F@lZq2*vdKtzw`HJ^9Wg68X2cX$*ObE&#x2bd6o_Sq
z02M>a#qvQqXMoE=k>~(UL=l!!vkju!U<gQ*j1kMh=NSMQ=Sg7*fG6W}DX4KKW1?w0
z?ipKXhr@h#eFkqgplA#rd^k0Snhjc10-rLklmP)+Me_R@U9Guh)rz`0I$R-&C4;bN
zFmh?1@`_MLW5p)3qPra98X+KDWm<t%g3cq_3|VGtduC8?S{U?)(nGO`AHL8;Lv~}U
zp>#5rGAn`A_`taW4K&#so>C{N=~u15+a7$GgqX!!QHmH=Pyi1)U{eX4qS%<>HTbuB
z{ZN{(l4#WOC{~-)t{fCGN43e>YlD}(?E+l`Ito`u!8)sx^R7;=usXTI>f~(I!3?j#
zzt!tgyJU58_UhR8rnEav=rR+kMl_KdF<EfPBj)OWI^T|Hi}VMqzQz%;#3+aWT1SGP
zBF$hfuEqF8;)1m4%qYkQ{3|;qp|EwzF-NaN8!S-ylng9aof`A6UO$pbg@%(E`^I9%
z#2|BNMP?}=!`tTpGM0h19f>7kK*#c}!E)sOt$i|9;X(8g(T0}Zj!v^8Q$=4|3kSVA
zJ1|^Ji{y>GXIU#=`SSVtN2z5pbQ)-zAj9@5(<c2WJ0%XNGgA;D4pAneOdLX4i4Z=z
zO(6k|hSlo_tJfo4dBaU31r$szIh*CUI^IEc5@t&Zeq4*gi~4NA_rb(xBlTL7RuxT{
zB0{1~ynNUPCFk8xvE@sxy3Q;pMw3Y`ZHjnw#2zp$E*5Y`D4a@>eVYOfr=)#=2Y3k0
zw$OHq)(R`Jrj<^N!d%|OWm0ihPK>T^9N1ag*>UY=lg><6nZx*w31i^bS<=$_RlcvB
zjt)YL%4bk)Lv}V3jSu1t4~zgsd{`6t$w9Pl!>2L$g0Lu>eIq2=c*(bCp{z@mp!tB#
z9b|k(B%ug4veYWu8&2S1Mt<|Ak;AHOWn;TrdYF!EaV1Qa?oO5u)<ws{1KId)dPo(C
zN~t6$-z@H|bW3+f8_d%(NmUbJoPy;Q)?=zhXvRZZ;JR6v1#bb^%~K1&$j%2MMJuE5
z5iJc;7+A1Munq-+0A;&2B^Xxa%!)N=Z$MqEv7&l)#;mAb1ApLaR;+;N2L8h6zi9(>
zd+OId8&Y?x164d(1n#}UW`Topc!Y`0Mq(H~RX9L494ya)KbE!yuWi}V*|!u|?z=bi
zGF^Sv5KFnxkczdd66~Rq&$4!K(iQhxT*7%r#wCLH3n<1Wz_>0Jj|SALzZ^-{uQNAX
zA6g!&=XHK-2_F?K6s1F{0ivx573T)TsBIdV(Z9Wljl=*366he(yPb_NZAKc>Y|>Sd
zAUDz<y+;Z2Jv5@}*t#j$$kKrYgJ=OVX0>%qrTqDPpg({Pj2E@*)vc&rl@Z)#|0sro
z`j?x`V7}rkMIe@HLJ`DFk_kLnL>P8rpI&G0ippLS0z|Kqol{un5gE-U6bST(?fN9m
z%1qo*wdz}>`)~myZwe%AZZl%zKnW;VkE7v_uzs@!c(Mwzaiv3W1`3T0ix#v>JV8)a
zVrfmioSdjwadf<@KEgB){?jo?5ML<5sKq=CX8zFg*0Y~8^C@Zlr*OUgl=3LL|5v}F
zZe@dW|8GSDIA_;?UV_gp!Om;v&Y7e9bB*-|`c7QdO#WsOe&<)LG|k2u!v}w_Fy3IC
z2mW(4xMri-=AIO&r8{51ode%_@Le;P{cGkn+uX|qoOI`C?zy%aG~3)~?>20%VVEi%
zmUaH;L^(dU!{**~9*1xB+0)Gx>FPwfIsq@;dqu(Sb<}U3`1XnV^@(~(_XYuXg9eYE
z?jL0%`1}6%u>bz|G~3*dpJUiuneI)H?mVOPk1S`8$j^%|&i0e-fSY2gR@BBL)p1Zb
z#;V8Gtg2qMq9&89X`nJK6tEk+wh+9eBgUO8+}>Po5WkGKY%YdfYA|nLy4LUA_HTdn
zvGXtb_&Zi@eBDR>;ga@>h1XR;Tnl_x!?!}Kk8Ue)C465Ye%HYF3i!SbzHboaYJl$r
z@GXb$I{01#->c!<2;Wukt%mQ#26GhJJ}igt`S5)$eAmEtEqt5cy8^z8;JXaImldV9
z{Ud3J^_2tZxq?2<&qXd_qUnuS!uQpVI8(UTzDUcf;JX;Umx%BS4Q`9|@LdAm1@Ntc
zuV2K!9=?_0_ficn_CqO;1Msbf?`z;IyUVMzvaL0&{&vSfbB-~u*|%2Y*$m(PKYZVw
z*5JQAbM$RnFaOHEU%c}8zyAD{F4d&ZJLhm)I6uGk2wySRJ^cM1@OK*S;rl({kzS@t
z_!~UJU+016jUM3@9^pUpDCf8b{IeeLAM$`N_n_O?Jn%Glgs=3#zttmrjz{=DkMKYB
z2>+r-_&SezMLppE$s_!u9^ub>gg<Avhrh<7JRkLd|GWqOr#-^s9^o52@L%jvo-z+S
z`#tddi3fa-2mD`nz+d7K{-g(<M?K)*;}QPv9^vPE(82V8|CUF1wMY1`Ji?#x2ygKS
z&l>LGO&;*)deqDC2oHMT*)kW$&R@*z`0@^0_y!Sv_x<+p4v+8zmH$Fx%_YsY@L>vH
zXxx?L@P#r0^bE3+LDiz(2!)15l8F$Vr3!_N&>KfXJ<%bUutd|Xaa^9u7+W`WhI*q}
zTtjXPXT#(Vq6tnfJ?JCjgK;_%(;iMl;-L1224jhsL3dtD2z9|ZqLFYI*G)G7wWg!t
zY?J~v0x-Qlo$ZVzqFrR`u0dW4b7~1kC237YqE<XsaR95g%7~6d2jWI1n@$Xjq>NN7
z6*c5iiy@a)I6Af4NaeB_V=$TCiNA1r#SqVz7$Ll4%t$3u(S#v)@ql!^d>O(m4Py|{
zWemPJ1!3X<8p2DM63GzXgpy1kzK9XYr5cPu%)-D6^u~<Afp`*1JtFc74d4!Ms2`IV
z0AP&n3a1leT+DFRpw}gH@wk!Vbc4InF`!!}nnh!i{@kFkD;&$Fpkiv*)DRbl8G{k3
z!{9(78#nO5>rBSrOW)8T!zw5M(oe7iZaNuqd&|JZ3?N1X*&r0^&tw=1f)Wv9V`s;P
z)=)#ua`mgBW+inD{>OTf<+s8M!{&nCW2LfO!}hE2pA|Ys{o~IZ2zy%V2baWR7ov9c
z*0~&(du_~zsUu>3G5vY&XQ*>Y-IqT1a`-C@OYg+bd@X#g7QRpmKSv8+q=nDZ!c8q)
zpO^Wya9yvh)53jP_!=!-UtDU|!q3;j+qLiuwD4{%{6Z~!s}`;+n>)1d1zPy97QRpm
zPif)0Za$`k>+<tfE&LKK{0=SrQZ4*WE&Nql_+47KKG(Qg3)klY_h{jZwD5bi@YiVJ
z4{G7B)xr;I;jh!e4{PC<YvD(<@YieMN44-bXyMaZ_!U}sUJEz1@Dp12m0I{oExcR{
zH}12-uhN3&YvGHv@P%5qKEGU~g)h;<O)XrXFZs1_zZPDng$K0oHClM37T&CdFVn)?
zweTt}yju&e*21@H;Wb+L4lP`_^B>m2^>w(E7GAG~k7?lzTKKJ6_;M}$4lR6z7JjD|
zuFvW3(!y71;dg7{tF`cZwD2`r_`O>A)mr$2T6m)teozZ<(!$&KJ~Kc6A0Q#~H-b3K
zzv>1IdVKz4Gb=yzVbhqY_%Qrka78oxLO*IZ#<A%c_*A?P{V0l$9U=dpp&v!?v4iA)
zC;CzJ9=n(PH=!Rz?y<YcpGH55+GBT;|3>tqh&^^I`TNn2#XXiH|69?I%K^uBkiQ50
zD0+`|lmCtAN0EE1nfx2jkD~Ti9r+v4k0SP%N&W`(qi8+0ko=YCN0EBWAb&afQIsA#
z@jUupi~faFfAU|9{)?#o<S#@2#Z-Ut&qe<wRDbgS_OHQzDb=6+zeNA5sQ%>tDf%y?
z`jh_$=zlfUpZrgve-YK6{NF<VYpDL@|3~z{mg-ObN74T}sz3SnqyKWMKl#6a{?}9e
z$^U8ezk%vc{=Y^46;%J<qyIzbH>v*Qe;@j<r23Qp&(L2^^(X&3(SH@ypZqtWe=*gc
z{Au)8Q2oh&Bl?$6{mI{t{-soZ^1l`ReyTtDd(a=C`jh{S=&z*ulYayHmr?!6--!My
zsz3P~&|gjUCx0dSYpDL@FGqhZ)&DtafArT;{mFkZ`s=Cw<S#>i1J$4WbJ4$?>QDaP
zehB<4sQ%>tCHhxV{mK7R^sl1&lm7?kUrqHV|C8unL-hxL{eN{#eDhD*Cw|br_vsVe
zeI17m9!xYF?S~#YLe8P+YnqMxtDz##-gzhX@%G7czjBvpwBLR(J9p*?b@4m(&zAOy
zAN3;Ot?Rn~23k3H-Gk=mb{OrGuX-bR+p8C$=aG}=wC~$G(|+}nnTiEh+)DCe--bEu
z6X#X{(9ES@UBpRVjY+P4GW~-m&OHZy&cW=qeeJQq!NHmZSKLN&XG{Cub;%DxMv!mz
zwGR?1DBHp*KcMKCc<gH}Ge1PR^R>sgY*4hi_KER)`@Zpf{=LAU?GwS1?R&>hf(F?>
z8O%>Sdf+fjxu#Gc5w?6KR32*mu@6(Nb7fNpVG5VOACpdQ${z|IeRJSV-#8R}>h8nA
zMS$UG`>&6+Kl0lP+b0e~J+En>_)dG^iT2402O$0Iz~5{SJc6F9pU9Q99|}(Y7|L>d
zKK?oZzaYbt?UP%k51{HgHI3zX3QG4N{`%TuqI%}U_%oB^&*UF{jw?60>6!h{L*fI?
z1}6H_4pFc7{TAY};@JzKB!Cx47@X$J5DEgP3y^OAof}f~0f}^75u^xZA#Hhc%k?c=
zTKbS##wNx;-ae7LEB_xL*C&GaPmbT8-}MS?)8K=E?Li}V>4Ec7wpNK??Iwc{E_lGW
z?O=8t{sibdIH-9MgdzRzyfP)|cjw|1(r*hu<nEkW3LeI@=QoUJa_eGr+9r3H><(W}
zI48#z<{w86n0RzDc>lz~_Q@4rz4Z$_jFyRi4^B+CO&ptAqj1#;T<w$1e)!HuMl72e
zhWLEc88s0+c<lEu%0VFDy!9@FYJ2$j1rTr;AWZZ@-48P*F=!#O-?Tk!5r)0yD{~-^
z{nrDI17AQg9mFi>a+VA=*}MizBOu?bAWtIX(UTGqdGg?7UHjztgZZz*7}tLFV=~QF
zmBxG<r1?0ffwGUy&wq?q?J=1f5b*-*_wYRHcWS=%dky}a9J?Uj#^fbz``Q<>JJyZS
zos$bii6(AXY_?5A7B6a>h%a8)Hj!OCzindeVh~;(&|M~e(>d|2sjY95E%Aw8&&=!{
zzklxNYD#|V`ymk9?$WQCUmzxguscQAimw_FHuY|mI0lKQPyxvQ%dcf6RRbNXnL~{x
z8)U(47l{C@%5IAgNS7({KEmJ&J0^}E_z2`Ob+^ju`Cn;mG!Z-kzmLeay7BlM$-A%n
zQgkQVFCb?O$$6x0vU@&x>wp3+6Ne{$(*^{1Od<X|M$E_%@!s(xM(*>R7qIj%u%AvG
z%KtOK9q-|MwqB~Dzx7+$Br4z3JkFl81zKFbSY-W+iQi5<G4<~%?|@Y)i;EZBe&~5c
z%)6PG2O;Jl#GItQ3@mr?MWTCypdZ`Tazo4ZmOp6;q3CAW{f=jvk=PRt=SgsX2^FaJ
zeYc+=-|eVAwI90uFgS?g!G!?3_vHNaRS<}(6zUZ1lMfTv<i3OC5cGeyf94P&!DNSS
ze-?htqao>G49xFpH;mg4?!FMv92G#vtJ)_&py$3X1&m6?%72KM=5{V3r2ro~N|<)T
z;B$~9XTAbPWbDG1PMuWrL+TuRFBM??;C|F>54<J~UG8oFGcyCAzC*!>P#r!#YQ<lH
z@o#d(UvI@%V0^t5kLvaDRTPh)2bN*{MOHk@+2fZ@)uv^cP~U}c{5=_bNYvyV&rDN8
zerYS#V&Vfk$k{gxPUpn!DRTCWfwSO&gFp&Ymw{~L?3?!iYUe`~`u8wEw+9$tUr++g
z{9r*~>=z3HssCIM82;vhz>Y^21h(F{Akh8M1%dWIUl3@12XKO)(%g=!`61fmlb+^z
zb(XJv-<E~#6VPR(_XFKj*6xMv`@TdWlY}Y%_s_y`ig~LlBF9ZU{0y{T^BrHx+ZgUB
zGu#n3hC6IA9J9c_!^{asnBgXGN0bwUidhaDYQY1{LpPm|biMr~Cm{z~203tAa^Pyo
zo=1bkp09zN5HSDG7ZP0+;43AD2?_(>w9bqRC&r(f9DnWrHS_!#ml$M*{Ld(j=!d93
zfvzM5vJ=Oz0N)8A5VPk!5&X%V<JXBG6l*7-pPw*tPl_}PQGuW|C+%roErJ$VI@yyD
z0aOY8#L`5x-2D>M_{sbL6#X_QCbJl(x{2|pKr_e_Q;$EDKX*pg27%bYBN%n;1JsO!
z&(8|YiQ{ps9^l`5a!#&~e_y`$<lJ04l;uSJO;na#6GbefzjdT=K7S#$RknPpTlK7U
zgdO4%INpll9KS@6ug)UhKC0%=&!@uV<!EuHyW*OwUM^j+{1^7F2+e*d_#BT8<9Q>y
zQZWHg)UQ0Effnu!J~yY3h{q;_&nbLI2M-0GCvqMY%#=UgU}Nly2KNS^pHs-nk^zyw
z@>HDMp#@(KK3^2|H8#|5(EH4}9{K!7VLtn{e0VN%RFXx{Y^&DUP8?socl<dc`#NUS
zc}>CRvJ0kCiUs%642Pm7g3lF};?crVeAGrqw*suT72q<)KYxxJ{vQ_Nf7FH_y2Z(Y
z4BBiNTtyjl&lCA9`23@w+s(mF>&)#tg}HqRyLYkbOcOn)MQ$FIn`c#SeqrSnX_XVW
zumq^jPdn&&aw7Na@dYe$q^`tLDSJMKP8v<YXLH}z3Jt<qq_a)FsBggZ;BIK@{olob
z{yz9M^%#!!KYWH7T}ZXD`IEV4^LuphuL)YI{hmC&O_SQW4N6`E3$Iij)?agrRlvTn
zOAmY#^St6`q+yg%3{v%Vg>w2MI8_wnpSJONej@kuWbSDamgniRI=g^GV;hGZrTN}b
z*@B)Z{?>*<o36Se(8Q08CMS-+Uhm2CG`VA1T=10DzJjt^Nv_tHA+oQe%G?2^Lkhh@
zP(vq1GcxH!?%R{OZv*Vn{BLLi1*~)^c!E?|C<2SgX&%Q8(>V5!AgWdNyd2x|viN(T
zoO=(>X}_y|AIyt*XgZPK_+ONPsLO(>?<&!eprSq3K5*ng$}#}+h)X%)#F6$#rq^Kz
zOeD_Pa)Psg@VS`aE{Hs*{T#q~2%!x@Hhafha664-NKKqaz?|Yt4|hu||A52Y2Y9&q
z5@MpIjr=QrIy2*75*8V+`HGQ&K78FA*@s`1!TxlNsKaE1-uYT79iGlc-NZ-k`=rcG
z-Gg~O`cqxqSuj<Xk&XISq$X}TIT?I<VlLxf$X)hMLN~c3&)&H<mOQNWn!{3X9{Y&m
zI+BWcoH|+jc8T<mf^;-L^SrE)E-?RD6TT;ouT}jG>r5g<q<-XiCDZ0xRr_Ay81Qg`
z1YU6ao0-)tfnhe^G5*W}Y)fvOKR>2-)AhHGKVxLq99XF0-*!w+nt?Rkeq97#M16Wk
z9_I^cC_8CeT|02MLgcq)<m0AzhMK}5@EPJN&T4E?etJd{0O$BWp=tHWeEui8R&S-d
zOn}Z8=>YU$4^WRR?vd$kpCt>wmBm{<K-URSQ_o_4{z5&!0n4)}{~n2P{w#&A(=qz<
z?~v(^{J40b*I0%AI}gyS+zP$OqRWU!x&VVdh>f2A|5VZkQ84E}AXC?Rq;AuAD_diY
z;%oB1wt4`K>Y}ou=HLI3K>VE_mCO;JDIN4y4^Y{%?K+W~1(5`$ANjrV)a6&pa@KfY
zT%wa}Qs)rAEX;pDO%qnFhfU=2T5o;qM|vl_MC)YuBR{cWo=qtvQOxtMlg9%}&hEOH
zexgqObQ<QKKeXnanglum5jgk!VSzF-v1Qs4;3q)fHU+0gU-Ol%DC}_$=s*;HO{GxS
zH~f&6)=wDOg&@54b(5AcX<YqirhPIuJvsh#e&LK1Kv2wu5-*6K3l0UJ!eQ-L>l``U
zFXT~m!+eM$?ZD4bQhPmlJjax=GCN6`y-{TLG27mW%Hcl^iG~&^Ptw>oNu&QsJ-e^S
zNj1u3EnA^VgAM`|UGUB^G5!;=M^O}>kfRdMWTw+Tf%15aW2d3%jxSfuGClfA`pr{=
z335(~(w*Sa0k?eh2SU%#+bv6@-Xu~0%5(Ljqn{R8O$MJTP`oF%JPRr`AzSo(ff_xL
zo7N|<FrMFzO}tYz@n2vQZ~jjz#WW3b75a>(jugeI-=k7kILXMKNAtUH0MK-P$#XL^
zy0%#-W>9&d8j3<3HF6iLC5~;l6R|gVbWZ*mDgsvg=z<4~_5}xE7IhGmrnw8gY%KWl
z!3s1!0gn2X09ad<J#_qBPI}*LN#}9W{2j9a&PTunxBc}TEbz0`xQhk!f&+(v(i2A>
znVvi6$ee@w=FQogubT)y%SEUu33Kj?vh>y2^CyGPLVF*3(2x}P>2!(Jcw{y}QH^gC
z-A@))<77cKPI3{3O2VA`qNM!l+2HMscH2RkM{@h+)&K>Xf+t5WB@RAG>r~hOju`Fu
z#V8Dct1)Z?hasmyoqmrSEYBIa^DUeg5s!ZIH>Bj`)lA90Pz9<YF1t<SjyPEIc)LC=
zID&&&)9<xu2cplNppyLUb2yDenGK%RoF7ebj$ew?2*A@6d}`0VB6oe>K#P9QU~goZ
z`AmMuWg_{FbBvPbf!I_1dQUYbz8QFA;_x{%k8<m*xo@i831ty;wc|8zoZJHbr~mou
zU;jD?xnKVP*du?=MDUrtplRm5ulMOiT#S>pVgxAvv3dEA(%kVmqQl9zp9k)QS_sj~
zam1#A@@yXn`d2fuozJ~x!GpLeJ@M$WEzeDCc^W69PqX}c+CGvIMQK%TX}etHVzoDz
zXTs!h@E8SI{cWl)V8U*JgW|XE2>y;TPIJbLXZ5Y)zcXkL_voiHVh{J|XW`f6mgi8+
zbwZ2K5xfI`ujVcHeossukQ#fa!cD<EPN;L!Q)sb~Kk$?oPUc$`Lu?1WtdQ*do&Z@W
z#+a)g%`T>&?eiRo9#V-`ea|+0Fo74`z8h*p_243GN6@CHw84R9^3)k7wj8#ta^d8>
zT8`FD!K2weR7_}QHSt7y;5$5a9%?^l{K)=CRXGkk3FWv;m0;{*Dikk+odh(8)3_W!
zc2&oIAb4kTzBlH_73Rn(V7^0QCLM%HNb8Y6Lhrl|sk~19`)qlY$-HPAK`yGwMT5gq
z3C1nP$>2c;lZ#an;<><cA}b*P`l);qM^Jvwp}*91Kq#E^q`n67D#SnW#MD2jLNDW)
z@kx?Wi260WYqNX7)U*ORPiRMHdKu$qjW4TZjE&HMciQZ2jtYr?B9w)a?jsT%*-yYX
zp2w?95c*91Llv}{bqP{&YACBLD^Td<o@wV5*{#&x`#{?t`xTA^yDx^gsXYoBcQ?;8
zg}c|jtSJ1W@2Z6&8+qS{<OMnMQJu_4kNi33YGhu%SR?bN6yd(c9Y@g^Ro`cz^D^84
z1^3bCd6EUVrt{ZdrEr~``nG}@ci<8#_jJ3!cXH~Of_;;&Fc;Rx#x70sw0i8k%C#PI
zO037f|K3ee$9!in-$_&xpeMJXDH0cAnhWv3Ifmg{;OPr<)ByR*(ABGta%3MN6b+I_
zErYW-xgY1H`5oUCa*M19AmgXXRrT^yW=<Bn+_TuLIoG_*^@-+1b5u1>oad~@&*vI8
zaJi&#O+g?pFp9#>qebDLU#Zc)MHTM$=RDeSdaEE!j0y>1xOV+uB^64e%UkCfcFJ*~
z7TorP;Sym@8&4i0lO9qp*3iVwVd6uxqYwWZ>5fY*05Q3pxbsdiCFS6~C(jxE3Zq5q
zAwxs^eub8;iGa328`}5lXy32UzMs&}8~uv4E+O{V$-V+n&iktQMQO{2CU?PSeton8
z_wh+1d&by(@q;*qfO2-;f-cnS@~BGj7IrZ2)aHO|sGYz2ZGEneV^<!=t_3$chNY8d
zT4>(h^0E?|_kPPJG^y82Y<cR~XEiNaDq+Xp#QIzv%=Oi*WEy8BZ~a%=#HzBE!4Pcf
zDn-o)pBKvD-h=aH$JrY^!7DDjAoDS9i7On)>!#KzEa~TQ8ZV=13uT&>nz?&No|?0F
z{3&C>#I=PRc0v+ty}8-Z+2`_73g~0qygRq)gzDxnmhKNj3NkF^{4+<qP!ATM{-mfe
z&jZ^f+DXw%c5p8_k-y?w)Jv9%1)&0<sg$aI=J#Gh!iR1aL)^{IA<h)T(t&#*D*uIV
zQak+#ii|_e-Ac2@Je%inrgzJd-A=#@ZhMxsxD&3BQ}?RkeH*0Qu|ve^!Rc>siSqx!
zCDLTsdw5k7^C!7fiymkI#yKs8(km$U?CV7=i;klc-Q6N!@5ysVzp5p@R3zP$SMm^A
zuw&xL{-4+z@pIoGisTRc7AbP~5u(T?64Rr%enV#D-qQ2-I9vMR=hXg;7}lO3`u*T_
z+F)?W9!aLZ{~d!r(cERYXHmO{TXTh;716GTtpZORSPN14r~gIH`Ej&S8FW$xxsx_d
z#`@<kIieYH-A(dr{0_+VR**y}wNQ+Km_nh3m>yhXSjJl@@LoZj@GyZ*?&Ez(g8uLJ
z&!9c=Te;wOi3;4^&+a`*cJIw{h3<qZ+)74x`FBuR$M1yDgZcYjs|a@I)J>{Bdwxsx
zxr++9Z)`p*1{<OBF#RPKx(XYNd(0P!eeTX5{sz(JB8Z_ihokrdhOd7+6nsjic}k3f
z_fwjC^6yt^o}x5Q;SWwTEz?YkL5|bho8O_*OjDX^{BiOXqyI+v&COj-IOgUqrSLhq
z3l80m76pW^jDKS#7d-HRJuHjvxT715r|x*7c?<T<I|_#UzX0$lY?=I3zY#+i&mW-$
z=khdi0~w@(yO8J28y1n4IkCZHk-mXeXTYs1$bn=rS;aWQ#cpS}BZ=74qnx44?W)X^
zxVnb>_IDg`PCzu_C6+Q#Vtfdh15I+XtCYZ?{<WQqw#TO-^x6ErMT$qJr~X#)$kvli
z&00_kxqJ#ww&efk*Gw&3+@uZ3I~EpdP6iu?ycZDfzXms!r{LGr5$tVG{G&@2kU;2%
zSGdMW@^qVp(SoAX7)PIIUT?75BHT8MTsweV_W-%>mW$JB4NlsH$i+qIWFO+r_Mqr|
z2VXHxo$g-j7G&9)zkrA}elLU`$*+C2BIvzS7mZ7TzT(%^z2-W)7qk6?`Pe^sbT5d7
zPKu_}sd@S7%M|W|Q%e-?&#*D{Jq3;R8*D5SLE3+1W9=i%U`GXOPYc%03)a5JwGn5q
z5ySRIobxr8tW_h*MExpJGZZ@i^~2tY#F2)pJmb}tk842{gKU`ZK0y$}jn*Byex-GX
zx4%m34%Qgc@juL~AVlW4c8kmKunZb>=fC%?=oVX^CEeV+Q|ty>?B8Rj=Y5BYbqbki
z+)mFuMG}csQjzfOD&ZZNuyvM%!hk{~zQrmkCO)rNV%bU?7M@<GTl#kHbXDvgy!!3V
zPWPA3Vuv~+$B`rA*vuEjII`%l>`+IjLmj~%FfYOm^`Pod@4W<@d+ZgH!EdwDl6!Ef
z?d`JFd;gm_L-xP<`5g-Ur~p4W^(P8^@qf?S4IlaC3-5*|WVrut>}+$>S7+&ll;|UN
zwh0zVBwD^mH(Xd+H(Y32hla5<chIH1kIZA?dQ`MC&f5>NiuU<mQX?Fpb)fHGq`K!3
zt$Tj@7X)(@8fu!_^~)CAQLTmENpQi#q8lGBXem5+GezpqtAAl@sp)oMrNk|D*tKOw
zk;wb)Ei?TXXX)88l;|JqE%TWoiG*FGNcfP|p~0I^&yr9y>(Zhys%c$oYHNMfyzhAC
zPF8>Kf)>7$)nCwC4h3%|#kWo<zOM(x7bo?+7y!ER<UX<cek-fIcMDbb9w7kk{Wdei
z-Kf-Z$N$I)QeW>@>MQlEJ03@Ia8QbagDegXeo?5RzQ?6FI7s5)ApT%g^l_QyaWP|(
zBIMs5lW88OG>_vC9+QK26|jyESqJNxf9hv1M2)=5m9_o}8~aH+Yu)|`QLZ3?Vl7Ja
zA9mL2E|N%C*<R%tPn}&Fo)v<qPQS>t)2mKC&22mVQ@yiO+dld;)!CoT|MEhuv;SIk
z;)nmsg~yd5PiBX2{jY*CICqrFhEq4<m#vQy7fp-MIu70Ns9XPGmE?-T2sz3LlSMt+
z(nDC_ywPx{N%<{IlX`ZiuVy#D;TDZHq*#2ytKMyMq)OiZjO09=t&Shbzu^KR;j4&*
zn{JULyb0iF^AcBD=hGVL?H1(GshEQ7eny<#L&ytgAz_;ZIX(4u1-S$v{q6eF@;ffj
zmX_bIAfG$#Nn~K!O8{>FMRO(7-V5IRUmx&DA*_rA)4Igpd6yw=fN09#(SYw1V9-#V
zRzr0|Lp=x$buVflKjnrwe1hF0zY-1iJJE1rdXS%cD>d9f6e0Uj&gK7$>-%6q!~Fys
zt{Fi-CGBLLV2Ui?N3vAd$#UDnE{JVpkqMWmg!7?z`Rg8*D;nbm^OI$&Jr7Q`+$vk?
z`k!h%f~)a|^I;3}@Km3Itoo_OBlC&xR+njfH>4ov{#4_U3yAM7EYn8n-3sz3TOFS~
z)VxqFYMq49$+jvnoo_gx7PT-|u7~sR1OX_nzl!W9y7{Ht&9@yWPB<Y5mllOl!f=f{
zn8tciAWn><$;0b*wW1ly<h-xm3H6<smaD-l;0IN98T_D$VmbVvW0tRhA1DNlFN7bs
z(1R=TCvo{`QnRnH@AQiu{&w3M=)AAqic`*s;0b9G<40o$wHYl9=JDh-OlH6J5Yg9U
z!x1+);<AT|#Ig8Jy+a*G{>jV??ncnj#Nf%j<0oGsVD3~fPyPfge@+_NdR2Nfvpbo+
zQu*8G@jm#Gse4tjkNt!Vh8EoRVJ3Zk!2|Q%inic^=J{(L$*%E=TkybPPHilB;0R@M
zLS?f4CuI7D2I><B7d+55cg1Z7v$Y<We7Es(;*lxyof5(Cf6VKZXc=`uZoyQ$ius`x
zgS#HM^g3_^Eo<8TeBNY^vphjr?rWdJzB@YSu5c$ltBHP=c_bn)=ihaZ^2pS%$`#Fr
zkB=8YLR1$>a@?iwP^r;;`1m!&QkQ#1f*8SZV&;gbG<V<p9SdEi?z3>S!Z5=aO5*Nk
zzw&~)dl8Z2Gm0GMk7j0${jK6YZpSV8_PZwA-brWTn0^OUf}cMP^veR*`f>?Q)4T<k
z=GE<L0la(u<OUNJ&<*DCRRW|BKq~DZmlMe4uIcQ7iz#q%p>@^7mZ#d~7M0v-S#aBj
zxHc!aHhc4PXzzk51jIZ4_;~x?;Pf2aFUnu?ptKR3E@mV6jdnI;qLuXgfiFX%Ums~d
z*PwIv(n@9tZcGgvApv^zqwNbe9??^w@{x}`AeY~JVFL0Xq;H>icx>kRnTaoD;cxrC
z+iyi@;`U*5aLN67UWeadSnKdxA@BmK;==?sxo;~u1dWrD$=eYn+6$kYhc@-(wFc(z
zK~wI3RE@7Ez?(n*C0fZJ<J^yeGv(h4;UD|b%#3v&Z^6`>y^;;`XdArmOH^JQ*a8pm
zqSXGYwly0E-n11Gzw65!bl{E~z_;~5N-&K@dl+lGAGf9RAN>+aRkY@t$&tm%HkHNg
zKS1gDjIrSTAhr%qZh1x|r~KaXWjrXJ0`K03@vo7+n19S&aN9meee8WS);!aG*Aus$
zi>Y}0?Z;x`T@>(p5VJm%e=h|+YLQLf1>-R4yBE+ECwOfEH5D}JzT2rd6K#v<i<=)h
zCQj@h+iTlxYQ7)OvL69J92l`a*;h`IA2q6<;)W)XBfs>1CRe@&3M3)2ua(ULI_lKt
z6>ab;EQa~@(e`&dj;yrpOm9@z?Dzw2RNGEHSKY5pJ>Th+0r@E;utDYCM;c;&KaptT
zmqfebjCEoDE&!W^PE?+|kb*!Q?man^t;*j*5kTq-WH|gLQ#LY`q{F4WhwBse{HV*&
z9+GbIZkc?FQsjGPNLs%Vr`1&Yb~g589G8&dN5z|1SQ5UuGx915#=vD%HPmrfc4p1m
zfP~kkEJ7vrz){6We@<K%JOVO$>utR6gJ4?;>~{+6Mhk3>2DXO4%5IZjtpv8|D6NF?
zvAWkoWd3}STt8*GQQ6tFFJd<(j%UzyD)~uqISuR1!J{1$UxV(|KJl9aCZKGeIK){V
z`J&o<ZAJn2TH#s4mqW0p4%@WStz>U=BriYqMUt%md5(gNBIHJZJoc_RBJwg78N<j*
z9SvIAlBEQl9sl~?@vj@%`IEt~<5fn$I)~?o0ZP_TtN|aL+;sTBt%{bv{=!V5`E2eU
z%Kg<U_s?MNpZ$W$eGN_2?v!<Sl>&SW0r%>Fc%J1to_EaCSCa5d3k$r1!VfJ?lWw5@
zfoWC1>%TB#9jj7?G@o;jXVLB0{#L%SDcHju>vinKtwms`y4z(A&)z54>0*U$4ADJw
zpF-EJnYe!L_X2#00>2Z%_uZ%Mde<#PLGkeP&u3=#e;HyA%v&f$^#?wOLWrUc9KnS5
z-#4R~w_b(g55!G<OBL`vEG>^h=rIU9KDPg-w+rb%wZppq3-R71EwUyaMQLpWj-ZtQ
z()Pg}bO+d$6Zv_cpP@TVUN3WcR^^0u`#o{)x4KP(Z8JJPPdoLPGbg8?XL<;zAAGqC
z0fEoiFTN0x?NIQU-|~9w(MhsSd(Braei!IB??4VP;n!5%yO96>UcfZZ6N3II=||^E
z_;)J!c7*?r&r;D)_CEa;Vx)tp4h@|vY|frDc3*k`gQl|Yk}Y)QT)Z*m=>zXV1poN=
zI_JD0|5_;i<eL1yQt%T~cc@gy=ZFS<@N=q&-1%4m!FmZfD9myW+@%tKTT2sseDC<<
z=PvluE9T%B@Hoyj?)g)kt)orT{spQ>d>4y%GbB9rKP)6pXuazv82Kj{c~nL^<Ymj}
z<R%)eoSewN|2F{rX_jb5Y0tlw<8f9>V~X4upD)Ih69;~)YH+baM=KHY#nBXQOuFsw
zi0@CuNrxjB!7mAnO8fYUPMYU!I&#4LQ;G2)keF^C(7+C~sOXRWJsZ&EPkxrHq~DOg
z<+Ic@Yd*_OvsHoKhiArU<Mn91{aI2|aUI|r^Uzx3Ac!40li}AI?NVwqm<BU%q!g+_
zI)3)dneLuPVlOLG!sw@GX58%Crw{%aFE#!Xy3)vX5)Xdc48wBYb+zHhUSANX8Muli
zyy^VoZ1&DO;otr<N%}>^nrcC<dw(;AmVM^T3(G&*`Lb$_@7|-IH=xt>Nad+}-Xlr#
zKcAL|nr5;KVD5$7{kBgM6+y|&lahHp2EO}~2$**<rbRVv@5#Bh(7iL0YglJO<ebA^
zm4h+1fI;%ts2rZA92Vkw11=iPbva07TOqt-)8nKdQ1$Nm6rs65p08;@LAnn*cIY0A
z7`qTW)A^oz>`)h=72L?DRH>|(Um9dV3~IzdEW`M-`44^q7yhR6@A(AM8Zgfx1$^xF
zC{d_-ZJ!{G`~s9G!}NQ1+0-o<Q~60^0vJHX=FtfPNcRRD9jNRV04&*p;hEuDTf$#_
z0#8RhZe$ltQJ+}%ydL|npP+T{qezDR7MVLG3c~(5D)))}cm59Dr}N)Hmo}D;Oe~xv
zgW1FI0}Z7Q>IZ>#OpJe4m}5N={2lhqhbG5A%af@$eo`Y@zVnkjmztM-QiL>8$VBkj
z<2O$R9|D?UDzR%heaqg1bB>(6?cm!lr??{=M+Oelx7-o@-L3Hdcq_$Ca~v7bP2YkR
z#T0ykgHM1uTw%-@#v>=^W4iI*-I6;W%L>INYJKq&GkhuOvlDX{JP`cd*Z3aZ|Hj*K
zwfk|~C*q5bc24YB3=*OGs%uQ6bKknl(V2)Wp6-OmeCI@V@d><N^9T@TO=sZuEfddo
zO#Dy#BfmeVW8&NSt3Hm3+J9v)um2CcN4In02b~i?Yn%9W%gm+UZ{Pdaoc61ar+?Z$
zdBb85P4UGGJ0~NH7j?ofvvV@L*xxz1XK`Ko-pAKG=DZ)cee&G9)|)&tea%1M^J)vO
zXoK?ZJw6{79nZmt+%Lq9znBg$7^l5y2-^Nw{x6^z9y1C)bHwWn);r~q=m0)$YdsHJ
zkuxen9gP*6%!+QKB3xx^zMMfXW_Z7HZfp%2Tl)<AL*|fQ$q$l?#~jMe)QjI#;F<wh
zfAPe#-#?m+Ma;@TUBLCFXDwc(b9?I99ZtUWT;fyDnC>(lcs2^2B{ocB_P2WtcVwP#
z%W{JZKqDuTS>TpzG(8eaM9pAg2%H-6a<Z|!roN`msIOU0e;ep;Jupf<9!m^CrSKi@
zq{(c6kmA!S@TQ{a!DM>G#CPKHMfPYS3oU3EvBYq4BsmmKM3cD;$MVy)W-2@sH8ZiB
zqBj24-caw~4a7ARu23!;i)VlnAOzc-pM7V>)EJsxsR?JJz;!Y4nz#pkdiOMy%M2Ty
zTw~Eqp>-gTaEVMP9EpU|;lxnXz-nev(E%uIBD#wop)uF4Gtq-j(E#vBG(8kW1Q{cn
zbY<IZfHW4VsH@CD>!M>+)88uW#ntdFC|X6tDlfjFl!y?)F&FfwqZw-HqMt2YZNY0h
zx`J&+aBFMlmfnu*gGO(AOAiEW>AJ3K^P9VjuFYLRqq}D_J`KFN$LQSL+R|xk?A*Ko
z9L$eeEgp{!h2y61o9mE82a>sXgo_;EqM@wPo|(mzz!pO~2GPkFgY=BVG9%c{NIof0
zk(5ED#}3M^m&v74$#m8j!N<8l%o$2}DqPxY+mvxKm!(Iqt#Zi_0ie|ad$l1S3O6<Z
z*@4xK4VX0u#Z)qpi5l_;Jz3R5iK7vtHJKQUrAOdbPc$A4XW$2SQ=>z5sV+d;l1_*D
zm(iV0;xkFfG>0j#77<quJh5D5Nd7H@ESS!4e>Bb@^i!0MA~{$KFnXgy_^cL32#?W{
zO2uOXS_&-y>Nqk2tjQ6=qbOiiP7l%eGo5QDcSiW{-@yKxM7VtPXhAFc?-1eg$=mmc
z@DGUaxgz{GZS4PuBm6UK*?*5C{N{D+zt0i=-y-}$M|jnG_CG4Z&zWQIcV8Fb@~x#$
z3;1t}@K-qCr$xAY*Y&0j_Wx9b%eR(>uVer79(c+|?Rb9P$^MH*?Rb9P#s2b9cRbY&
zJm2VM|8f!TtZ$nLm-XFv4g0%A__+@F?IK*hEp=Tx`=cK4qYn6!z3hLdNBQ6DDE~*d
zu>UVT%Ks@x`8%7MjbfjAF`&ZGibE|6(Vk#$um=Qe>*lU&J9;*Ozcbj<8#LauB?y|B
z(b3fx?CAoj+WVH?zThUKyQK%<!PnK-vK0ot*6V^@mRL?C&6dvYUXx!*izPrhB|uJ*
zx6#aj#sec#76!?IWFiv7t_Y(eh-3r=Ig{C)$wo&^d?C){(G*{-1DT31+6koaiH)Sv
zNf_)+j08+s=<jASZDz9R+(0&$jv7=5)E8i+6X`?|xqQK%9vC&Oab{}+Dx8~pjrJ`Y
zgWW9~gU0n8J$+lC+A!?28NHjY?K4{X`g&Sg`+z{*!A_<NcyVmn(9+l1Zfxl`+K>iY
zx>~Mp>F8|P&<Vudyrr+BYopP-rMEi>X<OPhb#xg6AX`FX4aV4-RiRZYT*uInTs#{~
z#iKN0n8|?wdg>2ksgX<Is2&@P0Tn4SoXw_V{W*Hx4iu?ab~k9wjiv!K$|eVral^Qt
z)hjg&HJYj&P(6dR<`Co$BgjZJJDkMV{=(T@21?zE<Esd@MzL689j1f92V8^58_Fbe
z>4B*54B)efsJ|1j!4(W(00p~xJ3z~Kj<{+>vjy|8mI#9eE>%Rt2sGH4NbW-3!Ewn%
z03KT9&>v0@49BujW`=Y!9;GTp-Bp6<a3~`Njbu6!1BO*_BHR?=a#Tll3=P1rk)&Bc
z4wU(RP}xWD(Z3qj2|>NbJlvEj$Pe0Pz!-q$-WeJhzDZ?g%SHw&Fb+0m&98br6uVf?
zA$~=%bUfATDVdjwdOCPuf}@6e=OhS<3HrH#PduUu6%C<=9@^Q@9ugP2zNO;rbhCg}
zGXqvx_fsr4ru3j_oD`Hu40Ndsi8LdJ>_J=`9-soS$Aia-aRE<Q(@(VD$j*cipX0<?
zD*iytFeH@4#ArAUXzWRZQYzv^dV8QmW)HP6bGSF1NDua)$$(I)cEuvuVIvYv!QVtG
zl`weTWxynOcDy(TPPyhF3rpzj<)^&tL71^UM9ltV1s81PantE867H44trYHZ;jVp|
zx{Dm{mP9%_kQ_=tFNx5Y$&<lsc!)``B_Spw?$Hph#tRyBU1NA-UDvK{+qR9V?M}_9
zoia71_SClBsX4W+>9kYZ))dLx=llO9KlVAv(!KUR$(576?&VthV*K+0zyit5U}>MN
zq-|%Rl+c~EK+i#$C_BV4Ka@#JR80j{f{5}uKRO|iEi!16Lh|0?npn&Y;V7F7Er|7d
zG=i1*lfK|a&yj{-a>P9M)YjXKKpVby!~Vy?ce5;RZMOdCwBVo9EHJYWNP>&fEZ;q7
zu~&Go_Nrm*2;S@2#};oeiWTxJ#cu@?izrU{9Fd}39p=cEzJWcwGg4;SA9UIu+Ppjv
zc*0}8@9ay=2Y9O&ue$rR@JiJ0`L*sQ$#Rp0V+s#8i`R0#XIzdT9zvjE&hxy;*6U-=
z@w$b3&o79zGN^I2A`;Zt4>4Asy7?Q;7%|f+Z|-_{hn1_BCx@R;GIJfmH7JKHW11L$
zahreT8fu~DdR)j2?${ri^DfOLHhv$ipB0ZckhH8n>@53Ah1M3HBe2NvjYvWEbs&W~
zgTQ`MqqZ!oidEZR*{`;Wsluu>9v`QW%Y7hbR{(ZJO2u&JHD-F<U`kguGgsk9eXEeU
zRW#`Ul)qD*9;>$@TlNH0twVFpiVN3b40rSgoNHPd?buO%n^|J#E^9d{l&>L@P%bP3
zUW)*8H#!c>1&t(_do`Q59^L7CRb;Pe-I#NlJk&Ou{x|zw2TqcKQ}Z)8@#L<_+Nvr<
zg6?6ZInbAq`I_G*+<aL4GOFj3=3Ux>sPhu^p^UBe&N8#%LBty!1NHG7Wx-E4fvwf?
z5}*A!S#|hVgtP>>u$1}>Kbvk0dBSyn|I&j%EBz~hTel0sj6*#665pw$Iy^?eqtF`$
zIdO`UhU4H{S7m7Q98iv$2BnMylwnRL9w<8O9DWld(SaU&Lm$p84(`^?L9uvM>-;tQ
zSkQw2tOvg<h6%3*&31+wAHUAQCIJ3*PE)@l$-SUTJh9&fH(>A1K8DF+vaMo+WOh1b
zKV|v5q&tkZf1HaOetwr=8`Xn{MQ`E+Z^j=B3`x8XUgz@2yg}<Ba`j;U<&s~ffaT}Y
zeCv|b#PWct-D#_tbeO;3t9n2Lo=IN8e$eRU&V?zMbujP4VYU=N`f;8{wXxP`a*2>p
zz$`k*OuE|?wBpy#9}S@IgO%xM4(iqT3;=qYYB2A-2AXQ4^H<jt^qdJi*;*fHIz;ZN
zq3@~x9`V0Zv2%^>NYcl;uA~psPY9qlQp_tO(~o@k_c_uVw}3|=*<k_L+3(-sYxQac
z9~Nny%E4ac*qqO%pbD_Sf<U17mEPJqKYY94r8z50smP5#xEpHYt^X4G`Sf{u`U3J<
z>pCy%A4f-)u+3*F|MRPOL6#5F#@FkdEFZKDX8^l6{sC2o!@vh;2Y;~PGyV^<jpKn2
zjt=m@KzVfM4|odEj%&DQK|lfQvs>l+6}qG^08F|;d;>Mu7xu6dh`NCu`sN29hXURk
zf2vmW2cT^*gOC<O0h^Fl1^{u`E7Tn_XdrqA_>oc`{fZw%v>4ibJoG7g)fe)xa|QqG
zMsYOPZ;X7EocP5TunluX2B7c%^#2_ChS72SA^xf43n+l*hr2rL_a}!1vUY64Kf?i5
zH&9OpNFknwc7DNcME2*wJu?E>!3?GFK;e#zP_#FF!0LD5SI$pu_<>F6s~T~%(}9lz
z`cE{t1)p)Fg}#w=m_j|@-Ail*cjIk%K7Trw`%e;I9QJg86^v2}{~QP?fO@W<`$WwC
z<|iKnc_Oz{5BofS{Apy>pKLKy815=^-~+b<J>(7l`BQ2E?L`m(#@(1==Rx7IF+sla
z+}VO~Ucsjn{pA%X{uvtaf1)%UY9Yk_5R0LH=p7k-iyS7;R-aLt2Es0ez72oo{OSW;
z!WWQ(_QluZzhs-feS&Jga+UAv=UD%R^ogS3KNPH{p$|JC)DChu{(qjIO6_36PyHg7
zPZ$NFP^b7rqYEyd+7{d2KPL%xL)1HVxbHt8uV|mZR(Hf;uTr+dKi|4hkPpI~KNbHp
z8C&ovI|~MX9vc3)0<?C-V6K|3r9QPXBOZ24(5}kxKezVd7wq?FKl~Nse<W}p#hdln
zgl)(Sq92FwmnZF`+Zp&q$l74^Qs3#+V6=Rzcqg=!w=e&^T$2-jUE@)Qujmu%>ahEU
z9-AG{N_NZN58QRw*k3^KL-)V^xbE^XcrWj5@P77rn+=%jdCsT4^(XlS?ESU&QY|Nz
zI0Sqn&W;ayh?3LlDAVO37JrB;;D^7o%8s{7d>O$2Yefas$%&8+768kD{xEwgQGFBC
zAigLt#&F*PHE16L%rhJ`R0F~Qhr3<BilW|<cT}&PJXM2@hF&m-UGA&Jhxyr^`67DH
z&~Kl=ykD9;M!uZISX)HueozLI57Ynm`kchOaL?zsuaq5+`=7uY#jmj%P`}+Vixc0I
z&kFh8b=Zqe!>iPLpQL}R?#Iud*PM8?|IWRlT%JBXfKi933%Y)j?}-!H@`0BAi)MDx
zdf)xi7D%_fcI~C@KW`>qn_Wvj55zPMRlo?tVAkP=+fQY2#M`O=ydUfMRRodjt$zI+
zi1s3DVq3&du-{^LOi^z8Sibk6?wOkI=ZQhx8d3!Iep&&sQ-E0i3)aTI=l)aA)Yz~T
zgwa>jCjT*T9~k9wS1s<Hu<UyeB=%;jcHralTOk(_rT~$PzC82YX!G&xBbq<Ih91YG
z%DjqugAn(wbI&z`ZX6BQ9D6ovK!tN3Z;IETUV=S#(-lX@Ph-Pl6oF~~`32YAm4lry
zz{pX3>z_zXJG<8Oj#t2xfwrjQYKboupPR~c`H8PTGk;Pt6u)(6aqHjKa8qnbJPfu-
z0ltp=x9tn=eFgt>oZ~+FZ&=-zDHuC$4oKAfm!CiPBoeg$HHiFgUnbduA<FI0%*^Qj
zO^LV@+afr?do{M*|1%Xf*}bRFuFGJ<p2PcnF%$Y5qP~KsTi=1tgs|^>hUWb@JcdFx
zyk^{XeHpC*S&3|K?=OhEFz^3!u9E|iv+w@{&-J|Y#jQeumSe$F!#&&7V4vgrckr9Z
zqj<sbTZT`mi*Ui}9NVKjxnRNZalu`3(DA*0&m1&(j1m;IwW0jl;_yL9)`0kup79>)
z-J?WtmIM~bOxzuTQuF?A&W!)&#J8Wl{C#Hn=t14PM)Wx^z>ErdPO#pe6WwzOeOZm{
zeOyYEdPf*;|6F0mX&6BZf@Gi5*&v%8PwrP*0DnI*C*3>MN3gfe{l`({VC8dBjp||k
z15!4C{eOQZKImbT>VtfbeEn3V;4`N0-p!l`n4edJ4Tad%3W$Ro1d?yQO;z&ECEx4u
z6`S@uCi)G3?4pmrPvH**z4fxyc`f8}lQ!dLdYkY5G~@T&Il4CY|2)wHt%t{^*u~vN
zHY&pXLIA5qi^^BwgF(8<AB&)!t<DQ@!Lf~PPMIIs+o8zi71z_0&o(KQ36WR%@WHkJ
zt|`riuHd9!0^q)2TcSRf+ZmT|;zQPM^}&8u)%1@evN+58qKU-OvHMFw!S3q@?T$^D
z>EdbF4Li;)`%1-?yRgv^yo_)>(zEg4M<GC*asxi>Eey~{y+MxrtUD-rt?9*%eTB7K
zk9E~L*lW@eI!KCp)gAUGu^TAafsXKuxEm<fA&z&|5azd4BMllfrrP*A*sIgAiVRfm
zC>adM0Ejbes3AN<?ut=vTq8V>4Su8n=DL*+*f(egjcGR=!u@&y@ZrKt8$iV8(7}%|
z06E4L_TWbXAc$pS=_6BL)s%4qchCvTxd!kz{4e!J9hyMH+#lzJ?fqS$eHYv-r@>yS
zj<>Kk$X!r806e=ZBwm2ITY!6&5%%^2K#qMSj`VDZCZdKEWYW>l8NJE0%ZJ=<+R>SF
zae9pbdPaPP=3l!{y8U$C5dP*0pSvno4z2}U-}i%q0qEYt;%pn#WVhRxSAEFOeuE!X
zfFQ<=ZQoLUXKNs6=IJGL@M_7weN&WSc8Q4C(2!Uq=sJC?aE(yuU=->oCi{>V^{A~V
zB;*XY=y2uZ6z~l~Ejw;>oT)cZCSYjg@$X!@NY0((WQOacCEepue1t{{;jHD-8lX=@
z%4S(hR)s1n{&$PxN#$%rWLN|LDlv|DBvxlO$cTlkDnSP24mFZ{NLIxXNY(5Yw&|E{
zaiCzeiT%C9gNID^%PlkR+_x*vmz?;7fo+MKFGg5up@h5(yYps3B3P)Xa*OG6m4xGT
zON8S;@`&LVQ+>Q?_#g=Bnja!vx${9N!Mm=?JTvLy_CGv$bGqMh>AO|0Lti=0vNh+7
zx!d(SF8VeguQ{f8**Nu%(@x){U&S!Zgm#Vv-*tr+ZDd|+5%dm%f#nTur9tiTU}wxm
z@AL;-ZrA#BlUeR7Tu5Lnl2Wh9**(@A@68A9$q83)ZiWYPFmHsP?>~v0>igFB!=6L5
zq`(Cqoi0JyUH?55xH)}GUqSRPLuG%>*YXUFKY_N*+$CwIv)p(w(SL+V4-3u#@TmDi
z@_o5u8?=j4vH7`&8RQ~1LTW?w$10KREvrik;)H>*)V;ZpDzOi4TM?(Gr^$hmsY<iN
zhaw&`v|a-LT<_+b5*{|G0@~$+o&~+UvjU3%+voB@k|)RXY!+Zq!^>0r_1;l<lGDSU
zfAutP>^WucK~8_(z!FKILV55M|DRnIxz|L&#cgoD^==b*pX@QC;Em+8{a3?h!sLi;
zKlTp_f-p|4`0ht~KC9pVT$g5Wf0QIV#7e))(3XR<se4UJpMz#ew@&49{@k~GMrlmH
zxGg-EuiDjQwC#By&ib3RT-;P7=v3@bo_$sHj_RZ3o+M)EBZ(XQCJlQ22`-cZWhUH_
z;CEQQc3*E(ZY(Yqa3EP_=YtgB!Gh*N)PrXvyZoZ#S3U`V_D2>uyob)OrG5QLMTYv|
zOtD)6QuVrc+D}qr`Ie`V@e(w#8r^yrpw-9SVa<YRpRW~Zcx<AEX`kC8Q*gB@<n1H#
z*;xkh(IkEe$E+Hfi#msj1p}$dMO?Y)E*uwxfWV6;&S}}Zz~6#7zsUV6<(ZqTNoZp4
zl^9&I%Rc`@dxW7UQ;j}<D=E?bl?DToRkO@lvG-zsP-1(KlU0_!-C5lhLqf@tBa6tF
z45;|gLI0rT-zs@4qMLO;;8I%Yw7Qpm7VXxak|SiIdV1>W@-MHf=G1x5STA0X#&3FP
z;RH&5vvv>=`w=Jg=L-szMd;6v(o>g9r%P0d;@Ll6c#@)Jr;VtvRC3mKD?5L7ySEg<
zQ8`T2V@=!D&sb9bw4Ax|Wo5{Sl3<891$1+_dLM{eM3VM%_+cpHT0~RY=)o|VV*D;p
z9yPvO!%Yx0w~&ekY>@E@{1_MdKA5G<ljmtR>?cHevV2)IbRN)}R^Bx+BTMK!;O#aJ
zTOCK-`T28k$~v<+>4d_BWpTBMa~bdA%3;ip(EryfoHCP(>%h4U)`Bvgi%zthT#2hF
z?X7%edqYKugGFU~fV)~jLtvWUMY);bSh~uuL)5E<yqs>nSDiY5^kN;L2-UE@m+ueu
z*`>F`{A#0HS}me!{`bRAik=SH1(?*#Xo`l4j?U@!-B<O1%2^|u-(SsV1dIbzj7tbn
zEA}~iq$MTyFDt#fSvMV#5d@tA($v%tYBvda<c3c51R$0BS{vH<iLPKFu`t^2s>4v7
z5sB?bd$3}|rZ(M;&=5ZiLtBYUsDDaN<LJV`wvgzCon0UjGwdSCXDXqLFI`x}8Ss(K
z(k)eQ;eGc-hxuv^$L@)2BfT7xEmV-s4NzV4*2>y~+9N`CgmA@1XoL9o(iO+R8`*KC
zzU5ma7Ur!bpPKz;BtFWd&`zwBy>YQD+N98b8(m_tHO4y&GX^$=9eIIbMP-Ps|BMb3
z5k-3kDW)4Wnx-`^+anDh`z!M(E|9|<97C(@$_i;L8(j4oVL^xuTJr4O(Xl?!sZ{K$
zfM>5D9z2iN&_<)%zdc|>*@639C3ez0D|o9p6y>MIBHUOiG$AQJFrmB(eIYahpnH=|
z-5K&JPk)Q0G(;RsWz(4cooAuv>9?VnGIoVQc4fgA@C{a^{t!T*naNwQMY2aoi$s!|
zuiv$W8eUSZJ?rN%smnDdf`Dn-PE*yOuuI7O1OL6Uz1_+}oy)=$fuPaPUDsv)uC?`{
zRXa_A-OYyh8EK+sc_V;wAdyR_sUixOr81dIDn|Z@JK<Y-b=y6e1S4ECw9rdE`zN6^
z>z~D?C8`}hS?mQ5$A0a0?Gr$A@DA>5S)rz`XvX)3W_R~<7C_GT9K{K|ejLG=JtoSj
z*>u!_Sxqk5etolGUPTF5vsEB&h{nDtd4j$O#haCodzL<VVaouF@tn1%{RC(4K)euX
z9a^1Rkzu8!D+Y;P70*uk)s?=wgKm;-w2-iY;j0iIFi*4Vw~A9mzW(`<N{&I-x}vH@
ziC3?TQ%K~gvHX><jI(1pU)^(+qsEJZ9VL6lf=}VhS<DEwt&CAUz6PZ$`+-~C57xRm
zJ~3VB4qajDcgA4*fI5O6ofO>Z^`~}}cRv5C6@!6V)jSeL4Tvw)OkZyL9pFy)M6+Q8
z;9+AzC?N~qcZ0K(7K9<afjb2DOFTc|?4<Emp9g|Nfz*4Ua70V6Qr>pWBr4x2eMGt6
zUJ|T9Gx)qyfS$UnpjhUdHENtlCZ<&gWu+AA+yGglI{^|Exw_mWR3T|i9kE8C56EWm
z^l(nD=9C6JWVsh-n@q$G?cmmnU+o^$30volY!;rLt0q&nE2smNR~p&@#(CO~(|mz8
zG?=7NsU>^sk|~4POV9$43Nh)s1mjrjpm=kfO?$n0ns+w^dE`52JZ@18hS1-G1s|0P
zI?zwF*5;>0M!!6Ga+K&PqC3fu98D}Hb?jl*rEighit(g_8x~r|$!wX3k9{9GVjN$=
zF4t4!>jKH2cRdzO7c}CM8fj=;1u#bu;T-Q-Bk=*>#%`Ce7a<JJ4dN~P9s2q{YHAv?
z76verHzU0)O0%xcuFn2@2+4-n=OqV!6-V@QLecxzxfN0I#Z6?8E<#h1&{F+6!n^*k
z2yfqFq?sDQl~;>POzhMbk3zhpeqW&GC0)+JTFN(OFc}m6lk99DXS5PWTEl}AKs_Px
zMohLjI0wZ2IMn;4XE1CUjU@(GQ=P=3{0;OI9w(5DWCEXb)G$zy!{y&0PYW=28AIlw
z_44BNvR@4aiOrOgr^DZK=Snp=Hi?|@DjgkILq|D!h!0DYam{wY3N6e<8E(=Zz#GWN
z0K=y?mLyIp*V^uKFqNJU<vkiQeTW~ywVi&AHER^8q3+(0pEq7YBckrY!Nd<dn!lc=
z<mUaS`g7&58~KF;kwYef@BV4Dfd8cvF(+A_?A=#xcl9t5`b}l;Ktu1_WZFG@5HW3=
zSI2QHthmY<);|?Q<%8~}Kf5NH7DBFa-wYeRIq9Ef25AZ5{F_zKxHFowd-?ip2r!Gh
z9w*`GQ?iMEDpJ5p|3Cs9G@U1I)pgmr!>@=Q{;JH1z^H0BSh0wzQbwGv$S-35S0JVy
zh5Fph9zy5u4KTx!p^$BH8o_&Cvy4x<jAm+z$jSAMy;{hN&XLet8jdfnoR<EQqQPLK
zFv{FrIJ368Hn-!5iqIPs>(<cBqy!5bFcV-s*f+F9qp3!j5)QPGO*3fx7?`owG{xhf
zUNPvIqH}x|z*)25JMpdMgr?=KCp}9T<^g5!8$>=<+!T1{U_k{s+KfJxsbY?%Xix_b
zuA1&0C*`mAN&$IXq2+hQZ<<_9$b*fdInBjs7&yd7Uq_r0&XZ<!DefPj%?9R|hdb3+
zGsoXGj07{~Sx*FB79^MJwC#%m7F9<puGTv8u?^?7kIrH4)8!~kIqqvt(UIl8o&5!D
z>q+TwD6CV)#FzYlsYZ3=CMcA($NaadqT{wXnAy5bv390Fxxyc{$v~JMCz*g#q(>Xg
zDdj{}n`)Rrc;f*v41|RGz`fIAyjZa7U9vX~h_agqiPaIS|JR$b@<%XQ1gO9AXCiMh
z(I`%J4~LC=5m7W3ZS6oWxLVtvxGb$=4>|D)*LIUup*j06odP%GJN+&xwXu4B4~yC1
zi4nHLFD}S#Axydc0fg?KUASUNW4C;0RmrA}rVbw2VigsD^TwgU$l|(tU6i)j?Mw=+
zh^xZs8=+t^;+p$c?z*L}p_-|AbLHfd#qxtb0Qu<!{!QHAzd;6-lPyNk1wxWByqrJ!
z=*?cp@j$Ay7%ixh`Qc^qP&vk<(cc9|nsS^8?BGOKw+HJr`4Pr@Csy;Hv{7x0AGAg9
z1U*nyN|HZX1)Q<YI_@!W5#9(+$?nx7n2_mnjEuNMNL&9tiNRH{+A*G5a%!ZHUFI0k
zIb3bV>CU&Q7t*8VbS|#soAnlW*7q~nHL^^qCu)nI29yiu7N8=2rBAaDZ{_Y<E73H#
z-`RLxuu>qqrk4t|x<Wf74cTSJOmC`2Dkp5vFmqYar8Teli=dY!eVDb(0A%L5NWJ!a
z?J!*$;Xd}N3-wTWEyY;!iQ|B;gqGC&#q@d(K@_g%l2jicQmCErE50t)XRO$hN_!U*
zrh!b9dE!kT%TJ-mo-U0`tI^sgLr1O4+w)&~sQ6wl8rMlXcRxW;jQYbMqXsQYlG!||
zTxyT;ekjXnN_H<oW^PDuv{E5_MJPq#-9E+OYOa1#eawb&itrnvk|qo~6EF|^yXB+2
zP_mwP3hh!Up9R+%DS~!%a>`!`UTbvaGc!wcy7Da7<|@lO?{*}m3sU)-a3NEvcE8Q$
z)U5|C7Gtpc@?ekXm$O{=Q^DMcBxPi`tht<(pFqx@=c9PxvPVdS&42~ub43<abt~1a
z63C^Bc^+7WKZS0ovlGnaNN>)aKc>u?Y?QGWsPOREN!HxV`=$_*o{b1N;W81rd&;85
zlc~hW-F(FBVvIKF$s=xD<b`c%YLp~tGw@Dx!?AF6;;jp!bv>h`OC_!t^IWLn>743}
zxDPMWZ{sHgzd_Qa`5oMq6M7M9NK_Zd{M!9cT#;(ggyItFjQL}(Zaxt~6&@WTH4ud>
z%Xfe>Xm5jj>UD1`)w|~Xh!l9_EpESRnAezn_k`ygp>Gom_ByRE3)C6w(JGHLcxy3Y
zIrNyzX$c&(FFn3R=CMJM<tfp~^h^;k)BRGj-fG^U?@1bJVgp`^;&LLevt78%F-;4^
z^qO|h`;x-txH}@nf~%g3-mRquG&sd+aw9;#a2@Z)*C=(XgxaPj-6YL2w%!&UIIa6p
zK8K{Y&KEz-oK?!Z6ifXJdH|JXD@6amW?i;oa(4D8hlM}U)XqMnVgOe;;F8n|V@O6C
zIi9f`;+^w^Pa{&&#WsLP&T=9YS5K%|AI~t?{%o&jMc^6L<7ORmg}iCtyRyQ34P&ma
z<~tOj{2A>E(hz0~l~tzzpNR_Xv%;JRa^~jT3Jw;2EsrG%!-SQFmmbZsWRQD;ox<;B
z=)LOlQ4aht%%3}4(1gqE)h&<wXY-x}u%5VM$a@z1*fEVsn+VNGQrk20gUw+fKY!9$
zX%X|P>zA|JULq@hMb0_x2o2-T13j^5B`5beV1~d6Q+-I-6M0&7mL<d1;3()gEIhN(
z;ls6P#yMkHCc=Ip7I_{0vpD7f+wsCN%5Ybr)b0nMqORe<=^9~5E|bXg$%KPy1Kefd
zX}*N0Jd4$<FlkHuvv8Y8fNSS3QZGbyQYtFdR1bqGMRz;SL_t;>NQfJLl(<3}!!I>1
zh`mx}jxpFlcPrs9iX7il?|hKB>K&D$v5v|vM|XSX`^CWIFFtOy`l5NH2bQR>9>rgQ
ze?@m|=RrI)a0A_ZLt3v}zYE>C_U9SSSpRvvuMP>pLyArVnP<)_ok}&I3p+%Vr}mPm
z|0N4uLLd&|5Wf?To>^W5FMFv{6z8>quABBBt0lH_*r5xXeT~AP5a|Yu11qw=BSAm>
zS8c(YZk-H*+P}EXPIRGDT-Ft2la+F{s;CY^OO{LLaVJP89P#buEfUSCTEG#>cA$rM
zT!_k9>epIjSxg@3-WamInY@S^S%@AjU<m?q106L=@OG)oH@gjwE*s%;(=O56R#UN^
z9FLieWQ~TV<6hNkmJ_&Mjm#45iPYD{2YAE_7xwa`j1ZrFG=0TmyVCrdA=gTUi(x&i
zf--!{tN<<c`NzUg;1UCxAWwK%Dz9Ci>NdeE?^}c;f9+$JQH1a4#S8~62K*QKdsTB`
z!zMgexr9e{3DK<i->9$p!kvTII9_)Q6e;AEDvpQ|RQOT*C44vT?KS;O4-3p}Jjxf0
z?R<GTw=*#E0#A}re?vKkBm1#B<4Vm0Hhnm*GL@e6xY2`MU`Jc{-}J^|zpZ-%;FI~R
zbaoI?t@cq#vUeH&I<%b8t=cRy#Bp&!?xFOmoPC3u#bjaO9F8C9DT!;fgC5HAyJYF-
zbefKH(vX@?xEzS$#33prhJ@P2eEo;8|6rmFmi)@pPcbeI#jU>IDctb#{yX>*OI;G!
z%V2<mU14wTA5_nKaL^e_r{$5r($64($d=VvKC-Hm7JH`5GVOFc?yspz=}|nLAWXoT
z``{kUML`cio?Kv7_t!pI3+`}Sm5qJ1dML!^eEg_0jp~huCFWe}&K#CgzV7F$s?JV=
z>U}P$Z`5gv(>@{=ndD9W3FsSrx(B^TdkmwZcbI%T#nZ0kf*YLoFQTC8((3fsdZ`$m
zU>n}+!YY1@Q6hVTyQF{SX^0xRQckr_ZGHNjjzKw7))i<(B_67Gu9PHg+&vyCcl)?!
zPe(?h9hx4S_TRKwpuX^R$=+$hr3mi>n`vaqXd887>vdHJktl&p1dhU4Z7jlmlY6Qc
z$Qc+gJ2p)l<Ss>lM44<O2AL?1)nJz<Fz}e^-R5nUq@VsRC?h>RV*(rm8xX0hJ9z@`
z?df{o0SAzi2jHtU&k66*T>xQ=A)5X|{vrnHgSLKm`W2%Kh(HPbO2>Y-M8AQNbJfS1
zvw7~+@!OMUU03*1J^RIo=v+Epb@p%)-8c5yi1Z^sx0keSz0O?83;3Y+=fblavew%b
z<PYo#Aw1VVkBp@B_ilL{G<Wp7#$1$yKEhq2`&}PCgKa}EjTEeN<|G0v{#_=H@LHKp
z>x52>{JK-P>NjRDF*!iDO=w+~=ME9v)^Vi+jWxCtLrYu_`eqZPl-JUwasbXlClC1w
zJT!@X`pKqrIPo>OX)uiGMK96jjmx(16awvtJFX@gUg6Yf39W|UYiwbmH9U36FucX6
z6pCn3kVRL9?(2~l3?C`Y<8Q%hf!CNV8NIlRuaMfW)Nj?o?Q7?`6s^TFaDGD%V$GaU
zf}yzU5(Ead5f+u<zRH_f@jBWqWAk+y6~aze=)Ktcl{0QZE$!@PRNsAdHQW^{MR%dN
zz4Z8hU1u&7ZdIwR`u<LU&9F5VWFr5$6a5g3$DE&D%6877w}0R%)Bc7PupDg6jh}3V
zM(X%jqc|Pablv9etx=;A*>s)Yt}sJnm`Ma=?Ht(Of)G+$)s}CgNz99f^Tg(WLC5Z=
z3MGh;u_UAlCR+cB=&}c2$ytBeHKCG&fE#;_9~7Ft76v2;TUb#x)9AOJpTSe7?;Mq@
zz#jVrU<_(FxX#drsSLVTYkHSE>7xJFU}RzvK$Hg7QB*>|+Ya#814$*J$0Mnj!Rh4&
zU~JFU<3uwZNWNAmge>jdHnwUJaA?)<gF>5-o!(m1dWGrm9-aT<{@Nm4<ui35A-yD7
z*>WWR&Bt#RxhAH{+6ilN;q2%%<;RxuYQEuDRiIJ}SEAy~e*9gk!Z}b0Lz9nTw%h6W
zu%rBilOGo{Ev1emOd3aqmLl>o&iMv;zY)ICOM_u^Bb=;cD&x73BtSh{OwWx>nyPqw
zG{iC6`{yb2z3pb`e&|rUTr1M6*WGFvE42C8=H538i6LWXIGT%@bzu}><0qMEh^o)K
zW%rZ4pNmZp{lM_#S_aY;i0@=k$C;m)a}n~an91>eII6b%0NDg8_>&(SH&)4!*qpD(
ze$xNz8d8=zYxJQuKY_^uTtPJ>)P>8bes6%3&(@!jBPeNUKGNF}*)&v;V0F9H25;;O
zjOfvKrMsq8<AkqaHebBHMMZixX&aJ_QRJ3r=#U_Y>*(Kw(^3-(nfkVjV6Ztr^ks7q
zcT17!`&Rs3DCma1)2oEgm6kGkoNRdA(kWXteP8YKX^8iX;yzwy8#c!B<!jZClB)K?
zdQ_4Ms!E;SH(2Mq?-#r!qX2!t$MIhiQV#gJ;riW$7x>3U4ZOy9|ADe5<AbZiZaI4j
zleB)te*djc_K>z(|IX*wDRwcWv)hS6dOTz5y1qbWGnVx7dg&XgmR<RC8%YnE9Dr%1
zlfrD(RHpvX1m{x2=!xcsWuW*^nkiPht(8T;FJ)y{9r2P9CKCt#yD`m@J^d)pO{U!<
zMl+X<qL9ngSj(bmUuXj0B2>#D_$3EVGQlx%eiCiwf-ia`bvobpa155`Mce_NhlD_$
zQ16HSAHer2bU0lBJa15mNlh$j5ADihmevr`mTrbUA?Qt_ud~n$=gnU9*G3fr(bJi#
zRLWTv)K%p09L!#GaO)qv$)(XIBbIdC88fUm!+Pb*Mr$`#L6#@BQIB6eLT%4hElaQE
zP`A^7imF^5m9&?6rs;|zx#21qjneIO-Pn;saz~ry6T&Rrv~Y6mp9n!rQ%A<mB2<$#
z@2XW7eqW$O3?m8C>S&Ba>{cbJyR_vgzCgr6aq=_2p7kgIRs+@NeiyO4Yw6GW5Cp`a
z^BRzg>);I;abXUnaS{ZU2TBuA_#{AgX)9!hl)a#^KnkW!v!pUNYBJ4gQkpIwlHb@(
zpykxk@aXs{cBa5o!ao=U|3u}jQ<gzz(Dv|cN>%5yi)o(n_R9)&Nwea%^5*x8a#c@H
zK2<Wdapqml5WaqDt09SYlE*c){cLkQV=a9(Zn%u_1_LTQQ;g+lygvw<V)Ubv<DB|^
z3bWg3l6mb^Ra{g-qp2@>KjHC^hERAly6<F8vo!FB!APSIf6*@ThW^swmW_%HAd09G
zYRC!dP3|*Iu6@>sE;01uL^vzZZU!!tjoDI6NVwNV5xNVN#H(Jre0M&juM_9$mB`Zr
z2(h5}!3vcV-ZsWGMQURMY$U$M5)=J^a|zCSh!`?~9~kJGsAI&iV%8RQe2!SbX+|^F
zkM*Ar)eo%-KU0eb>!#(Fawsl9c)CHsLUJo{g<!yj+{EW$`hMA{&@jfEJazp^$D%o{
zUOTW`IE-NcCC=(V%&~!dy46$}vO~+1tAXDNO6Zby`vv!Nn)l8ZW}a6&K;dKYTc@q>
z33SNFjiCC}dOy3ialaDKZU!f#t#^+?BWp2sv#eMtIlUz}7@8v^Aq=6p@938>a&AGC
z`&&vsCv5jC&kvYWZDa1be^f=6!;<a0m4FqR0Zw13E4F&)8hyl2p-JP~)a)varHe}<
zJlYe_b(PKXUI$cwn-n77Pa-Iu1A>e-rGR5DAEUl#xEH<}tTG6l@S!RHZ|y0p(#lCl
zQh6L#b`UM2uSbAVXDS*)Br-64Rd~u{pGSp{jgxXGn4P^&r}%Z-t?IoH+?$wq669x0
zO4k@5BmA`n)sRqlb*CN)#x>%tgb$&x7oU1?yIu7KhpeF5AiiN1h;FiCLQHFSP*uZR
zk50w^&cU-tnzR3=ehcV@4T=F)jeNtLqifBMw$!SzYG%o_dh4f&sPOq3fvwL5pDO{e
zx7j6BH-xy`2BFdW;XwNG^#i8ixvSS1(lQipizETD_RnGn!Wa)K)ZnpiJPOr0ju~`d
zk~OvA^)YU0sE$?vd*RF!%S!e0sX{NFM~*&-lFF-yMYDBX;1o6DA|!lt-=d2Tm$ms3
zwb0ah>Or^)xV^YN1QcMF>=P;ONBdY~zG`@kz6;86+__rM90W5Trk#9jPcEgwgM!$}
zkHFq{tUE2x!f^GmQ;GWT6c}U50Ro!PF|>E9y-0Cn?Gs38qUDwhjz#mr@p168&Eq!J
zSWc^9zO*wGc*R`(y7Q~TX3fR&h|4O*(aGUOr{}p!aoC|jsb~t@s0-1!38lxL?-~a>
z1zH?$RKbb)m$oF%Jyg{@a1FtV2^Gy2Ar6v=aqg)W`!4VC+}E)^mw$fJR++@t9!v}q
zw;2eO$$P$I2ELr+${X-J5#?sAjnTq+giMF$g(5~`-?Li4LYW5$AxNg(6La7SE_>W|
zaHAQ)7(nmi6^iQDD&?7da=oKqzw%uCr1T((_Ye$T*|Wu(1Afw;=Gi%3=Src@5k=$?
z`r#3oqSK0NTP%D01f(+_s%#1vNVZy%favpbKw4tH;IY2Tz!#h2AVZTnZL+(*&3$?y
zu-~H^Ak!WmxHii32If-cn+%eNN;{-Y1*h*mhQ0&Q3$Cx_Bi;hPBRuyCZ3+0d2Kc9r
z_QA7*Ozn*kdvVWCc|`uK3u=qk)$Dz{J|Kr2jSv&s5Yb%md}Q!ju;(ZCT;9Yb7Gz<b
zGk=S$J8SVo+$NNC<^(gu;?|6e`u@zADl0nVkA!bP<9|w6%*mNQ(=SO9+xWA+=>sXQ
zKA|eZmtaa>27KcGiI32uw>(z#aDEwIcR5s&x}7Gc&z5Is#E>u5uxn~Rtu`9QXX~LK
zlKu*<%zPvll+@^N7Z(=T@sS)^2AOim#5%JlnC9K<8AXfd_p!To*f@`lmos1&uuS$^
zEzFPy5-XlV45i%iT<*>tYFHRuQ-Z#;@0CDZVa(jz)xevbx+0kBd@peWX}}&o(Uvd%
z&<6Cfol=Zjs<Y|cDe`cTK~<k-5C{FOck0>o{~q^8Yll7Dx8RK^zv9vG6$5VYzHk!{
zsKkh19F;y)_&pTx^Y7Vj{IX@uH9W&eF*Jvb8tXGZq-H6*Vp}!?8=Egh&ew}kS?fn^
zN4h@(3u`Kx<e}D+n}RcH4Kolg53fd<3%OoHA&`WvxxVfy;|9tA!c7&}lb!RZ*4x`m
zdjiU8L_(Mh-@`ocXYb|K>Mk=HM~_6nGv6AjH@apKeh1BoNMhEmM0U;I8(lr4JU!lA
zUOj1ay&OFbfZmwqp|c10B|rx*I6jy|52Z^ttxS}DWL8$UgSMv%xbu?@Y_rfkNabtK
z3M4N6S<^YE%X673&@&E5^0tyooxHWbRmwE`DmWco)o?+KF6Jr+3?>}Ckg2)VP(NJF
z<z~Rod)LVJTa3q02X~>e<3rH{L#q{a9_tB}lOizcxvzv6UQs)IXo&4Mk2a4deM?nx
z`qm^b5a;&!+`1Q4)!EE^g$aPz+}AS>+S;y81yP(RScW%cTwhH26_4ussx_iY$$~92
zN#YB_rj}f2(L8(gdg7C>*%5kWTbG9q48dQYmIuMr1>>+1E3uXQ2M+=xWEkS_gfQU7
zw2NUPDvW9f|M|Fp$X&3q+RAE!{d`e=V4E7b<y+90nl|WOUwL13J@Xs5ucf7{`Vg#W
zhnSyHkj{ku+UNJL5p+#!U_Z%MJwwT{Yg3~8!_)`K5x+1}BZ!9ur108@;`vQ|DL;MW
z9>rJ;@GXt{_}9RKxdsFzp0Aic%tZeh#_Hl$0IJ|uQxpR!?1KugH9H&As|)9v@TI`v
zwe`d!SzY{;5|m=EtJG^?69Q{V{s@!^6E+4W-)-m3voc)+T=?-3qlsrB<_46=GU)BB
z+mjdUXGg}K=Ks<a2CpRUA<@Z+Hx}xy<U_HEkMO(Z>HXQ6PR?SiGBoaoXDw|J{u-wm
zsGs;X7H&9W$Vg<=M8r0@ZtzxT>1W`Zw~q6&F=51Hp}tH675Pqy7k0kJ2J}603E2rb
zVZwDS^zYN7U19i=RCp|Ti9iJcSWyuOIlUsmACbuiFi*NZ{pJpN@Fb7ZnaF?fdAvq-
zsb00#!?;YoE#MRpB-LFp6y8PWrO=IzIqMK`WWeF&!9KDOiLdf0w?N`vS}OiAj$@Gk
z_Y=S9o_y!PfW01|H)K~Sjcbfc-dTF_eh^MHys+D7{JxA5_{P9~8nO&qRb4%~czW`J
zH-^R$qn!VT#!^v-@Rb`bJ_%tIlZ7kTvmop@4z%_w>qlOk%HKFQ-5%NUP%{xx+aU#?
z11jBi*e`dJiBk*>Mvi}N<L<Eff~MmKM)m|Or8#FK+!M)>xzpjfpe-qLHA(n{vAz<i
z#?KTk(2cors(Y*{XxLxf$ox=UC3%9E$P;z*D`2!#_xp3!PI{uL#uhdo(a4;bI=Rf*
zf(om4S!`-pp>@r7Jn`aM_ylZo<UZ~l;3$z;C=-4YiIjO1>h}hdia?|6Vw6v|Ic-*J
z!+dF+D_B-r*Nw^boL`8LM9Gap3q=VW!X(L6K&E&+b6SsN1XFL8-_B4j52>_`m*?Pn
z(i`Pyi*ydYbrjCd;ux<CzSOe{xVhK6{+&})&}anpXj}I73!0_Bb2S10zA=Fsk3a5;
z+b1fD!lN8ue4Pof4|-I!h>R9_;jcDqf1wCNGAefU7M;OKaN8=5j7TTnpF$(D)j6U0
z>}EpE9S_>XKIyiU-p>Y4`sHp_9Q+g^Gnb)?lgs>Wpw%PH)N8Y73d~XY28mZWrk0mx
zKNN}d#bC0@z^MUwP7pzy^~*J)dx8Alccw+7$%b}3r-865H5j`sq~}D**4EX%aX{O+
z%Y>sX$!i~NG<7T+^ny&^Na@f>rZ)qPG|YozQYvetzYaFT?_p6+gAAt1z7+mncuz?J
zrKP^o$((?U(4o3ZDQ)7tI~F41i*m0?Bs)geDty~q@nF~}kuOjN)av#LgO~;Ie3Ruc
z2KxeN>piBDnK({Q!bsMdG}ewg0D=?sNP&g+>jT*c$j6>vDOwD0(jjzsq2?BxJxFUn
zN6gzKn+#Pm5qgsR!tjtS2Ik2xRG+18&po4>yVv(VmK-VvqbHC3$H|1DCASm7iL}R_
z5-OJmXHr&HU+F;_nG*;H!RX$9{%rr*=V?`2=Cb)1fH3bTh&Zm^CJVPG^M%0@;$#rZ
zlUL*9>PC!{{S9GE$rv&hvgCUNWV_Cf3_d{$qW`u`4bNE&@7=z_9U3PlkjZs!55Bx&
zbI0TpnWAt`%3Gw9mk2&S9`xC9?%L!%A55CSc&yNxx3Qh~(BGeU&=(I@{p|IYb(NbB
zJ2amPxdGYX{Z$Y0!Kfy^;yFLD#{%Bmx;ZRTNkrh*;EI2}mt$UjQgAKW6O}%xp~5SK
zO|u1<fNn|EuVq9g6oyWtBv3(dLNBd&>?U;bx`yziYZ670XTmNoIGWQDd1AWq1~dNC
zE>Grkx>yGNu?N3S{RHgG3G0b<Y~BEO+~CEn#&CGO$Aistm)Y%OjO%E9)G84DY^Z{A
z`kTrx!xursEmQOqKi{GCo_mCHD*DHzL$oH?sP|8erz`D{$$>~vxj=-V3hwAR&U%9-
zDXOH2Xxm4SBm56~aYL0Dy*EPND5!IKi?O&bd#LeFq`QgGnFir$s%P+6#E(7uRP9r@
zEehugUR3U!UJ~m^WjgiMM}sJ*oSBQe+s^29%3A%G3zGMi9%o6lTS*6mJkJ*uQwa+<
zLna1n>49_aG{jm*S>5!m07bK>*UbJ*J{#wa5ET($8z}Lkv}k4*`l#?23~9g*h&;ry
zw7>mdxH*DI7@E#5I@^w^1^~kfB1Jc70pAn!9Pq*ri}^8Q5D5jzew_SB9w2};k6WBb
zEe+YDVnUN9i)*7E^!!>X4Wxyj?bl>jRipv>6ed5)A&QRZ#mxRwi{9aZBY5OM8&*SN
zq9VYcw$onUG8ci;lM`BQc$m*$noqa45=gS4>9K&DJ<1Y7!amWtufpXRt|ezd0#C>o
z{Jf=1IZFD*PB&+<*I$kbmy>k<mx+i`>i8y=&YzJzE~+dQpYAJ9lnTeo7p}3}Q|V{i
zcC~ew597!VjHWbS;R{Azd?L7M49lrlZIUSlGkk|eH%M>cG}!O%`hp3p{Z#mmzZ>C-
zDa2*-EB|TFkFP}>-^5_}l|0Teh~CY@H-z!SXRu<sc-LofE=;G6Wv67ylJVh=nFY-E
z8$Zwe?gD#?ExZ+2t2L(xcfJ2>IDOilnqu|<HyfGMrz+Zln+^1h13JK+iHXDDiw7Cx
zL!&*prT~ThK1x41VMq4&CFjrsS%}N18>;c!LrSq4DHyfsffNjC0k4#OgeLlc9Y-)z
z5ijq}Q}Ip&i|vJ~^2Mk#(=eN-ENks2?Q;)Y5zChwbr%Pi=(Y=9`W#(iAh}yVJg9T$
zdh5K$uO>EFIpJo5i;A+K0`q{oO7RSgtlH_cwfm1EweaEIx27P2#Moanb(x3@P_cTB
z7}B;$ofxF(?Z5b}jcX!pb5aR{tKcwucl0_q=DJ;4lW~V`Oy0FYgECebFjdgdWw2Nj
z8q|Q01M@}bYN&Q~FKe)*<sm{!Su6M^A~Bd%=(w0c6X;cl_x!Vq+2BA+Xs_wouFT@;
z(z8AFpjju9O}PEm$c^~;XEjg!RVK^iN1FvkE~Jo^LKY+Krn3db`X2<Y68c`^(anVI
z1fhEwAKK}rK~tGLWPtGy{(=PH-ak)GS625+n(`O-fV1cP;@@V@Fp{W4fN-b`Ly=Ka
zA9}Nwpq7pwjbN>g+3vp|73)E%I#Y+^puW3@)6kFWUVm@!cA=y1>sCYx)%Y*4O{Z@>
z=OaheP;Lvx#TM_GVqcQE#R03>G4bUi15Ph5i-*V#@|z`IA>lS3B+uV$*Lf>X0KG^K
zdl$>E?UF0Xy}<HXoJubVR9?=Pj%06NG31v7MpkQF2|WBieE9I==3XB&y<~%S&)<uS
zA3yip>2aStF*GCk9l!c@BngvDSZ~^Y{e2KexINw4r1Zl0EyO*{i}<64B_3TO!p2AR
z%as3a2s85V0RlEU8lVWe<@Qz`|8on>mf5)e+{vDX?e7AhL)PXAs$Tf|fuBSd<7e;4
z5rRoQ?RE<aEf~V~g<@ZQm)0!DGr)r*DyOiYaM-E6zoZ_BWJiUK2eHv(J0B-E@p%Z8
zAeJEhV+`S@W_@E*r?}>YsOX@fr?F45eJjcY?X#N=$H_8$fg~yK;W_!TR1Bg{Elx&|
zcafnELlH#!uSNVbjQ8m!^s^Yrka$bpL;qh${l_SP$0|2S9iDzgtS{N9(s;|-C1T+b
z{1Cg%Fs)UEupPq?hiW(!YlA}z``F_P+a>5q|LkDQ*5ErzBYRRs4jB3u7h~W7g6mS=
z{SX;>WKcaK#SpGhWIrf5*4wko!Ik=|v6v$wSQ|aRPLbz0p$Nm_N0|MRs-o$_+ixI;
z>F0co>*pmKr;MX=zRDi$Fijp8>xdb$IUih*srA7ELXxH}FCmB#Bk&=|hb5~^BP^H<
z-Dk(F&_Zc%ZPt?7LrSPwao4_onIdS7--XhP+golKMWfgd&MmI(29}m6FQ#Gl!AVXC
zL$0dlz!N9aM45+83;)FpF1!4)onVkFqoGVk+;zCxsOvw<BLPrc2AbJ2hP1ZAhp!Pw
z>n04k3Zu;#J1*U3@^%pk7%``M7;sm(4my1{k*A~j;Npb$m}CeMehhjri<_qgEfG9C
z{C+a_h<t*~2@MAeWGMBB@;o0ZpG+sELDEKgtbc`YCwx|1z8KzKT#sPnnXwz&vfU<|
z>L;5TCtI5LcK3P%YP!%pMzQhlZ#H%P9xqJkc5tz+<X>*o@vQJPQt>Xzud(_moaCIB
z^f)8)ZS_d9&thuCZ8VxaeV)6*VXBqO@SC*u+teS*CBtDt;P`3%)!)jus@SAg{{f3m
z0sh60Vlqw@*Ees6IYw@{-i0+m=D$^&ZoEH(9_n%Ybd!HAozsR&IU=w-Pj*&q8o70K
z;+&Q{*+u)B3ugus{DpPkkTYC~UC7#cM#7<}<&^Gg7Na00qV<A$_gj|z&GcBiq2iSB
zr&iLbe|;$h9Q#nvX6jI7?bdEz-?#R*SWnEBBNG9I>z3TC#+1+PK<&VzS+l(6l`uWk
zqJfAcF)+^yirKUA+G7ybi>&*mvsWpNP-s+B8&92k2Q$b>JQxDbNiet)&WAJ`Ge}ib
zrO{8s9nLT)RWVI%C!7$#q^_WC`S)~yBSAkc?<|wo6Fw)AxlHO67vYgGhzfDzs46Ex
zvw2-g<JWw!>OKU2rYsuh_Uqg>(m1bPg@s&~n0Ti?kl?<po>_5D89M%XICrdv>FH`)
zI9^4hYvGKK!E?7$Su(+Mo20c~OLI<S0;)26b$m|q>{v#XNjVUz2g+SFEak7YzQ^~(
zH!UYVxxzCJ4g;G<+2r_D*Nl3t7Ve72iH~ei!~F;6=#}w;atq~oFd}tt`&-|N+uCqY
zr8X077uYcHwGiB$I2ZONo=17VeFQ@1TiG-jGK%|Qsd*h_KOi>s%v*Xqu`Y@+>AILR
z^Rc->aLie)7HE+*q9LFsV&{qf&)k|d@UU{pL$$}PX9TvSzu=aP^Hvqj3cbhn`v*Y<
zkE16?ySsz8Or#jr;)(Q_wUo!+uz$pA=fqO6-T;CXfNC<KOE9PP<8{>sXkIEnS9fk~
zq}r*g&_&s$sMw#F!<~LmjIkfRLaW6xX~aP6mSC@F41^0!Jwe^a$hd2s(<%&og<uGl
z%E3IaS2O^83m(c3^CY(AojebeyVmsSL86WSEP^jS8ReYNr`-P4m7|{}7;-hN-Tsr&
zV{53WX><`MnvpT5;<IS^zF?kn7}Tg)@4rl?m3JlE)N^!yW_-Eha!dU<6~7<9S-p@2
zlRTFsqM8XEjh@&>aa2+75l+`2cb%Bf{!HRtA0=%gmrM~!Q|sjfusTy|^t{Sbm%i^B
zf5Qvn`6o52mpDnKrlB$Z`PcnZVmtaXzn4Z|kFoIQeJ1f@05%s!`TZlJ{<<gpy=uAD
zPuuWzs{CSNrMycwpF!`v-zTdn^uM?y8q)I02Q3I%q-|8P;s!}XoZIETP1>d~BA3w;
zqG($9`?|-U8Hf6InWL4=r_tpR7VUS?-ck)N1DUOD@0`(Y+!T|UiA#@L5}*&gC33$~
z$RJFNzkc_ACW)*XeQL+<NKc70pklfMX525UY3xr>mxU6o$)bb?3gjC?h9Fk<A;@)o
zej+*TsnR8b%J(ta^d1|Bc7p4?1}PV<di`LPF1Wc+H&S`I(Z8nih2~aAnl~&|I~dA;
zzi&PzY9eNBwF`6)0jTLQ#_Jqjutp9?>6+pUX01>=-PTErGzklzpf8o9BDNLd9sQ1d
zKuueRDOp3>H4aWlmhlAy=UUAVu*TyQX-AT(wunVpw3yMs^`IA$qO1O!SDqJR9UR6c
z@iHD<Cg}2TnHO&W1_)9OFX8-T-ZU^P!Xo*CApH7MCt}sAEtmPuNIg5cP_2f^QZ!2A
z`wdU>aY^N}G0821MimT!`1K?7FVuPE%2f~x@Q1mB09G^69p7b<FR4Nl^Uix@NeV_8
zb6j^6iDv7>K9WAi=H3r(>lzgcYKp<7163Je_o5(ZRD^9v(`$5J8)&41`3s*gC@4Y`
zh{i!?b0xlI&zAc$7vb*i4*H<L#>x-$e>C}Eg+5=?;~;^L1~!Mcg2R<bEfFw#8u={v
z`7X;FB)%^8J|YYzrj{r{cwr_50j1G`r5uQxt@0AOP|qwxo;?N!a(U~<FZbYwdD)j)
zMQIUZj9RH91NhIBN~u<g@aSNZf9jX$DLeCVa)l?<L%OZXHZyKb*OH~XSHBo7SRZD&
zd=Hzi2yKm#H-;br0kEWPi`+F=02<{Z#unG%5zNo55R+<t0?H?c-f<|esuWhv+<_kV
zgV8J=udg(thk}q)_M9GK^FGVE<GsoP*H-Y%=9_5M+C*VY%0v`*LJ)k%)GU}wIrx*`
z%v&+LyxP)ax?+QV0*!<PnhaSM88?~rAF&LIG*neiAm2tDMA3AN_s4{4$GZxp(7opQ
zF?9=-^pXc78<^jqp3Ux|As;U5FN}vX3HPFerKMebKQxGCXnZ61R~%(E_|LaGv;Pqy
z9CKFyN!28AF3XXOFPHcqjnYm8M|z8nAL1Q>U)xGAv#FQpF_+#x*uD<1+m|y*4(K8N
z52Zj_zoW%WtCVMRcuVL41=_>F3)K_}(1kES{NQZr@aQV5MQU8mO+01b@tKJSK~Ow_
zDu)%o0}AU+HI66OPF`3%ZIhLXpY?UPSzZ!-$o!W0LpHEF3^i7Xv8ZExYi#IZQz9`<
zj)65uUWS8yE@z2;>^ekB#{f&Tfc7_P?~chaKJ)H4gNra>`MF`9XcZy|Ce3w5jw~Q1
z%XLPOJY%|BCnV`d6hH=-86!m=d+{_Ptm^UOB<_ezP<WqJBt<usLElrelpUoI&bE%8
zMR;<UgY$~LP|Qza9ga&zoI{}T1+oU&R06WbaOsQ;n<NqIyc?Th-o%r^1%N?ns;S-u
z=F867$Ymx~f&(#&XVg01tCBOj*y>kP^i7HDM&#J*fzTR(to3)G+eMK}?u0E}pDX=)
zF(tvFw%v=Ul3SRr6Qn&l;g+=Opd^_;;WUUT0V9>h-^%-pOX$waIe{dT_tLW&#}0ix
zKZVJ~iV?4iaXDQqf!X2X>vOvZPj<o+P~fKTn75HHwthg)RGMutg+4<X`TCW|f*b;W
z5XmK(S<@;vmI5SQmb6?$Z^jw~X!~q(@G|Q(uNeQ@g~oenS+(&VBFA#=Vv|n!P_0K!
z+&9Pb8n;_OWYZ%TJ(*?M!FV$DPGzP|Nq8jRpn(vgJ?F<7GSFnjud#^35l_4rplS=m
zqd^bOC5B%XEyEK_TxNT5R_C)fD46YzI%o`&6DH>{D~4n^R+jZZg>KGaT&4_Di7wN0
zIF&MGm|y8K`yOx7WtczdG97^vQqmGCM0|!kCiH<oqkcB-BanK14ot3+d6BEajqsu8
zR;Oz<x{mdQsC7N>3(~NA(|HY6a}#MQNtlYK<YBZ*`6+ycicVuTK#yy)Tq>u0UWXEn
zn=3{-hKq$ThswQl%FpfMl##np!^Ih$cBxNTx5#88OV@zad>}~}bgrAmU6Io?CtQ&;
zj&9{^i^siSnAg6xzMpM*78hMk<lFPL%but1;uHouliRI(0pVWNGj1EkfszU$W=w2~
zwY1AP`2zbv*ZZIPiRynj`E9|Z%)JXO@D%&6m8H^B#Q*c@((1~C{`WpUtp7dKX<dOA
z(QRsf5b%EGWTOx1c8db6;3jLbob!FO9Jl1zyY1Bu=44;#<Wyx#9b-ai?J1)@N}nY8
z6D4>SY40}9OCRD_$uj>{0B^P6?8%NiQJ<z+AZsr#^HqGZ)TL6(_|FeYdG1$U;fgK%
z!_>k{rf|73rf|i4UG^cDS0@!-^&uw}UNT>Qu!UEr7XHzPTv?76Ue;gB6(4d^;T2PO
z#T2fT#uQ#Pg>9cJ%YNbOO!r^lsv`QFvaM_PW|4gv7Oy2X)<Vhq=Wat~<!PzP@}w#)
zme+LAvVK@uTyoCeD3JaD@~C7s2Y{0jq&r5--3F_@)br=cWcuA~!|GEd&JrI#<$GD}
zp>Bn~)*4u=G(%#0r6%UKcjREVrD21W^(2D5+<CXRU1jU5_E~Sx8lHE=g%mT=geZLt
zSY3F6etsZ2Vd7IX2*vs-Jn^xbtI%MsBMhyw#f|1P)6=D!qk4OWvYM)oy`G`kUHgim
zJo(OJy1?h&ygl4juikDS9sPQ6_%|02a^w^8Swv0P2}-^Q;T%SK6x7j?P&H(_Bv%v3
ztqrt<Z5ADAbJ)T-+rmasUOhfIe0x-@Y@v`iY!oj+OhhS}^iyO~2BZOC_%_Ct;m2pe
zn4)ErX0a{!O{$3{e4r~uf&DNJic@9<us@~Iic|b)yts{$xFf18J&|+h6u<}+a?j+Y
z_KA+y_)BYm(WBLSXVM6(&>&vQRA92q_@1~+u-nKb)9i09DBS7Jx|A~3QZP~NkPqRo
zWux&<oOYE*k5Pi{jZP*OSzmt+>yr~UnFV8rh_tZph>pZ2CS?iIMjWr`211wC5=iHZ
zkWSd{4ehkZi?W_$erveXuwmRe4%x^iF0!;7LB(NMaZc50R%CAIF+H7uTit+FWfySi
zfB2Xi2VO0R;owAGn8PDIl>4x`@nRSKTsL<&j*ch@ZH(>aW97<N4$s!{SC#Rvmh`K1
z>YK5T#lgsuMq>Dw5PsBkJd?@T<kGU6U`#*d0C8hS3BKGWa#_qr3n+d#j^6SUJ^#tw
zJ1pKNV){;ufTo@QR+lQV^Iv88;rw?WA3p!>#jHt?k2iyYee&0_^Wk1}nI-l}V?5qp
z&-zc!>dxt#^d??eqXDG3u)spj#ZJ5h;?nT5@cc9hn%8)wDqUJgBUIhB-)BQ_?W39E
z&dSw>`27k8)!vymGk%(^dB!{9BpktWiz6)bo#)12)Qw3_H1OVujlLB`r`l%hB2A^J
zyLI*eAET5$-ewk9kT~QRQ^C0<ih__Ebq;2XXvT|vvlg7kY-0Z{J+4=%?PlCWO`Q0#
zTIiS#bz&W|wL8%cb!KQ8y+Y#15PP9%4P)hKt4ZCe8@2t69iyy1e!q0iBh#aHtVdj{
z9YsTXT5Dl&hQhPoo;*|wp=mLUwP<Q4Bf+7!!gKuL;~_E&WTur^LeNoo-actZP#I1e
z9L{HahG9{fVRd7}qB`B=G3g=0Yxo-JO?O&vIE-SanPC1`@AvVIT4<P_HDWz8^&8Py
zZp_p)G?Lsj@Hf+JJ-Jz|TTe!c=@z`l#1B4J>a%n$-YuOmkgj7AyObSF=ni%l^X>s!
z{q77*cWgg!_nYdq!kdh2R%XX>b{dy__$BLxYzJZ^(8Z|BelZQ-b6S$U?3?BNm>;Qt
zw~nZv8E|?j+63(A%)Mm7EvsDy?(%kSGNXjn4I>>mboM&$l6o<oInr8h;j)i0eAlKm
z%6gOP&&cwlRp~0#Z`{9AjxL2{0MW(Z5c+kEpd%Y@w5Ge)CgH&v!X}mA-fBC%Waip8
z`S#rX+$PwHY~|Om)s8ITm=T{b(_4!X-+<Odvli)wS-|%r|3}Y%cY5(BpK1AjOO@ql
z{P%MCA^!Wld?f#`@swx=byCZag0Fvm6;e{ZB()mqt3ye^RWORgMK$PMz`qT-qFYp`
zw1Yngf{Tg*sBzxv!*i{s#`h*Z-_Dj3rL~HWo=F4G3(uf|=g$QG^JfVD`7^BjoWTe6
z#<}SZIyYl-+D@lOVIG%E$FhxbvDjG3xs~J<YFK}uew%=p^2vs!VljN`R-)ly+SGar
z&{siI%4NBP(UrPTsmm=X)l)$W-9DixY3mz-l!ju9PAP%|9q*$bxrG-6)<ZyN7NgQ?
z_N=scR#y5xz*fR~+n06p_o=EzsZeT^!&0{_lUORyS&ng6QrVz*R#ZP4C2n4o3P0p2
zCWN2>!%(i%l;cO&sNfSSUG_>h%2-;Xxva8wI?%RGn!$P%sS%d22H}|D%e*W~JuIuI
zqf5|<ROzBHEAxP562@LGlyU55r8rfV3|UjlCAtMb{e2m4;WU2A=m|+<r^t;^8n1N-
z09hb~rj2fi^Udj><-dqBW%5nr5t)RwPOsw2U+y9!(T&aCQ4wphDVL0HPCS~a;o!p(
z2VO89_VV25734R1+BfD#g}D*@8_tcobL?%eZ)f<L2IeMjE?@HI3L$UqHC;8cKsHwj
zcKDh!u7Lo#O1M5AxMC6}%o~FX^CXImvzuW<J?F&6CSsL<w~@UtdZKD#tUD^t!&5)Q
zUo;!(c_<S6klKw4R?N^n8%!S3IOLdE3FHxjL!bfi7^MH42#!wi(37~)AjSR?+qW=;
zJ%O}tNhG8m6ZFT_&|_}oF}Ly<n#rLSYh?^%;t<c#B;zadjpynqS#D##0=1M-OJ9UB
z)0GFrOo^wO5_mJJ<i=7@o3KTBLesI%m@wC41I*(sQ@4rR>0k)6!enluq&L1VeBby!
z{J#4=PoXyI;i-+5I{Q9}=7r7^sos`CLUnYBcB;D#6Mi*DC}#aX26~8x7<blg(!zU8
zcdUYfU+`|XK8#%w5~bG9aQ_DU3|mD*wym&67s9nD;U?Y)PQ44{&YD?&wr+323w$%K
z=U-moE-J53YpQ7hgJ|p0L<-OFURxf};NZcM9d|G|>4ic2YLaP}K2n<^GmV*sCHyHf
z7gp`f`AF}L7=49WgGWG3)L(w9xBj`2Buk6!AJ9NP&cg&1SUSqZ5IsEM;|7D25#>Hp
zsWQ&LJd)H1(B=bJtl-d>tR;;6LkXTGbEsT$Ct=*7Te_a9_xmKqvB!WG$k8>2xNd*6
zh&Wip;V;S*ECQ-%^GB1N7xgeGR8+fnR%eqFm<i}BS=7hPgxV)r6Cw#pF&mb51GWQ6
zr#J>hmY_TrlZ?TWCyuSKwignMCR~@DQO7Z7I>BfnoNt@GmNC>cs%3vhsPTS5%^KE;
z_fDFGj^h*c#2PC}<4~<r4?kh67;B`bR+qVXzZC+CCe>JYY%|z9v|Ug?!GBftza_l-
zdH!5w0cgSY5^M1+YPS+JGN73&C<;VurjfQ(n#<_IWG@VKh)cz0pWi`q$R~i+)Yv+M
z{dApho8+`%qNh_CJmaMO<gbhdj$$GXfK3U(s-3xF3_!*s3(ERJIB{|kZ6aKIvFM7^
zafgus0v%uU2A^z(&yAy<&Dy);+R^sL(ecJ%m3To>_E6erVH%!d?%$KF=AiZ#&G6J`
zY!ifakw>v<lU|VaX;vUU&8f$hEL_e^ps5G6N9zhJSfO;eByVNE)^98P_NlzJd$+CR
z#i`~&Z`WRJ@9pgG;Fy81-@SXMZH8QXHZcn7bycNc&QoH`hJ%vnCm(qq%GoFgL&SDo
zwd;*flyhsuc`-QBQX9Pzhv)Dj6jP)GD0%~sRo<dR8<HKI$xvP)e5&uy-=cbl@u#-7
zk7{qW4{MtnFLw5~k13nHObgAH*DA*+j84N`BBUHm&stg{SSzpdudn(T6bd&baw)D4
zsYnWr`k#Ytfjb@;`>;@!7q$v0=?#ay4uv1;_4B%7ui2bZ4@zStu3k**$kS{nMW}f!
zLB8z(D`XH=WRWmVV@O72WKaK#zp9EPS2W?&bQha8f8@rZ5cL|Z?x;5k`TT39j>o6<
z6HSczf7*scjLk1DRdQ85{?;i+Hq!JE5-Q2ZklC1oA{<*B5SRzpEshYSv%woQ+4Ylk
zhEAhh%Bim!A7#S>yd`SzDljVQzm@!QSE0R0f81@vMCt_P7!d&)n<3xRx!T4Xo?XM@
z>=wqcngfav6>eeLxJweudKOz8)0~HaXMjv@M(*q~s$qlIb%Jf3vEWMNb{>m2P{N>&
z>e?}2kff1hN@S@h_PZivYQsfVP@M(s0CUy`&756rWFd;?srdIfOW>El@-I0rGUG*i
zJEJY>Toe7*FX1od^2Ta;r_!1Qd!@l$jRSX;1nw&DRC2(5(cr#}1NS8f+?TvlDVe;7
z5B^>M$398-Ke%DvWB$LVt54(k|5hIEf9~b8C>e)1Wk-Sb*PtmP<5dARicc7)w50<}
z=_6XR75)R0-|_A@sCUo!tRjx@q%@kZ&#SE1qLa22Z!6~Og%fy8HS&DTR0nzdB&T)a
zn9nX?Eqz|im-9~xKjbT={L)gP^5gtpL~q^<PBCxB=HbSBOw?Z6+Ih8ee3V+jEeQ6h
zxzY+n%YuzxpTKpDQr^DVoGtD<<9UdaL$y9qQRqj<-8|?vqiPGOIHNrtUs#hR9>GR^
zBK}>|PcbAl3fu74+as)To^@6ljL7NY=SIETWE~sD451*DIurtt2)9AOH{qC%CLu=I
z)}<pX6^om4khi=Yy=f;I2aVBy+}1H-FnotWVEr|WjY<C?f=1fBM)p;Dz?gWpZW};>
z!2Wn96$$$`-3n`3BidaXO#()S0WD}~6ii~Li!nhK;W&RLL3ZdB$&DuAu^Em|(8c)D
zQ5^_(f?HX&RFahf!9|V6QdNv`)LD#ZF6??`XHV>jsRQF8_&E2|YOrFeB^RGOmv9Vd
zjyO7OP@o(x*rmMbAbMkju;QAeTc)V4eACwT?R;ppKnFs@A<pW33WYvY7>9KTlDbaP
zR&V~JPO*Y<?YB6GSUd=D0iyy@WY^Ka3Ha+yO`PhYWM|MCc{X-2GOY=i1td2oB#eQE
zWTnCA6-qV5su=GO_%&*_fLU5v<yA}#a3}|48csJYSf+9*X>Ulp08=S`pEKd4Xd$`5
z^eAQutt(rCOT#;&OFOFPX05P7PW)L2C0HY`qA>JY<mcl?k8i!<rv%RA;m06zP;GDg
zDTbXewp7aeQquo52Yr%ji9Eu*d38}>9iVIpcP`V?h~~$pn8UUS4-q!x^uaV|D9X{*
z#^x+Xmj)J{4k2QHtuY{%!N<=n!E2o8=A_74+QpTAp%KPbc4F@JP4E?+ZQG&e-rT!+
zC`|Y=3Wr%&j+~*g<$}Z`W4@J(EO*yzcEW1Mn=P?_*L$DKF1WaqGGB5L&QDC9>=udj
z?-9?3bF<IiS<gG{nP@wB>m>oRk?ZnSfg1!B#Ke$NH0+HAjlh<noRKCT4_%)1u9)@W
zRv}i)IhO<4E+!yF`jimFhB|@OCTzks8hXj$d;-(B1VlD#1DH2!45HT{g?r-WP0+oG
zF<rK+DQ_1{JfMvo$XX=QiJnIerDKAv;Y32~gSrfu9ungDsweHFA4appK1NMvw@5f=
zp(@qlDZGLm^pf2rIfx;Km62$xz0t5g;>~}j3*VFh$WfMBL)F2^Fd$g{_q%NVU2fqK
zX1&9p@}vo2ITiP1>5}t#hkQ(Z_&v;jT=-BgeAy`c_Hp6&=96r}{gwNPOGy1+F{7S@
zT}4G*l?`7+xC7CiEUr!{dL0Hcr#(KeTE(D<V*a{1AxoG#EbJ5VL_X=k_TKxn#&AHu
zKCi>ZPPZIz)HrQjqWw-df<{5Mm<QRqR4f+fscP%23m9!mRS*kEszA9v2onmBdc5BY
zTcnF_f~e6knNYaA#;o*rzH~W{NW@T9#(go*SmZ(IHs?$8s)?;)QOu{~T(ycjRj7K6
zp=r<W^CD0Z=aRm_fR$k%<e5A*VSnquW0WN~KLG(2Sfp4UJ5i@JxBY-Fcksfy-fgfs
z3zX)eWnmB!zrHhS4_j~!Q}5=X5GCe=skPAuF@viX$dGOUMlN<&#N5DNLB4H3JDGuq
zP$M_}(UTDGG(LNwt*SjC;$5>xTbk^Lt&mWg#U9|sv%qz66eJ6@@jBBjPUK0&{Q`7J
z+zOkmv)0hQ7WG<MQkUpm9k`fVbWI@`s-R2YbC^(|D!R%n!L%4LfTcKikD&t#^E&AX
zZNcXJ_Km|x(yL~V#GEHjx@{6zk_JP7$Be=m29JZ=8b1o@Ou`^UIQW)Mo`N_|^*Awf
z7DmC6pE5sMeQACEwaj3SXR)o}F>ee|AkXN^xfg$9%UhZwu|*OvWMx~ju?rhQMoG>F
z#{a>mYkRP!Giu_dQ>za!ml=g^#)lYolOM_P3>#n(qCPn5Hmc4gd!Pfm14pK}wp>_E
zfP#_)wLEPdbEhyrilRn!%wU52;p14LARk@X2%%*}fR+#!h{gmKzUtgQy!l5s2{+gJ
zXpT71lanXkJT`&>nR1m}|CabnMU0Jnr^@Jb;awOe*r-`l;)HGXI(X@4d8vIZCUaa3
z*r^d7?tB_~X5u}(=IqJ5#b`hvP+ixglFI+&M;IjJs;c0WjWqZ=l(NH{0kg?-qPj@L
zR;tBgiC>%94s(+5<o7J2`Vv2PMs}2$RY*?NC(fy_;K-nS3ifQ)xQ1itlX?ShhgStM
zDWWTQ9hr+V%TQhe&-;jQG3wNa6^B+|s7+SrJ-@_A8WGE#MR-IZIHZVczax(#hBTZd
z`7&b3BMl_k^3?fAhGg3fBOM1oHK+4qCQ015CQhM-W=0yFy>2*W?!-GAKw>GUGwykT
zB@5VDY#X$}cujUaZf;+Vv4bL~gf7Xeg+A;!lvaa8UQV<S$5yLb<4QHG1a+ohj4ENk
zav#n98tuTSB6@7GYl=QCsE!^Aktf;kS?<+uhUbNL>r>!pzR3y?mMWF6VTl^QoU~x`
z>gCSvHk>c})90Nu$0u+Q`-Jv$On$snQD7iuBZ>&v3iuamG5EU?0dP2iR76-D)3PV<
zKF@3gwhgiTJMHIHhV=dmqdANbB(?GwEj!Y?pH${U2C$pSM321_7>rTG;EV6LDBt_K
zJ;-xrKJOWJ2|5&?OVdS10p)C=nrD3%(Rh2n3k;p4zG*FJ*-3(%dPs@k@?$2dMj>&m
zDVqoY|8)Q-BbWTxAe}YH4Wl<X8J(U617Ca219LV@t2c+@r2pn>2;hR+5w#0EZU@~e
z$7e?u0C?bPn=M1c&9vU2*y;#j7<5rUg>x(_UrtADt&r}04hE;~-i047opOQ<W5b@f
zXlN27gz@N<?|lFUcX|3kGHvGJJlDP6_LoSVqM5~GiS}TCzMy3un*VUO=+xS^lq+mj
zBA#cl1Z<|=9NIhRfy8vSbex#69WwDxAo%HRdbubjEs|@EyI?!ytEsPz183)fA|_Oy
ztPhc$jD;!FQxUzJK`b>Vds5Eknnf!zdZOD@Axl%AmF<P9Xu+&Ug*K0;Vi86j^!oso
zMFBOSY&uod3H#b@5jFCv-yVh9l@1QVbhV7<L^9{0_7>J-Y)>Sx6!)<sfjO{cO$qeI
zxgQY28l5E7q{g6tRVY5^q`Ic0=$hpn%`8cq&NBizxa?DSRjp2h-#BwH`b^FfECAFU
z8tFvsKS;J8i1&24Sh|P!zspO@rD*)8^6EqWw|n{UvE$v|t{T?Q6ovi5O-pa2UU2e1
z&1+GJQrl3FhoZ-f<aAt<{)?}ExmK#_b2DXd2k!FW#c^$K`{-!n)%M2}2hd9uVd(HZ
zvj^*~eipDmQ>38v@VIn&dYVV3H0q*WKF>bIl;>1Q%1%zfK%6I49Ccg&glc33z>v<n
zO?VKT*E-w)tmZuT<Z%1w&B6Xryeour;Ld#*WvoMaw;i-Sh%_^hdVV`w8^;^5c61Y4
zz>FfzlxJy%(hGR)VCt}~q;}9Hwi#)oGD90w&o;4nu>W%Ba4(^aO`Tl+T8&6FLo)zM
zikdV4O5=MQ*xUQBcJ|{_z1=;71O9ap6HqM+d}sfc?fv6}!}l@xCVBELx{hY_8;7u&
z#-Q)Rezk#0pa}G(YeG*19D%v)!Ax3U6lB1_eRFuQncV9)I(Ac1*H^C5b!-B-$-&|H
zCL+*Rrw#(IXd{Ez-96ZxFbETvEYjA~ncCt|Ube-(?Y(5Iag@N@q71+%ktqYeHnKVx
zu-5H)BC4e4H^-BScxD^h-FUIRn~bPk3^sFHJQ4h4(AUNiMZfXbbaKa?Xh*kQ)X>H+
z`;!{D_Tt85b{0Nv?jYHl7ARmX>RaId(v$SJwFg`FDW56wfB7Ed|J9YHX#Ah2<);tw
z|Gj*!A^(?EwjQ!=O0TQl9UGq-_Nd+5-By+2W$rI^N}#9}rvLK}`!OjiRhA5M>O(lM
zpN!Cdi)?XPU3>7!8C|xpA>ck`Z7Vfus)bQgd;bF_Yn!A8SwM)PqZr{9<Rqdp!&V0a
zC)7LrOdbu)Il)s#QDALpCyRg?&Kf*qlcSpLYL?#RrStHG*m~_~=eO;wJhMjMqh{gx
zs2L4Rn-H0h^?T9cP8dI<?WhlXG>oBc?L+A!#?3&57QUe2#Z(@xb~`w$x0NYDuJIW&
z3;;s<7@bLwz}ci?x>Wh2v2OQD<V+y9na+sihcn{C8S&wa_<!__c(na$%xUL{W8vOb
zjK>B5>*K&*Ta=d&y|+H)KE1{mu7Aq?;T})9Ki=0V_c|!M&M9{~vhL)RI~`AVbjrOB
zo@PJgPRG)J%qbV865Gbcn>Weh*yyvX)tGTyhf6^lyE~g3$2$k{(KyXqn_JawU1xjV
z+e#kLCT2J9Hg8K)n+JP)8~aI{KY_nCT{XC#t9vN_r|2Fgp8uEb<NRM<UX8{7eF}dc
z&j0uEnfd&`gy(;Il$KncqzTww<e-6n)uTUlog6k!dY=QW1PGcaJz6b5<b>}#iQV*!
zFFs*(s82;5%jlecyCXfEzeh_I|M0E-(DbQ0a_E-V7$78!9l6DV!T88MMkT-r24L5D
zwB4ZP*5@tKR--p+H`!jI+0@bhGI1BV<oPjYw8Y72=(o3>8z+)GPEj7I=<F;$TQ)n5
zi3cYI|DXxfd3VN4pv{{zW&(AA?^uTupT4dn5*NU_e5VzXGg-1WM)!7gIqNE59t$qU
z^;TX-ds{Xzb%EsGYUmcQE<dif1UW`t^ZIN)X2J8z+S2XSdJ(c3Lred62xrntXSGJ+
zl(6pNE+#}W+pq(lRj+abccfsQG)SXcNgSt9uinK_JD3ip7Y>n`8RaUI;N%o%{&Z8i
zuv<~6*4$E(7u_qDFpTEbE0J>;O+>FuyoL(L^~z(_%1KOT(o~I=2x#NF3+B)RG!6`3
zkkqdjHh$i`vK<gtTDoxoIZUSr4oD6nSqIwh2905rz@Uq(pOc*<eny6c*$sx|nXKnE
zZ>#HU3=`@bN7wWE=O6+}XL*g$Q{j0ltVx=Jb%~ck%aFv3d%oo$fdp?k1_62c#BVxi
zzN0rCF@7r$FdC@L-Po%RQzDuBew#>^iMLkK8>u_qx;ms<RhhGXgTH(CQgr|I7rvX)
zcI)!I!_ggI9PZV&-)-)`J=*zY8!wo2KoYmuGb#qB`?bg$ac`9<^Nmj5BZh9hQ!_<B
z_%%P~OsZqYYt{Bq;~2a6YOl6`upjB2De#O6*v{z}<NH>YzUh)5yn2Z}oAv5!>EY_^
zKkw=c6`EW2Do2fJ=q|2wOj8qExsd|38M)=T>kC2m9QsFH3`Wi+f9M^d-@skp6Gp|{
zf6qHZzvmD4h7b3K5BG)-_lAG1d&8xNd&9r-z2VYd@7{3C{U3Y3-%$|g$@hON%TJe<
z<o<7^TwX5Y{a>lF@^JrmA0Nqb>_!>EGl!h~Mgz=Z$bl+`J@UVOv2nDst3C$<bWIKa
zS{Hr@;Z?f_l&pg8s8bmB3KB4&BN>#2^^<lGx_OaWZ8Ml?YO|BiH8?s4;fEK$(6Ys2
zO3Ks%0b|&Mq3G&#K*gfvS_zA0%EeM9uOF4=QQ1DKL?121OD!8%75k`c9wE0#>~gK$
z>z+mXy#W(ixBbO}H|}&L+Uccur<W}(-RWg0MV&Ti>;%K}UNesSj>i3-83Tga16jNq
zd?ZQ!V1Jw8@5wQS9{>IA_ThW|+?+e&ujK4YFXUjYU&+~yUdhqX)}kYs)S~kqypj=Z
z4+gyfte0jGA5Wmiv+a#%o76o7j+@wq<Ja-_;r<3mHyQmyzdU+>biBPsulTEe0ed}+
zfnM-eTVwy2_nZuW)h}LbZ2oPVx4Vpd*RS>uP(s;>yh9o~!MGim%T(Of#T_1yxziM4
z+SchrsJ48kS=Wg!6YwGgI~F<Jp4!-bbEE=9aU!Nngmo%X1r8EiV&);tyVL77TNDx`
z=u!Xz3`U)RQAv)3!P6;Jv@PUd7i}e&{rH`6@Z9Ra$pJIkVp+_gg9=-q48v5Drhta0
z(d$9>W`l|`x{<kFS$%xAhiOz5A~G1{IFPsokuW2S&+Ws*gG0R}X)EBRuxOM3v>&8n
zX&*85`k-<C;l=)jH)rtEOxoN1t?idP``bcS@akQKaagfWj`_*4c`}ZAV(r7R1M>{P
zvOM;A#PQfJz^j8JdU0@Mp1gkhYWt07EHnCtdHc)G;qhCcG5K!Z&D>QwVsK}7{m$-=
z?<~A}y>ZCXj$h2vql1@>HT+_p&fb}3E9^s+Ex#jM<2$ld+c-Wx+}Pw+<-2)zxc!FD
zVj22oo{#MiQ<|Z$0)=KBOJ=Bi8T8tKy8T0+)DFj!+95Ls%R`~VJDS78+ZP+ho3Dkv
z!8h}KW+ZYL72thP?a1%e(fDp1)!x407jMjyE!`e|vE}wRe%aXB6)5z5<Hhc_d4G8D
z_L$qiZ#(<1%$uXPM{l-es^q(Qx3RUivu_%tUrviN4jt7ZA6~ri>G;ifI)+yih(#8E
z15RnR%ZhpT?uXUd>N35$q_5`96aw0wB;zHUuq6_hy|<C8A2X=Ky|IIG?~>@f`DNZ8
z!J_4N^vzbB8Pn$0My`EK$6h%dvvLrC_Ec^e^UJ*7MSU5)qi^Q<tKEYa@`Aten^zM$
zJIf@rQ2uX@8XSOp15TI<q{8Igq|wkU(%(_WZzvnJS8v{OpWnRIk7h<LH7HAX!(YN1
z<CpM8Yzc29F5!&{3yLt8dc|243;ofc-wOj1>XER6m%&>>gQD4YgR?pYt<JGnKmw*Z
z?X}yz3yO*)<gM?eiZR)fEntQNs-EST$!=JCUkGbbvtk0wRiOAsV>)4CApJ~XmMDPX
zDPDAAKxj9Yump?!V#6#+C@5;1i@+WpOIKrVh3TaY#x!#*CKKB*=s2-8F{l)WbNhJ^
z{Ig-L7SyNUDx?tK?G_U%7WmAmM;g5WBep5AS#34WkV0|af5xXZovB4%&ByP=6R+fJ
zoFs)OYXO_ibGCEnr4zX=QY)_(paV`o&q9HQ_1uKDr5k1}3K(|Uq4c#Jpo>ia*61yU
z8LLFEk+Cx$5bVc1mz4`+)?ZOc1oxd2G0V)8iFk`<VXD_)bBRWP<_`>OjmmR>@WF@j
zM|$)vZ9yjl;~I5!rkEzfHV6jDw#NkC{=}Hn7KJ&&oi@C{<%qKSYq5XU3W5biixf7I
zdAYq#G{BIH=iAy}gaJ@!Yj~w?EpBd70S7hduUw^vM9e_h?u|623ZQWQ^(z$yL$a!1
z4x<y5*e5UzavZ~%Frn>?p-*^bOuC$BIgGg%R@E4zK!aTGK7O2y3%$H8j{Bmuxh;;h
z?bfg7fds`RJ%)RBR9P3<%uR5+8HMYammto)J>1UZ@&u*D#$M+*SzV63Pp7jGlf{w^
zmhSgFC76XLL8m{w(y7I>nM_XGUQyt5i*1a9nqaSDbX$71L@V@B;$mBN`-`ZHw3TO7
znRE4Ah>`7v>I`$bhom3p+!AA<PoV=BVPnaFj3RDC2e1b=jjbfj7n(TnoXkWJ2p~og
z_|RH1Ik=|g&;SR-@J*D%GA{RY)|Osc$gp%;;t7P6XX1d>9QHba!mO#GAr-DY&$WnJ
zTnR><BL_wmVC(dXjrTBDD#s<I6d8U7;^=6IbUKmA<+dEkF*^Wv-Pi}Wy6Z?XAm^+L
zxoCPB1JYt$8_L<t2F|ns8uhdiMr)iD2Pc<*H`?`3&+xP<h8poHhKG_Sfi~61+-W_O
zvD&ilpDC+-S+mcR4Yp_?gyO)#2USS2)Vek!X1amDq9T(iFin@oG;ORPqkSdfK0e&o
zKiYw#e8%ac#lTlWktr0nb}Sc5i{)Z@amhyL7;C4`FaHP|ea9Ae6R5XCq1@-|O^yZK
z`_zJc32#T{N$j+b3=vUvvWY#xv~*33fZm`qDJF<LiHgRDN;B*~z!PAC`TjWP01qXJ
zR;p~)EBe!9TQN?|ZjD%Bcu7+|gXRCNJ+FX<asA82<IS<{AKF5rxnYyl!+-}X-OqcD
z1M@5^r8(=lBt5Lt-B`U~GvZL_*50Yj7ZkZ-x}!Ny*w9HaoQdt;I*&JFmG68K{d=^@
zzQ;&hE7Am}5;dnc)9YJ|M=j~EF}HEMZOPRBuM$q^M5nvFr2^Dc!@*B<?`VhL4)oR^
zjG>K|r3<DL&qhCH^8N>T@)d94&Sxi;AptTHr_{X7*^u<~4AF)=94YSClzsM0iQ3<J
z$Xi+hbyma#nXU^=ad$&A`<ZPTXNZ~>MXoeNkDn>g1UnDMw!S2-8!fV_GNlJZ8`K5V
z_()Uy@R<^wulsat50;%Cu)4`sO6&u%S@tccF}^;7+=mJ^jG=8OMrP(1M0^qpIRB=_
z9;1)++wmeYJTtz8X7zT9iQ*)w?KOQYI2mzF4dmq`PDv@Ou_hRt;E};uqA)?R<d>Z2
zy}^RWnAe;nbB&XlM)o#q->Enlezd>_Nc6kT#>c1bO$_Ld0nt3RubM5)CqT*%OqzCD
z|HO(rG0H4@2X5!Qgp@`X@i=O0#7V9fk~uNLfJKZFE78OaRs)%x3mgB%-k{m)lDeie
zj{8ax5w@ANAsix+n;*BwGD&G~tmlo4*6Dhp|IN&Vt>Mi^+eE0gY59iZkyMzI$kR=a
zIc-@Ko<qm3y+CZODNDr`oU~}HN%5FPV~l6jm;#R3C>Mxo2#i^8B5C8z)NQ1TT3>^w
zM5&YI3{B|?@<juv;Zv=1{^j<nym54o{VPU^KSKMHXkNBD0;BhNa~Zd$?DJ<zuKJvZ
zWA{4kZbVmvrg-8mmoTu38JI!fxS^Q>(ZN+dUe^`Sne=OISXk}aD|wdU&SJY63n)A%
z%k`Uc0|&s&jKQhn@QAVN*P5G;UCA|>fa~v5HfF=D=l?&&A%B)LH`?t<3bj!8Jb*q!
zn{{^P^-{3b4O)#OZn8{gy>Mq-Dt1P$v-jDc19ndCX~kTxqD#Ja0Hg5SnTd6G72gih
z*oDPT9AmDhI=9*sI|N$EpNV@S-rq|om7qp9zMoFaZ`$W?zuIWu;?5MYJ>0p-u4_%X
zPoF8V3-cb2-MO?AuQn=k>dd-*QH%nq=OQ2{*__gJ{7NVQ@5{-tQ*4o>SO()aQMVC7
z`}jU^(#i=v&<QbbyUhyyTx*ZzI)Bw&7X6-Xv&riE*Vtoqm8qOy%KmCxwr<K93;>)X
z{xXp>B!*kMGoCPR2iAQn-o;itVaanNk>z%+1yy!b?02(a+Znp<lA{uT-HqC;f!nTI
z*jn#o{c1QjSjW2Rg!N2Ljfsnxtcq{CfT^<Le<$nHMTxHWcC;Sug`^n2{le86YU)Qs
zMeT$MvacN(fFEiRK&++Ll=JwR5_4<!>D0rkHP*J3#w&}t3?hYkVN9iUll1195_4<!
z=~Rt58*1AMV+D~JxB*Y}wKxG)`K#`|?UVMc)~>F9jWw&Q#Mi31{u-BJqAU@8_|oxn
zka*=7=8Dm<K7ah-7Z&sgUFpnKSwm8E;_tu6$vEl4@|tmjZh2xg7in0+;4Aqy%`1}2
z#j3j)b_Hr5f?Fy#X?2*$M>DJhCrVNh)tlFol)T2RoD{Tb%6-4wM>xLVi94C&?|s{w
ze+QIH-~9D#&>QvXyEZ;@^RV0rxz9+FIOG}0G9mOP-wzX=^pnDlbwAhJfLR|GW74dU
zMb$Xj)>89?X5@WhD9-WlD+a!vm9Y)z0?`&K@fIR+1D8xA6Y|kWE0z+X?JS86)YzTP
zt{itGoU&kX^7_|ay0+GoMQjUBTFxe@f_L8%S-uhhVN5_?dUxApd(mnR&q=(*_zGma
z(eHQhZ^wy!LMt|Q+tRrs9qu)jZVeipars`8Xr>o;CN?HLCEAf7VG(}El0Ln?F@K;b
z8w;Dmx80OCAQg-SjW1{52`*&I5SXsU!WMN^5+@KVQ0Bi88yWM=wUIFopRBW<ZIYb`
z+sWuE$-x6XfdWX;c5o#kN5IS3#0N`NROuFc$=08lL)j`(;Tl~tj(h#WZtxk-FP?*j
z=T1H*RG~OOu{)X@5Wx$35YHFw$3TeZ3iAUZB)m^qOUQDuqD}%7`$N@keF`Xmz7D*w
z<hHOBZ(-T(;c~o(%Wi%5z0-kZXs_MteUj#v;hch!8hN6TYt*oQRvY$eVy|6LyHMC#
zvB=|-D=dfpO=|z(c)O~O(R{o!0_2~d4^@9iCh1pQpmTC|K<oA10ONST^EvS+<KBJe
zP9VnWFgh!0%Il9fO*y6XE(FC`uK~?LqgnioGm94S%0{A8#bQx8VH&H5nKM?eU{HEJ
zK(cxl*D(n+v1Wn!)~*grep+2LzVms(+w*lT6uUquf@3vB(-W^y=DBhEAsRrZD<+&_
z)hj4rE4)5)M8awWAjPn7%5g|RzZ6@HI)$volO8-j!I)x%QsdC=j7MnA05Nt|8ioX2
z8VjHt!Jv&x;LN~&H8s9|455Y<!Qh&~X?@fl`a{?D`oe^SOiAD&TW-x=vh2f1;xAfS
zi--~xeS-EZS~(V+kjH9pYufW~9B&(nO`yl)qAoW8CBDG;kN?V9RYtjmNkP_~rL@M5
zy&vHDM+}dJzt3WNEWb=1Iv!VCQF*^$g0JV>?jUIN&bqLnH7S|~;h%Dq$i@t?v9~g=
z^$6=gj<BYkNx8(jGs9j>)@FNrLoTH6)-{(vA1nLN_hIpxFrHK7wrHCW=e)U*apKK8
zUREQPeN#wB3^X5YL`$#;>^pbDScKJEKZ|1Dt-G_x!kZ%i9m|--br5<`ABqq+hu*Q_
z5Vz^xDf!(}$Earpem)M}Jq+oy2fjwcIfXHnn%k2-?Lq1|k5a?vSgxAA#)v~<%G|hC
zeB57{?Bk7BwXN+p$FFhccP8V-+gDZ9sb97_umBKkTmy)+aUL`?Ik8QHIcj|RU?6^E
z3X2%7VdtMbv2$Psblz=hqQ;@}qIbPz<CNhzAmAJzRTziaP><9Iw~@BKi?tPBQaHVH
z><`)-4OP71Ff(H_2-JV$#{@T6L)=98XwYqr<)R4;^!^#8=7!i&;fRHT8PS^XM|qpb
zBjug&N%=K~SGr}V@=Ld{nPB$#*5ss}KALe3CN6XcPWN7zR+QE~nESnMfxP(8a+yLD
z34*2pUnkt4h~)F4#9K~rEKkTEz}bC=J?IRQQ_5{>yuti5cvO(#&DLl-u^gH9JLSfr
zL5rZcLXC?YcTzik9tH(+S40O*dL0(YD~-MB#MPV(zsS$5-fRXen`__C1_kibwBHTF
zAq6!ETTRX!;dv!P*FtvrRD0y38IDfasW@amWY_?r5dfneO;GtZ5UO0cz>dypzbApM
zaDj24t9k9`4Cf7&(qKdv=pc(;u^5+uASfOl%e;C+Hc<#j|F`D1)u?wdlEi5X=y_UG
z&?O3N($kHJxUK5$b%nUpo1k27OfC+|6?3qK|NCB7nIs%!a%8<j8nEO_@xf$r5noL4
zH0E$(N#ZTh&g_gi<UkeepgWa8OZblt^lYei%J1w>{q<`S3xbJ))2%Pi7)jj;ebEZT
ze!UT77eX5GlPA!zLIJu(q9Ha3y-9Fj1Nb-tP|$z;M3s%ON41|bGbXi5eHih!%_9P!
z2p9cH{#FSsPW%y86VmRYpke+^gs6o+S|74H{y7R@{<E@33NWcbA9x9nMNWcl{(RD}
zrdnMfkq(nyA617GzXaf%jr4lQc93$#%$cg5Hmg7=fI8q?jX|qFq_8rSOoo%q(Aj-7
z?2pKuvEHX{Q+znWeW*&v-Flzk{4CLF%n6?;tp@-G7p*Yhcqm9E&iJ#QPL*E}*IGE~
zGfAB`t*ZF4(>@uUepp>uSz5(i+L9Sj8JjL3QNH1@d>ow*=Z|Sqo6)vAy^zAjn4pt`
z;Gd(`K>OGiOvote3*fQCor1+GT3IV{%Rxf?DuH|*f7|+ntI-`bizjFsaMH()Na(<I
zWjnkHJNz(!ZTqOOakRO!ld-wcxa{;0IHIgXcEOc0bR@twj)LJ^c!1nq0Fr}nZq2;j
z=AB%0Yo?HQ)XlpRn^PD0KplvN;71{N+=M#R?9pC_>t!8f?Pm5ABu1WKqqpRr*6oK%
zud71YJsV(U*%L8>!4Q6TXvwWpI3QkoDWe)P*fyQhjY3IYz$9_;UQEgc8sYKA@!O*s
zMM=bcDkZcC<?$#m|9fekWCzC&zZiGJFB&g=Ixb265tPE`Lc92xABbx{yvRslwC_e5
zn`@7%TH5mCym}EfxR!~i;6_v6S2!+;RC+`^9iiqGPys&LME6|AP%h_`hxeN-VC_Oz
z_D7u)GtD`TN)G1(3+)t_*zkPN8=XnomMA_cEZzPVE2eOPINKA4+MFHM!QZN`n2aLY
zHzoT5PAf@T#aI*@q{c?O2Ly3y)&eibmxI9IfJ~HxgftXK#b`HE(qHke8`<weM}5U@
zv$6_`$y^I{pvb{dU1*$oK#qorgK=f(K&VNIc5$t{D(F7qwlh=w=UDvj9_%ueI|}?Q
zpULsROHWsymLvIpfzdzY|Gk$FizfKl>GXLYU||AxeGmymXuzH26DkQp1t-9EQ)l|c
z&y&J6K2LP5uPXY1g}=Rj9D7WK$Jd(h@oT|$KH-C^kXS7mYOU4%Q2rRN!7rg623fMT
zZiWpt!|49|I)%Ubg@2i?@J*-EAgVMGQRR=Zshl#3SP#FdqvNfe{o2c&-R=E@XnA;d
z@b=ieybH4NKbx<2cDE+ix;75~i~#!2MqSvNn#J?y&NFy$((ASThp;i61zrEyX`?%g
z6brAy#fy4t=s$(MsN0J@!Uy`)i6VM))UP#z&)reGO~ENyn5)SVfKLPHJPkWjHnRvN
z7qKYpfw1>({d#DW-=|F$6Hyumm=%&OEiir2ulLIGn%OrUL4b~Hs)LCa;gm#t0azj8
z@uGf3p<RZ}R(BEpg;)3o2ISmja&K$hUJYQLfRu#6t6Du8*Mc4qMrZ@r&(@J(LADfv
z75x~L#O9kd8a3e>i|5tL+Sc1QFSqi3Ejg@;tZZtsyN`@)!5O|c{?r0BqHB4=gs}!f
zx*|9zixv1Q9iv`ug?PnQS2i9a#l++*<xe1dQN*L8-M5E(DjYNxSq-$<tiy2y?{ta{
z_a2xv5YkZAIR*09cB|8(bNp3^s+Vl-XtQ<zVk9Z{9c2IwUW(&@_2fQzl5<DIk`A53
z4>0OpU7fU=a2RZeS#H18mjk(baPHTK=hf6H(4WGuND#qg&=|3X8Hx`^T`D*P3P8qJ
z)#!{KxACk-C77c4SPtmAPBkDwVIYTkQt>oIB48dyK-V-}T5sew+#9n5D1;f!2^m>8
zYAg<q&*BSuEb`O#62wqo7$C~lcp;eCG8+OK^4JZag2#7hk<(z4g${Km%MHE*fj()e
z%83{zC98pJpEl(@g;tVl_sOIe#h9!)UbmVP8frSR`m}DA+A#V%CM-utpc9b$l%)e+
zokqV~X95eVsI~2ID3EY82v}PR)cagj^2orlv}z^09!8!;%NUf&uJEYPAgux3x9C8?
zKz+P6N$Z2;3uks+x7%c+jF(%r&E13TciWp-zIO^^1(JXR=IAo#t0t4!*kfm>Cp@Q8
zMk`4y*Lk1!wrYw@<vy?#BX|_Z#H(s>gc@ecm8X`&P3RWO7!&%%1@gX}pnGPYBzMh$
zKCW+OA>gUg-PR&}J_&?SPCZ)M=kLgD+OkymU%vf`K-AZv64eS$vy}UfkD@Vjucc#D
zEAD7!=_?G_;gC91arl6F#{`cxWku04H!9GVxE-7U7v}(tVO62b;P5ogaLswYELS64
zBNxEo1wzWDJThl;Mf$9xF+Wq4l~uz7nZ=E|Ve1U__{7+-#o8$bg+zc^!;K8@;&`BW
z3AR}y8AeAd@gOxg6d7%#3<hDtN^~?`@&t<EHz<V-=G9}}NQrOFWb4wUk*@&-Nz?0U
zduRXH$~jIceMRT>l>Yj+P`~cHJlc7+zp=Z$mF1?jri;h$>+$yC9+c28*BpjLW?nL#
zjAZ0K=RhwQfRwowfq6{He7RBwcHaiauEi<i0(>2$-W(k5yi?(M0GsGVZ`8)?3bedH
zI?$XBwyU#2y~9<SC|QswK@m%F4(e^otO#~tU6m_8#74dSZs&O1nDHGek&=eJ+3S7|
zpi?B<U|Jb747v`&h4{HIMB+0}hL8PuD))~rrNpKS);Pz-B-K?qrJuXS98s0RWX-7E
znf{6De?p8TQ6CiWMzhiD4VrhL3Ywz-SzcZ)Mf5*US03~~_w$M5e*=+#@+kFz+BoV6
z5dpGf$qq+aWn{2*brjT(5P6(7>CZ|9$9G6byqE<p`W!^&Q7Gx*)F$4m18b#+Rkg1!
z)iV6|L!S4OeY8C$)i#OE^)t?8Ag2?#b)vd8!U`d%LSH$;a4GAVp_nzK7P<|wlp>+R
zEyVEf8V}^`mV1rrwlqX{Y}w4;Idh`O1PM0-8bJkmeF76X#7lL)mDdW1CysLB2{OS7
z>NhHm9%c!Z7?Wz0u~gZqu{^Ct2}_ln8Y@$35PC|5lGds;%7qe-Wb(k5W*qoTNSLX!
zGa*NRU%d{b7}9=lzCXPNq$FgT?-fSuqR158M&UW2(3l+!8^udp4)pjeaE-y7qGdAR
zYte7AHCw_5x>6L_596RXWme!kKBdu$Q~YVXxV<Ia5l;>w4YpoXyi$2}W%5${1h;;G
zc_m|(V3hFWi#!S2y`dx5;)49<FqMkSRLT|nXLSr)VLGMwvTD$~xE_nC{Glwyr-eBf
zRb+b1To-IHJfAG8ibcOiJ12_XS;#nKd$JasFT?USWb?$Gg7_S`LExS0*@{C8xnv+^
zO<o#Od_M8e5%$`jgZTE>ayLCf9nW2S(pt@m2vWQO(c@WW?gsOgU8uLudLKUK#=%z$
za*;ccFmmh^llri^@nUyp|5a^scjM@Yd{1KATs~Hgd{8KzaqLwk2}C7cZb^fHX)&51
z9GTD@+If9Uu!R^ohTD2_Ho3U0O)>~@pyZr&EZB3~$N`yR`P?0f*?-wjRQ@-<5jBtq
z+u@yD1W%U#D@#vHm8kr`Tv>XM|L^0&^8Z_y>(=nf*hRrHKwN2+YhXr4aO;3$5{~^f
zM1OG>l+ef4n9CV~&FdtZB@C&A^2N_#aOMY7G-`h|FdD#5F|o42_=?WXO=@rn4P0x5
zHEoy`Vfv8}!tiBk9NGm5Gdez=8f7_i!nxuVNpR+d@Z?aXI;1WBVTVp_iq21*FoSMB
zuYtvs8+}i=Vq%z4wl&3X;9tK5dQkU$mSe!pl?TTfH}0RCZ{998dV^r{lp^79G<*mB
zbiFf(%(afD2|H1%!^YE|4KG5nF*hwNsN0H!+${T4Qcj=}>Y!YcM0%G*91=Jk{*9A{
zOE_jOkaBs0b)q@r+|WOIn%v1qq*J8D*`|areHILBr-LBisP=&6elWl}ao9th?2xIt
zPK#j!*;BQ&BR|mtm|wNvgU&iW89FMu{B;pztBVsc<+n>pAh94aq4&mYdujo`lK9d6
z3{a>qqqb8(Tjc6XBr0bqS_aImliBH8xsVAl^~PWXW3RJbG(<<E5!&d}#f_N!*O~C3
z4B~*4iY6xkbcoyyPTd&y95`R+#b7$1**4x5LK_tD|2rM<pU#u|nuAMF)&MS{(*}4Y
ze1aho&9Qb9PvXoTxIrk_Y}N+#?pct1jJ?e3j&Zt*$WO^y!n#s1a@!OWS4~&E9Q7<@
z*x1mL2~2&KJl5&CPXiI-z%*9_SU7n(OXTdaZfV@%wdC3%f%{yRf{*n<0pDTnNC3*$
z4mu!HGMuEv?~~z}3@Dm_4Sa&Eg9?_k?2Lp7hjAttl9z8rMu<P#0=c)y=<U-fSOeZl
zSQB)av#{Zvp~D$qeG`W^I4rLw<8WZ|Y2J{5&b05E9&ocxmj%~1V8S=ts#DP}xRVim
zH)cw-qm?r&c0Nq3NoSN(r^APMpEF`w{!=F<Nr2*L9SQw&XDs10O`1qO9L9CJ%;Vxp
z2Mnfp&7Su<Jq&Zu8-+QFJW3YnW=4@&{q4nAR%L+Q`ILHst7T)6aL2cr$Jqr`-gMa8
z!f78kr}z6ispG|UrdgK#s?~MS&1_W0X)rIFR%Dzpu(D6-3v};I;+l4X%33Z<t@6Xz
z5>A}zv76E#HrL*k^~cY&!9BP8#6`XAVjj{b?QFDN;ouyMYteuFPPV%2<J~~H2eeb$
zyh$)(?h)&@*(V9~j@AO>Cc@OzoH2>^LyCMtGZBQtRtM!=6tEpZgYNRgVH5VZnN|dY
z{e!1wT*T&1-CZtrtN4@F4Ogk_=XG7aj{o7)x>Gon)ZHgsC~1-tiaV!&C!p%$#+#ij
zm}avl?o8YhDbCc%6-p>A)&v{{6ou=Q6gqv(%GnsT>jTe$_vA|bERjFQMwxOWizc*{
zL^E6%{}p9F9jl^2TZx{;7>EG1KuW)Az&?b3ZCKLp)@@n9G?rPHA~*pKR^eHkd+C1q
z2*ESU^{_G{I+AhWA9Kv*M<O!B@zOud*ij~7UmtEvoUy!tI0hk@zO#c`49#}fDc6ji
z-Uq{AK;cIS5i`&Rhxm*&GnqQr?R64B_-D<_SL^iDii_+&&WS5c=u7N9UHg)9rmoO0
z7)VXRJ4zk8w5+T7MyENC73N=T9PRAR>qM8t3a}%Z>`XXH3>ZWIz$^IIS31!H+Y!70
z<fMeS4Ctka00;BzJ<!qTOmLI5af7G$d45@nhL@o}y9tS9W*K>&mSzG)$V#~mT})XZ
zV`$=<Yelk5C#{!>#AQODIO`LkI_tyZAL^$#PKYn%RCNsYP-7+o6&?dm2yw)`VLA62
zt-7({^l%oDkvvc{;O1HgF>A@ki8meNP=mF8Vj87UR()Ry2OJIo#1L!@*^8A(oXEUk
zGE&mAx<O>Y>j+yOw|kDLfpW%4(C%I2fGSUs3dM8ZrP0<f4BDq@S@XklIGwaOHU)<-
zY8KVesBs<%W6`4PZq^yjqZ~A!my}ba_(pJLuTy9vI_h?Tq2y6Z9;=@xZTo!D<(m=7
zD~7SV;gqHoM{d&WlcNzl&4FyvMd_nzm@1=v!yRE)VQ?t-*~VZ{zY_0^lOW_6ENUSE
z{~W``i7<?Ithg|GitfaCZeUtFP3F%g43sTMJdL(#GJ|#cai_dlbh;5`-<Z=-Y|aeJ
zybi6q3}6yLT#rqPsndP4NsodxvrJ$R4ts-@Lrg9-4Ab10;5%ZS<b&&?%ZB18XtR0r
z&_$BWOBfmqj{|c+F*;G{O~E^V-8kIe*?(2V^^7FPO}J}$6ap=u1{WAUh^}{r=a?mZ
z5LgZ2$(83|s=JkU6C1c!$1M|VgTjmG*f|&H0R{^3V3EnUBZlgWI1GWp;Z2}_67fi#
zwLYWo7T~VvZy^igk$f%sxeoQDW~H84*%pt4DZ^Z6ppODJQeMOBrTFAqqdxA<*vqLK
za0}}b5CAnGMGP-s%em@vV7|ym+=cH&d5Z68l40F2A(A}F!9&ZTX$iX|q8&kM(OxzR
znW#dTzhHulwx+v)zKax~#%E2Om;>moTPQg)Gv8yL(mkE-M7=Q_Vd@TAI{)aMK-&TA
z@YxG=i;(4d!3ZLIY8`|t;$jtHysV4V(>(|{qB#A|t8AAn?Vx!bVw_%Zm=u9V6`kvK
zZ6n351QLFWBgSm>TO#m_T9Rsi=oZM7%g8qFt0zw+NW80`q<3TX1!Q8i{wO@>ct`jJ
zLp+ZB3EDw@j0i4RgFGRT467-QkO`DP>@Z8wp*W;4*@^s&DN<lXRZ||kEgUGdX5bEc
zeeK3eK+@W;rPpm=F~1|akwZ3eC~~jEGZHs?3UFZ-C@vC_j-@LtTS=%jUe(C&I(1}L
zU8GM>?jc9lBs^skHlzhU!6x#dgyN0S0N1u0o1@X?X~{zon8yWP=~!Oz6ZqGc3+RE#
zhvF$7oEThE*$;mY4|91NMT^Qz4`UR;2?sIqd*P<H(fvg}tp}7&X$t_tL=s(wwO*HX
zc%(8ELnpoCCQ;~?7>l4yp;a7C)P9&ouB$*4F$6UkHKsko`KQ<tC6^d;i#D#LubxaO
z6;V=NUjxZ5Yj=LzmbjiCJ9zZY&QTbK!fbf|KT<<Fy|Yxo>C-n?U8+mn-a<7N|A*op
zag?^&a<Oz52B4GeKg&;-S9ScKl~SovM*Gi7sr+F7c^@CP|HOdT7^GA}wS`v0j>k?4
zR)&GxvrSCG(CZV&ktp&y8sWVge+s3Nc!lnuYyrC9ba(!cyXfbw@uCo)>tOgz%$;!C
z95%E^gp`ZM)z8lJ^LhVJkG}jM9@y@p2mxmt6h|FZg5x<tP=Fb?P<$N?VIWW%cBJ}Q
z9iwtbC0|kss2!GiOdpH~Zz&&F718m0Bxf`fhgnI2ab`i9!5|@8)8JxmsaEy_KmP#<
z<NTnz(CNZ+9?`fIa)Pa-Os?Nzgt8C&7U^;(Q3_w#VyQc!m2<R?v*~Yrg0U~BP)$+&
zebggAIDS+vRtSs^j!qNHTCL0m)wbY7Ha3GdV}}M#K*&{pf4hD7K5HyGa|A{Q*2Uv|
zfZ^@<e8i^UAx8T!eb2k6Go$!Xb%rCC4@{6*|H77m7=0!xz~CuG<AiDrrc3B&YuKu{
zTVGfVkedrw9Rw2<CL>x?tU{pL7(k)ZA5zR<1OXkVPy~*}E)~ob)egG*L{VTz&jV&C
zq_Pms6x!B!h4BF8^x<H~nF&1IGMvQe2vw&Ca&oWRLMi(STY}R`NV--!A`urDs`)bC
z(lD@XR952Q5@zU%M~{}8z$--e*6NOFP_WNnvcz66AAullEY3K)Ow0lrGu0p<D}%c4
z$hf}IfTjoaq5m!y#~(vVMk1+*05LcO)N1dXiZShcD2iy{N?lG|D`O*vhEX5Mtl7Kh
zy6s&cmput$AZe<}mX|>bEfajgl-o+4brduS)E2gat&RH8wipgW@SDwB#XY=ewoXq2
zjOL*OB4Cc4)>#V`ZUjxR9iUzeL;Rne>tnCb2>?5#;vD4`u!d2Ek+dA*0pjOl?gbpT
zL#@*9_rkD+rY)?54xEJ*a^NIV0}U^F3}xEtp#_ek)ja?zYhWTFfzME8SU_4gFNI<x
z9h!aX_FGdl4P!z)c<$unRN!uz8MmnoyH1j`XyW^u=7Z+#&0UH3T}@04&AyRsH#;<~
z1pIPZ@3h)i@GY(L_AzIubxtU0A1w1vwa)pMKQ75E4WKA}95)T}I&lU}bSeXd4b{EW
zTBRy0k;)*ZzFpj>ll`J?$4;L!dj{Nd1yc3gD2FY3y@kfLf|j1^EY}+~Xk|?~51-*f
zN;794#<EOAQ(RL~N^xx~C1jU&5I}8xr+bi#U;rL57OXEORhW(mDuzm+d4Mqw$Ov=M
zG=Ux>nA2iE(2GG2nx8(Srw%UY@mOS3Tx3wu4^FkjWZOFL9+zVhcasB}pwDiBu~fQj
zsno2Eadhh9%oL7N?efQQO|96b%*vakDN{Jw)XKGzFvpn7wGxq2Mbyju0g1>7wqzc>
zRw{B^afU33iS@9HGxsey<#=>k*f7%5_zj{a*uLHQC6Y#*jxg~Q<bOu;a0eaHMB+(I
zer6hWkax>`;(s_L-@K~l2I?({8v$^sc4CSqQ8+sJD?gYtohJ9k#}vmXNMYt9_@3G0
zY&e{TjB|~_V~j$U#|?_FUh<e3MUt7}DTz))lwE{&rxc|fORckyBSSjxHsJv*7)iGS
zBgk`4FoDm({!zRuv>tV8ooI*hZae5%8EM9zCf#;0QBbTMH+xT{ner^nP@WC19ZE=<
z)DGSBN7|^&&<54>ns5?l%B;p4GBSIl#ifZYO2CL{QxjW>G_ahXMg@4y*rYwoHf3^f
z#xS?3&CIWhI%A*i4u}(YCQJ`2->yGBR^4fv;rySmMz$%r$#t9Hlqjebv<;XeV62TD
z<I;J}W^AMiuO&-p(UxvTU@^s?-&A5N*CsG(!5fCjlNxJ4;u|E5WaS!51si}V83#TB
zd3EX#<Ra2G@aFJfGkFAWv?3^J3{S5y25bU$hJ(ZLO+=voFm)WLrHTv!6WmT1gxT&Q
zZT&b?TP#r67WcNt%scx7-WFv5K4}#)@M|Lr4kCWf6Hz5Szd4@Fm9uSZcjJX5BaSt<
zTR#cfx5X2|PX>K$EVZ$g1#R;v$Kbn?{4LivwKA!JYp-yU@uI^>obbd0$;8PR+g^0u
zVTVsfo#Q5S7gEIjqy}yzK=}N9<IS5fBoMJKz6}Yyd9%B-xpBO65Fd@x%=HPu)^+5e
zy{+T{iF3(q7o_9~9|!*0i?YG>Yy)`q*xPt#8lY%qcuQK!>#n5Xko#v}*x&IdYX5=f
z?e^WKf0`EmcWL$MYApX#sq$d|aUUPH|KLn;YD8hcS`?V)K5aUh!6_!VtL+>eZ2tIT
z?dag`;pR4{f9nktY%%+<YX92X+0VoJ{O0Ge35QJ}-B=!*RyUG~rh$6qXl9#EYZ%7U
z*pM~zm^?Pxn2_&*RKZ3Gm#nd9a0+i+^Zc^Tl!;N@{mAq!y2PR@B{%g30Aqtsw*UTI
zZ##Oppxf*Z$;Q)0rU!O~*n)b)UOQ{NSxCWw)zLhXUSci+g8rDnNC0}&fMsp8`NNI<
zSKE|C9P7f9w>$@!XoW%o;dPFb|BfB`Owm#<_4O-uhOZAf?;iF8>u{r33_6jLf?Xt_
zvz8>v!(uW<gpC;%b3D3#jJlste}l=VNpq8s$fr!#W*V(am1MM%j?$pjddGMACNr(f
z3gc>sw%K35VgbCdxqJzqQ~11ft?H>8j0J||_lXypgubs|@ue_2CDrA=cNk#GK+N#c
z?$zn0mPi(B9dy;@539AMid@4&6&PC{i3y(rA=>BgL)SC(#ZAR=Azip8CayDPC-JSs
z&8o>taZPVUDIxYBA>VB4U|(FG%)LK{RU{Ap4PbNF3Qs9H^M7wG{N8+0%>Le7_=;cP
zFB^(?>i@t@i}XjEnb;tQT%^^dBKRj>1a03a4<lV!Q$h;7v?BKtX`<B_^sqksxk3F%
zUR{-i(aO+T7<&B#Gg1wJMlSogJO{unlsR5In~X*IN)pyR68Id#YS0NS76>&YBe@R|
z8D1$Iok?Q#IZW^_A({{uc4z~6!*{KaRyh79^7+hxaOhf(TP8N9+;VFQOJQkKLcT1h
za!EbG0RR!rQwCN_sm0wvj)996e<A}Z(y#2olP5`?iFRDNL`1MI<DJ|$TeLg9Z^e16
zWU^KZMC=-eQZo(8mjrNbcCOp+|6}ZbtySgio0yvab9q(ge}MPPt7!lGkpJ_ZJ|h1C
zLxANyaMD0<HexgkX2T_8cEnz&!dLEFQk<6c!IRVwp1tKPP&#@xR+3EVFiaevz9`k!
z&&1BjzP_45t7zjf^fMhVorkL5qm@E0*`0d8ujL0wN$3RG2*|veT}uGLp~>OSDS%U6
z?sWdwkN3sd-cxXDJt6zOR<}PIE=q=Wy3%nxQed>QoBvQ70#~7Kqg}2Ir72@h(J($7
zZ@j8)ZNE8wO()<rx2_C?6nyp_N>CE|O3Wgl-4vdiG16(u#6Th!MGP^r{_dD!ne^Jh
zI)|2oUu0*o{`QR|JjR;(`iLvqFsrVaa<9u~bzN<1^9_A{kT`1`8ZlzoWb?}dgT*ZD
z`oQ5{XTne;MqH<O+!r!Bw#M<Q-RpfC_4Ne8yjD?H;JRVrTw|UTmuC3J5k|zGxoj5*
z<zPP#8dxcd@&G3+IAuFXd>^uKfs|8CiMK2@s={+hodmL@cr1xYr?&iMH@a@Knle}K
z1;hl(NvET3!vxJ4b0aGUj?@A)=1_#DC7La48NQ==K`Ofdjpbakf&`-iG;1TIs=bZP
z!-FGie-Y7Zl0{M`x<%l`-2iqURVl6(mzLa9qH8TM803_`cc&kV+Wv-q;st+>TJzjm
zE{j*i27(0HEFjnRsTb2_?_zrGS~bfzthsdKn#+;irWg6d+V+M$^Wk$8<kq5<rXH;l
zD$Q^ViPpNY5SyAR#4=iRzH<jJvt-0XgJ}8!95|K)(~Mxo#B+wr2$gb{RLPARLk5Se
z7{lNc@VC4qp|$-W^Ph#7u?jSge<I})40e)jgYtrwg+cJo&)g_{YY1YsJ3?u(Tr4Xd
zlJzgL1JkBSVrem>S<+gxy0)OWO?|@PEdbEQb<_HMYkt$(<Y;=sx_AqDo4VJsCyPPA
z=!ZA2db_siV(u0b{(0uQ?usZcF3q$e<VO9b{c~<<ZVE!~NN$56-2ZNXCBzVgC?mDU
zbX-{rotuwUjBjo}TE`vkR!rbF5In1x+i;WH)YsRYGuL;G$*(o<*YS9WuaIk7!~Vxz
z|GD~~uz9lhhqOUTo|Tnx`XBs|{$~|cLFMKDDwUU3D=Yt{R{oIQL_Yt=^*@pM--Ck~
zW}Ls%Hm2zR$}2z{F#qM{(&`En#QWdM>cjo-y?nmEBh}sa6Et_<tK*HsSKG%&Y5aTe
z;{T=A71LX>o=(H>t(`;l)4D1y_D=p0G=^dN=y3B$RoB&{>;)W^8g<N)S8U|4tURwb
zoBaWh>Lq+<2)V`lqbyde78~ou9v=L^S8omvUL9`i9i=xnQ&8`$(MTU|?C<Wpz+afU
zmI`egzdlOu>~CT}5PY$?C<?{&>+OxL?ZcxKHOfVDixLRsD*Pv^D5B3L<t##exx4WS
z+NM7+=)#UEs|shT@GB~L3cr#XO-f;$<VV@9?U#U4_X%gqrY2cbf$D|EX`3`e(9+@C
z-R+}v@y+Xl{r6SX08yk1GZqf{8?=*7!#`DkBy#|@?Dr~T5!y`s{3yEt-NS#WdHDLX
zdi3Av)T1n84j*7$jrF-=PgT_%9HAd&1zS1w)tMHoy19uUHw9t=%05$tUIbD#o$7pQ
zwg#%ur#Gnn!k*!z=3H8^7+(PV!)y^3!|My8l2ik0VyDwI{3?y=a4)Av3B=S>3;(Nr
zQkb(s)r)Y7CuJMHfVBXB2Ejj^S{7#pr-EcC1SjO7`c4&^cFcI=H0aP!KHH~%Md2aB
z{~gM%>Dz!?TLVpr|FZnFO!hyOmGaZ4mGaVml`5sm@&o_Bmk%qR(3#`}Z{X=RqfWYE
zdmDu6=uLE{es{e1Zd=lQhktU`KjFTUGk)W>dcCQ7eRfSZ9`kTE9`yPHibm17DmJ>O
z9Fg*DG>}Y(AOpGi{ZP4up(F)T4I$O!MxWCJ?7ctw`>tBxvx*ZWQgmdjJ6v$9)bN>c
zb4q0vgJENldqdWY`~X^9Yyk2X_pYD;_q~%Vm7{3^IDi#zVvSXoYYP9h-mrN53B#;(
z?XY(6w=oXD3k*3HzOCEC7==yBburH!o9CDt87D%!Y+8c3EF&nd2w$~B*f46a+ikqv
zJ+8gn+1<`_U%f!>ID8yV7i5DPa49ys!e2T*$h+f>W~aqbD2R@G9NOY&>xDY+QAokX
zw9HTxS{9FkG-2KpV#qG)I0hNX8aFt^hb}NSuqGCoOf5ntPTn&~Bcns&VGLN0Y~%Dv
zj^ls}bZJcMXt&&Fx&_sZ<n2RfK^~H8;8~ADu@WjcMkG1w@d#uW2q@VWl`)f-0la0{
zELQ)UkvU*PT=<mqn^Ml8Druww`;Q!r&hJKr*T@smDTHDN%T?`O7-*1*QR0bL27J>O
z%`w6WwY=;vb!MBJyP9bh`BIKrsHUVF61)_o;V2n9c0pANk=w%v9IavNvu(Jm!VxWG
zDM0o^+!{wjance=kwRHfIBPcX;Ok~%Hblz<!xuMto&IRZ>GdSH*F+xo&rvYALT;AI
z9BuDzZyu|yougwA)Q;8e!7IE;!GM_ICyrv8Au7qdJUrO5VLZaT6C_4T{Q7$PaN9nn
z#1#^05WY&Gjr}d<1emEww1jyX!+d5<R(I{_c<(s15Dq&-^XSFS{#NRw)x`{EZ45cE
zh6GWg3(iz41Z1?^k*9<*erE1?0C_1w6Nl(crMiRO1&j!&hz;b>z6fBz))@@5raiJR
zmrG0YkO<m#sr0dUGD7<cJuvS0@%uO1H8jjjF3d}49kylKf^v^_@4T`wKr@7Oo%J(X
z0#P8)<X5y1+brlN9m9amkb*b{{rUhypLZ-1A%ZbXBCi~jn7E-_Khef4%?QTUE`tVx
z#(xapBzRoqIp?%DGlL;*$)e5{!MyiqqQIO1;nq%JJ3gl->^^Bg2yQhip$g1TsDc|X
zoT|k3H^A#z0E@p>lf_$5>LoJ#0As!BK-nh4dG_6L4vj@$Q62cXox3q_!LQ5{G%HYI
zw2UlFTHs#MyOMdQdt+WYJR~}x1nwar&o;tv)RDc8!Yh$+#60}RZO<z@VquhCj$~H2
z#)m6_gb?YhgnRA*AJ;E7j&?RN-zYH`j-IESNY~2}44p=P(ylkz<N}_%0FLr#)M?_2
z!CO2sf{Y3~+`Ihy{BJp+6^`<PVg7V;dDSX{#L2OWLAsLKPHNHMpQC!4!xT$ew^Iw+
z0a?@|-u9<1G&^m--r|HM65PFKXSs3S>d!QCRMJ=6T47!al@`ka0IPa*=oH5V&*dwN
zOR<syz%5@ca=>O^4yt3I<$9<?*)zlo=(p}d8zs~$2RJN&_kgz!95nzyUY!Ts25sds
z*7JJ%6d+3d4h9QInm3AwZ)<VG5>QR=^*j%oPAxI^rZhIX%faxXH~7SZgz+^4Jbryf
zJ0cykN*D*RIoyAOdbdVAPP0-xEXba_dGL1sIBRy^%Nlh$udei$FZ>0AXK>jC-i&!G
z_P1Zzd<BTwIa2!v$J~2*&D-wP<kw%~ui<UZk*J7W(6LP<WnfV?65|W%-~<EKX@Mpp
z1e0N+@UxqXR*IVTvlXzz1`Wmc*pPc7?k!f414X3!*hB)uOQ=0;+1QVpz>(@e(7^1w
zyo;f+mhb>WMZyCW+8e-;S!j=`aENAM7hO$>HW{}UudtvO3Pr2u*h!KTW`5M42dckf
z?uaMxQ?fU7+83%!68)4W!@Z4nSs|NTo`f`#3^F1i;Onc&ufI%w-Kd|`SQhSeCzXNC
z9F7d}_mOqtg3CARXF<-bLrcT1jf^&>q;aE&G3o#rk=Nkn!EWsxT1w{iulL@s-@IR&
z8!x=yhX=p<?_X~m9qk<H-*5N!Yt(T%_D99n4{nK%$r3LKrqE#`(pZI<)H|sF%fQhR
z)IbXZf6DHUlFzVZ1H_k&v<sx4({dS>7=N7Bun<b3XkpK6l=<3}e>UcH-85%7Em!GW
z5)pYPV^5X<LXMM;2p^T+b0)}sOS}e%&srSelP6EOQ#5I<>9T#KU@9io%XGFrx`ky9
zrSxMxrG6p8i&KOrmCAg(f%u;H#yU=jx9D^g4J0z-9tQ_Dk+Q#j9V34yPAa`;`)-_p
z`MGeu>Z=o^%WiuFf?>Kd&9b{-Vc04>2L{nbX1uN(PTz0W8)Rjfb(fT0)gMI@0u6hw
zWs!fhwC{jlS5SL*Xa800?f&2P4}RUBLchA=u-EUkduLafal=4+>O(l$EJ6+V=e!<<
ztxyCnkS0NVd$;(0@i$3X(u9pA^kU1FNF7SJ5RoOdZdbYbiG<(UyY1Sm?Y*7-9aWVF
z7-?kV=y>CBjVl|`E4A*Ug}CNN00jHh+oRgj7J%Tdm8~^131PIFMmDMyq!=FHqQx}Q
zonnH67(nK^AEne~ty8~D?yHozN-FW*E%7C(#BWk!{E^Wl!9^x&X0tlJ0+fNbnCJ<J
zY8DL^bgR{Yjf^EAG>_?Oua0ZVQa@i-G0(3~l&Ar}a|8z}c9mG{eYDt@#A3fiiWvcJ
zFbcHHM!x<Lp^fZwY3D$#EH7X~hY_`w*!5n@-z)k1WxV{Rccml|&}>W-wmIrCmRPt%
z_M&MHJu9!|!IggSMIL;S2bw}iZF3WUu*m)29pC)%XWISWa)s<aDy7QGQhBL_{$Eh?
z;r{PFKFkq*9bkC8f$<CBJ3o}zko&iLaH1}>G4})Pow!yEf&4}80JSEK+-HC}2yZ08
zTp}zN)GzW<@pG$15%H$UCn4iIgD|S8ac@bMfY<a_4S{#EKNq?4klL|oeS6$n0y->M
z?&UBqFGm#dtnuT?TH#(Tu-enTZ)8a)ol4m(h+4_p?sc`-5$l@C^V@x!avL(|h{Nrp
z6s9G@JFHGHtdpR%?g~?pmsax$qQ^@T&bUMY!}((=@63(T1v#8Yz$6MlJy8qjDT#wa
z0{%TS7yh2Pwh-v^WfB2l1e@nU;}a|AVid$gz^7t|<}`NjI_0z#w3}fzIyjL5y=vdB
zd-LOt-%!+Cd!%W}87fHOqoD%n58pducEB0I-EESF&3#4bL1r?kvty9+5^B+EHLHc|
zDnluVoLO3k?P(^8fO80e)x4u~Kd>%R-op8f=fY6NIK2=*ahGzWq7E-WKKyhlVK+>?
zRe+{CRb|sm3}AVkP_l{7^+5}-|3bWv*9qcssE^@h2u7E)3|CX_pnEo?sPz_ZXq$m4
zs;2N*s53kA1t*a{>w$?38Hrdgm0DkaP6-T2bu}-CzWK#@qMv!iRLF@N$>$CrcF>Fo
z>zd@7RGA02NENMm35^E&LmVNGgcx1Q@;5`EnmDu3F~SQc;6Va=m8o&AHWM{*E(7!q
zs1b24nJp@-JW&SqA3o+Id{SmAxeAOVp@!k93F;T0%@S<QWtk5!4eCti7{$Omp-3ok
z<p?Z3XDL`DX7O@c@*CprG9pIBk;8L*mXp)H!wJdO!B?huhxa5_Y?AlMv{`PH5nki+
z4asEpEq?TuIrQ)FU=0HOFj{iS$9}!??a*(?_UX{9Dao@R0381k3|gn0a@MSC;s1zA
zXDWS+Ixozzj&2#5u#nfw*YSMb$ue(|JBHJ1g4N@7+MhB=+IuGm9NxSzmFLC*^vCF5
zqw3XG?+8e<{F3JvUpqKw9awmq{e3bHqFt4p%IPa|u<>0LpZszCRE7t+CN9_Prz|Gs
zDrvt+HJxIUrfF&gM=Kzq5_-ZyWy~0J<OQ1&prNvUh*_y+mlZRsM!TiO;`#t>b|t<7
zUC?0KW#Y?Tm>cK5IWOXIt#?}E>-lvZ1;>+!TeK%f`#7^8EcXJ}<KdmF+6W)<W9%L4
zxK&FNxyv7sVN+e#KET>5M5v3$;*eJ|QE^sd#<vJnx0h`x*3CD^hQg@=r(EJaG#NhA
z(xwqv7caR&+3PrWRk#$lhXRbu_N{v?!E>e<W<+)HG%fH|>)vTRh61!EoC2A}p5_!+
z(F2rOkW6}AEr!iCQD*JpUf49F)Z=VT9HT!<O0TRpo}(c6q)zH|^iU_k8MJ3Ip#?)n
ztR}#8bs9ny;a4SBq89qKD_$>6Sm)a;;*u<_Hvw4Y6OYD|PsWjhu@z|RbW!e!Y&ln$
z9Q(f6<27RgVMLRDGk=OOCDH#GJ~SJ{xGuYpC;+;TCPvn{%MEYI*!<j5<_?dp*a_V$
z4!RO!p^0WwdXv{#Hlo_fF~>AxjAe-`FOxJIZAA&i-#;{UCBB<d)uXT5Z%eohSktpV
zhN=u1eP{YRX|hE99gAva<b~@^eQaKNO&8P~p1Qmy*StTsMp`StK{$+_$ZvcgkDSEi
zgz2A<bz-{M(b!DCa~_Q=<H%a5FzglhGVwNNQj4&ePNXVE8q&Mjj+jhzv)<JTXgkl2
zECVcJj6r3jmc3mcD{00~eZJ%xp)Sw1$x<27mgb8Q(t5#Fl+~@wLRiiznR1v?@=%|;
z5*Y#+MN*h*<^;6ZBFw-y(3&>$&McB02h%HC7CW(HQuS_l5eyPcG?I)m5=}ph*#bA=
z8ZPjY93Ks+ec#|X<ryU$@9e)%D4B9B5=?JxA#IIdEfKueY=@9?j1U~_VP_>p2wIP2
z7JK2T*Q~nNcM(?c-{zz5|H9@;4Xtd$KOh9ajQ;=R|6N(d_>ap=D@zal|M&5U&p*q}
zI_B10ki)g+zf|J*pOuICzmLz<`M-mR5L4nml%6gxnfWi{`mdBr%MbVe_wu=q_z!7p
zq|<L<fI^B`klx%>Y;#Bf8TRDIprZ<(Pj%RXsZkGXcmywy0!CK&z5pkJI;NCP?{4kx
zyqEwD4PPjN!b?;uHG_Tt-S0Lq3Bu9gri#a)*m(1Xp2Ay<Gy!#W)CKGW2L+%|3%H!p
zsWkq<C<aguB$DFdqh}cN0DnnrgPaYJKye?OKnahY!4DomPRC1-m<Gy(Cs_Djif6!4
z3@D%hhd<yJ)03r{sp}stru;|OwhEfI{wpi!|G!*XT84H16rNY$+tS1OzmJcNoD9c^
zvv%(UV}3`IY4rR@Vs`VK6D@S=t!|bgd=AbUCMG2O_)LeJ{s7*cW;5izE=oBaJsQ<p
zzjrfQqvenqT5@D_uJZDZ;+;Qy{7=IAHzt;qZmW5Z@*l_lV)?JK^7KLeyN}P*`M(GG
zud-V5<9}63Ps<PT-@Sb9L;fRasDb8lxMT1pQY<c>j6kj`cHkks)3K?xk3~#UKpLu4
zE~zr~rc$Y>rKP2Z9pH{XQ`f)$^F|ioQ`Y}#X^G^&r>o`Fm1Qhf0RiwK{?omD7R7#>
zvzWaryf3&;7QRz#SIs};pnK2f8u&kq_JeOZ?b-g$mT#r~%(K!SlX^{}`oBInI^Nwm
zI<CFgINDZAt7&cZLBU!vhEKZ{4kgk#G*=t;I3+WQo_;uX{MKxB_=<!K0n{3t^zac~
z;;rFO7T5hYQCkePKVT{JWQ8JgehHIW%u2kDy!+XWXz=dcJN0ts-QISU_~2=;-R@nW
zF)187n=Q7mZ|L+5C&3U6n?k(oK-&RvJvW~brEK6!5o2Cjjg~4XSe~lc3)qi0xCC^u
z6~CR$!gIBC@aulOs*Rk2SRaMz*>m;;wq>SQCI3Z5Rdw8z$yLd6u+~BMX8vH+llri6
zt{Zl$j`27)j_+QC7&p4ixR5g1t9@Pl<cE>D8u#K$@{1S-oWL{5<pjy=jAN%r3?u6r
zArYTrv8*Grls6lPHFg=sZ|}{w-}JYQJ@&_>6oI&LcY`+PO-YD>Lw2xECF9h~O-5c#
zc7z6w5OZCgsS^}cChA)3McSdLM6KqvoR*V4mZ|MjYC%Vb8tXPaXiyNJJllSmC}FX9
zV)`yB9wrPraET7kvDjCbh-iEWvrH};I0z>x7C4AaC=>jTohggDAM<q2>Vq69opQa9
zg*=Sw@a%DpuH!DvlwrM_lRcYFDIJvvN_n2mK9wPzPPp)NBP<aDOcf_~QYs;KQUtVn
zLu$x(CLe2O&Yiim?!^w0uj_vuMSi^jjJfO>%1P=IOiB0TN&RzPE&LPfG2jpg`83-H
zik@pK`hP}Djzj$y9d4cP6*AFtMj_^SXzA9kH-duM2E`xQNe!C%rc9J3F%0jI0Y`Qc
z9GOXyqCzJh|1%%i^JyfU$`Hz8e%7yFl_WCdjHf&Mu}2&4*f3w3N?p_k-OFqS{ms>R
zJyaJ1Z2aB(-&D|T^kC~f1EipUz?OdKz|rawsj#76YLavVEnAGb<6N`&pl-Om=msq^
zk3)&=oqDf+gGZE63?^s`bH)UPxu(c&#f)PeyMPnyl2{C1kpX<uq%b`Omq(e}s4V8G
zF5p@=5~WrkD)kO}_#XcI4Z8{p>-R2mMpI;N2}qFtGj#Z&^wE4R+pm?6In&q_Q&QW5
zxz!P=D`oW(I&cR3ZsO-B?i(9xH#o$Y>%kpq*0D#R{E$4C%)Rg(r8(6`E7rDjtRWyf
zf+t2bv~kE9AjWLJ+c~bi+}PQD3ldS4e;&Qv+}u7odeFE2U--n&{~UcugkX;Nb4w-g
zwe`Q1(h|IW(Er}UXSVsjof7yu`rk@v1@Hf#K7IP2|GAIPgZ}sb_Gh;BPXbTEi`mq{
zQ}w^gM*q7E^9<{MWu@|P|9>x^KT`iYHd4Q%|NT!>0MD$W%xk@0%GVzzJ!gHPedDMJ
z??KZU(enM<sUAs-V}2YRZyeiS8@sy)n}58HlIQ_W&NliNHH^_=rGKa<`S^iyt3wwL
zF#sH<IKEy~#zY0@Od8Gy73YJB^N&+;n&E3|CoJ$!S_qE5(HopQWy+s4UL-Me*btdO
zwh|W8S!QwkNfY7~wyt7Jkg@rorG3!SK4@ugqNOz#zO>E32Y2ANNBZgB!PfSDC~_J9
zt|}KApDUL_HTNU@S1yOHYL{27Fc555zLxD*YS^FMWF0V)&x0QL;q#aLB%J?sbl94X
zDSTTS;2Y?FE2|IwfA{j4Y5s3%13Xp#yR;m!|1D!m$Orw;J$(Mc`rn6L=N>*Yt$+Q6
zgIi+=@N1a?Pg(y<%jF8L|CQBBWwlKFf4RK;u>SAm6U%8RQ|-sn)p=8$i0^SdZ@t|Q
zlM=Gdkf~Kvy>566No+u8|14?x+$U<-kbvxCD>m8~gwEtw{h;$IZ0>Fx9hE+!f*`Yr
zWmE~JX#aiLeP52huek3k@%Kya`z7za<aMBIIr3fSe4ryzMsm7}cpe?ju<?=f&<P_4
zq&6IUnV+gY9EU7Z#^NcGwt=(yaDp=$ho9$x$H_TUB0YCt*M}cxQJ#-WkK{#B`BOBL
zGe-GO2=X6mRzg?DlTOhpvnqTFvYT;I2XOui%1oGZ&dSFobIn|+@Pyb73Q!Oh^N^l!
zGP2*hP8>0BDY^Maa(O#zStr#8DKW*;DY0GC5U0eO8>vC>Vvb@(!guM;TsRlrwj)km
zo$4cRzH!#ZpHwCTXV{DzIZbKw<&}rX@r8AwNGzd(dgGjm(SbgaWe9U^;Dne&4L~Xt
zV5;^v=N!@54pn7KW*>v@*9Vlq4v3?yF4cSZ?>9w@7Es5fvRxoho3bv)iRCo<DcBW~
zEFxaAd+B_H?~avlH~~eI^AFhaDDR;pKv^>>5?4BU7P(D58K%UlEzoX#3NBh<kkeV}
zG+pcrNxxPnE!HtL%T#cw|IswyrGvrgFqvO|QcHmCiY|1iv6%Ofnzy+yD66bZrV>r%
zYjdI)_Vl3Rz)xKM3oeJ40bb|*y#ph#>!1J2%a!Ga^Z$K(t}*{N)&fjB|F6W(|4XIS
z2mil&`TRA{|E<$*@K2T1AzLnoC>GU8M5{c96YA$hybjIfVcTuCPES=Kyi$ciIO_KY
zAUqWsy-ugsEwnL3je3?PW3sObgsPz`baV8CVoe}%hxSn0Z|Bl1<dFIy=mKnt3<J_w
zKNygthRI@k{b8%q3Q1~9!`Hx5UEl-tZ==7s#`+)kVyZG=n*CRK`6-?ME5He!lK=1O
z>O=mg`}y$sf19T^mTGoN)}`5;VW%WO=6up9GM)rM7gJi}!H1I{tDP=q=0piMk6~l3
z6bhd0D^+iYy|_!|ckEMnU58O?TID5zRgyv0eSL=EjN3ho@7o{Q7h@AGU&}%(nnHPM
zj^&A2me;13NF8i#*Bmpc&5ajsB3kG9E603l_u$w`PrLKR{BSbf+HALzGS|i}lCG1=
zcC4+{U8T4kV@%~nzE3$B0iwoJca<a-7g36WTa%$m70;;kW)t^0JXdLlaMWU43|eHe
ziTTr=Ds+ghLN%k=#mvvG#<^3ORBAmnq7yz;rK4~`VXW7k{bQxppGQN$dgYGxHg<Qh
zpkMCj^}*q>SCD{_WApQ<{gGGh#m*}(8ZQ?s`*QbS<2VkJ9(x714&J`l-HsLH$6m=_
zHV!vmZyY+$l)?uIU1v)nujKLeJ8Af5$9xKBK*y~&hzHiD-}`gM_DEGsa6hWIiF!yQ
z01Q)#prOdg73Jfmic*8<gDX*z0Df${gG5h-D9NkNWk1xZC7?sdJES~kY{1f#jI)mt
zRKGVw;Sb<TwgZ%cQp|Awaw&}?`HUA)ZxX1})&Tbf4iTOe)}a=?Eg`3sE5RjZywVx!
zG<VY6Q;$Hq>&MOC5~?^<p|nvQb*^PIIn5K&=Czg*+h=Y$pngo$fD){B9cJfT;zp1{
zCLz_9lLl&hl;xXcb~Z`>D0d8M)3W6pHF%dfM>Kofe^E6mF_2J)HIr}z<&1Vcr1*3r
zi6hT-@PlFXd4J?&09b#X;(E^`&qi?{h=8=gwvtVEMxAR(nypXW-bGi{Pl3x`)Zt`N
zG|NH^d3Y|!_-jmF6q_Aryit#zkK7vD8BB?>lQi1xnami_=%5vR4w~$nYAu(f=qaVJ
zn`^g8eJci0_$_DG&kl?ZJp+d4gWl-uoR<jjgMm)KVbYS!kf?<kH%4EpBh{E|7Ur7p
zZ#dWN&NY>CEWu*-S<yVuAM>%C&mWTtzS@qKM5!kF427G7>hGfE-bWw&_R&GtRuIXL
zR#e-ZQBOqS`7juOT&vDdBU`_yU(G8_UOws)HO-r}m}9KO-08cyomeczy<@BPU_kbt
z{JF;YKUrlmLmN}{zsr>ho&PH<@bu}^GM@jT#KZajK0e=nw>Sz1i?GEn_Jct?{eAj-
z^#&O->jHL~7+4Cwb?cn~uYf>IdJq5o#@CgbAbk|x9pgo*xjZcrkViWp@g|c@{<A6<
zmlbLm7fY4Ja#@vrtd>@)D@#gdL2Y05&G$!XIF?X0)&`vW@H_H5U~wV;+Iq1A+^9E#
z^VB2YQ+f5Md2$Z_hQo7YQ{NZv{sZHaE8}A1!uF49&J|5l{?QjRzJg!_=mD9$i^>ed
z$~=6VE3Gu=p>m@?LIe8lFk3F?Qd9w_6lC5~u~>lusL+U%z(*+SK#8XaqJ9=cO5h_Y
zQK1ra$?KP)vjn{}T9qgezNyx4>53B5GxaW{9(?nwzynj<!wl<yNavA7El_^GiCbtM
zHq-j&daDiJFmX9ZcJtvV!~~o0jkL;C4wcxVh%pVqqpSF%FY!md6QA?}xAs;8+tcyD
zu&DLkOG!BZ6ClrRea5mJ@GC#x!=vK71H6WF5+=0DFDS8cVwZ5NDL8ZD*(Ccz314=B
z&DY>8P&;YY8=q<+puKUP!t70`zlH+6W=$j)ra6KO5}P@${y~I}g8a@Y6DWx=C|;x8
z1kI?coK&6ql~pFP!N$B9^gOReU`8ae?|}IA86XCMXcU3e+P%?vi&IzikmAC6Q62CX
z)xulsem}rPc7X?gx+p3y>Y;$__bviFO(F59Y%?g(48nOBlSZTQh}^<A&RhLlF^#+l
zgsNxHIv5EX<Sf=hraV0)y7DAzAfuW`C^=T=m`+8sjp@`_J!3jGR>P=YCFmB@Gzh#L
zIodqjd2_7Z93H&cK1?I)g+T&KE<Be*X*L7Dsw6^I^%k)%+;a_`YSv*_0Lh-T%MIsC
zS|xn`r4H+@Fd!KkH8ExNO(B;@ZjWL4q5Z;hok|!{4lnUt;knMfmqWjxW-CycO^m?U
zogbQW5hCm#XcF|#QEyletLhPUy$V7w>^^Y(fOZN)X!7x6MTZuZF)l86w%^~_BL^4b
z0Yg>HF-Sc+9|Wh^N$L+BhG1%I=k*X@kSNb*Aq8il=Jc}=kia8?`LB7Gh~~fLoi{Ey
z!5ig1ht{Ko&jPm)lABklKk_RGLAhnF;*Y-QM|c#F(YLy-@SKztJZH^Ts0juh{@G&D
z|Cvu*{s+0CV!7-?{CD~Mhsys`=l>q!zb~&u?LSHn{vY@B`M0qD;M*$43WO{_ekQw)
zBpZ>h3j0xR!k7BRCpG^s-mM=!Du0{TCLUAFI@pY(&{vKr$e%q|f5d0%`uBhS5%J$w
zo|67&xwKSXE-#grDE|9H{Ga>zu>Hp?IC*x-CK%R42UAF~_GgQ2ljY(hk2gpm-Jnp0
z-2wdSV?cAvNsHxCyf5C)W@Mtt#-C%H;YAi-S3gY&#jPXH+(q=8WD|$UW*un8p%QI4
zlri8?3Dz5mO*d4s-NwyB;{3t(82>iGZSd~QUhg8l6#Ki3U>Q-Qez~fJ7jlzkk{xqo
zjoHKFcmx%T{gQYiKeo!0o9#%Q<g4wwkCgLZ7V}^h^Y3LAlNi)!d>pE1K%>dlG0~t=
zsac{%r4lS;{?s@}GXy$v0vbuAqs-hWlUJH6<<JBaHFV<5rOQ%Dyb_5Q&LxEA>0wAg
zyiA$HI}{1R;|2&y46pMYT9eEiUMElNF?a0%JE(C?ja4|_BpS}OEiYy_y_nVZBB89Y
zznE@-5ij-I_~<+w`UeoG4!4iq?jFZNx{r<5llspK*pqPnBjcu8@3bbDRt2#H7~Iqc
z?E3y+l?vYfKj{Dd_|G-x|3=!t>&JgtT7^ds=l^^7{5AD|5Ak2_>vPTZe-;c2P@v$x
zD$uJ5n!zdSzg&L0YW06hsQ-JqTz-iEe=i@_|84BPIjX95ulGrf`m}Vdi8IYED$12R
z=}-$R9(2hBOl`sek|P6zPW3|Xv=FN<wnw2k?IE0OrC2Oia-{s|20=5#lhsLJ&*n`V
zghnC|fCPTpIvWl6qy;cOx9T*60t}&LlrN51+N~6mUJR14Y9q9p8+CZG=*S({(<8ZE
zW?|j{)UEOZLT1gb%}5jyTT6m?nnt5@gE#|}oz`JrZ*r4bRNi-}AiCf|nr*S@tXrB8
zf8inK!e8_g=l{deCEcRk#mzr|u7Cf(S}8rmf4G;=g!#XNn}7aH+y5&|E57^>0>Scw
z{rA0m?nD0H+t}I1%K#y)Vnb0JZ8#Q4G%bCn2STJmBCMUa+OY2oYb{Jm0$X<@eemM{
z9pV1hX<}4|VlO@YRI&MTcjMJj>dCq)>=e_xTe~|ij+DZm?clOsADVLq{VC|~$xrD*
zLAoG?O{uCGjwCB8oa-Wyn$^T1Ni7@9BAvGQshV+~r_G3!ecG_^RXX)3ySbTDg<sLn
zuFz1}m!5j`b2@DcnTl0&nHv_*&U)$8S<nTUzSkdum;gsEz$B14V%84tXEeR<<J(j?
zYW7svz#r<Q*Be$B2ZP1NXaH+xxClTN!}e&<fia!dN9`eb6(G<iOn0Ll)B$Vs2Vg$R
z9`5Cm1_bX0jgX7|*^A@<z-I#g_bBByZA^>*QF@B;KbL{|m&=u>6|(;-tvvAm`}p+h
zjZf&uk<rthNh5zkPmIi?!{BoZzgE>!aalcrLsEY<0A>xv$=9|&=$#Gf9gNx0Z`ZFv
z;J-ymCxu5su^_>tm}8rj{UN8q;Z-}xV?j*6;9B%&#gPSXzK3}1INE)CxTof!bMw{>
zDMxi*GegxU)oe(KDL`Op8+(*Nz56LwOs98-<ZVeIL!^s?09uu83U?zo@inL)XLtw`
zmc|3Y!!cfN9~^FNAF6DjOO|52E_VYMC_BW~ahwFS(mYY~^le`CMngC^DU_q=jvw1A
z9Gt>~pxsnd2-+-WuH~nc4#gupqh7Y_XJO`AmAUsS18hE{R9$7l^WFt{X(Q4o-r$gf
z1d`VbUC8#jaJ<W?Q)msQ<uncFa(IO&(o8z_;z|j}&$A=+h(fq{2Y$37f}=aMgqgaE
za2ikfuYpY1-DCdkn|aa$h$D;=Hx%qMI?cd*rnb8rnUGyMs8@yt>u~f08ASRZLlRFM
z{+UO^8ap;F5)2!;yz0~kpAhc61~4zLDRLt2T)}0dU6+WRSX!$ZFpq_V6ey-)IyIID
zK^@RM5G>=bJRSKzw+3_@&7o7>RWJ_CwB!VAz29%Qpj%Qq`))gD^e;FCh@=DT7%o=*
zf`c&{VM}I&A_ANQ!wYncsrZuy!>u>YdBT8FIJ&6Bk~AJ$oq9i1C%{KI4O~TGNUng^
z4*|ix9QS~jFp(QgSiN)7IvcfyS7@yT0)rw4s4h(Ol{$q}N2ulnJ@e|l7mp})a;1j#
zPuj(XS%NTtQn9#{!_|x829y}J!gE1T0f(vC0(ZzkW`~+2_aU4gzfFZK@C2{F>mhmt
zLA4MNKsH$%6~I{hWD&%>MW3Z1lDh$Vc-PxQ^cTt@d0c0`!rD3;^hSM^*+=Eef$)rs
zqNgyP#(L&@5UHYN1O2ejZ8eD(WfVMN&Ip*+Y|!I?((ZbsHwm;VJT}w}z~!)HK`7uq
z*V`jw#^JS<P8pLBP7_F2Uxc-~2t>^qz(pK@%E~m67H}RY5_pL!gr7gp=XkmU!>KLX
zMSe5_He83zp983}(Fb(Jfavz-X+3*5lf4Jn=W7RD{*E*gp58!B!#gdXB#^i)ugJY9
z>l!!HyPTn6<`t9h+=8|7F8@CNTh1j+^b&<K=@h9{y}dKCvjW{y=)6zCm1$L8;bK6R
zu9+ak*^sy@!`843Lzow6^TLwqnrAUr%2PevqE5wxfE(v^IGQwNK{LyMj;B*eOIl^w
zbc>w)T1`BiqBpEF-dxpf2~8+%Q|;h%2m@)I4jJcV_6Zym(D{$40>;+60Qp#*w1z^e
z>6C~#z{Pq@IPP9S6$U5))!P@iF#%_l&@)9hdT%;)AwmW$n17C11H;xMLu8G(kOqM0
zksmaJ7=<`dCm?N5Utynw(>e4$irBoE?&1v1hdf7uX<)9<!u%_lE|rx|P*n#MPkj#U
z8@-tf<IBX#Lm73cGHivPAS6M%;jjsvpKZ+$7#yk=F&9neP~s!XANAXeG6q}~?2STV
zA!&sMJpUMV8^-6D5JU2w(?JX*S;ycvC=<foFl7F;NMfnXzmef0y9zYLn@WhD^M>WX
zYAO_@yK#)sc34ElmQkU~NSpR44VmQC;F4lpLao^uGK0fVK)B0eb-+fy8J-tNBu@fP
zLM!@{qkLw%f&KooPc9FiyZB5v{~MLeZQGc9{x7dSU45$aKd)9O{s%sNIRD?rXK`Uc
z{Y<A*tAWEPS3jQ&TEQtrOFIwhDByTUSYeC`YiNc?c@YOx+lpGSo4(qA3p<5YGivQZ
zrC6yH%T)S&IP6y!7cVX@if7$Xu{Std411?T*xiCfqo1TxyV^P+7j)X+uH^LBLgAc4
zb+v{%tWpo!LBsE1(B<*eM{pP#L^=b#+XY#^`=zH|1j8Y2W($jO6mOk&K`K+5dt19Z
z``fkGCQG$_l16*}Q*}XmiUR08WQ;|{m&~VF{q@E#+qKPu{g*qh5QMpTX1SFzqUadI
zCNnl{SFK+0{J8?~i0qd;yW6@?2(wme^xLBl|4Rp#cxqCa&5Zh&fWm4{k;Kuw**HAf
zK7>D;f7^JqohAz4*~XdGIvtAlf@6t{+^_BGjqG3&+xunv@Mz~?e_VT-F2*-00Fv}l
zx@iaYuje2pjiXgL`|>#sT9m{ihS+NL!5L>DN&hRYyo@R5LAzhWf7Srk3(s_OER^7L
zk{yG~mi)r=*`P(OGI99ec#{sAwW)A1tvM122@&nK(%U;Wciw6?1Fe?|^VjQwV18`l
zdS<2qCBun!yM{-SwUi5JRt*z|(V)Z7dhhf!m(*E=AMfbQ@F#U0&;H~3K8xOFdgrNy
zF}<Iug;}N(kHa;6o=<3P^67r2CPaLl1UzzvoCT_tyAuFUKRe9S28pm}wsV@$#!S-o
z*k<7WuDc$UtDTW!rB6`5inj{^x`6Wl71Cfl7(REH1WcDK%m7s{_(|~eqMr;2SK}lo
zxFjb-pd~R01QwXu3zy<2_s7w7O@XS3d=flO<&z;fggptEL)??$IYc}Oo<qkGc*5}T
zyjauc#Tp$V0qO{KvHrr?<cB1Xuqx84iXgwKHE;Vx!Li<Kl7<*2(D7u11>47*MD!OW
zMs+IZ@X*<cMH=N~+XHd3rOlWQ|3IFDRx|Lssolr`vMzvHYSas2ym+9b=9O__)`Ea$
z+UOKxwgMZ6cWetzs9qE@L<Tk<ok_BUZL93GMMeWPQT;T>wnxQS%R~WnrMwvj#-c)D
z>T?T~H4|Dg%J(Tv;ba?WNsxzu#8|N;3cu?O+eI-kboPD;lY1g?C`r61YIzqhk$pSB
zlJW8qyINN#b&ZYa@T0^@a;My6qH#m_OlzODGzyhOx^{Pugu&GW&RP`yh<bKg=mf=s
zjg!xpmR$;gX6{0W@ym7N><wV84<Bc1LGLkMQ&}sO`cf##iz1}vuF)fJ&%>@^-_nXB
zcYCtDJ#hX^`~huS8ZqnTxbW}S_&oNI;Fl9(CXJpU_X|2@2-UgC{4QF6#S?wTAV^{Z
zTf$(?cp-F!fez1L;db$UxC@8aeK_6FYeV5fG|BF_>kUr1Nvqi7jEc>bVc^<eSrEK{
zL&F$q<Igr!hQH>fqFH5)NI>fCMBZM22?qv?{<F>8zs<xjB(r7QWE*kdI2*$?wu{b{
zbLO<b{LSIU`@!ANpZeL(E@?d?n=~wN44@9w2Phgtua!F@*nC71$Vc|=e8S#(H$?vL
zM%NObBzeJVeV)Tg{}48Xv~h<sKS@Yb3;fra?IXMuWTbb|OFz^bvbPM`e1YtQc#Ze^
zu$z$r4hjVIPSNESqai6oWp?1LObuRFjynCRQ|dP@wf?{fK=4`CCQ9FLvN>D}lPWue
zY0r^{wwTsAOIg{>T3W<2Y6}G7Y3qz%o4xJSS0$T%x{DBNkhXG2sXf|(orw^-JNpcn
zot;y6W=)ttW7|&0wylosq~nfl+qP}nM#r{o+kPkCtoa3Vbylr&F3&}+r)uy06Ci=p
zU$IyfTSoVHy0K-(t;${KCv27$W1n*tDqzPL+rQZS)78VUK1IGDYd({BFw9Q-=>kJb
zRh)0e$etr-R;0#URY)`a`&gfg3WN~?<-JR=gwi0(^Ic|jV`juPyJES{=s5$<Qp7<l
zsKOn5(7y$a(o+T>RW9}lI~w$E9w!8cAyJIhNIo)N#b64mk4dBnUqYVeUuCcnsl8|7
zajGj-N4?x1!V=gAkFrBG3G&>uxIw}i*ETZj2srWc4h@@-Y&Uk|K4NN~i~bb*jUu9u
zdpo*uG?<a>FmRktXuO$>ZodIj4r^i*UpzrPp%15FWGzZV@hHVnQ^I<JdvYm@{AAPm
zBct@?(_bbROrBw!usGAZY>hgzsiS9E?|DO1%veII==Ve&l8dJPe%QU8PQ&&-EOBeI
z39*#bvZ=^=l9>KG!#m$=n8)WRgqa^Z*w+ACl>504nI3?i5A_}4*9QO+Q0w`Y2Wa&O
z7e>`Jpn%n?=kXM$WGPWD<<p1594$FCQa*A30dbxlAvN<tSpTJg7#6$sB5^}!9p}5=
zF{m193h^h(l#d+=a(EZmIqIU>J?oV0qcG8>5_l7lucMv<6FB`1geIt@X+Wz`$2R^S
zE91YvIOqqym&H8*s1)5t?6vrRERZ!C0(qew3u!G&ex)qblyh)aRSUy6TbkYMWrwm`
z>}B_pKDfQEzBgjL98i7PZ8k_nJoj4~nf%aI@F7%gk$P3G+3$9@$8IP3fK!qv8w|ne
zA(DX1>WET>U<<eC1jP!w9Ch#>#->~_5eTWFxV@F&0N({S5m0bJK%0EiQa{70^i)8X
zt{5F^8%!U0ZCZuDf;iMF*$GSIn4fQTt-(H!&GD-+o9+2WfF(%9&O9Jke{&H$gnDKl
zJn$cp5eNh)uu~##x?g$~0r_NKv)5lRch_Ca0nRqi6|A#){Pq!~DxSNzxvvfy2J8SA
zYjJE@A%zcgT^{vh+x@Ds%884O?v32P$0!s98$6XrRh_r$OryQgGZBsNx+m3R?YiCv
zS!&u{o10uiWVu@0Wk`9JZ)I)KJA?8=4dEN_6-Zf~cIpN`j+U|LAdVDrG+<}TJ~Um3
zTmBa?sAE@M>RI59;jqKVP<JeW4-u^{HaA7W%=7rK<*S+;#FikQxkEZbLW*IO4zy9X
zESarRQ4ibmG~mE%<i(Lv$@4pc>|#DpCn%d*3uO_8A|3BQw<F_X%v3m*Sc<~t?j15Z
z?``bRQts@OM`H@qjY8!zAq}j!NyTFdDW-u9yx8O8RtjnqBAVIo<A{m*yg!fF#Qaaj
z0~z`Bb3#)v7UPt0mIPaOV;DT*-Fcjhwvt|10XqjW(m_@TF7aw;vZ~jW;l0ZBZ(zYO
zn$L(k_xYWIaV0;I`njodbUH_#GRuSM^yUeBpx0-db@M&KvX5x0XHD{4hf2PWY?ph}
zl*qy^TYxqF`&vg#(l9dueSAwc@a>eMo;yc^!S!<)MT><GMh?&OgrKCwly9cdahAel
ztCk$4{!~1wE_cdsxatA>AoP$|;cgHJlh#fUGt<Xb5HqTkWASTikz@ygyZDJjr;Xj>
z1LyUo>)f)rEgQPY4Up#D>xM(jt4xxM#2V{{E^m5=34}@vumira$!26CrPJ~GME&`&
zztM2CBXjZ$-029`uN6?EWjfn_rF8sBB{-SEV#8&8d2?p+HM@i*e+N?0v6+mi$zTim
zqTAB382kE@{q7V;4Epfm@js^e3M(-!4!_8*4sD2X)gl}%;MRpWc!a)km1mgXs<_6o
zOjvqdjlmKtrslz@T5C6e)QN9fyi7`-@$lV*D_4(K)wU`u6HRmJ3@#TFz4MQK?<6Po
z49A{$)F>^yqh6e4<l8?JDF{oS778zUF6(;#*33Of426rd#khlY{_SW8qp6^>g?!w3
z?Mwu?;Mvq#b`4NM_Am7!YXOvqSMLV;9Ss#|7xsG`Wr9{zx!&3TLi4r+DH2ey?Q!d9
zAnl})$>JPhJ+_(f1o>$yfWs>{D&J;Y%pQrW%?HU+XC1r|3IwkAMRYw5PKM`-uG!;E
zsatrVY8^!Kk99WfPTmFN=>22Z=!!d>M8pt|^bxLI5u4-%qvXh1GYc6a{{$30Az3Fj
z<n5Igi-&|E|G-XQz;xm<Xn`!=Z3Z9Zh=g?XyT)=z_SBpI1i{(szj^K>;Ot>RqK=T6
z$k}GvGCE*VOMuC;22d{lm^KwgF-v04xPp)~4|tp)hW=`VD+^TZ$|V1pc<!byxtk&J
z3UM<!?GKBlTJdL?(Rv(&QeJC0NzvWc8HFTVCZjnD5-RokTjEIEth=IE-6XNYSD@v9
z&ETDv{OflMLET+_*kKh!Yq0$r``2{n%-b=rO4jcRpRrVE$zYVcFEU5G)Za;%UDgkW
z@1I=Yc@UdJK1%e!{=))Dn(7=eeM;SNsfLoC>6d4gA>A-iQ#zo7Th!LC%9_M+52})5
z7yD3KUq%|qx$Z!v6rbAZqmda2mwDqL)x^Y$>nv%Wo$@PV<V03%{@EEnaK@>>eAvo2
z{%XQr5)aHsbB8FlQhDf4WmnOKRlPrrmCKfix8NPG$)cmZEh#JLk^3PYe#U-pqO~rC
zZXt638kJnXf3oW9EXwgBswz9II3|0sG=avKUG!0@7(mJbi0CBiC@(V>iuN>&9SuId
zl&xbrY1YhiQJ_6r&rtHPwpgMwh1?o0wjH&Z{y?pS;TUqH4?AlKjKq`nB-|q*eaA_@
zLhCfE)_(B}0bZ2*!k{X}YtpP4B2(!A%f~`JYGT#vI%720%O`{|LL>31&kW)$2O?_-
zedLX`bO;;S$2)WgOh!J7$h#yz{w}t_@sQO3^-}9*_AH36j_fbwj6`36+1M(u%J%R~
zPHTSV2mm9<(Ru>F<N}~43_n(g(7u)sbak=UXA;w<HDbVj%JkZ*nPTEo=jOddXx>Bq
z^Z0B=-GrWmb_$KQLgX+z4{5^O_k6LZRfA_O%jqhNf1$i#S@)zW0#_Tg6mYVF?A1d~
zi@HP%r7dB#7u<ZF04!AHTU`&vP5yTtft4au7)@{JwYaT@bzQxP+(&$q?T*miE54u)
z)jyn0ZOUc#!9tiEeBSufJuOkwqI(YN{SY+7<!RGua4&R0(NOfA8DnaW?VIHnLprwT
znovUJSI~M#lyddP!O*$ezlEC)Q7R>&N)8zJIJqr^yz-^caHK9QtiKB?9Ax{3!%(Ce
z@^Eo}9{oPG(j^*bS)^)2yf^-4iQ0kN>E$8ixeSve7w69%{u~tXmfAeS3v+%Q)3L!y
zINb2t1wE+))`99&4LI|2d)Uw^S(4VKD^@aCw;%{~oHT5N?#3%Rx}yG^)FfLbMnYb%
zr;WU^gNE5ShE)2e9BM>F=dwnaauq~Q!(=2us5glVp0bq88X^#=G(*=H*~*eoJfhJ(
z<GSZVSAkdd+0Z(K+VVm**7GJ<m6EkyqUuhvygru>im%DeChFc_5ftdYPa@c<><(PM
zrNy3jjTk*z;(unawMhwM0i_CkaF=TUs!6tEM~{3vOm04yvoIdaTU#z_ar+A!KX>C&
zqfaX%L(8ku*Zax}8FJI$Ko_h@_BbBgKK#c&q+$0sqL|gK>!&1=$mzfJ1OwowGdkJC
z$+3NXO)=tJ6X?!RYsqkZZcvo2#}b>}Xgab!Mlqu7;OQqH35_X#pAfpkYd0}obsr-*
z6i46|weVM<<OS1)zrb=?@IngqM)Ljz%kgWwh3<t(t$3H8+eZ2ZvKV{Z8s(!)GIS3m
z*OtFt{uW|uPaUf!alvAji99;tvW15cX+$)bjOjXNL@oeRXQ~zAJaR-J8y^aNNHB*b
z%<R_KObwphuO3-qT_hA~*@;^Hx#A^}Tvi^~s~A*!IN|S0NJ8g0Z>i5H&<tIC9!(Tk
zMJ^cm9Q0i9dMAuYFJSQNRARBE_bBdEbYbCK{st=zsgORqtue!})D=hwEYhF$T=~j?
z)q|{p!D7ms)rUcfNB@yC!%N5_vg2fqNxj%ax=EHPo?=_AQ+%k6J#m6JMn5yMKWNun
z=osci$#(1!_ICGi4mNHC+zvBX_OaAGfZX-vXf($%KuKs|YIol1O{UCA35`0wTJna`
z&C_@j^zDJcJ2{E^PLikgc;;lOwM&cGfyF2?g&2IwI2QCOm6Ek%$R1{~5d76;n2ISZ
z@RnXHt6<Aa(J^oz%>$qAa66pyh`#V>QwyO~8;eRjmXFq)-}Ja#NP8|s;IZjf`h(ti
z;Pu4>W8%v(eJw#JYA(xcTmx<yDpitWL-eoMH8F5V<E8H(!OTM%e248T>>Ql+&t>Yx
zn15b{-Y^mc5kRQg;?AE+%06Ai(FQNjM{b#t8Y(Rzg*C`WVWNK5Ck!?Mh0}#B&P&bR
z?}eUPC)?BKW#=yf-@#{nT*%|`j4zxX8xbKnTuQ%3bnEpowx9`3Net<@je{@lx^vk+
zWBSAJ7pe-HWYCX%FlSM!aW-Teb^RL&s9`kLb)@?v<EykU5%Df(_KQ6@PB}tapkH0-
z>q_PQv|gu81nGx?IbN8-b#em6a<U9>2|62coZy2B8eMbda_e0!C1DKv2dv=v?I&Bw
z=b)?*?jDHvBWWkw`1)z3S@YFE$}b7E0~N~t6uC{l0+T=Mo}M@d`FIibLH37ZWy(;b
zOS}mLX;jI=PVRKajgcjeIYG`qMucag%2H&wB?|=E@D7lrEqD@T$4Q!!kE}qRhi691
zmgfBT%7Yh_Ep1sK%){%R3i<AGorcuq_WJo#n)pVAxCS{IAuFvbu+#JL<41SR1~AJR
zV9_620#Q;ov7^3~I3YjDW}+bYc-MM-d_ZNW&b~hJ=scq7v7)2A6`1C7--e!vuIM$P
zRTV_@6R<xHR;&MuC?{sB%7^X-HXA?AnBtE}`$i!6vEnM=?1{NmkzpxQe-{||4e#{v
zyO=_P{o2Z##F;gBx4D3j@XlW^DXj4!_oRY1GW;M5el1LBTV@yuwLO@GOG+#uI{*C9
zPqm*)P4O4T^6r$q1Vpvq<jiP;O%QLj2pa;jS!PDwCQC`g*gM}~QQtk?C$So-`O7dV
z-C^ka=S4Fo#&_#lqN7|G0Ly1zgQgV$h>Ppn@a)XO9sEF`rm89>MV&^T>D#dQ5eaao
z%Z~<7ESE(^W!e}mzmFBD!JsalPgt+~DGwCJtY0HQD*U!y3U#L36wNH<4gqZA4_iZa
zA<9^ZTs5zY?D~gxd!W7&SWVQS=Ym^23^h>qf~K@?@ma2P%9`|{p@E$`C=hBBiR4|0
zQUqx$t9K(0fq`Mn^=B5NMvX7^S20cvZTtO_@Q`rhJU^H4@5A9HccC}&-y9Mo0hNKk
zhC8=@3LzF#dfHMWIsN-N5_cdGa^fGZ^o_#fNSdw4aMut|%do66D_jW&qJLnV;v5Nr
z3KkE{SVj~hEdFa+Xr)%AlfTKlHDWBS4wN0*ZQ9eQ1r$k3cXoY2G4ljG5D3uW`1!tj
zRlmOKTkEsM$@`O@^63@phkz)9Zkkf53<O@xz{)WNO7sNxra<!C2+gd`pB|3P-pW3B
ziI`U+xEa8x(IvpEfCuMUy_-JX*oqc;I=<S=3@o%>#fc3NJjkfe2(k^dWDO|Tun)(f
z#BxJSqyw&{RGa^023BXuN~IwKIL~;H>~MlEFl6c+KIRzU<zX~SWU?)Rj>*2_9VLtY
zt6Tk9788ho1K%z5@z}+ULljg@hQzGbmlHzv!Y+tXlQeN8>|Xw;Qr;?#YYs!<adny}
zyK2G3T}C?URqyCkq^3lLrd$=g0Uh$OY#({0%35W_6bZwmbB@V;KTnMeQ*(5@y}7=|
zqa>|3JEaE6D2dxSs|N>^esxhQl2;g45WD~R?bYus68$lwgPB(U=Wpt1wDfAY$4G~R
z3mB+g#bU_$7UU%+S8nm-0Ehe2OBYT}v9g<=fWI}~U4pt9Ld?*^X=faU0m`zvpLTwz
zbxEE2TJOPdZ4RIE8mg+`faQ}bn$zjbgR=|~cy;l%V0rI>rx`^s?iUXn+pWeIA7roc
z<3aDr5^jg4xdBg7Fx8N3lSGRG-IEM(sTrTUa59$Egm5|#IhpIdM#LvVTUq}*7tpQA
z6e#1OfY$VkEfj0pVVWk`Gw#6=dPw5Fgr*zhxJ3UM!mZ+T0ku6|2l8v=>l>7{0pgDL
z>3Qy42gTzF_I}E_qfiLZPOiX+Ca54F8xA^G1!>YTLS}&)ZL?;ui!_O-!~n6}5=Qds
zs8&cdh))dt8s(f#M*8{bZ}8o50f@M2Obm!U3a($Ew5e2id#gkjV`Sg;qNwd{_@6aU
zee~22{Pmh&WWKF6&$kSQd~$OKorj6#al2r`Y{w+6Bugq$8Wc4h5*$6|rex0a+|opr
zJa@GZFuedEJxu!S8};cI27XPb>fPU62dnyPE1ae1r^vsaDnGSDa?w8=(S7JQas~Qk
z2rp+GplNE4%}ORPOx8l%4+pgG4EUc6zEAzCG~&nzjfNoGF9Wrogue9!#nk6{yxQy}
zjbcWcf@$9W4A<(N(uf88eQ+~n5BaCbIBFrogSxvbfc<5b4|G(kqU-%8_85Af7~T$p
zyP$NG@Lh{jh`Q}zcU4pC7T)9w9$wPYTHlx}ca{f%+a}t<B)M`&DGewsq(qDx2^k28
zm#(`}ahedWk};FCjSGMbc4S@2dhEp$ZIKRFkv*hHR6C}37oho;r|<eLfRAN-QeWP5
zs9y0(r&7R3?L$?g@=TNbg)y#(rColgPW_gg=0?~5cA7bE@&z&qsyOoWdoR=B5&qL1
zB#R{T3FpWaU1#EqtoJ7oFh1ux(V<55ZZfp&5I1<E_C>&Ln<M6V+V%18W-r0}!LTXx
zS*QK*W!7Ce=Gktor}CB0X`5E`=L0R1)@IJ5L@MX*rD*K^cyNMhM`pf>$`#8oJoQBt
z_|0(%ZtnsP=I$~vJS}f}m77J+r_4$|VS;2~bfc8#VcTf}?Z4?@Lhr;=9TkY4)wDup
z29{99`zR1{LeXOF+EKRpgjEm{r8j9hmX5Bu&_8kz%}A?Uj(Kp!l-RrX+#hb!;Wt28
z(|Z=MIddWec*p~|&jLO@&wMW%^%-x`gnu%HHPhq;@*SSm^V`Eb#SKM9|JWVOv*F&!
zO#|3;-sq=YEzdN6qr$2Lp`H=;y(g%rzQXtV2)+4%!LoO%rT)_BTit`5K!1$EFWmbi
zzNz_*|C^!rc7oyk2-A(q^laspI)dg*BxYuooKwrV=+XCLW@+<0Y!GE(Z$*hef=6)8
z5O2~DZ|Z;nhgM2AHI>p;rvP(x3Eii=zgDkjcZH#<OA44x*F?x@W><FcIc0rqBrkLW
z8v>LWbJ)9g(q!8T=%jCMI{LH$6F!|`3B*r(H$Upm>AUe|956UT`X=A=`R~Oyd47Sc
z3!qh=<S$#JqIUO<xGga1`*NpQGz;WWQAu9wE-Z}T3Mg^{dCHd{Au@R>4$7`mkJ~}q
zp7a&ladBcWd!E*Be-V7-Aw>9C8XAg>jQr~hHd&!-^%$|mAYl=i1XAF}e_52=%7@~;
zJR7pO@xlDr<%s^>D66@Gl2~{Ke|j|ie69Aso$kO?&DAq`M=3~*Ns~~l_|n@?)>M|G
zIYv<FQnl167G|F9&8@X<0Xbs~@jd-JFg3|;dHk_7M%aDrK&%RbVO-T;8<1!^K*=z1
zJo<XCHkF#PisYfLBat^P&IM3DkP=lTGUu$D?b!*@1dNyTo^c#K3|>30{GUA^e4eqn
z@x~r(L%t-<-4!j@>5JV}{c|f*=ZT_`wmz=lH(gNzYY2kBpZ<KIa+iocm>-fdm~`s|
zR_IQ3#Q!c+7uRA)l7s)9+#{&m8+{gHrMaZS8jZJHVpE5QfG@_S${pv3y0D&Z@9L5U
zzv^yxAdfuHPpis*Y_OvgCGlzg@{M*`{CXYneZ+oxf!WR=ahCl@zaO+{3^Aidfutkl
z87$p~vPC`Zy<j+ei%d6p!QSab9rgs6por6D&3s4dHwS8I7UfID+}Q<7FYB}ojPq6R
zsDQi#?ffyt8kPyBgR_Pr2$Y)O-*{B5!S33*EkEpbZ%m-kqGR*r-ZvtZ0GmQF_pUGy
z`0o(-)_(?|ONO@$)T@i~7Co%B6&5~*1hw)~<ZcVB{}Oxvj%aW-{3=otN&Li)P2HW{
z?osYIcQHSjmuCGM2Fpd<xZNKS!Mij>F^w_vqrZf+(!%;J?-$M^{p|#H$gt&sQ0W}u
zk!n?eb$kF7XD>1z3DP6gxF$Ty+}bz;BI0va2X>y@__JVPg?&#>oapLQpw8pQ=*7*=
zJq)1hM4&hQ<Ol12o!k3YJ1r7X0lyRDf*A9*ah%zD{Pmw2<9KB5Zn*xdxxUl0uH3hK
zYopehpbMNlro?^}wo@pRAv*8E^CtGYN6WLmu{RmydVBCYlyLJ8p?h$tk`Xy{(xVW!
zFphBJrZW6TLI%SrzfwEmf6j@z%+Q~lu0=nx(hqqmS-S$UY5+elF^IzsJ~~SK{>LL%
ziZ1JCiw0*(Il#re%_Bz_G%!8uL=cPZ{zO&FNMoR3NDgUB!HIOhq1CS``IsE=@==?v
zt|+MUtg|rpbW6aJ-Oqc%-G|`Zw!}A38d-Jm1i#0NI6AVLj7PvH#Z|o$ow14>byKIm
z8Io3pYyyBv)C$+qQQ)Pa7NmFOE9FjtQ>B17ktNVTtqNi<fn~u=z|x|CYmBjrQ@^o*
z^hGt);$CgX0%}^m45a-wk`y~>qd2Ntb_or);qJ;%@B!{wENOE^agW2mhbP>2J>r!>
z^uJukDikdupof}_ATU|fPRj>RnAcTft2H=V;$X+tzwa;8VLS<2wVB0Q-i7inDP45d
z2{6qfmn1|`bfs`<(tuxrxryPR^vse(S~a`_A`OI_DK1giDsfjZv=_lyoZ7gbS>5Hz
z|1$BT7DBsp8YGFp$m>kzgE<$sW7ROKCl@c(Xx|{8Z?NX|k+B@OYcIsYMm3X1=pP|}
zjbk-InbOHB^uN5@aLobA@qQur?QqyNS;vx4ME6SZPU<l3^ok-mqjd<{*7TMktY`2u
zUx1TwLkZ7cgAX8=RxJ8;u#Lt=?@Nu2f5<)R;}^q59)SSm{g2VIja+jbwmChN%P?KP
zR(F|==HvoXIPR#sI(8@#nSAO{LEs`n6USkG{rcZ`W%J=Y6@DfvA1!>Pjdv6}rds3?
zp=C>^LKGG~`rl<~(M?OzsqJhc<egk&g>gcR8yG{#kYZz3Arj;*yRMmeu&P@Y&W7sB
z)f^TAYzua$GAv;`(9^snaXV~Qi$P-}>tjvgQJPTUO?a7Fof?Lyz*{lXE}Wqf=Vf<l
zrbjwwV97)pB1Wv=kfM2Uf)^$oAp)qTf-lQ9@&#|f>FF*=oi}??c3>J42HDng4Nk2Q
zN73|-#FRgf3q;FtR#a%%B`QOjyqRb4=MeU;xgW5xnG<UT8yOg6LqGy=Z<n~AcJ`D6
zwU(LCXxSh~W*gbxdBu^~G0FB7?u%!}5fag)i{}n-1`?aOHRKV)sBP&`4Ra_mL7;(M
zmV1mCp(m3>myt`uPDLd~dKx8vv^OU4Nxp0;GDcURe<x?{isa?gY%kEDaH8*ir_1H>
zOX`u0iaQ0BaI(H%7P^WRCLb;LbCWG0hAyC)dE>khkf+Zchw*En@`RCYd781YP?@Z_
zK1C4kAn{Gt=0XJy+PVo%%40HyHJWLb{k?nIjXWzVinIY?-|>+iu|oO)ESfh9o^nd5
zyZQ#(Z5XO|k+ws42Igy5R*PQJ+@V6U9g3vS1gMABSP!z_XmD+^4>p#p@FoMYN-1*=
zw_1xMdu=!Z%zsh_mBtKlAMPXbxk=K@xR}yu+oGVnl_^DbB@JRZr^%5=oXZTw?thWs
z>h`*YN$fH8FYu~1^O%SQuo-p9ERo+-FAhn1*)ojT<$QF1m&r<|YAU0n&qQb^SK)hS
z5%1Ae1_u!ZyyD|V2!y4gEXO<H1#%DFomIGQME2-bQxCm>_9P56ZBpaGA7ZSMe&Z`R
zm=LW^hu!15f|Sd=#R?5y?zeYyStyjO)J!2(BGl1fR>w8xxbgD97S~7$OMmoSBfu1X
z)%kNA)tUMM;Zt7<mO0VyazmwxG^kr=wZ12uwJfnM0k1^+i5OkqSP_GcM2B`J#Vh^M
z{V6%Z@x;eR^<s;rw{r1!>2{d#P6w^YZp9n<XHu{jwtGnHju=UIvgZR`651bsX`FjW
zf3!5%OG*9tGKr12E;3Z(BFHz@I?$M}x@{-a|2|fWpn0Eh9nV5+p4`0*otv7;O1OmZ
zS;<zB?EcA!SU9!QQ!`qCvXV}Bpom)8Xrw8W=U$G^D#jT#tU|~W{kk!iX*A3+&akEJ
z;wC1Ro*^YZzBGvQM!&-kF2M&`5OJ1CUIeZQk_0d2KFdcTb+CCzv}bl%`4g~<IzyCj
zZ+!P7z>1cLIyfx`XTz(*mdgi1!i04mrw1qxXCjfnfxs<yoUR|D$G|-|IkV%$k%>QQ
z2>8Q)qOwf#G|HSef*A<hP|rE$*mtku_%!hUIv(#SZ%VEg5vL=TUc2)bf3MvnAB}Hc
zyMTZmkW3kJo_I{5tzgV&e<UGZ83+f;i9|-YwSuz*nq09UQh|7uIGI#HtV~P9W$1nu
zD$ouk@-E;f-?nRmO&G9J(9A?tDF0g@=DWQJDWYV7y7-f`$F+5hT|&NO5OVbX*gC%6
zD9qMtSwdF2d*9$#^ta=4JSai`)60aEo-VJfcqJ~;j=$D6UNTnEcfsZ#C5Mn=QbtJw
z1RUZ>BzOA#Ll%AyZ?gl0MOYWLXp|}n`LMc72%o`<k*tbiEG{XMg{+gt;|ADLTzAO4
znadtC^1FJP+?u;v8+#2WMoNXgf-Wd{nhSw0pQf)PuLcZX|12r7^ZAm^I9_rPtQee;
zT~$}LAH^5oQ?(R~xo>dM#a^V$5dZMi>j-H09$%i+z46j(;}jSP6LZ*7K4WBf48u;>
zHt#OkHtBLnsr{TCTsrfE+m-DOWV*^Q@4N=|x-sl_3&J3Tx0xR6Q9^cH?YZ#iDL;em
zU*Z$hf?Y1)m0H*`vULC<F};E4%vswSv;oTvP}w_xp7)=cyswvqrTg=Hlr>+5YFfTk
z-<^rjE8iH#aU$LOy&ef6ra7#UXm03B`qRhI<r(Tpa}oq%#2qu!D*Kpvji;O8>`eQ1
z%+~sZH<!=CKk{TdQ44#F$&#YBwFfg#bS*xp-BWU+Hw{k*dV02mHWxH*yKe$#n7AKp
z*1FN2cSGKLJCQF2MoyQWjrJOzsVh6%nYO)Wwz!@vZ8Z>e*@$vvj#}^x#4{s$3yCyj
zJk7V@4ER}F6P^+FawG2&{%5JTe`SLaTiIhc%EYKG=|_%)L2<^1B2akgCkjS`8WwR<
zi)$0dnLQfiMDOVmVs`wy{W6&XXWoWaN9)n*yx&l#K!da)u>9MALd!5Ra-7c1xF>yk
zZV5+B`g-3*V4`%s_6CY71&-oK`EPB;!%|Fpz8|DLS9~Qtjy=wYSCY1kRjV4~aho)2
zW>9Qqyl{wzdU!rn=5Mb~Wzo$v6Y*5A5l$@(X-|^^sYSA`r%E(S10bc#d@qlVr)*dO
z@bG8}e|p2+1GpGGlxhkQ{rNZq@u@8J#PcoN!b6HR*C(zBYG=p73s0ZK45r&LiiHa9
zU|>U+#|Af_`7wI#G_Xbt_wm}z1B($f>^6%wsa_y~kvs&c%LlcLP52R%>njp3Hn}fE
z_#YRlz}5sUZ)U|}d#b2T)*79U6R$<OS-akE1enF`eMGisdapf|S86}CitcSy-Xith
zILc*2M(XEOdH3J|l5L?YraFjB&n`>90@-{lG45RvAQb9P;Ksk+HpGFAVij*@+hM;y
zq95Ulyz44LqOOtf@9k@SJ&Se<@BL%N%rW;jm6XSPvB#+;EtEdBQhI+->CzKao0;bM
z9j=DzeIcJ3ZJ4Fa*jv4JD+GTro)g;xqqz9j5Yp5`byAPfEhlPY5<U}DK^n)%85g_r
z3w4om>T(>=8JhN*mSx9l317k*0relP*J;u7Uv<xG&i7B!QD)*`*F)4k8~o3XT3`<*
z;nRlZ6@*83kaB;%(FOy?*G&tszX>CMOQc0#-^L#2=li83WPAAim<~k^6ZdISSAn%`
z{<<CfYA<a{VIS1(hLz=JXI4u`=&dkhJLeuV0+mCm$&akqj(1i0{Pg7-d_b%Ox;~DQ
z6#7<E8_ovY1%Ce&wxoYF|9hV0UGGgzk9rnIHW|5}ZvsBwxx4_+CwX8KkY1f1XiT6u
zd0aUQaPMI53=UHOSed_{5Bxs)!N$b30W`()vB6U1+G?4n<%`PVxmtQXV7J9<N|{gW
zqp-JCbN!CD=9P2GYg2_&B%I2Y3>PH_j)3#2>k}y^9oyDQua^)HLbV@AB$A>~tyA-|
zPsBrAHMU^{H?+1T*70oFoa420oPRvz1o(&5p6L`xYv--oHo16ILGPzgr3@<MUn|d-
z{qw`1=83VZ%rp!QiO(zoIMJ8XgLP;*_N2e|6Egsqq29iaWuMmHzJkYN9o0o|Qg@zD
zC=a#}p!2bc80+74OXk5@jx69ipsJ@dR&-ybqW5iUruXlzJqO#1-R_^U&_5iyd9%!S
zF3nMCr8Es1!0^cY$lz;dB=o7b_VgSYl<|TQZa)gE06Ix7FpFb5|E{S;g&??p-$?eh
z8m-rmH?xVK<9N@>w!%Q2Bzk?jtpSq-7{|L-t#CWog+sHIa}5&qtMc(BEi!p-yC5~$
z{lVj8N^fuh#^c`h$ii5)+|oD1B%(hjqbefI+@1m-p7e#Y6VgCIt;OdAE~qTtfM$8E
zxsxD%xG6P!e4}19N#kT#HBr(TFS>LdXJ(_b2&kOzwdfP_t!Puwx9nQRpK?)^auFBd
zyW9c__Jg(a_D~XkfhS?&=fp^<ENSUr<gd*SV>4z`nXaJms9unkHbS~M%IYVn|JFV)
zrF<vKusCSVu?88i=ozT!TChJm@H=N2pGq`=HKZuDMJ~P|nCJm#4q&Wj+dI+zEFN<;
z_nR$G@v|x>Np-S)8SNhG#^9s}u$M3y3?+BwP;;g#Ia5K{^rtMWcF@i8)e>t!zBL%~
zxQH!7P4O*oNg-h<!xlCZ$CCY=`Vg)OL?(!Pli#D$v&z{RpuS`TsLy?UyRmWNzc$VI
zJ~>-3)&nT(27@4;^nSc<hJK$Q6(%BbSJ-995q5jmH#@q4Aw~>|lJ>X=B-IKAoZUQq
zP+at>Z>&bSM9F*BMhVD+vo|l^;K^Wd{ki%*X}^j%(KXKz)of6UXvo<az!9Mfo+>ig
zEVt}rqe`Zl?;qh^1>B&e@nbzoj7$qyBwWp?*_X^*Dqcuag4I}?5jyB9%@L@f#BY06
zbg@0K-QZ&Fd|x+%e4@uoJ!9biheIjOss*|^{^MlDZ)aP&Os@!*eL6+PaDRMQsZDaI
z>=J7kSNKqTwqqEPYQe;%EZY4XS?N`q*usyN)dVCLD-Bfv#x1~?0Wb|gp+E3Z?hQKa
zRwBXehY+Fnc9bDTxvhj!QC(V_4vBYO#Q|J1_SwzC%RinvbTshTc`PxU+K*-bAO^qR
zxKBdLbZEHktd`NSN*7IVF3*)i!yP^mrT*e;3(LQLpEQ=tF%cd)lt0exk*@@NA*_l3
zTN7aarYAHfP33pH11P+E1sv5Aw<uj6&CmYh2!*WO9kol^-aHh!&@A-Vj_n~i`0&c$
z_vr5KgcYDN!>Ve3U?uR_uG4a<jFqzbjP#+4(6{+Nk#0Y+sSPwB)m-vNv1><Ax=`E7
z(b9tGPFu1O+n~+U+NQueQ+!s``~VVZ_mp+q`)*sG!@uqU%%EC?#0dEYDw=1T#)ktT
z?Q0hnjK7dVomJjn%Nf>ROM8j_F4Ty?vfY<)n%ez#5DPP)wX2Aid`7yE&m>iLOx2H9
ze7~>@z2gP{tDms_2B-%Nd)A24s)obolc~ECJez-?O1>%}&<~rY$KAUQ&?EI+J*j`>
z5@1738(y^FI7BcS=Qsqxi%uHI`tidTg=ZyoLgD+Z%%}jb-p(il(-ifH=Vpk_s*Ngy
z&T6q?HUH=t%V2@wNNIwG(nf5V6``+q-)*xnag5|LL|Wv6k_v}|Jt}Qn7ch#fOV5?Z
zXS5{9qP30ET$KNz1kk*GqvRp{kTYxz80bnFh-*ZJ!EUJVK>-IIa$BVk72{TF0_DK~
z@H9SEhW&Lbg-6W_gOk&l<2=ANVK-o>d(l=v`591*vIeMCkxEW3UMf~y1*G214SfIn
zM4oE{$+T+giT-=V6&F%awW~D;(=fCkgdmlko)VV@uJ?#e#PX$7br{#J73Wtr9yi3$
zKxsIs;h6p1k>kG#WhH@EL*7C2UZhR>7XR`G7w|E5+t^B=I4!fvYI%1;5{ncC*{q<A
zu2p|{t`*tO+&qOw9ul=vrW?oO-=lV?(zw31-6VRz5UlIy-P|v|vNL#gt!2jB6Mi;$
zyR|ho)1^HS9F+yF8Nk}PQN?YS0)c`KzCgHD$4F&@$WdfWaKIiK_|7Bft37Vl@=&1&
z#=Ly9(!>-iIA6MK3f}0f#<{kZ8I22mpbv%$_44z!;!6CU7ADP0YHn}uBx^Qn{q00R
zh<8a%UQZ;}M}?+8qXD^g>50_fH{u+6wskNS*v)|@p!^%Z3bdPHH+hFU=af;v#t1M(
zkJqyWuBifxbnGOS|8}O%T}{<lBMP0zxO(n40Vh^BK}B>mM65(Hsl||uv)FkH+A1gx
zl5cUw)gw4m)%ME=G+s41ISNKgZnOPqho$FV(C_MruN+A&=3G#uUtXt22a)XOl{!dm
z09;m4qbyRpwpo)9h{Mx33AhddA5~Zh6+UL^E43F?*}3Z!3R2_XED7BR7cA$nT)R6A
z{+BcOFcVtmIEco%CLOiplR9kyhJEh=ocl%_l=3i%pm`V=YFNM6=7khwV^%kkc|XAi
zR9I%(g*Z-7Hh=JeT%lV;JZg*}kwAllU(UE7s1?%vVLRBnRpq&7lh4`IZ3pk~KPsgt
qP}6|#%k0y=hg<hoUM~WE#N5!Cfs_B=a^>yQ*UC4d639&q=zjqGBX5oX

literal 0
HcmV?d00001

diff --git a/contribs/cray/munge_build_script.sh b/contribs/cray/munge_build_script.sh
new file mode 100644
index 000000000..df778941f
--- /dev/null
+++ b/contribs/cray/munge_build_script.sh
@@ -0,0 +1,81 @@
+#!/bin/bash
+#
+# Build munge from sources on Cray
+#
+
+#----------------------------------------------------------------------------
+# CONFIGURATION
+#----------------------------------------------------------------------------
+# source and build directories
+LIBROOT="${LIBROOT:-/ufs/slurm/build}"
+MUNGE_BUILD="${LIBROOT}/munge"
+
+# packaging installation directory
+DESTDIR="/tmp/munge-build"
+
+# installation and runtime directories
+MUNGE_DIR="/opt/slurm/munge"
+MUNGE_LOG="/var"
+
+# input and output tarballs
+ZIP="${MUNGE_BUILD}/zip"
+MUNGE_TAR=${ZIP}/munge*bz2
+TARBALL="${LIBROOT}/munge_build-$(date +%F).tar.gz"
+#----------------------------------------------------------------------------
+# SUBROUTINES
+#----------------------------------------------------------------------------
+function die() { echo -e "$@" >&2; exit 1; }
+
+function extract_top_level_from_tarball() {
+	local tarball="${1:?}" dir
+	test -r "${tarball}" || die "can not read ${tarball}"
+
+        case $(file "${tarball}") in
+            *gzip*)		compression="-z";;
+            *bzip2*)		compression="--bzip2";;
+            *compress*data)	compression="--uncompress";;
+            *tar*)		compression="";;
+	    *)			compression="--auto-compress";;
+	esac
+	dir="$(tar ${compression} -tf ${tarball} | \
+		sed -n '/\// { s@^\([^/]\+\).*$@\1@p;q }')"
+	test -n "${dir}" || die "can not determine directory from $tarball"
+	echo $dir
+}
+#----------------------------------------------------------------------------
+# SCRIPT PROPER
+#----------------------------------------------------------------------------
+test ${UID} -eq 0       || die "This script wants to be run by root"
+test -d $ZIP		|| die "No tarball directory '$ZIP'"
+test -f ${MUNGE_TAR}	|| die "No munge tarball in $ZIP?"
+test -d ${LIBROOT}	|| die "Can not cd to LIBROOT=$LIBROOT "
+test -d ${MUNGE_BUILD}	|| mkdir -vp ${MUNGE_BUILD}
+test -n "${DESTDIR}"    || die "DESTDIR not set"
+
+# generate a clean build directory
+rm -rf ${DESTDIR} ${TARBALL}
+
+# DEPENDENT CONFIGURATION
+shopt -s nullglob
+MUNGE_SRC="${MUNGE_BUILD}/$(extract_top_level_from_tarball ${MUNGE_TAR})" || exit 1
+MUNGE_LIB="${DESTDIR}${MUNGE_DIR}/lib"
+
+# extract source
+test -d "${LIBROOT}"   || mkdir -vp "${LIBROOT}"
+test -d "${MUNGE_SRC}" || tar jxvf ${MUNGE_TAR} -C ${MUNGE_BUILD}
+test -d "${MUNGE_SRC}" || die "need to extract munge tarball"
+cd ${MUNGE_SRC}
+
+# Build
+set -e
+./configure --prefix=${MUNGE_DIR} --localstatedir=${MUNGE_LOG}
+
+make -j
+
+mkdir -p ${DESTDIR}
+make DESTDIR=${DESTDIR%/}/ install
+
+# final tarball
+tar -C ${DESTDIR} -zcpPvf ${TARBALL} .${MUNGE_DIR%/}
+# scp ${TARBALL} boot:
+echo generated output tarball ${TARBALL}
diff --git a/contribs/cray/opt_modulefiles_slurm b/contribs/cray/opt_modulefiles_slurm
new file mode 100644
index 000000000..675cbb7af
--- /dev/null
+++ b/contribs/cray/opt_modulefiles_slurm
@@ -0,0 +1,48 @@
+#%Module1.0#####################################################################
+# slurm/munge support module
+# Put into /opt/modulefiles/slurm or some other part of $MODULEPATH
+################################################################################
+
+# SUBROUTINES
+proc ModulesHelp { } {
+	puts stderr "\tThis is slurm $::version.\n"
+	puts stderr "\tPlease consult http://www.schedmd.com/slurmdocs/cray.html"
+}
+
+# CONFIGURATION
+conflict	xt-pbs pbs torque
+set slurmdir	"/opt/slurm/default"
+set mungedir	"/opt/slurm/munge"
+
+set version "UNKNOWN"
+if {![catch {exec $slurmdir/bin/sbatch --version} out]} {
+	set version [lindex  $out 1]
+}
+set helptext	"Support for the SLURM $version resource allocation system"
+
+# SCRIPT PROPER
+module-whatis	$helptext
+
+prepend-path	PATH		"$slurmdir/bin"
+prepend-path	PATH		"$mungedir/bin"
+
+prepend-path	MANPATH		"$slurmdir/share/man"
+prepend-path	MANPATH		"$mungedir/share/man"
+
+prepend-path	PERL5LIB	"$slurmdir/lib/perl5/site_perl"
+
+# other useful environment variables
+setenv	SINFO_FORMAT	{%9P %5a %8s %.10l %.6c %.6z %.7D %10T %N}
+setenv	SQUEUE_FORMAT	{%.6i %.8u %.7a %.14j %.3t %9r %19S %.10M %.10L %.5D %.4C}
+setenv	SQUEUE_ALL	{yes}	;# show hidden partitions, too
+setenv	SQUEUE_SORT	{-t,e,S}
+
+# logfile aliases
+set-alias sd_log	{tail -f "/ufs/slurm/var/log/slurmd.log"}
+set-alias sc_log	{tail -f "/ufs/slurm/var/log/slurmctld.log"}
+
+if {[exec id -u] == 0} {
+	prepend-path	PATH	"$slurmdir/sbin"
+	prepend-path	PATH	"$mungedir/sbin"
+	set-alias	sdown	{scontrol shutdown}
+}
diff --git a/contribs/cray/pam_job.c b/contribs/cray/pam_job.c
new file mode 100644
index 000000000..27c66c8ea
--- /dev/null
+++ b/contribs/cray/pam_job.c
@@ -0,0 +1,117 @@
+/*
+ * pam_job.so module to create SGI PAGG container on user login.
+ * Needed on Cray systems to enable PAGG support in interactive salloc sessions.
+ *
+ * 1. install the pam-devel-xxx.rpm corresponding to your pam-xxx.rpm
+ * 2. compile with gcc -fPIC -DPIC -shared pam_job.c -o pam_job.so
+ * 3. install on boot:/rr/current/lib64/security/pam_job.so
+ * 4. in xtopview -c login, add the following line to /etc/pam.d/common-session:
+ *    session    optional    pam_job.so
+ */
+/*
+ * Copyright (c) 2000-2006 Silicon Graphics, Inc.
+ * All Rights Reserved.
+ * Copyright (c) 2011 Centro Svizzero di Calcolo Scientifico
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation; either version 2.1 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+#include <stdio.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <sys/ioctl.h>
+#include <fcntl.h>
+#include <pwd.h>
+#include <errno.h>
+
+#include <sys/syslog.h>
+#define error(fmt, args...) syslog(LOG_CRIT, "pam_job: " fmt, ##args);
+
+#define PAM_SM_ACCOUNT
+#define PAM_SM_SESSION
+#include <security/_pam_macros.h>
+#include <security/pam_modules.h>
+
+/*
+ * Unroll job.h/jobctl.h header declarations. The rationale is that not all
+ * systems will have the required kernel header (job.h, jobctl.h, paggctl.h).
+ * On early 2.4/2.5 kernels there was a paggctl() system call which was then
+ * replaced by the /proc/job ioctl, which this implementation tests for. All
+ * patches from ftp://oss.sgi.com/projects/pagg/download that use /proc/job
+ * for ioctl have the same ioctl declarations and identical ioctl parameters.
+ * Comparing these patches shows that, when using a 2.6 kernel, there are no
+ * differences at all in the 23 ioctl calls (last patch was for 2.6.16.21).
+ */
+#define JOB_CREATE	_IOWR('A', 1, void *)
+struct job_create {
+	uint64_t	r_jid;		/* Return value of JID */
+	uint64_t	jid;		/* Jid value requested */
+	int		user;		/* UID of user associated with job */
+	int		options;	/* creation options - unused */
+};
+
+PAM_EXTERN int pam_sm_open_session(pam_handle_t * pamh, int flags,
+				   int argc, const char **argv)
+{
+	struct job_create jcreate = {0};
+	struct passwd *passwd;
+	char *username;
+	int job_ioctl_fd;
+
+	if (pam_get_item(pamh, PAM_USER, (void *)&username) != PAM_SUCCESS
+	    || username == NULL) {
+		error("error recovering username");
+		return PAM_SESSION_ERR;
+	}
+
+	passwd = getpwnam(username);
+	if (!passwd) {
+		error("error getting passwd entry for %s", username);
+		return PAM_SESSION_ERR;
+	}
+	jcreate.user = passwd->pw_uid;	/* uid associated with job */
+
+	if ((job_ioctl_fd = open("/proc/job", 0)) < 0) {
+		error("can not open /proc/job: %s", strerror(errno));
+		return PAM_SESSION_ERR;
+	} else if (ioctl(job_ioctl_fd, JOB_CREATE, (void *)&jcreate) != 0) {
+		error("job_create failed (no container): %s", strerror(errno));
+		close(job_ioctl_fd);
+		return PAM_SESSION_ERR;
+	}
+	close(job_ioctl_fd);
+
+	if (jcreate.r_jid == 0)
+		error("WARNING - job containers disabled, no PAGG IDs created");
+	return PAM_SUCCESS;
+}
+
+/*
+ * Not all PAMified apps invoke session management modules.  So, we supply
+ * this account management function for such cases.  Whenever possible, it
+ * is still better to use the session management version.
+ */
+PAM_EXTERN int pam_sm_acct_mgmt(pam_handle_t *pamh, int flags,
+				int argc, const char **argv)
+{
+	if (pam_sm_open_session(pamh, flags, argc, argv) != PAM_SUCCESS)
+		return PAM_AUTH_ERR;
+	return PAM_SUCCESS;
+}
+
+PAM_EXTERN int pam_sm_close_session(pam_handle_t *pamh, int flags,
+				    int argc, const char **argv)
+{
+	return PAM_SUCCESS;
+}
diff --git a/contribs/cray/slurm-build-script.sh b/contribs/cray/slurm-build-script.sh
new file mode 100644
index 000000000..e6e7b4208
--- /dev/null
+++ b/contribs/cray/slurm-build-script.sh
@@ -0,0 +1,144 @@
+#!/bin/bash
+#
+# Build script for slurm on Cray XT/XE
+#
+#-------------------------------------------------------------------------------
+# CONFIGURATION
+#-------------------------------------------------------------------------------
+#REBUILD="true" 	# remuild (no distclean/configure)
+
+# source and build directories
+LIBROOT="${LIBROOT:-/ufs/slurm/build}"
+SLURM_SRC="${SLURM_SRC:-${LIBROOT}/slurm-2.3.0-0.pre4}"
+
+BUILD_ERR="make.err"	# make: stderr only
+BUILD_LOG="make.log"	# make: stdout + stderr
+
+#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# installation
+#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# packaging installation directory
+DESTDIR="/tmp/slurm-build"
+
+# installation directory
+SLURM_ROOT="/opt/slurm"
+
+# symlink to current version
+SLURM_DEFAULT="${SLURM_ROOT}/default"
+
+# separate system configuration directory
+SLURM_CONF="${SLURM_DEFAULT}/etc"
+
+# space-separated list of things to be built in the contribs/ folder
+SLURM_CONTRIBS="contribs/perlapi contribs/torque"
+#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# dependencies
+#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# path to 'mysql_config' (will be overridden if mysql_config is in $PATH)
+MYSQLCONF="${MYSQLCONF:-${LIBROOT}/mysql}"
+
+# munge installation directory containing lib/ and include/ subdirectories
+MUNGE_DIR="${SLURM_ROOT}/munge"
+
+#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+#-------------------------------------------------------------------------------
+# SUBROUTINES
+#-------------------------------------------------------------------------------
+function die() { echo -e "$@">&2; exit -1; }
+
+function get_slurm_version() {
+	local vers_file="META"
+	if ! test -f $vers_file; then
+		die "ERROR: no version file '$vers_file'"\
+		    "\nRun this script from within the slurm source directory"
+	fi
+	sed -n 's/^.*Version:[^0-9]*\([0-9\.]\+\).*$/\1/p' ${vers_file}
+}
+
+#-------------------------------------------------------------------------------
+# SCRIPT PROPER
+#-------------------------------------------------------------------------------
+shopt -u nullglob
+test ${UID} -eq 0 	|| die "This script wants to be run by root"
+test -d ${SLURM_SRC}	|| die "can not cd to SLURM_SRC=$SLURM_SRC"
+test -d $MUNGE_DIR/lib	|| die "munge is not yet installed"
+test -d ${LIBROOT}	|| die "can not cd to LIBROOT=$LIBROOT"
+test -n "${DESTDIR}"    || die "DESTDIR not set"
+
+#-------------------------------------------------------------------
+# Dependent Configuration
+#-------------------------------------------------------------------
+cd ${SLURM_SRC}
+
+# get current slurm version
+SLURM_VER=$(get_slurm_version) || die "check your PWD (current: $(pwd))"
+SLURM_DIR="${SLURM_ROOT}/${SLURM_VER}"
+
+# name of the tarball to generate at the end of the build process
+TARBALL="${LIBROOT}/slurm_build-${SLURM_VER}.tar.gz"
+#-------------------------------------------------------------------
+# Dependent Tests
+#-------------------------------------------------------------------
+MYSQL_CONFIG="$(which mysql_config 2>/dev/null)"
+if test -z "$MYSQL_CONFIG" -a -z "$MYSQLCONF"; then
+	die 'no mysql_config in $PATH - set $MYSQLCONF manually'
+elif test -n "$MYSQL_CONFIG"; then
+	MYSQLCONF="$(dirname ${MYSQL_CONFIG})"
+fi
+
+# generate a clean build directory
+rm -rf ${DESTDIR} ${TARBALL}
+rm -f  ${BUILD_ERR} ${BUILD_LOG}
+
+# (re)configure
+if test -z "${REBUILD}"; then
+	set -x
+	# clean everything else
+	make -j distclean &>/dev/null
+
+	./configure			\
+	--prefix="${SLURM_DIR}"		\
+	--sysconfdir="${SLURM_CONF}"	\
+	--enable-debug 			\
+	--enable-front-end\
+	--enable-memory-leak-debug	\
+	--with-mysql_config=${MYSQLCONF}\
+	--with-munge="${MUNGE_DIR}"	\
+	--with-hwloc="${HWLOC_DIR}"	\
+		|| die "configure failed"
+else
+	# avoid the slow reconfiguration process, don't build extras
+	unset SLURM_CONTRIBS
+	touch -r config.status configure config.* configure.ac  Mak*
+fi
+
+# Build
+tail -F ${BUILD_LOG} & TAIL_PID=$!
+set -ex
+
+# swap stderr, stdout, redirect errors in separate, and both into log file
+(make -j 3>&1  1>&2  2>&3 | tee ${BUILD_ERR})  &>${BUILD_LOG}
+kill ${TAIL_PID} 2>/dev/null
+test -s ${BUILD_ERR} &&	cat ${BUILD_ERR} >&2
+
+# Installation
+mkdir -p ${DESTDIR}
+make -j DESTDIR=${DESTDIR%/}/ install
+
+if false;then
+# Perl-API and wrappers for qsub/qstat etc.
+for CONTRIB in ${SLURM_CONTRIBS}
+do
+	test -n "${REBUILD}" || make -C ${CONTRIB} clean
+	make -C ${CONTRIB}
+	make -C ${CONTRIB} DESTDIR=${DESTDIR%/} install
+done
+fi
+
+# create the default symlink
+rm -vf ${DESTDIR}${SLURM_DEFAULT}
+ln -s ${SLURM_VER} ${DESTDIR}${SLURM_DEFAULT}
+
+# Synchronize sources or generate tarball.
+tar -C ${DESTDIR} -zcf ${TARBALL} .${SLURM_ROOT} && scp ${TARBALL} boot:
diff --git a/contribs/cray/srun.pl b/contribs/cray/srun.pl
new file mode 100755
index 000000000..427599fa5
--- /dev/null
+++ b/contribs/cray/srun.pl
@@ -0,0 +1,1103 @@
+#! /usr/bin/perl -w
+###############################################################################
+#
+# srun - Wrapper for Cray's "aprun" command. If not executed within a job
+#	 allocation, then also use "salloc" to create the allocation before
+#	 executing "aprun".
+#
+###############################################################################
+#
+#  Copyright (C) 2011 SchedMD LLC <http://www.schedmd.com>.
+#  Supported by the Oak Ridge National Laboratory Extreme Scale Systems Center
+#  Written by Morris Jette <jette1@schedmd.gov>.
+#  CODE-OCEC-09-009. All rights reserved.
+#
+#  This file is part of SLURM, a resource management program.
+#  For details, see <http://www.schedmd.com/slurmdocs/>.
+#  Please also read the included file: DISCLAIMER.
+#
+#  SLURM is free software; you can redistribute it and/or modify it under
+#  the terms of the GNU General Public License as published by the Free
+#  Software Foundation; either version 2 of the License, or (at your option)
+#  any later version.
+#
+#  In addition, as a special exception, the copyright holders give permission
+#  to link the code of portions of this program with the OpenSSL library under
+#  certain conditions as described in each individual source file, and
+#  distribute linked combinations including the two. You must obey the GNU
+#  General Public License in all respects for all of the code used other than
+#  OpenSSL. If you modify file(s) with this exception, you may extend this
+#  exception to your version of the file(s), but you are not obligated to do
+#  so. If you do not wish to do so, delete this exception statement from your
+#  version.  If you delete this exception statement from all source files in
+#  the program, then also delete it here.
+#
+#  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+#  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+#  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+#  details.
+#
+#  You should have received a copy of the GNU General Public License along
+#  with SLURM; if not, write to the Free Software Foundation, Inc.,
+#  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+#
+###############################################################################
+
+use strict;
+use FindBin;
+use Getopt::Long 2.24 qw(:config no_ignore_case require_order autoabbrev bundling);
+use lib "${FindBin::Bin}/../lib/perl";
+use autouse 'Pod::Usage' => qw(pod2usage);
+use Slurm ':all';
+use Switch;
+
+my (	$account,
+	$acctg_freq,
+	$alps,
+	$aprun_line_buf,
+	$aprun_quiet,
+	$begin_time,
+	$chdir,
+	$check_time,
+	$check_dir,
+	$comment,
+	$constraint,
+	$contiguous,
+	$cores_per_socket,
+	$cpu_bind,
+	$cpus_per_task,
+	$debugger_test,
+	$dependency,
+	$disable_status,
+	$distribution,
+	$error_file,
+	$epilog,
+	$exclude_nodes,
+	$exclusive,
+	$extra_node_info,
+	$group_id,
+	$gres,
+	$help,
+	$hint,
+	$hold,
+	$immediate,
+	$input_file,
+	$job_id,
+	$job_name,
+	$kill_on_bad_exit,
+	$label,
+	$licenses,
+	$mail_type,
+	$mail_user,
+	$man,
+	$memory,
+	$memory_per_cpu,
+	$memory_bind, $mem_local,
+	$min_cpus,
+	$msg_timeout,
+	$mpi_type,
+	$multi_prog, $multi_executables,
+	$network,
+	$nice,
+	$no_allocate,
+	$nodelist, $nid_list,
+	$ntasks_per_core,
+	$ntasks_per_node,
+	$ntasks_per_socket,
+	$num_nodes,
+	$num_tasks,
+	$overcommit,
+	$output_file,
+	$open_mode,
+	$partition,
+	$preserve_env,
+	$prolog,
+	$propagate,
+	$pty,
+	$quiet,
+	$quit_on_interrupt,
+	$qos,
+	$relative,
+	$resv_ports,
+	$reservation,
+	$restart_dir,
+	$share,
+	$signal,
+	$slurmd_debug,
+	$sockets_per_node,
+	$task_epilog,
+	$task_prolog,
+	$test_only,
+	$threads_per_core,
+	$threads,
+	$time_limit, $time_secs,
+	$time_min,
+	$tmp_disk,
+	$unbuffered,
+	$user_id,
+	$version,
+	$verbose,
+	$wait,
+	$wc_key
+);
+
+my $aprun  = "aprun";
+my $salloc = "BINDIR/salloc";
+my $srun   = "BINDIR/srun";
+
+my $have_job;
+$aprun_line_buf = 1;
+$aprun_quiet = 1;
+$have_job = 0;
+
+foreach (keys %ENV) {
+#	print "$_=$ENV{$_}\n";
+	$have_job = 1			if $_ eq "SLURM_JOBID";
+	$account = $ENV{$_}		if $_ eq "SLURM_ACCOUNT";
+	$acctg_freq = $ENV{$_}		if $_ eq "SLURM_ACCTG_FREQ";
+	$chdir = $ENV{$_}		if $_ eq "SLURM_WORKING_DIR";
+	$check_time = $ENV{$_}		if $_ eq "SLURM_CHECKPOINT";
+	$check_dir = $ENV{$_}		if $_ eq "SLURM_CHECKPOINT_DIR";
+	$cpu_bind = $ENV{$_}		if $_ eq "SLURM_CPU_BIND";
+	$cpus_per_task = $ENV{$_}	if $_ eq "SLURM_CPUS_PER_TASK";
+	$dependency = $ENV{$_}		if $_ eq "SLURM_DEPENDENCY";
+	$distribution = $ENV{$_}	if $_ eq "SLURM_DISTRIBUTION";
+	$epilog = $ENV{$_}		if $_ eq "SLURM_EPILOG";
+	$error_file = $ENV{$_}		if $_ eq "SLURM_STDERRMODE";
+	$exclusive  = 1			if $_ eq "SLURM_EXCLUSIVE";
+	$input_file = $ENV{$_}		if $_ eq "SLURM_STDINMODE";
+	$job_name = $ENV{$_}		if $_ eq "SLURM_JOB_NAME";
+	$label = 1			if $_ eq "SLURM_LABELIO";
+	$memory_bind = $ENV{$_}		if $_ eq "SLURM_MEM_BIND";
+	$memory_per_cpu = $ENV{$_}	if $_ eq "SLURM_MEM_PER_CPU";
+	$memory = $ENV{$_}		if $_ eq "SLURM_MEM_PER_NODE";
+	$mpi_type = $ENV{$_}		if $_ eq "SLURM_MPI_TYPE";
+	$network = $ENV{$_}		if $_ eq "SLURM_NETWORK";
+	$ntasks_per_core = $ENV{$_}	if $_ eq "SLURM_NTASKS_PER_CORE";
+	$ntasks_per_node = $ENV{$_}	if $_ eq "SLURM_NTASKS_PER_NODE";
+	$ntasks_per_socket = $ENV{$_}	if $_ eq "SLURM_NTASKS_PER_SOCKET";
+	$num_tasks = $ENV{$_}		if $_ eq "SLURM_NTASKS";
+	$num_nodes = $ENV{$_}		if $_ eq "SLURM_NNODES";
+	$overcommit = $ENV{$_}		if $_ eq "SLURM_OVERCOMMIT";
+	$open_mode = $ENV{$_}		if $_ eq "SLURM_OPEN_MODE";
+	$output_file = $ENV{$_}		if $_ eq "SLURM_STDOUTMODE";
+	$partition = $ENV{$_}		if $_ eq "SLURM_PARTITION";
+	$prolog = $ENV{$_}		if $_ eq "SLURM_PROLOG";
+	$qos = $ENV{$_}			if $_ eq "SLURM_QOS";
+	$restart_dir = $ENV{$_}		if $_ eq "SLURM_RESTART_DIR";
+	$resv_ports = 1			if $_ eq "SLURM_RESV_PORTS";
+	$signal = $ENV{$_}		if $_ eq "SLURM_SIGNAL";
+	$task_epilog = $ENV{$_}		if $_ eq "SLURM_TASK_EPILOG";
+	$task_prolog = $ENV{$_}		if $_ eq "SLURM_TASK_PROLOG";
+	$threads = $ENV{$_}		if $_ eq "SLURM_THREADS";
+	$time_limit = $ENV{$_}		if $_ eq "SLURM_TIMELIMIT";
+	$unbuffered = 1			if $_ eq "SLURM_UNBUFFEREDIO";
+	$wait = $ENV{$_}		if $_ eq "SLURM_WAIT";
+	$wc_key = $ENV{$_}		if $_ eq "SLURM_WCKEY";
+}
+
+# Make fully copy of execute line. This is needed only so that srun can run
+# again and get the job's memory allocation for aprun (which is not available
+# until after the allocation has been made). Add quotes if an argument contains
+# spaces (e.g. --alps="-r 1" needs to be treadted as a single argument).
+my ($i, $len, $orig_exec_line);
+if ($ARGV[0]) {
+	foreach (@ARGV) {
+		if (index($_, " ") == -1) {
+			$orig_exec_line .= "$_ ";
+		} else {
+			$orig_exec_line .= "\"$_\" ";
+		}
+	}
+}
+
+GetOptions(
+	'A|account=s'			=> \$account,
+	'acctg-freq=i'			=> \$acctg_freq,
+	'alps=s'			=> \$alps,
+	'B|extra-node-info=s'		=> \$extra_node_info,
+	'begin=s'			=> \$begin_time,
+	'D|chdir=s'			=> \$chdir,
+	'checkpoint=s'			=> \$check_time,
+	'checkpoint-dir=s'		=> \$check_dir,
+	'comment=s'			=> \$comment,
+	'C|constraint=s'		=> \$constraint,
+	'contiguous'			=> \$contiguous,
+	'cores-per-socket=i'		=> \$cores_per_socket,
+	'cpu_bind=s'			=> \$cpu_bind,
+	'c|cpus-per-task=i'		=> \$cpus_per_task,
+	'd|dependency=s'		=> \$dependency,
+	'debugger-test'			=> \$debugger_test,
+	'X|disable-status'		=> \$disable_status,
+	'e|error=s'			=> \$error_file,
+	'epilog=s'			=> \$epilog,
+	'x|exclude=s'			=> \$exclude_nodes,
+	'exclusive'			=> \$exclusive,
+	'gid=s'				=> \$group_id,
+	'gres=s'			=> \$gres,
+	'help|usage|?'			=> \$help,
+	'hint=s'			=> \$hint,
+	'H|hold'			=> \$hold,
+	'I|immediate'			=> \$immediate,
+	'i|input=s'			=> \$input_file,
+	'jobid=i'			=> \$job_id,
+	'J|job-name=s'			=> \$job_name,
+	'K|kill-on-bad-exit'		=> \$kill_on_bad_exit,
+	'l|label'			=> \$label,
+	'L|licenses=s'			=> \$licenses,
+	'm|distribution=s'		=> \$distribution,
+	'mail-type=s'			=> \$mail_type,
+	'mail-user=s'			=> \$mail_user,
+	'man'				=> \$man,
+	'mem=s'				=> \$memory,
+	'mem-per-cpu=s'			=> \$memory_per_cpu,
+	'mem_bind=s'			=> \$memory_bind,
+	'mincpus=i'			=> \$min_cpus,
+	'msg-timeout=i'			=> \$msg_timeout,
+	'mpi=s'				=> \$mpi_type,
+	'multi-prog'			=> \$multi_prog,
+	'network=s'			=> \$network,
+	'nice=i'			=> \$nice,
+	'Z|no-allocate'			=> \$no_allocate,
+	'w|nodelist=s'			=> \$nodelist,
+	'ntasks-per-core=i'		=> \$ntasks_per_core,
+	'ntasks-per-node=i'		=> \$ntasks_per_node,
+	'ntasks-per-socket=i'		=> \$ntasks_per_socket,
+	'n|ntasks=s'			=> \$num_tasks,
+	'N|nodes=s'			=> \$num_nodes,
+	'O|overcommit'			=> \$overcommit,
+	'o|output=s'			=> \$output_file,
+	'open-mode=s'			=> \$open_mode,
+	'p|partition=s'			=> \$partition,
+	'E|preserve-env'		=> \$preserve_env,
+	'prolog=s'			=> \$prolog,
+	'propagate=s'			=> \$propagate,
+	'pty'				=> \$pty,
+	'Q|quiet'			=> \$quiet,
+	'q|quit-on-interrupt'		=> \$quit_on_interrupt,
+	'qos=s'				=> \$qos,
+	'r|relative=i'			=> \$relative,
+	'resv-ports'			=> \$resv_ports,
+	'reservation=s'			=> \$reservation,
+	'restart-dir=s'			=> \$restart_dir,
+	's|share'			=> \$share,
+	'signal=s'			=> \$signal,
+	'slurmd-debug=i'		=> \$slurmd_debug,
+	'sockets-per-node=i'		=> \$sockets_per_node,
+	'task-epilog=s'			=> \$task_epilog,
+	'task-prolog=s'			=> \$task_prolog,
+	'test-only'			=> \$test_only,
+	'threads-per-core=i'		=> \$threads_per_core,
+	'T|threads=i'			=> \$threads,
+	't|time=s'			=> \$time_limit,
+	'time-min=s'			=> \$time_min,
+	'tmp=s'				=> \$tmp_disk,
+	'u|unbuffered'			=> \$unbuffered,
+	'uid=s'				=> \$user_id,
+	'V|version'			=> \$version,
+	'v|verbose'			=> \$verbose,
+	'W|wait=i'			=> \$wait,
+	'wckey=s'			=> \$wc_key
+) or pod2usage(2);
+
+if ($version) {
+	system("$salloc --version");
+	exit(0);
+}
+
+# Display man page or usage if necessary
+pod2usage(0) if $man;
+if ($help) {
+	if ($< == 0) {   # Cannot invoke perldoc as root
+		my $id = eval { getpwnam("nobody") };
+		$id = eval { getpwnam("nouser") } unless defined $id;
+		$id = -2			  unless defined $id;
+		$<  = $id;
+	}
+	$> = $<;			# Disengage setuid
+	$ENV{PATH} = "/bin:/usr/bin";	# Untaint PATH
+	delete @ENV{'IFS', 'CDPATH', 'ENV', 'BASH_ENV'};
+	if ($0 =~ /^([-\/\w\.]+)$/) { $0 = $1; }    # Untaint $0
+	else { die "Illegal characters were found in \$0 ($0)\n"; }
+
+}
+
+my $script;
+if ($ARGV[0]) {
+	foreach (@ARGV) {
+		$script .= "$_ ";
+	}
+} else {
+	pod2usage(2);
+}
+my %res_opts;
+my %node_opts;
+
+my $command;
+
+if ($have_job == 0) {
+	if ($memory_per_cpu) {
+		$i = index($memory_per_cpu, "hs");
+		if ($i >= 0) {
+			$memory_per_cpu = substr($memory_per_cpu, 0, $i);
+		}
+		$i = index($memory_per_cpu, "h");
+		if ($i >= 0) {
+			$memory_per_cpu = substr($memory_per_cpu, 0, $i);
+		}
+	}
+
+	$command = "$salloc";
+	$command .= " --account=$account"		if $account;
+	$command .= " --acctg-freq=$acctg_freq"		if $acctg_freq;
+	$command .= " --begin=$begin_time"		if $begin_time;
+	$command .= " --chdir=$chdir"			if $chdir;
+	$command .= " --comment=\"$comment\""		if $comment;
+	$command .= " --constraint=\"$constraint\""	if $constraint;
+	$command .= " --contiguous"			if $contiguous;
+	$command .= " --cores-per-socket=$cores_per_socket" if $cores_per_socket;
+	$command .= " --cpu_bind=$cpu_bind"		if $cpu_bind;
+	$command .= " --cpus-per-task=$cpus_per_task"	if $cpus_per_task;
+	$command .= " --dependency=$dependency"		if $dependency;
+	$command .= " --distribution=$distribution"	if $distribution;
+	$command .= " --exclude=$exclude_nodes"		if $exclude_nodes;
+	$command .= " --exclusive"			if $exclusive;
+	$command .= " --extra-node-info=$extra_node_info" if $extra_node_info;
+	$command .= " --gid=$group_id"			if $group_id;
+	$command .= " --gres=$gres"			if $gres;
+	$command .= " --hint=$hint"			if $hint;
+	$command .= " --hold"				if $hold;
+	$command .= " --immediate"			if $immediate;
+	$command .= " --jobid=$job_id"			if $job_id;
+	$command .= " --job-name=$job_name"		if $job_name;
+	$command .= " --licenses=$licenses"		if $licenses;
+	$command .= " --mail-type=$mail_type"		if $mail_type;
+	$command .= " --mail-user=$mail_user"		if $mail_user;
+	$command .= " --mem=$memory"			if $memory;
+	$command .= " --mem-per-cpu=$memory_per_cpu"	if $memory_per_cpu;
+	$command .= " --mem_bind=$memory_bind"		if $memory_bind;
+	$command .= " --mincpus=$min_cpus"		if $min_cpus;
+	$command .= " --network=$network"		if $network;
+	$command .= " --nice=$nice"			if $nice;
+	$command .= " --nodelist=$nodelist"		if $nodelist;
+	$command .= " --ntasks-per-core=$ntasks_per_core"     if $ntasks_per_core;
+	$command .= " --ntasks-per-node=$ntasks_per_node"     if $ntasks_per_node;
+	$command .= " --ntasks-per-socket=$ntasks_per_socket" if $ntasks_per_socket;
+	$command .= " --ntasks=$num_tasks"		if $num_tasks;
+	$command .= " --nodes=$num_nodes"		if $num_nodes;
+	$command .= " --overcommit"			if $overcommit;
+	$command .= " --partition=$partition"		if $partition;
+	$command .= " --qos=$qos"			if $qos;
+	$command .= " --quiet"				if !$verbose;
+	$command .= " --reservation=$reservation"	if $reservation;
+	$command .= " --share"				if $share;
+	$command .= " --signal=$signal"			if $signal;
+	$command .= " --sockets-per-node=$sockets_per_node" if $sockets_per_node;
+	$command .= " --threads-per-core=$threads_per_core" if $threads_per_core;
+	$command .= " --minthreads=$threads"		if $threads;
+	$command .= " --time=$time_limit"		if $time_limit;
+	$command .= " --time-min=$time_min"		if $time_min;
+	$command .= " --tmp=$tmp_disk"			if $tmp_disk;
+	$command .= " --uid=$user_id"			if $user_id;
+	$command .= " --verbose"			if $verbose;
+	$command .= " --wait=$wait"			if $wait;
+	$command .= " --wckey=$wc_key"			if $wc_key;
+	$command .= " $srun";
+	$command .= " $orig_exec_line";
+} else {
+	$command = "$aprun";
+
+	# Options that get set if aprun is launch either under salloc or directly
+	if ($alps) {
+	#	aprun fails when arguments are duplicated, prevent duplicates here
+		$command .= " $alps";
+		if (index($alps, "-d") >= 0)  { $cpus_per_task = 0 };
+		if (index($alps, "-L") >= 0)  { $nodelist = 0 };
+		if (index($alps, "-m") >= 0)  { $memory_per_cpu = 0 };
+		if (index($alps, "-n") >= 0)  { $num_tasks = 0; $num_nodes = 0; }
+		if (index($alps, "-N") >= 0)  { $ntasks_per_node = 0; $num_nodes = 0; }
+		if (index($alps, "-q") >= 0)  { $aprun_quiet = 0 };
+		if (index($alps, "-S") >= 0)  { $ntasks_per_socket = 0 };
+		if (index($alps, "-sn") >= 0) { $sockets_per_node = 0 };
+		if (index($alps, "-ss") >= 0) { $memory_bind = 0 };
+		if (index($alps, "-T") >= 0)  { $aprun_line_buf = 0 };
+		if (index($alps, "-t") >= 0)  { $time_limit = 0 };
+	}
+	# $command .= " -a"		no srun equivalent, architecture
+	# $command .= " -b"		no srun equivalent, bypass transfer of executable
+	# $command .= " -B"		no srun equivalent, reservation options
+	# $command .= " -cc"		NO GOOD MAPPING, cpu binding
+	$command .= " -d $cpus_per_task"			if $cpus_per_task;
+	# Resource sharing largely controlled by SLURM configuration,
+	# so this is an imperfect mapping of options
+	if ($share) {
+		$command .= " -F share";
+	} elsif ($exclusive) {
+		$command .= " -F exclusive";
+	}
+	$nid_list = get_nids($nodelist)				if $nodelist;
+	$command .= " -L $nid_list"				if $nodelist;
+	$command .= " -m $memory_per_cpu"			if $memory_per_cpu;
+	if ($ntasks_per_node) {
+		$command .= " -N $ntasks_per_node";
+		if (!$num_tasks && $num_nodes) {
+			$num_tasks = $ntasks_per_node * $num_nodes;
+		}
+	} elsif ($num_nodes) {
+		$num_tasks = $num_nodes if !$num_tasks;
+		$ntasks_per_node = int (($num_tasks + $num_nodes - 1) / $num_nodes);
+		$command .= " -N $ntasks_per_node";
+	}
+
+	if ($num_tasks) {
+		$command .= " -n $num_tasks";
+	} elsif ($num_nodes) {
+		$command .= " -n $num_nodes";
+	}
+
+	$command .= " -q"					if $aprun_quiet;
+	# $command .= " -r"		no srun equivalent, core specialization
+	$command .= " -S $ntasks_per_socket" 			if $ntasks_per_socket;
+	# $command .= " -sl"		no srun equivalent, task placement on nodes
+	$command .= " -sn $sockets_per_node" 			if $sockets_per_node;
+	if ($memory_bind && ($memory_bind =~ /local/i)) {
+		$command .= " -ss"
+	}
+	$command .= " -T"					if $aprun_line_buf;
+	$time_secs = get_seconds($time_limit)			if $time_limit;
+	$command .= " -t $time_secs"				if $time_secs;
+	$script = get_multi_prog($script)			if $multi_prog;
+
+	# Input and output file options are not supported by aprun, but can be handled by perl
+	$command .= " <$input_file"				if $input_file;
+	if ($error_file && ($error_file eq "none")) {
+		$error_file = "/dev/null"
+	}
+	if ($output_file && ($output_file eq "none")) {
+		$output_file = "/dev/null"
+	}
+	if ($open_mode && ($open_mode eq "a")) {
+		$command .= " >>$output_file"			if $output_file;
+		if ($error_file) {
+			$command .= " 2>>$error_file";
+		} elsif ($output_file) {
+			$command .= " 2>&1";
+		}
+	} else {
+		$command .= " >$output_file"			if $output_file;
+		if ($error_file) {
+			$command .= " 2>$error_file";
+		} elsif ($output_file) {
+			$command .= " 2>&1";
+		}
+	}
+
+	# Srun option which are not supported by aprun
+	#	$command .= " --disable-status"			if $disable_status;
+	#	$command .= " --epilog=$epilog"			if $epilog;
+	#	$command .= " --kill-on-bad-exit"		if $kill_on_bad_exit;
+	#	$command .= " --label"				if $label;
+	#	$command .= " --mpi=$mpi_type"			if $mpi_type;
+	#	$command .= " --msg-timeout=$msg_timeout"	if $msg_timeout;
+	#	$command .= " --no-allocate"			if $no_allocate;
+	#	$command .= " --open-mode=$open_mode"		if $open_mode;
+	#	$command .= " --preserve_env"			if $preserve_env;
+	#	$command .= " --prolog=$prolog"			if $prolog;
+	#	$command .= " --propagate=$propagate"		if $propagate;
+	#	$command .= " --pty"				if $pty;
+	#	$command .= " --quit-on-interrupt"		if $quit_on_interrupt;
+	#	$command .= " --relative=$relative"		if $relative;
+	#	$command .= " --restart-dir=$restart_dir"	if $restart_dir;
+	#	$command .= " --resv-ports"			if $resv_ports;
+	#	$command .= " --slurmd-debug=$slurmd_debug"	if $slurmd_debug;
+	#	$command .= " --task-epilog=$task_epilog"	if $task_epilog;
+	#	$command .= " --task-prolog=$task_prolog"	if $task_prolog;
+	#	$command .= " --test-only"			if $test_only;
+	#	$command .= " --unbuffered"			if $unbuffered;
+
+	$command .= " $script";
+}
+
+# Print here for debugging
+#print "command=$command\n";
+exec $command;
+
+# Convert a SLURM time limit string ([[HH:]MM:]SS or plain minutes) to seconds
+sub get_seconds {
+	my ($duration) = @_;
+	$duration = 0 unless $duration;
+	my $seconds = 0;
+
+	# Convert [[HH:]MM:]SS to duration in seconds
+	if ($duration =~ /^(?:(\d+):)?(\d*):(\d+)$/) {
+		my ($hh, $mm, $ss) = ($1 || 0, $2 || 0, $3);
+		$seconds += $ss;
+		$seconds += $mm * 60;
+		$seconds += $hh * 3600;	# an hour is 3600 seconds, not 60
+	} elsif ($duration =~ /^(\d+)$/) {  # Convert number in minutes to seconds
+		$seconds = $duration * 60;
+	} else { # Unsupported format
+		die("Invalid time limit specified ($duration)\n");
+	}
+	return $seconds;
+}
+
+# Convert a SLURM hostlist expression into the equivalent node index value
+# expression (e.g. "nid[00012,00015]" becomes "12,15"), used for aprun's -L
+sub get_nids {
+	my ($host_list) = @_;
+	my ($nid_list) = $host_list;
+
+	$nid_list =~ s/nid//g;	# drop the "nid" host name prefix
+	$nid_list =~ s/\[//g;	# remove hostlist range brackets
+	$nid_list =~ s/\]//g;
+	$nid_list =~ s/\d+/sprintf("%d", $&)/ge;	# strip leading zeros from each number
+
+	return $nid_list;
+}
+
+# Convert a SLURM multi_prog configuration file into aprun options.
+# srun file format is "task_IDs command args..."
+sub get_multi_prog {
+	my ($fname) = @_;
+	my ($out_line) = "";	# initialize so an empty file returns "" rather than undef
+	my ($line_num) = 0;
+	my (@words, $word, $word_num, $num_pes);
+
+	open(MP, $fname) || die("Can not read $fname");
+	while (<MP>) {
+		chomp;	# chomp, not chop: chop would eat a real character if no trailing newline
+		if ($line_num != 0) {
+			$out_line .= " : ";	# aprun separates program specifications with ":"
+		}
+		$line_num++;
+		@words = split(' ', $_);
+		$word_num = 0;
+		foreach $word (@words) {
+			if ($word_num == 0) {
+				$num_pes = get_num_pes($word);	# first word is the task ID list
+				$out_line .= " -n $num_pes";
+			} else {
+				$out_line .= " $word";	# remaining words are the command and its args
+			}
+			$word_num++;
+		}
+	}
+	close(MP); return $out_line;
+}
+
+# Convert number ranges and sets into a total count (e.g. "0-2,5" yields 4)
+sub get_num_pes {
+	my ($pes_range) = @_;
+	my (@ranges, $range);
+	my (@pairs, $value);
+	my ($min_value, $max_value);
+	my ($value_num);
+	my ($num_pes) = 0;
+
+	@ranges = split(',', $pes_range);	# commas separate independent ranges
+	foreach $range (@ranges) {
+		@pairs = split('-', $range);	# "min-max" range; a single value has min == max
+		$value_num = 0;
+		foreach $value (@pairs) {
+			if ($value_num == 0) {
+				$min_value = $value;
+			}
+			$max_value = $value;	# last value seen is the range maximum
+			$value_num++;
+		}
+		$num_pes += ($max_value - $min_value + 1);	# inclusive count for this range
+	}
+	return $num_pes;
+}
+
+# Convert a size format containing optional K, M, G or T suffix to the
+# equivalent number of megabytes
+sub convert_mb_format {
+	my ($value) = @_;
+	my ($amount, $suffix) = $value =~ /(\d+)($|[KMGT])/i;
+	return if !$amount;	# no leading number: nothing to convert
+	$suffix = lc($suffix);
+
+	if (!$suffix) {
+		$amount /= 1048576;	# no suffix: value is treated as bytes
+	} elsif ($suffix eq "k") {
+		$amount /= 1024;
+	} elsif ($suffix eq "m") {
+		# already megabytes; nothing to do
+	} elsif ($suffix eq "g") {
+		$amount *= 1024;
+	} elsif ($suffix eq "t") {
+		$amount *= 1048576;
+	} else {
+		print "don't know what to do with suffix $suffix\n";
+		return;
+
+	}
+
+	return $amount;
+}
+##############################################################################
+
+__END__
+
+=head1 NAME
+
+B<srun> - Run a parallel job
+
+=head1 SYNOPSIS
+
+srun  [OPTIONS...] executable [arguments...]
+
+=head1 DESCRIPTION
+
+Run a parallel job on cluster managed by SLURM.  If necessary, srun will
+first create a resource allocation in which to run the parallel job.
+
+=head1 OPTIONS
+
+NOTE: Many options apply only when creating a job allocation, as noted
+below. When srun is run within an existing job allocation, these options
+are silently ignored.
+The following aprun options have no equivalent in srun and must be specified
+by using the B<--alps> option: B<-a>, B<-b>, B<-B>, B<-cc>, B<-f>, B<-r>, and
+B<-sl>.  Many other options do not have exact functional matches, but srun
+behavior is duplicated to the extent possible.
+
+=over 4
+
+=item B<-A> | B<--account=account>
+
+Charge resources used by this job to specified account.
+Applies only when creating a job allocation.
+
+=item B<--acctg-freq=seconds>
+
+Specify the accounting sampling interval.
+Applies only when creating a job allocation.
+
+=item B<--alps=options>
+
+Specify the options to be passed to the aprun command.
+If conflicting native srun options and --alps options are specified, the srun
+option will take precedence for creating the job allocation (if necessary) and
+the --alps options will take precedence for launching tasks with the aprun
+command.
+
+=item B<-B> | B<--extra-node-info=sockets[:cores[:threads]]>
+
+Request a specific allocation of resources with details as to the
+number and type of computational resources within a cluster:
+number of sockets (or physical processors) per node,
+cores per socket, and threads per core.
+The individual levels can also be specified in separate options if desired:
+B<--sockets-per-node=sockets>, B<--cores-per-socket=cores>, and
+B<--threads-per-core=threads>.
+Applies only when creating a job allocation.
+
+=item B<--begin=time>
+
+Defer job initiation until the specified time.
+Applies only when creating a job allocation.
+
+=item B<--checkpoint=interval>
+
+Specify the time interval between checkpoint creations.
+Not supported on Cray computers.
+
+=item B<--checkpoint-dir=directory>
+
+Directory where the checkpoint image should be written.
+Not supported on Cray computers.
+
+=item B<--comment=string>
+
+An arbitrary comment.
+Applies only when creating a job allocation.
+
+=item B<-C> | B<--constraint=string>
+
+Constrain job allocation to nodes with the specified features.
+Applies only when creating a job allocation.
+
+=item B<--contiguous>
+
+Constrain job allocation to contiguous nodes.
+Applies only when creating a job allocation.
+
+=item B<--cores-per-socket=number>
+
+Count of cores to be allocated per socket.
+Applies only when creating a job allocation.
+
+=item B<--cpu_bind=options>
+
+Strategy to be used for binding tasks to the CPUs.
+Not supported on Cray computers due to many incompatible options.
+Use --alps="-cc=..." instead.
+
+=item B<-c> | B<--cpus-per-task=number>
+
+Count of CPUs required per task.
+
+=item B<-d> | B<--dependency=[condition:]jobid>
+
+Wait for job(s) to enter specified condition before starting the job.
+Valid conditions include after, afterany, afternotok, and singleton.
+Applies only when creating a job allocation.
+
+=item B<-D> | B<--chdir=directory>
+
+Execute the program from the specified directory.
+Applies only when creating a job allocation.
+
+=item B<--epilog=filename>
+
+Execute the specified program after the job step completes.
+Not supported on Cray computers.
+
+=item B<-e> | B<--error=filename>
+
+Write stderr to the specified file.
+
+=item B<--exclusive>
+
+The job or job step will not share resources with other jobs or job steps.
+Applies only when creating a job allocation.
+
+=item B<-E> | B<--preserve-env>
+
+Pass the current values of environment variables SLURM_NNODES and
+SLURM_NTASKS through to the executable, rather than computing them
+from command line parameters.
+Not supported on Cray computers.
+
+=item B<--gid=group>
+
+If user root, then execute the job using the specified group access permissions.
+Specify either a group name or ID.
+Applies only when creating a job allocation.
+
+=item B<--gres=gres_name[*count]>
+
+Allocate the specified generic resources on each allocated node.
+Applies only when creating a job allocation.
+
+=item B<-?> | B<--help>
+
+Print brief help message.
+
+=item B<--hint=type>
+
+Bind tasks according to application hints.
+Not supported on Cray computers.
+
+=item B<-H> | B<--hold>
+
+Submit the job in a held state.
+Applies only when creating a job allocation.
+
+=item B<-I> | B<--immediate>
+
+Exit if resources are not available immediately.
+Applies only when creating a job allocation.
+
+=item B<-i> | B<--input=filename>
+
+Read stdin from the specified file.
+
+=item B<--jobid=number>
+
+Specify the job ID number. Usable only by SlurmUser or user root.
+Applies only when creating a job allocation.
+
+=item B<-J> | B<--job-name=name>
+
+Specify a name for the job.
+Applies only when creating a job allocation.
+
+=item B<-K> | B<--kill-on-bad-exit>
+
+Immediately terminate a job if any task exits with a non-zero exit code.
+Not supported on Cray computers.
+
+=item B<-l> | B<--label>
+
+Prepend task number to lines of stdout/err.
+Not supported on Cray computers.
+
+=item B<-L> | B<--licenses=names>
+
+Specification of licenses (or other resources available on all
+nodes of the cluster) which must be allocated to this job.
+Applies only when creating a job allocation.
+
+=item B<-m> | B<--distribution=layout>
+
+Specification of distribution of tasks across nodes.
+Not supported on Cray computers.
+
+=item B<--man>
+
+Print full documentation.
+
+=item B<--mail-type=event>
+
+Send email when certain event types occur.
+Valid events values are BEGIN, END, FAIL, REQUEUE, and ALL (any state change).
+Applies only when creating a job allocation.
+
+=item B<--mail-user=user>
+
+Send email to the specified user(s). The default is the submitting user.
+Applies only when creating a job allocation.
+
+=item B<--mem=MB>
+
+Specify the real memory required per node in MegaBytes.
+Applies only when creating a job allocation.
+
+=item B<--mem-per-cpu=MB>[h|hs]
+
+Specify the real memory required per CPU in MegaBytes.
+Applies only when creating a job allocation.
+Append "h" or "hs" for huge page support.
+
+=item B<--mem_bind=type>
+
+Bind tasks to memory. The only option supported on Cray systems is local which
+confines memory use to the local NUMA node.
+
+=item B<--mincpus=number>
+
+Specify a minimum number of logical CPUs per node.
+Applies only when creating a job allocation.
+
+=item B<--msg-timeout=second>
+
+Modify the job launch message timeout.
+Not supported on Cray computers.
+
+=item B<--mpi=implementation>
+
+Identify the type of MPI to be used. May result in unique initiation
+procedures.
+Not supported on Cray computers.
+
+=item B<--multi-prog>
+
+Run a job with different programs and different arguments for
+each task. In this case, the executable program specified is
+actually a configuration file specifying the executable and
+arguments for each task.
+
+=item B<--network=type>
+
+Specify the communication protocol to be used.
+Not supported on Cray computers.
+
+=item B<--nice=adjustment>
+
+Run the job with an adjusted scheduling priority within SLURM.
+Applies only when creating a job allocation.
+
+=item B<--ntasks-per-core=ntasks>
+
+Request the maximum ntasks be invoked on each core.
+Applies only when creating a job allocation.
+
+=item B<--ntasks-per-node=ntasks>
+
+Request the maximum ntasks be invoked on each node.
+Applies only when creating a job allocation.
+
+=item B<--ntasks-per-socket=ntasks>
+
+Request the maximum ntasks be invoked on each socket.
+Applies only when creating a job allocation.
+
+=item B<-N> | B<--nodes=num_nodes>
+
+Number of nodes to use.
+
+=item B<-n> | B<--ntasks=num_tasks>
+
+Number of tasks to launch.
+
+=item B<--overcommit>
+
+Overcommit resources. Launch more than one task per CPU.
+Applies only when creating a job allocation.
+
+=item B<-o> | B<--output=filename>
+
+Write stdout to the specified file.
+
+=item B<--open-mode=append|truncate>
+
+Open the output and error files using append or truncate mode as specified.
+
+=item B<--partition=name>
+
+Request a specific partition for the resource allocation.
+Applies only when creating a job allocation.
+
+=item B<--prolog=filename>
+
+Execute the specified file before launching the job step.
+Not supported on Cray computers.
+
+=item B<--propagate=rlimits>
+
+Allows users to specify which of the modifiable (soft) resource limits
+to propagate to the compute nodes and apply to their jobs.
+Not supported on Cray computers.
+
+=item B<--pty>
+
+Execute task zero in pseudo terminal mode.
+Not supported on Cray computers.
+
+=item B<--quiet>
+
+Suppress informational messages. Errors will still be displayed.
+
+=item B<-q> | B<--quit-on-interrupt>
+
+Quit immediately on single SIGINT (Ctrl-C).
+This is the default behavior on Cray computers.
+
+=item B<--qos=quality_of_service>
+
+Request a specific quality of service for the job.
+Applies only when creating a job allocation.
+
+=item B<-r> | B<--relative=offset>
+
+Run a job step at the specified node offset in the current allocation.
+Not supported on Cray computers.
+
+=item B<--resv-ports>
+
+Reserve communication ports for this job. Used for OpenMPI.
+Not supported on Cray computers.
+
+=item B<--reservation=name>
+
+Allocate resources for the job from the named reservation.
+Applies only when creating a job allocation.
+
+=item B<--restart-dir=directory>
+
+Specifies the directory from which the job or job step's checkpoint should
+be read.
+Not supported on Cray computers.
+
+=item B<-s> | B<--share>
+
+The job can share nodes with other running jobs.
+Applies only when creating a job allocation.
+
+=item B<--signal=signal_number[@seconds]>
+
+When a job is within the specified number seconds of its end time,
+send it the specified signal number.
+
+=item B<--slurmd-debug=level>
+
+Specify a debug level for slurmd daemon.
+Not supported on Cray computers.
+
+=item B<--sockets-per-node=number>
+
+Allocate the specified number of sockets per node.
+Applies only when creating a job allocation.
+
+=item B<--task-epilog=filename>
+
+Execute the specified program after each task terminates.
+Not supported on Cray computers.
+
+=item B<--task-prolog=filename>
+
+Execute the specified program before launching each task.
+Not supported on Cray computers.
+
+=item B<--test-only>
+
+Returns an estimate of when a job would be scheduled.
+Not supported on Cray computers.
+
+=item B<-t> | B<--time=limit>
+
+Time limit in minutes or hours:minutes:seconds.
+
+=item B<--time-min=limit>
+
+The minimum acceptable time limit in minutes or hours:minutes:seconds.
+The default value is the same as the maximum time limit.
+Applies only when creating a job allocation.
+
+=item B<--tmp=mb>
+
+Specify a minimum amount of temporary disk space.
+Applies only when creating a job allocation.
+
+=item B<-u> | B<--unbuffered>
+
+Do not line buffer stdout from remote tasks.
+Not supported on Cray computers.
+
+=item B<--uid=user>
+
+If user root, then execute the job as the specified user.
+Specify either a user name or ID.
+Applies only when creating a job allocation.
+
+=item B<--usage>
+
+Print brief help message.
+
+=item B<-V> | B<--version>
+
+Display version information and exit.
+
+=item B<-v> | B<--verbose>
+
+Increase the verbosity of srun's informational messages.
+
+=item B<-W> | B<--wait=seconds>
+
+Specify how long to wait after the first task terminates before terminating
+all remaining tasks.
+Not supported on Cray computers.
+
+=item B<-w> | B<--nodelist=hostlist|filename>
+
+Request a specific list of hosts to use.
+
+=item B<--wckey=key>
+
+Specify wckey to be used with job.
+Applies only when creating a job allocation.
+
+=item B<-X> | B<--disable-status>
+
+Disable the display of task status when srun receives a single SIGINT (Ctrl-C).
+Not supported on Cray computers.
+
+=item B<-x> | B<--exclude=hostlist>
+
+Request a specific list of hosts to not use.
+Applies only when creating a job allocation.
+
+=item B<-Z> | B<--no-allocate>
+
+Run the specified tasks on a set of nodes without creating a SLURM
+"job" in the SLURM queue structure, bypassing the normal resource
+allocation step.
+Not supported on Cray computers.
+
+=back
+
+=cut
diff --git a/contribs/env_cache_builder.c b/contribs/env_cache_builder.c
index 176b06bb0..9baf52f70 100644
--- a/contribs/env_cache_builder.c
+++ b/contribs/env_cache_builder.c
@@ -30,7 +30,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/contribs/lua/Makefile.am b/contribs/lua/Makefile.am
new file mode 100644
index 000000000..8f722afff
--- /dev/null
+++ b/contribs/lua/Makefile.am
@@ -0,0 +1,4 @@
+EXTRA_DIST = \
+	job_submit.license.lua	\
+	job_submit.lua		\
+	proctrack.lua
diff --git a/contribs/lua/Makefile.in b/contribs/lua/Makefile.in
new file mode 100644
index 000000000..142d9acce
--- /dev/null
+++ b/contribs/lua/Makefile.in
@@ -0,0 +1,475 @@
+# Makefile.in generated by automake 1.11.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+# 2003, 2004, 2005, 2006, 2007, 2008, 2009  Free Software Foundation,
+# Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+VPATH = @srcdir@
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+target_triplet = @target@
+subdir = contribs/lua
+DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
+	$(top_srcdir)/auxdir/libtool.m4 \
+	$(top_srcdir)/auxdir/ltoptions.m4 \
+	$(top_srcdir)/auxdir/ltsugar.m4 \
+	$(top_srcdir)/auxdir/ltversion.m4 \
+	$(top_srcdir)/auxdir/lt~obsolete.m4 \
+	$(top_srcdir)/auxdir/slurm.m4 \
+	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
+	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
+	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
+	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
+	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_databases.m4 \
+	$(top_srcdir)/auxdir/x_ac_debug.m4 \
+	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
+	$(top_srcdir)/auxdir/x_ac_federation.m4 \
+	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
+	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
+	$(top_srcdir)/auxdir/x_ac_munge.m4 \
+	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_pam.m4 \
+	$(top_srcdir)/auxdir/x_ac_printf_null.m4 \
+	$(top_srcdir)/auxdir/x_ac_ptrace.m4 \
+	$(top_srcdir)/auxdir/x_ac_readline.m4 \
+	$(top_srcdir)/auxdir/x_ac_setpgrp.m4 \
+	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
+	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
+	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
+	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
+	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+	$(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+SOURCES =
+DIST_SOURCES =
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AR = @AR@
+AUTHD_CFLAGS = @AUTHD_CFLAGS@
+AUTHD_LIBS = @AUTHD_LIBS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
+BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
+BLUEGENE_LOADED = @BLUEGENE_LOADED@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CMD_LDFLAGS = @CMD_LDFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DSYMUTIL = @DSYMUTIL@
+DUMPBIN = @DUMPBIN@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+ELAN_LIBS = @ELAN_LIBS@
+EXEEXT = @EXEEXT@
+FEDERATION_LDFLAGS = @FEDERATION_LDFLAGS@
+FGREP = @FGREP@
+GREP = @GREP@
+GTK_CFLAGS = @GTK_CFLAGS@
+GTK_LIBS = @GTK_LIBS@
+HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@
+HAVEPGCONFIG = @HAVEPGCONFIG@
+HAVE_AIX = @HAVE_AIX@
+HAVE_ELAN = @HAVE_ELAN@
+HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
+HAVE_OPENSSL = @HAVE_OPENSSL@
+HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
+HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
+HWLOC_LDFLAGS = @HWLOC_LDFLAGS@
+HWLOC_LIBS = @HWLOC_LIBS@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+LD = @LD@
+LDFLAGS = @LDFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIB_LDFLAGS = @LIB_LDFLAGS@
+LIPO = @LIPO@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MKDIR_P = @MKDIR_P@
+MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@
+MUNGE_LDFLAGS = @MUNGE_LDFLAGS@
+MUNGE_LIBS = @MUNGE_LIBS@
+MYSQL_CFLAGS = @MYSQL_CFLAGS@
+MYSQL_LIBS = @MYSQL_LIBS@
+NCURSES = @NCURSES@
+NM = @NM@
+NMEDIT = @NMEDIT@
+NUMA_LIBS = @NUMA_LIBS@
+OBJDUMP = @OBJDUMP@
+OBJEXT = @OBJEXT@
+OTOOL = @OTOOL@
+OTOOL64 = @OTOOL64@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PAM_DIR = @PAM_DIR@
+PAM_LIBS = @PAM_LIBS@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PGSQL_CFLAGS = @PGSQL_CFLAGS@
+PGSQL_LIBS = @PGSQL_LIBS@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROCTRACKDIR = @PROCTRACKDIR@
+PROJECT = @PROJECT@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+RANLIB = @RANLIB@
+READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
+RELEASE = @RELEASE@
+SED = @SED@
+SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
+SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SLURMCTLD_PORT = @SLURMCTLD_PORT@
+SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
+SLURMDBD_PORT = @SLURMDBD_PORT@
+SLURMD_PORT = @SLURMD_PORT@
+SLURM_API_AGE = @SLURM_API_AGE@
+SLURM_API_CURRENT = @SLURM_API_CURRENT@
+SLURM_API_MAJOR = @SLURM_API_MAJOR@
+SLURM_API_REVISION = @SLURM_API_REVISION@
+SLURM_API_VERSION = @SLURM_API_VERSION@
+SLURM_MAJOR = @SLURM_MAJOR@
+SLURM_MICRO = @SLURM_MICRO@
+SLURM_MINOR = @SLURM_MINOR@
+SLURM_PREFIX = @SLURM_PREFIX@
+SLURM_VERSION_NUMBER = @SLURM_VERSION_NUMBER@
+SLURM_VERSION_STRING = @SLURM_VERSION_STRING@
+SO_LDFLAGS = @SO_LDFLAGS@
+SSL_CPPFLAGS = @SSL_CPPFLAGS@
+SSL_LDFLAGS = @SSL_LDFLAGS@
+SSL_LIBS = @SSL_LIBS@
+STRIP = @STRIP@
+UTIL_LIBS = @UTIL_LIBS@
+VERSION = @VERSION@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+lt_ECHO = @lt_ECHO@
+lua_CFLAGS = @lua_CFLAGS@
+lua_LIBS = @lua_LIBS@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target = @target@
+target_alias = @target_alias@
+target_cpu = @target_cpu@
+target_os = @target_os@
+target_vendor = @target_vendor@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+EXTRA_DIST = \
+	job_submit.license.lua	\
+	job_submit.lua		\
+	proctrack.lua
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am  $(am__configure_deps)
+	@for dep in $?; do \
+	  case '$(am__configure_deps)' in \
+	    *$$dep*) \
+	      ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+	        && { if test -f $@; then exit 0; else break; fi; }; \
+	      exit 1;; \
+	  esac; \
+	done; \
+	echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu contribs/lua/Makefile'; \
+	$(am__cd) $(top_srcdir) && \
+	  $(AUTOMAKE) --gnu contribs/lua/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+	@case '$?' in \
+	  *config.status*) \
+	    cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+	  *) \
+	    echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+	    cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+	esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+mostlyclean-libtool:
+	-rm -f *.lo
+
+clean-libtool:
+	-rm -rf .libs _libs
+tags: TAGS
+TAGS:
+
+ctags: CTAGS
+CTAGS:
+
+
+distdir: $(DISTFILES)
+	@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	list='$(DISTFILES)'; \
+	  dist_files=`for file in $$list; do echo $$file; done | \
+	  sed -e "s|^$$srcdirstrip/||;t" \
+	      -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+	case $$dist_files in \
+	  */*) $(MKDIR_P) `echo "$$dist_files" | \
+			   sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+			   sort -u` ;; \
+	esac; \
+	for file in $$dist_files; do \
+	  if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+	  if test -d $$d/$$file; then \
+	    dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+	    if test -d "$(distdir)/$$file"; then \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+	      cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+	  else \
+	    test -f "$(distdir)/$$file" \
+	    || cp -p $$d/$$file "$(distdir)/$$file" \
+	    || exit 1; \
+	  fi; \
+	done
+check-am: all-am
+check: check-am
+all-am: Makefile
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+	$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	  install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	  `test -z '$(STRIP)' || \
+	    echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+	-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+	@echo "This command is intended for maintainers to use"
+	@echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-generic clean-libtool mostlyclean-am
+
+distclean: distclean-am
+	-rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+	-rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic clean-libtool \
+	distclean distclean-generic distclean-libtool distdir dvi \
+	dvi-am html html-am info info-am install install-am \
+	install-data install-data-am install-dvi install-dvi-am \
+	install-exec install-exec-am install-html install-html-am \
+	install-info install-info-am install-man install-pdf \
+	install-pdf-am install-ps install-ps-am install-strip \
+	installcheck installcheck-am installdirs maintainer-clean \
+	maintainer-clean-generic mostlyclean mostlyclean-generic \
+	mostlyclean-libtool pdf pdf-am ps ps-am uninstall uninstall-am
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/contribs/lua/job_submit.license.lua b/contribs/lua/job_submit.license.lua
new file mode 100644
index 000000000..db2fcf55f
--- /dev/null
+++ b/contribs/lua/job_submit.license.lua
@@ -0,0 +1,105 @@
+--[[
+
+ Example lua script demonstrating the SLURM job_submit/lua interface.
+ This is only an example, not meant for use in its current form.
+
+ For use, this script should be copied into a file name job_"submit.lua"
+ in the same directory as the SLURM configuration file, slurm.conf.
+
+--]]
+
+function _limit_license_cnt(orig_string, license_name, max_count)
+	local i = 0
+	local j = 0
+	local val = 0 
+
+	if orig_string == nil then
+		return 0
+	end
+
+	i, j, val = string.find(orig_string, license_name .. "%*(%d)")
+--	if val ~= nil then log_info("name:%s count:%s", license_name, val) end
+	if val ~= nil and val + 0 > max_count then
+		return 1
+	end
+	return 0
+end
+
+--########################################################################--
+--
+--  SLURM job_submit/lua interface:
+--
+--########################################################################--
+
+function slurm_job_submit ( job_desc, part_list )
+	setmetatable (job_desc, job_req_meta)
+	local bad_license_count = 0
+
+	bad_license_count = _limit_license_cnt(job_desc.licenses, "lscratcha", 1)
+	bad_license_count = _limit_license_cnt(job_desc.licenses, "lscratchb", 1) + bad_license_count
+	bad_license_count = _limit_license_cnt(job_desc.licenses, "lscratchc", 1) + bad_license_count
+	if bad_license_count > 0 then
+		log_info("slurm_job_submit: for user %d, invalid licenses value: %s",
+			 job_desc.user_id, job_desc.licenses)
+--		ESLURM_INVALID_LICENSES is 2048
+		return 2048
+	end
+
+	return 0
+end
+
+function slurm_job_modify ( job_desc, job_rec, part_list )
+	setmetatable (job_desc, job_req_meta)
+	setmetatable (job_rec,  job_rec_meta)
+	local bad_license_count = 0
+
+--      *** YOUR LOGIC GOES BELOW ***
+	bad_license_count = _limit_license_cnt(job_desc.licenses, "lscratcha", 1)
+	bad_license_count = _limit_license_cnt(job_desc.licenses, "lscratchb", 1) + bad_license_count
+	bad_license_count = _limit_license_cnt(job_desc.licenses, "lscratchc", 1) + bad_license_count
+	if bad_license_count > 0 then
+		log_info("slurm_job_modify: for job %u, invalid licenses value: %s",
+			 job_rec.job_id, job_desc.licenses)
+--		ESLURM_INVALID_LICENSES is 2048
+		return 2048
+	end
+
+	return 0
+end
+
+--########################################################################--
+--
+--  Initialization code:
+--
+--  Define functions for logging and accessing slurmctld structures
+--
+--########################################################################--
+
+
+log_info = slurm.log_info
+log_verbose = slurm.log_verbose
+log_debug = slurm.log_debug
+log_err = slurm.error
+
+job_rec_meta = {
+	__index = function (table, key)
+		return _get_job_rec_field(table.job_rec_ptr, key)
+	end
+}
+job_req_meta = {
+	__index = function (table, key)
+		return _get_job_req_field(table.job_desc_ptr, key)
+	end,
+	__newindex = function (table, key, value)
+		return _set_job_req_field(table.job_desc_ptr, key, value)
+	end
+}
+part_rec_meta = {
+	__index = function (table, key)
+		return _get_part_rec_field(table.part_rec_ptr, key)
+	end
+}
+
+log_info("initialized")
+
+return slurm.SUCCESS
diff --git a/contribs/lua/job_submit.lua b/contribs/lua/job_submit.lua
index c22ff76d8..485322758 100644
--- a/contribs/lua/job_submit.lua
+++ b/contribs/lua/job_submit.lua
@@ -2,19 +2,21 @@
 
  Example lua script demonstrating the SLURM job_submit/lua interface.
  This is only an example, not meant for use in its current form.
+
  Leave the function names, arguments, local varialbes and setmetatable
  set up logic in each function unchanged. Change only the logic after
  the line containing "*** YOUR LOGIC GOES BELOW ***".
 
+ For use, this script should be copied into a file name job_"submit.lua"
+ in the same directory as the SLURM configuration file, slurm.conf.
+
 --]]
 
 function _build_part_table ( part_list )
 	local part_rec = {}
-	local i = 1
-	while part_list[i] do
+	for i in ipairs(part_list) do
 		part_rec[i] = { part_rec_ptr=part_list[i] }
 		setmetatable (part_rec[i], part_rec_meta)
-		i = i + 1
 	end
 	return part_rec
 end
@@ -42,8 +44,7 @@ function slurm_job_submit ( job_desc, part_list )
 		local new_partition = nil
 		local top_priority  = -1
 		local last_priority = -1
-		local i = 1
-		while part_rec[i] do
+		for i in ipairs(part_rec) do
 --			log_info("part name[%d]:%s", i, part_rec[i].name)
 			if part_rec[i].flag_default ~= 0 then
 				top_priority = -1
@@ -54,7 +55,6 @@ function slurm_job_submit ( job_desc, part_list )
 				top_priority = last_priority
 				new_partition = part_rec[i].name
 			end
-			i = i + 1
 		end
 		if top_priority >= 0 then
 			log_info("slurm_job_submit: job from uid %d, setting default partition value: %s",
@@ -106,7 +106,7 @@ job_req_meta = {
 		return _get_job_req_field(table.job_desc_ptr, key)
 	end,
 	__newindex = function (table, key, value)
-		return _set_job_req_field(table.job_desc_ptr, key, value)
+		return _set_job_req_field(table.job_desc_ptr, key, value or "")
 	end
 }
 part_rec_meta = {
diff --git a/contribs/pam/Makefile.in b/contribs/pam/Makefile.in
index f3193cdfc..0174f1378 100644
--- a/contribs/pam/Makefile.in
+++ b/contribs/pam/Makefile.in
@@ -64,6 +64,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -74,6 +75,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -145,7 +147,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -182,6 +187,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -239,6 +245,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -274,6 +281,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/contribs/pam/pam_slurm.c b/contribs/pam/pam_slurm.c
index 36d37d451..426f10eae 100644
--- a/contribs/pam/pam_slurm.c
+++ b/contribs/pam/pam_slurm.c
@@ -136,13 +136,16 @@ pam_sm_acct_mgmt(pam_handle_t *pamh, int flags, int argc, const char **argv)
 
 	if ((auth != PAM_SUCCESS) && (!opts.enable_silence))
 		_send_denial_msg(pamh, &opts, user, uid);
+
+	/*
+	 *  Generate an entry to the system log if access was
+	 *   denied (!PAM_SUCCESS) or disable_sys_info is not set
+	 */
 	if ((auth != PAM_SUCCESS) || (!opts.disable_sys_info)) {
 		_log_msg(LOG_INFO, "access %s for user %s (uid=%d)",
 			 (auth == PAM_SUCCESS) ? "granted" : "denied",
 			 user, uid);
 	}
-	_log_msg(LOG_INFO, "access %s for user %s (uid=%d)",
-		 (auth == PAM_SUCCESS) ? "granted" : "denied", user, uid);
 
 	return(auth);
 }
diff --git a/contribs/perlapi/Makefile.in b/contribs/perlapi/Makefile.in
index 7464178a2..9a703f331 100644
--- a/contribs/perlapi/Makefile.in
+++ b/contribs/perlapi/Makefile.in
@@ -60,6 +60,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -70,6 +71,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -131,7 +133,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -168,6 +173,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -225,6 +231,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -260,6 +267,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/contribs/perlapi/libslurm/Makefile.in b/contribs/perlapi/libslurm/Makefile.in
index 69e452c7b..38e37c393 100644
--- a/contribs/perlapi/libslurm/Makefile.in
+++ b/contribs/perlapi/libslurm/Makefile.in
@@ -60,6 +60,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -70,6 +71,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -91,7 +93,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -128,6 +133,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -185,6 +191,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -220,6 +227,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/contribs/perlapi/libslurm/perl/alloc.c b/contribs/perlapi/libslurm/perl/alloc.c
index 0476f7058..940e34a50 100644
--- a/contribs/perlapi/libslurm/perl/alloc.c
+++ b/contribs/perlapi/libslurm/perl/alloc.c
@@ -182,7 +182,23 @@ hv_to_job_desc_msg(HV *hv, job_desc_msg_t *job_desc)
 			job_desc->geometry[i] = SvUV(*svp);
 		}
 	}
-	FETCH_FIELD(hv, job_desc, conn_type, uint16_t, FALSE);
+	if((svp = hv_fetch(hv, "conn_type", 9, FALSE))) {
+		AV *av;
+		if (!SvROK(*svp) || SvTYPE(SvRV(*svp)) != SVt_PVAV) {
+			Perl_warn(aTHX_ "`conn_type' is not an array reference in job descriptor");
+			free_job_desc_msg_memory(job_desc);
+			return -1;
+		}
+		av = (AV*)SvRV(*svp);
+		for(i = 0; i < HIGHEST_DIMENSIONS; i ++) {
+			if(! (svp = av_fetch(av, i, FALSE))) {
+				Perl_warn(aTHX_ "conn_type of dimension %d missing in job descriptor", i);
+				free_job_desc_msg_memory(job_desc);
+				return -1;
+			}
+			job_desc->conn_type[i] = SvUV(*svp);
+		}
+	}
 	FETCH_FIELD(hv, job_desc, reboot, uint16_t, FALSE);
 	FETCH_FIELD(hv, job_desc, rotate, uint16_t, FALSE);
 	FETCH_FIELD(hv, job_desc, blrtsimage, charp, FALSE);
diff --git a/contribs/perlapi/libslurm/perl/block.c b/contribs/perlapi/libslurm/perl/block.c
index 0e78e71e1..5d4f9df2d 100644
--- a/contribs/perlapi/libslurm/perl/block.c
+++ b/contribs/perlapi/libslurm/perl/block.c
@@ -17,27 +17,36 @@
 int
 block_info_to_hv(block_info_t *block_info, HV *hv)
 {
+	int dim;
+	AV* av = NULL;
+
 	if(block_info->bg_block_id)
 		STORE_FIELD(hv, block_info, bg_block_id, charp);
 	if(block_info->blrtsimage)
 		STORE_FIELD(hv, block_info, blrtsimage, charp);
-	if (block_info->bp_inx) {
+	if (block_info->mp_inx) {
 		int j;
-		AV* av = newAV();
+		av = newAV();
 		for(j = 0; ; j += 2) {
-			if(block_info->bp_inx[j] == -1)
+			if(block_info->mp_inx[j] == -1)
 				break;
-			av_store(av, j, newSVuv(block_info->bp_inx[j]));
-			av_store(av, j+1, newSVuv(block_info->bp_inx[j+1]));
+			av_store(av, j, newSVuv(block_info->mp_inx[j]));
+			av_store(av, j+1, newSVuv(block_info->mp_inx[j+1]));
 		}
-		hv_store_sv(hv, "bp_inx", newRV_noinc((SV*)av));
+		hv_store_sv(hv, "mp_inx", newRV_noinc((SV*)av));
 	}
-	STORE_FIELD(hv, block_info, conn_type, uint16_t);
-	if(block_info->ionodes)
-		STORE_FIELD(hv, block_info, ionodes, charp);
+
+	av = newAV();
+	for (dim=0; dim<HIGHEST_DIMENSIONS; dim++)
+		av_store(av, dim, newSVuv(block_info->conn_type[dim]));
+
+	hv_store_sv(hv, "conn_type", newRV_noinc((SV*)av));
+
+	if(block_info->ionode_str)
+		STORE_FIELD(hv, block_info, ionode_str, charp);
 	if (block_info->ionode_inx) {
 		int j;
-		AV* av = newAV();
+		av = newAV();
 		for(j = 0; ; j += 2) {
 			if(block_info->ionode_inx[j] == -1)
 				break;
@@ -51,9 +60,9 @@ block_info_to_hv(block_info_t *block_info, HV *hv)
 		STORE_FIELD(hv, block_info, linuximage, charp);
 	if(block_info->mloaderimage)
 		STORE_FIELD(hv, block_info, mloaderimage, charp);
-	if(block_info->nodes)
-		STORE_FIELD(hv, block_info, nodes, charp);
-	STORE_FIELD(hv, block_info, node_cnt, uint32_t);
+	if(block_info->mp_str)
+		STORE_FIELD(hv, block_info, mp_str, charp);
+	STORE_FIELD(hv, block_info, cnode_cnt, uint32_t);
 	STORE_FIELD(hv, block_info, node_use, uint16_t);
 	if (block_info->owner_name)
 		STORE_FIELD(hv, block_info, owner_name, charp);
@@ -79,21 +88,29 @@ hv_to_block_info(HV *hv, block_info_t *block_info)
 
 	FETCH_FIELD(hv, block_info, bg_block_id, charp, FALSE);
 	FETCH_FIELD(hv, block_info, blrtsimage, charp, FALSE);
-	svp = hv_fetch(hv, "bp_inx", 6, FALSE);
+	svp = hv_fetch(hv, "mp_inx", 6, FALSE);
 	if (svp && SvROK(*svp) && SvTYPE(SvRV(*svp)) == SVt_PVAV) {
 		av = (AV*)SvRV(*svp);
 		n = av_len(av) + 2; /* for trailing -1 */
-		block_info->bp_inx = xmalloc(n * sizeof(int));
+		block_info->mp_inx = xmalloc(n * sizeof(int));
 		for (i = 0 ; i < n-1; i += 2) {
-			block_info->bp_inx[i] = (int)SvIV(*(av_fetch(av, i, FALSE)));
-			block_info->bp_inx[i+1] = (int)SvIV(*(av_fetch(av, i+1 ,FALSE)));
+			block_info->mp_inx[i] = (int)SvIV(*(av_fetch(av, i, FALSE)));
+			block_info->mp_inx[i+1] = (int)SvIV(*(av_fetch(av, i+1 ,FALSE)));
 		}
-		block_info->bp_inx[n-1] = -1;
+		block_info->mp_inx[n-1] = -1;
 	} else {
 		/* nothing to do */
 	}
-	FETCH_FIELD(hv, block_info, conn_type, uint16_t, TRUE);
-	FETCH_FIELD(hv, block_info, ionodes, charp, FALSE);
+	svp = hv_fetch(hv, "conn_type", 9, FALSE);
+	if (svp && SvROK(*svp) && SvTYPE(SvRV(*svp)) == SVt_PVAV) {
+		av = (AV*)SvRV(*svp);
+		n = av_len(av); /* for trailing -1 */
+		for (i = 0 ; i < HIGHEST_DIMENSIONS; i++)
+			block_info->conn_type[i] = SvUV(*(av_fetch(av, i, FALSE)));
+	} else {
+		/* nothing to do */
+	}
+	FETCH_FIELD(hv, block_info, ionode_str, charp, FALSE);
 	svp = hv_fetch(hv, "ionode_inx", 10, FALSE);
 	if (svp && SvROK(*svp) && SvTYPE(SvRV(*svp)) == SVt_PVAV) {
 		av = (AV*)SvRV(*svp);
@@ -110,8 +127,8 @@ hv_to_block_info(HV *hv, block_info_t *block_info)
 	FETCH_FIELD(hv, block_info, job_running, uint32_t, TRUE);
 	FETCH_FIELD(hv, block_info, linuximage, charp, FALSE);
 	FETCH_FIELD(hv, block_info, mloaderimage, charp, FALSE);
-	FETCH_FIELD(hv, block_info, nodes, charp, FALSE);
-	FETCH_FIELD(hv, block_info, node_cnt, uint32_t, TRUE);
+	FETCH_FIELD(hv, block_info, mp_str, charp, FALSE);
+	FETCH_FIELD(hv, block_info, cnode_cnt, uint32_t, TRUE);
 	FETCH_FIELD(hv, block_info, node_use, uint16_t, TRUE);
 	FETCH_FIELD(hv, block_info, owner_name, charp, FALSE);
 	FETCH_FIELD(hv, block_info, ramdiskimage, charp, FALSE);
@@ -200,21 +217,28 @@ hv_to_update_block_msg(HV *hv, update_block_msg_t *update_msg)
 
 	FETCH_FIELD(hv, update_msg, bg_block_id, charp, FALSE);
 	FETCH_FIELD(hv, update_msg, blrtsimage, charp, FALSE);
-	svp = hv_fetch(hv, "bp_inx", 6, FALSE);
+	svp = hv_fetch(hv, "mp_inx", 6, FALSE);
 	if (svp && SvROK(*svp) && SvTYPE(SvRV(*svp)) == SVt_PVAV) {
 		av = (AV*)SvRV(*svp);
 		n = av_len(av) + 2; /* for trailing -1 */
-		update_msg->bp_inx = xmalloc(n * sizeof(int));
+		update_msg->mp_inx = xmalloc(n * sizeof(int));
 		for (i = 0 ; i < n-1; i += 2) {
-			update_msg->bp_inx[i] = (int)SvIV(*(av_fetch(av, i, FALSE)));
-			update_msg->bp_inx[i+1] = (int)SvIV(*(av_fetch(av, i+1 ,FALSE)));
+			update_msg->mp_inx[i] = (int)SvIV(*(av_fetch(av, i, FALSE)));
+			update_msg->mp_inx[i+1] = (int)SvIV(*(av_fetch(av, i+1 ,FALSE)));
 		}
-		update_msg->bp_inx[n-1] = -1;
+		update_msg->mp_inx[n-1] = -1;
+	} else {
+		/* nothing to do */
+	}
+	svp = hv_fetch(hv, "conn_type", 9, FALSE);
+	if (svp && SvROK(*svp) && SvTYPE(SvRV(*svp)) == SVt_PVAV) {
+		av = (AV*)SvRV(*svp);
+		for (i = 0 ; i < HIGHEST_DIMENSIONS; i++)
+			update_msg->conn_type[i] = SvUV(*(av_fetch(av, i, FALSE)));
 	} else {
 		/* nothing to do */
 	}
-	FETCH_FIELD(hv, update_msg, conn_type, uint16_t, FALSE);
-	FETCH_FIELD(hv, update_msg, ionodes, charp, FALSE);
+	FETCH_FIELD(hv, update_msg, ionode_str, charp, FALSE);
 	svp = hv_fetch(hv, "ionode_inx", 10, FALSE);
 	if (svp && SvROK(*svp) && SvTYPE(SvRV(*svp)) == SVt_PVAV) {
 		av = (AV*)SvRV(*svp);
@@ -231,8 +255,8 @@ hv_to_update_block_msg(HV *hv, update_block_msg_t *update_msg)
 	FETCH_FIELD(hv, update_msg, job_running, uint32_t, FALSE);
 	FETCH_FIELD(hv, update_msg, linuximage, charp, FALSE);
 	FETCH_FIELD(hv, update_msg, mloaderimage, charp, FALSE);
-	FETCH_FIELD(hv, update_msg, nodes, charp, FALSE);
-	FETCH_FIELD(hv, update_msg, node_cnt, uint32_t, FALSE);
+	FETCH_FIELD(hv, update_msg, mp_str, charp, FALSE);
+	FETCH_FIELD(hv, update_msg, cnode_cnt, uint32_t, FALSE);
 	FETCH_FIELD(hv, update_msg, node_use, uint16_t, FALSE);
 	FETCH_FIELD(hv, update_msg, owner_name, charp, FALSE);
 	FETCH_FIELD(hv, update_msg, ramdiskimage, charp, FALSE);
diff --git a/contribs/perlapi/libslurm/perl/job.c b/contribs/perlapi/libslurm/perl/job.c
index 63c75a23a..24fcd39ec 100644
--- a/contribs/perlapi/libslurm/perl/job.c
+++ b/contribs/perlapi/libslurm/perl/job.c
@@ -109,6 +109,7 @@ job_info_to_hv(job_info_t *job_info, HV *hv)
 		av_store(av, j+1, newSVuv(job_info->req_node_inx[j+1]));
 	}
 	hv_store_sv(hv, "req_node_inx", newRV_noinc((SV*)av));
+	STORE_FIELD(hv, job_info, req_switch, uint32_t);
 	STORE_FIELD(hv, job_info, requeue, uint16_t);
 	STORE_FIELD(hv, job_info, resize_time, time_t);
 	STORE_FIELD(hv, job_info, restart_cnt, uint16_t);
@@ -127,6 +128,7 @@ job_info_to_hv(job_info_t *job_info, HV *hv)
 	STORE_FIELD(hv, job_info, time_limit, uint32_t);
 	STORE_FIELD(hv, job_info, time_min, uint32_t);
 	STORE_FIELD(hv, job_info, user_id, uint32_t);
+	STORE_FIELD(hv, job_info, wait4switch, uint32_t);
 	if(job_info->wckey)
 		STORE_FIELD(hv, job_info, wckey, charp);
 	if(job_info->work_dir)
@@ -226,6 +228,7 @@ hv_to_job_info(HV *hv, job_info_t *job_info)
 	} else {
 		/* nothing to do */
 	}
+	FETCH_FIELD(hv, job_info, req_switch, uint32_t, FALSE);
 	FETCH_FIELD(hv, job_info, requeue, uint16_t, TRUE);
 	FETCH_FIELD(hv, job_info, resize_time, time_t, TRUE);
 	FETCH_FIELD(hv, job_info, restart_cnt, uint16_t, TRUE);
@@ -241,6 +244,7 @@ hv_to_job_info(HV *hv, job_info_t *job_info)
 	FETCH_FIELD(hv, job_info, suspend_time, time_t, TRUE);
 	FETCH_FIELD(hv, job_info, time_limit, uint32_t, TRUE);
 	FETCH_FIELD(hv, job_info, time_min, uint32_t, TRUE);
+	FETCH_FIELD(hv, job_info, wait4switch, uint32_t, FALSE);
 	FETCH_FIELD(hv, job_info, wckey, charp, FALSE);
 	FETCH_FIELD(hv, job_info, work_dir, charp, FALSE);
 	return 0;
diff --git a/contribs/perlapi/libslurm/perl/step.c b/contribs/perlapi/libslurm/perl/step.c
index 867d136f5..535708e33 100644
--- a/contribs/perlapi/libslurm/perl/step.c
+++ b/contribs/perlapi/libslurm/perl/step.c
@@ -178,6 +178,8 @@ slurm_step_layout_to_hv(slurm_step_layout_t *step_layout, HV *hv)
 	AV* av, *av2;
 	int i, j;
 
+	if (step_layout->front_end)
+		STORE_FIELD(hv, step_layout, front_end, charp);
 	STORE_FIELD(hv, step_layout, node_cnt, uint16_t);
 	if (step_layout->node_list)
 		STORE_FIELD(hv, step_layout, node_list, charp);
@@ -187,15 +189,15 @@ slurm_step_layout_to_hv(slurm_step_layout_t *step_layout, HV *hv)
 	}
 	STORE_FIELD(hv, step_layout, plane_size, uint16_t);
 	av = newAV();
-	for(i = 0; i < step_layout->node_cnt; i ++)
+	for (i = 0; i < step_layout->node_cnt; i ++)
 		av_store_uint16_t(av, i, step_layout->tasks[i]);
 	hv_store_sv(hv, "tasks", newRV_noinc((SV*)av));
 	STORE_FIELD(hv, step_layout, task_cnt, uint32_t);
 	STORE_FIELD(hv, step_layout, task_dist, uint16_t);
 	av = newAV();
-	for(i = 0; i < step_layout->node_cnt; i ++) {
+	for (i = 0; i < step_layout->node_cnt; i ++) {
 		av2 = newAV();
-		for(j = 0; j < step_layout->tasks[i]; j ++)
+		for (j = 0; j < step_layout->tasks[i]; j ++)
 			av_store_uint32_t(av2, i, step_layout->tids[i][j]);
 		av_store(av, i, newRV_noinc((SV*)av2));
 	}
diff --git a/contribs/perlapi/libslurmdb/Makefile.in b/contribs/perlapi/libslurmdb/Makefile.in
index 8a13b8392..910a20044 100644
--- a/contribs/perlapi/libslurmdb/Makefile.in
+++ b/contribs/perlapi/libslurmdb/Makefile.in
@@ -60,6 +60,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -70,6 +71,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -91,7 +93,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -128,6 +133,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -185,6 +191,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -220,6 +227,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/contribs/perlapi/libslurmdb/perl/Slurmdb.pm b/contribs/perlapi/libslurmdb/perl/Slurmdb.pm
index 7adc001c9..52975591f 100644
--- a/contribs/perlapi/libslurmdb/perl/Slurmdb.pm
+++ b/contribs/perlapi/libslurmdb/perl/Slurmdb.pm
@@ -135,7 +135,7 @@ None by default.
 
 =head1 SEE ALSO
 
-https://computing.llnl.gov/linux/slurm/accounting.html
+http://www.schedmd.com/slurmdocs/accounting.html
 
 =head1 AUTHOR
 
@@ -148,7 +148,7 @@ Don Lipari, <lt>lipari@llnl.gov<gt>
  CODE-OCEC-09-009. All rights reserved.
 
  This file is part of SLURM, a resource management program.  For
- details, see <https://computing.llnl.gov/linux/slurm/>.  Please also
+ details, see <http://www.schedmd.com/slurmdocs/>.  Please also
  read the included file: DISCLAIMER.
 
  SLURM is free software; you can redistribute it and/or modify it
diff --git a/contribs/phpext/Makefile.am b/contribs/phpext/Makefile.am
index f9ee92ce6..7f93dedac 100644
--- a/contribs/phpext/Makefile.am
+++ b/contribs/phpext/Makefile.am
@@ -1,11 +1,11 @@
 AUTOMAKE_OPTIONS = foreign
-php_dir = slurm_php
-phpize = /usr/bin/phpize
+php_dir=slurm_php
+phpize=/usr/bin/phpize
 
 if HAVE_AIX
-	add_flags = "CC=\"$(CC)\" CCFLAGS=\"-g -static $(CFLAGS)\""
+config_line=CC="$(CC)" CCFLAGS="-g -static $(CFLAGS)" ./configure
 else
-	add_flags = "CC=\"$(CC)\" LD=\"$(CC) $(CFLAGS)\" CCFLAGS=\"-g -static $(CFLAGS)\""
+config_line=CC="$(CC)" LD="$(CC) $(CFLAGS)" CCFLAGS="-g -static $(CFLAGS)" CFLAGS="$(CFLAGS)" ./configure
 endif
 
 all-local:
@@ -14,12 +14,12 @@ all-local:
 		if [ ! -f configure ]; then \
 			$(phpize); \
 		fi && \
-		./configure ; \
+		$(config_line); \
 		if [ ! -f Makefile ]; then \
 			exit 0;\
 		fi \
 	fi && \
-	$(MAKE) $(add_flags); \
+	$(MAKE); \
 	cd ..;
 
 install-exec-local:
@@ -27,7 +27,7 @@ install-exec-local:
 	if [ ! -f Makefile ]; then \
 		exit 0;\
 	fi && \
-	$(MAKE) INSTALL_ROOT=$(DESTDIR) $(add_flags) install && \
+	$(MAKE) INSTALL_ROOT=$(DESTDIR) install && \
 	cd ..;
 
 clean-generic:
diff --git a/contribs/phpext/Makefile.in b/contribs/phpext/Makefile.in
index fddf1b678..c0ba16cb1 100644
--- a/contribs/phpext/Makefile.in
+++ b/contribs/phpext/Makefile.in
@@ -60,6 +60,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -70,6 +71,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -91,7 +93,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -128,6 +133,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -185,6 +191,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -220,6 +227,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
@@ -275,6 +283,8 @@ top_srcdir = @top_srcdir@
 AUTOMAKE_OPTIONS = foreign
 php_dir = slurm_php
 phpize = /usr/bin/phpize
+@HAVE_AIX_FALSE@config_line = CC="$(CC)" LD="$(CC) $(CFLAGS)" CCFLAGS="-g -static $(CFLAGS)" CFLAGS="$(CFLAGS)" ./configure
+@HAVE_AIX_TRUE@config_line = CC="$(CC)" CCFLAGS="-g -static $(CFLAGS)" ./configure
 all: all-am
 
 .SUFFIXES:
@@ -455,21 +465,18 @@ uninstall-am:
 	ps ps-am uninstall uninstall-am
 
 
-@HAVE_AIX_TRUE@	add_flags = "CC=\"$(CC)\" CCFLAGS=\"-g -static $(CFLAGS)\""
-@HAVE_AIX_FALSE@	add_flags = "CC=\"$(CC)\" LD=\"$(CC) $(CFLAGS)\" CCFLAGS=\"-g -static $(CFLAGS)\""
-
 all-local:
 	@cd $(php_dir) && \
 	if [ ! -f Makefile ]; then \
 		if [ ! -f configure ]; then \
 			$(phpize); \
 		fi && \
-		./configure ; \
+		$(config_line); \
 		if [ ! -f Makefile ]; then \
 			exit 0;\
 		fi \
 	fi && \
-	$(MAKE) $(add_flags); \
+	$(MAKE); \
 	cd ..;
 
 install-exec-local:
@@ -477,7 +484,7 @@ install-exec-local:
 	if [ ! -f Makefile ]; then \
 		exit 0;\
 	fi && \
-	$(MAKE) INSTALL_ROOT=$(DESTDIR) $(add_flags) install && \
+	$(MAKE) INSTALL_ROOT=$(DESTDIR) install && \
 	cd ..;
 
 clean-generic:
diff --git a/contribs/phpext/slurm_php/AUTHORS b/contribs/phpext/slurm_php/AUTHORS
new file mode 100644
index 000000000..5317cc36e
--- /dev/null
+++ b/contribs/phpext/slurm_php/AUTHORS
@@ -0,0 +1,2 @@
+Vermeulen Peter, nMCT Howest <nmb.peterv@gmail.com>
+Jimmy Tang, Trinity Centre for High Performance Computing, Trinity College Dublin <jtang@tchpc.tcd.ie>
diff --git a/contribs/phpext/slurm_php/DISCLAIMER b/contribs/phpext/slurm_php/DISCLAIMER
new file mode 100644
index 000000000..52c91a0ec
--- /dev/null
+++ b/contribs/phpext/slurm_php/DISCLAIMER
@@ -0,0 +1,20 @@
+Disclaimer
+
+The php-slurm program, its documentation, and any other auxiliary
+resources involved in building, installing and running the program,
+such as graphics, Makefiles, and user interface definition files, are
+licensed under the GNU General Public License. This includes, but is
+not limited to, all the files in the official source distribution, as
+well as the source distribution itself.
+
+A copy of the GNU General Public License can be found in the file
+LICENSE in the top directory of the official source distribution. The
+license is also available in several formats through the World Wide
+Web, via http://www.gnu.org/licenses/licenses.html#GPL, or you can
+write the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA
+02139, USA.
+
+php-slurm is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
diff --git a/contribs/phpext/slurm_php/LICENSE b/contribs/phpext/slurm_php/LICENSE
new file mode 100644
index 000000000..d159169d1
--- /dev/null
+++ b/contribs/phpext/slurm_php/LICENSE
@@ -0,0 +1,339 @@
+                    GNU GENERAL PUBLIC LICENSE
+                       Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users.  This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it.  (Some other Free Software Foundation software is covered by
+the GNU Lesser General Public License instead.)  You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+  To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have.  You must make sure that they, too, receive or can get the
+source code.  And you must show them these terms so they know their
+rights.
+
+  We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+  Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software.  If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+  Finally, any free program is threatened constantly by software
+patents.  We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary.  To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+                    GNU GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License.  The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language.  (Hereinafter, translation is included without limitation in
+the term "modification".)  Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope.  The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+  1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+  2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+    a) You must cause the modified files to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    b) You must cause any work that you distribute or publish, that in
+    whole or in part contains or is derived from the Program or any
+    part thereof, to be licensed as a whole at no charge to all third
+    parties under the terms of this License.
+
+    c) If the modified program normally reads commands interactively
+    when run, you must cause it, when started running for such
+    interactive use in the most ordinary way, to print or display an
+    announcement including an appropriate copyright notice and a
+    notice that there is no warranty (or else, saying that you provide
+    a warranty) and that users may redistribute the program under
+    these conditions, and telling the user how to view a copy of this
+    License.  (Exception: if the Program itself is interactive but
+    does not normally print such an announcement, your work based on
+    the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole.  If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works.  But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+  3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+    a) Accompany it with the complete corresponding machine-readable
+    source code, which must be distributed under the terms of Sections
+    1 and 2 above on a medium customarily used for software interchange; or,
+
+    b) Accompany it with a written offer, valid for at least three
+    years, to give any third party, for a charge no more than your
+    cost of physically performing source distribution, a complete
+    machine-readable copy of the corresponding source code, to be
+    distributed under the terms of Sections 1 and 2 above on a medium
+    customarily used for software interchange; or,
+
+    c) Accompany it with the information you received as to the offer
+    to distribute corresponding source code.  (This alternative is
+    allowed only for noncommercial distribution and only if you
+    received the program in object code or executable form with such
+    an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it.  For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable.  However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+  4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License.  Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+  5. You are not required to accept this License, since you have not
+signed it.  However, nothing else grants you permission to modify or
+distribute the Program or its derivative works.  These actions are
+prohibited by law if you do not accept this License.  Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+  6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions.  You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+  7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all.  For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices.  Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+  8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded.  In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+  9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time.  Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number.  If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation.  If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+  10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission.  For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this.  Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+                            NO WARRANTY
+
+  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License along
+    with this program; if not, write to the Free Software Foundation, Inc.,
+    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+    Gnomovision version 69, Copyright (C) year name of author
+    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary.  Here is a sample; alter the names:
+
+  Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+  `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+  <signature of Ty Coon>, 1 April 1989
+  Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs.  If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.
diff --git a/contribs/phpext/slurm_php/README b/contribs/phpext/slurm_php/README
new file mode 100644
index 000000000..f912f327a
--- /dev/null
+++ b/contribs/phpext/slurm_php/README
@@ -0,0 +1,48 @@
+Slurm PHP extension
+===================
+
+Requirements (tested with)
+
+* SLURM 2.2.0
+* PHP 5.1.6
+* APACHE (optional, but recommended)
+
+This was made primarily for SLURMWEB to connect to slurm. Any extra
+interactions are welcome.
+
+to compile...
+
+phpize
+./configure
+make
+
+this should make modules/slurm_php.so
+
+make install as root
+should install this where your extensions are in your php install
+
+in your php.ini file add the line
+
+extension=slurm_php.so
+
+and you should be able to use the functions here.
+
+
+TEST CASES
+==========
+
+It is assumed that the user has both slurmctld and slurmd
+configured with at least 1 partition and 1 node for these tests to
+pass.
+
+Developer Notes
+===============
+
+To clean up the directory to a clean state do the following
+
+~~~~
+phpize --clean
+~~~~
+
+The coding style that should be adopted is
+http://www.kernel.org/doc/Documentation/CodingStyle
diff --git a/contribs/phpext/slurm_php/RELEASE_NOTES b/contribs/phpext/slurm_php/RELEASE_NOTES
new file mode 100644
index 000000000..3837745d6
--- /dev/null
+++ b/contribs/phpext/slurm_php/RELEASE_NOTES
@@ -0,0 +1,38 @@
+NOTES FOR PHP-SLURM VERSION 1.0
+===============================
+
+This PHP extension's goal is to provide just enough functionality for
+a web developer to read data from the slurm controller daemon to create a
+*status* or *monitoring* application which can be viewed by the end
+user. All the code has been written by 'Vermeulen Peter' with
+contributions from TCHPC staff.
+
+
+Installation Requirements
+=========================
+
+* SLURM 2.2.0 or newer
+* PHP 5.1.6 or newer
+* APACHE (optional, but recommended)
+
+
+Added the following API's
+=========================
+
+slurm_hostlist_to_array()
+slurm_array_to_hostlist()
+slurm_ping()
+slurm_slurmd_status()
+slurm_version()
+slurm_print_partition_names()
+slurm_get_specific_partition_info()
+slurm_get_partition_node_names()
+slurm_get_node_names()
+slurm_get_node_elements()
+slurm_get_node_element_by_name()
+slurm_get_node_state_by_name()
+slurm_get_node_states()
+slurm_get_control_configuration_keys()
+slurm_get_control_configuration_values()
+slurm_load_partition_jobs()
+slurm_load_job_information()
diff --git a/contribs/phpext/slurm_php/config.m4.in b/contribs/phpext/slurm_php/config.m4.in
index 51f804857..166637760 100644
--- a/contribs/phpext/slurm_php/config.m4.in
+++ b/contribs/phpext/slurm_php/config.m4.in
@@ -9,50 +9,60 @@
 ##*****************************************************************************
 PHP_ARG_WITH(slurm, whether to use slurm,
 [ --with-slurm SLURM install dir])
-	
+
+AC_MSG_CHECKING([for phpize in default path])
+if test ! -f "/usr/bin/phpize"; then
+   PHP_SLURM="no"
+   AC_MSG_RESULT([NO, CANNOT MAKE SLURM_PHP])
+else
+   AC_MSG_RESULT([yes])
+fi
+
 if test "$PHP_SLURM" != "no"; then
-	SLURMLIB_PATH="@prefix@/lib @top_builddir@/src/api/.libs"
+	SLURMLIB_PATH="@prefix@/lib @top_builddir@/src/db_api/.libs"
 	SLURMINCLUDE_PATH="@prefix@/include"
-	SEARCH_FOR="libslurm.so"
-    	
-        # --with-libslurm -> check with-path
-	
+	SEARCH_FOR="libslurmdb.so"
+
+	# --with-libslurm -> check with-path
+
 	if test -r $PHP_SLURM/; then # path given as parameter
 		SLURM_DIR=$PHP_SLURM
 		SLURMLIB_PATH="$SLURM_DIR/lib"
 	else # search default path list
-		AC_MSG_CHECKING([for libslurm.so in default paths])
+		AC_MSG_CHECKING([for libslurmdb.so in default paths])
 		for i in $SLURMLIB_PATH ; do
 			if test -r $i/$SEARCH_FOR; then
 				SLURM_DIR=$i
 				PHP_ADD_LIBPATH($i, SLURM_PHP_SHARED_LIBADD)
-    
+
 				AC_MSG_RESULT([found in $i])
-				
+
 			fi
 		done
 	fi
-	
+
 	if test -z "$SLURM_DIR"; then
 		AC_MSG_RESULT([not found])
 		AC_MSG_ERROR([Please reinstall the slurm distribution])
 	fi
-	
+
 	PHP_ADD_INCLUDE($SLURMINCLUDE_PATH)
 	PHP_ADD_INCLUDE(@top_srcdir@)
-	
-	LIBNAME=slurm
+
+	LIBNAME=slurmdb
 	LIBSYMBOL=slurm_acct_storage_init
-	
+
 	PHP_CHECK_LIBRARY($LIBNAME, $LIBSYMBOL,
 		[PHP_ADD_LIBRARY($LIBNAME, , SLURM_PHP_SHARED_LIBADD)
     			AC_DEFINE(HAVE_SLURMLIB,1,[ ])],
-		[AC_MSG_ERROR([wrong libslurm version or lib not found])],
-		[-L$SLURM_DIR -lslurm])
-	
-	
+		[AC_MSG_ERROR([wrong libslurmdb version or lib not found])],
+		[-L$SLURM_DIR -l$LIBNAME])
+
+
 	PHP_SUBST(SLURM_PHP_SHARED_LIBADD)
-	
+
+	AC_CHECK_HEADERS(stdbool.h)
+
 	AC_DEFINE(HAVE_SLURM_PHP, 1, [Whether you have SLURM])
 	#PHP_EXTENSION(slurm_php, $ext_shared)
 	PHP_NEW_EXTENSION(slurm_php, @top_srcdir@/contribs/phpext/slurm_php/slurm_php.c, $ext_shared)
diff --git a/contribs/phpext/slurm_php/slurm_php.c b/contribs/phpext/slurm_php/slurm_php.c
index d09b9fcd3..3720ba589 100644
--- a/contribs/phpext/slurm_php/slurm_php.c
+++ b/contribs/phpext/slurm_php/slurm_php.c
@@ -1,101 +1,902 @@
 /*****************************************************************************\
  *  slurm_php.c - php interface to slurm.
  *
- *  $Id: account_gold.c 13061 2008-01-22 21:23:56Z da $
  *****************************************************************************
- *  Copyright (C) 2004-2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
- *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
- *  Written by Danny Auble <da@llnl.gov>
- *  
- *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Copyright (C) 2011 - Trinity Centre for High Performance Computing
+ *  Copyright (C) 2011 - Trinity College Dublin
+ *  Written By : Vermeulen Peter <HoWest><Belgium>
+ *
+ *  This file is part of php-slurm, a resource management program.
  *  Please also read the included file: DISCLAIMER.
- *  
- *  SLURM is free software; you can redistribute it and/or modify it under
+ *
+ *  php-slurm is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
  *  Software Foundation; either version 2 of the License, or (at your option)
  *  any later version.
  *
- *  In addition, as a special exception, the copyright holders give permission 
+ *  In addition, as a special exception, the copyright holders give permission
  *  to link the code of portions of this program with the OpenSSL library under
- *  certain conditions as described in each individual source file, and 
- *  distribute linked combinations including the two. You must obey the GNU 
- *  General Public License in all respects for all of the code used other than 
- *  OpenSSL. If you modify file(s) with this exception, you may extend this 
- *  exception to your version of the file(s), but you are not obligated to do 
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
  *  so. If you do not wish to do so, delete this exception statement from your
- *  version.  If you delete this exception statement from all source files in 
+ *  version.  If you delete this exception statement from all source files in
  *  the program, then also delete it here.
- *  
- *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *
+ *  php-slurm is distributed in the hope that it will be useful, but WITHOUT ANY
  *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
  *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
  *  details.
- *  
+ *
  *  You should have received a copy of the GNU General Public License along
- *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  with php-slurm; if not, write to the Free Software Foundation, Inc.,
  *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
 \*****************************************************************************/
 
+/*****************************************************************************\
+ *
+ *	Documentation for each function can be found in the slurm_php.h file
+ *
+\*****************************************************************************/
+
 #ifdef HAVE_CONFIG_H
 #include "config.h"
 #endif
 
-#include "php.h"
 #include "slurm_php.h"
-#include "slurm/slurm.h"
-#include "src/common/list.h"
 
 static function_entry slurm_functions[] = {
-    PHP_FE(hello_world, NULL)
-    PHP_FE(print_partitions, NULL)
-    {NULL, NULL, NULL}
+	PHP_FE(slurm_ping, NULL)
+	PHP_FE(slurm_slurmd_status, NULL)
+	PHP_FE(slurm_print_partition_names, NULL)
+	PHP_FE(slurm_get_specific_partition_info, NULL)
+	PHP_FE(slurm_get_partition_node_names, NULL)
+	PHP_FE(slurm_version, NULL)
+	PHP_FE(slurm_get_node_names, NULL)
+	PHP_FE(slurm_get_node_elements, NULL)
+	PHP_FE(slurm_get_node_element_by_name, NULL)
+	PHP_FE(slurm_get_node_state_by_name, NULL)
+	PHP_FE(slurm_get_control_configuration_keys, NULL)
+	PHP_FE(slurm_get_control_configuration_values, NULL)
+	PHP_FE(slurm_load_job_information, NULL)
+	PHP_FE(slurm_load_partition_jobs, NULL)
+	PHP_FE(slurm_get_node_states, NULL)
+	PHP_FE(slurm_hostlist_to_array, NULL)
+	PHP_FE(slurm_array_to_hostlist, NULL) {
+		NULL, NULL, NULL
+	}
 };
 
 zend_module_entry slurm_php_module_entry = {
 #if ZEND_MODULE_API_NO >= 20010901
-    STANDARD_MODULE_HEADER,
+	STANDARD_MODULE_HEADER,
 #endif
-    SLURM_PHP_EXTNAME,
-    slurm_functions,
-    NULL,
-    NULL,
-    NULL,
-    NULL,
-    NULL,
+	SLURM_PHP_EXTNAME,
+	slurm_functions,
+	NULL,
+	NULL,
+	NULL,
+	NULL,
+	NULL,
 #if ZEND_MODULE_API_NO >= 20010901
-    SLURM_PHP_VERSION,
+	SLURM_PHP_VERSION,
 #endif
-    STANDARD_MODULE_PROPERTIES
+	STANDARD_MODULE_PROPERTIES
 };
 
 #ifdef COMPILE_DL_SLURM_PHP
 ZEND_GET_MODULE(slurm_php)
 #endif
 
-PHP_FUNCTION(hello_world)
+/*****************************************************************************\
+ *	HELPER FUNCTION PROTOTYPES
+\*****************************************************************************/
+
+/*
+ * _parse_node_pointer - Parse a node pointer's contents into an
+ *	associative zval array where the key is descriptive to the
+ *	value
+ *
+ * IN sub_arr - array to store the contents of the node pointer
+ * IN node_arr - node pointer that needs parsing
+ */
+static void _parse_node_pointer(zval *sub_arr, node_info_t *node_arr);
+
+/*
+ * _parse_assoc_array - Parse a character array where the elements are
+ *	key-value pairs separated by delimiters into an associative
+ *	array
+ *
+ * IN char_arr - character array that needs parsing
+ * IN delims - character array that contains the delimiters used in parsing
+ * IN result_arr - associative array used to store the key_value pairs in
+ */
+static void _parse_assoc_array(char *char_arr, char *delims, zval *result_arr);
+
+/*
+ * _parse_array - Parse a character array where the elements are values
+ *	 separated by delimiters into a numerically indexed array
+ *
+ * IN char_arr - character array that needs parsing
+ * IN delims - character array that contains the delimiters used in parsing
+ * IN result_arr - numerically indexed array used to store the values in
+ */
+static void _parse_array(char *char_arr, char *delims, zval *rslt_arr);
+
+/*
+ * _zend_add_valid_assoc_string - checks a character array to see if
+ *	it's NULL or not, if so an associative null is added, if not
+ *	an associative string is added.
+ *
+ * IN rstl_arr - array to store the associative key_value pairs in
+ * IN key - character array used as the associative key
+ * IN val - character array to be validated and added as value if valid
+ */
+static void _zend_add_valid_assoc_string(zval *rstl_arr, char *key, char *val);
+
+/*
+ * _zend_add_valid_assoc_time_string - checks a unix timestamp to see if it's
+ * 	0 or not, if so an associative null is added, if not a formatted string
+ *	is added.
+ *
+ * IN rstl_arr - array to store the associative key_value pairs in
+ * IN key - character array used as the associative key
+ * IN val - time_t unix timestamp to be validated and added if valid
+ * NOTE : If you'd like to change the format in which the valid strings are
+ * returned, you can change the TIME_FORMAT_STRING macro to the needed format
+ */
+static void _zend_add_valid_assoc_time_string(
+	zval *rstl_arr, char *key, time_t *val);
+
+/*****************************************************************************\
+ *	TODO
+ *****************************************************************************
+ *	[ADJUSTING EXISTING FUNCTIONS]
+ *		- _parse_node_pointer
+ *			dynamic_plugin_data_t is currently not returned
+ *	[EXTRA FUNCTIONS]
+ *		- Functions that filter jobs on the nodes they are running on
+ *		- Scheduling
+ *		- ...
+\*****************************************************************************/
+
+/*****************************************************************************\
+ *	HELPER FUNCTIONS
+\*****************************************************************************/
+
+static void _parse_node_pointer(zval *sub_arr, node_info_t *node_arr)
+{
+	zval *sub_arr_2 = NULL;
+
+	_zend_add_valid_assoc_string(sub_arr, "Name", node_arr->name);
+	_zend_add_valid_assoc_string(sub_arr, "Arch.", node_arr->arch);
+	_zend_add_valid_assoc_time_string(sub_arr, "Boot Time",
+					 &node_arr->boot_time);
+	add_assoc_long(sub_arr, "#CPU'S", node_arr->cpus);
+	add_assoc_long(sub_arr, "#Cores/CPU", node_arr->cores);
+
+	if (node_arr->features == NULL) {
+		add_assoc_null(sub_arr, "Features");
+	} else {
+		ALLOC_INIT_ZVAL(sub_arr_2);
+		array_init(sub_arr_2);
+		_parse_array(node_arr->features, ",", sub_arr_2);
+		add_assoc_zval(sub_arr, "Features", sub_arr_2);
+	}
+
+	_zend_add_valid_assoc_string(sub_arr, "GRES", node_arr->gres);
+	add_assoc_long(sub_arr, "State", node_arr->node_state);
+	_zend_add_valid_assoc_string(sub_arr, "OS", node_arr->os);
+	add_assoc_long(sub_arr, "Real Mem", node_arr->real_memory);
+
+	if (node_arr->reason!=NULL) {
+		_zend_add_valid_assoc_string(sub_arr, "Reason",
+					    node_arr->reason);
+		_zend_add_valid_assoc_time_string(sub_arr,"Reason Timestamp",
+						 &node_arr->reason_time);
+		add_assoc_long(sub_arr, "Reason User Id",
+			       node_arr->reason_uid);
+	} else {
+		add_assoc_null(sub_arr, "Reason");
+		add_assoc_null(sub_arr, "Reason Timestamp");
+		add_assoc_null(sub_arr, "Reason User Id");
+	}
+
+	_zend_add_valid_assoc_time_string(sub_arr, "Slurmd Startup Time",
+					 &node_arr->slurmd_start_time);
+	add_assoc_long(sub_arr, "#Sockets/Node", node_arr->sockets);
+	add_assoc_long(sub_arr, "#Threads/Core", node_arr->threads);
+	add_assoc_long(sub_arr, "TmpDisk", node_arr->tmp_disk);
+	add_assoc_long(sub_arr, "Weight", node_arr->weight);
+}
+
+
+static void _parse_assoc_array(char *char_arr, char *delims, zval *result_arr)
+{
+	char *rslt = NULL;
+	char *tmp;
+	int i = 0;
+
+	rslt = strtok(char_arr, delims);
+	while (rslt != NULL) {
+		if (i == 0) {
+			tmp = rslt;
+		} else if (i == 1) {
+			if (strcmp(rslt,"(null)")==0) {
+				add_assoc_null(result_arr, tmp);
+			} else {
+				_zend_add_valid_assoc_string(result_arr,
+							    tmp, rslt);
+			}
+		}
+		i++;
+		if (i == 2) {
+			i = 0;
+		}
+		rslt = strtok(NULL, delims);
+	}
+}
+
+
+static void _parse_array(char *char_arr, char *delims, zval *rslt_arr)
+{
+	char *rslt = NULL;
+	char *tmp = NULL;
+
+	rslt = strtok(char_arr, delims);
+	while (rslt != NULL) {
+		if (strcmp(rslt, "(null)")==0) {
+			add_next_index_null(rslt_arr);
+		} else {
+			tmp = slurm_xstrdup(rslt);
+			add_next_index_string(rslt_arr, tmp, 1);
+			xfree(tmp);
+		}
+		rslt = strtok(NULL, delims);
+	}
+}
+
+static void _zend_add_valid_assoc_string(zval *rstl_arr, char *key, char *val)
+{
+	if (!val)
+		add_assoc_null(rstl_arr, key);
+	else
+		add_assoc_string(rstl_arr, key, val, 1);
+}
+
+
+static void _zend_add_valid_assoc_time_string(
+	zval *rstl_arr, char *key, time_t *val)
+{
+	char buf[80];
+	struct tm *timeinfo;
+
+	if (val==0) {
+		add_assoc_null(rstl_arr, key);
+	} else {
+		timeinfo = localtime(val);
+		strftime(buf, 80, TIME_FORMAT_STRING, timeinfo);
+		add_assoc_string(rstl_arr, key, buf, 1);
+	}
+}
+
+
+/*****************************************************************************\
+ *	SLURM STATUS FUNCTIONS
+\*****************************************************************************/
+
+PHP_FUNCTION(slurm_ping)
+{
+	int err = SLURM_SUCCESS;
+
+	array_init(return_value);
+	err = slurm_ping(1);
+	add_assoc_long(return_value,"Prim. Controller",err);
+	err = slurm_ping(2);
+	add_assoc_long(return_value,"Sec. Controller",err);
+}
+
+
+PHP_FUNCTION(slurm_slurmd_status)
+{
+	int err = SLURM_SUCCESS;
+	slurmd_status_t *status_ptr = NULL;
+
+	err = slurm_load_slurmd_status(&status_ptr);
+	if (err) {
+		RETURN_LONG(-2);
+	}
+
+	array_init(return_value);
+	_zend_add_valid_assoc_time_string(return_value,"Booted_at",
+					 &status_ptr->booted);
+	_zend_add_valid_assoc_time_string(return_value,"Last_Msg",
+					 &status_ptr->last_slurmctld_msg);
+	add_assoc_long(return_value,"Logging_Level", status_ptr->slurmd_debug);
+	add_assoc_long(return_value,"Actual_CPU's", status_ptr->actual_cpus);
+	add_assoc_long(return_value,"Actual_Sockets",
+		       status_ptr->actual_sockets);
+	add_assoc_long(return_value,"Actual_Cores",status_ptr->actual_cores);
+	add_assoc_long(return_value,"Actual_Threads",
+		       status_ptr->actual_threads);
+	add_assoc_long(return_value,"Actual_Real_Mem",
+		       status_ptr->actual_real_mem);
+	add_assoc_long(return_value,"Actual_Tmp_Disk",
+		       status_ptr->actual_tmp_disk);
+	add_assoc_long(return_value,"PID",status_ptr->pid);
+	_zend_add_valid_assoc_string(return_value, "Hostname",
+				    status_ptr->hostname);
+	_zend_add_valid_assoc_string(return_value, "Slurm Logfile",
+				    status_ptr->slurmd_logfile);
+	_zend_add_valid_assoc_string(return_value, "Step List",
+				    status_ptr->step_list);
+	_zend_add_valid_assoc_string(return_value, "Version",
+				    status_ptr->version);
+
+	if (status_ptr != NULL) {
+		slurm_free_slurmd_status(status_ptr);
+	}
+}
+
+
+PHP_FUNCTION(slurm_version)
+{
+	long option = -1;
+
+	if (zend_parse_parameters(ZEND_NUM_ARGS()TSRMLS_CC,
+				  "l", &option) == FAILURE) {
+		RETURN_LONG(-3);
+	}
+
+	switch (option) {
+	case 0:
+		RETURN_LONG(SLURM_VERSION_MAJOR(SLURM_VERSION_NUMBER));
+		break;
+	case 1:
+		RETURN_LONG(SLURM_VERSION_MINOR(SLURM_VERSION_NUMBER));
+		break;
+	case 2:
+		RETURN_LONG(SLURM_VERSION_MICRO(SLURM_VERSION_NUMBER));
+		break;
+	default:
+		array_init(return_value);
+		add_next_index_long(return_value,
+				    SLURM_VERSION_MAJOR(SLURM_VERSION_NUMBER));
+		add_next_index_long(return_value,
+				    SLURM_VERSION_MINOR(SLURM_VERSION_NUMBER));
+		add_next_index_long(return_value,
+				    SLURM_VERSION_MICRO(SLURM_VERSION_NUMBER));
+		break;
+	}
+}
+
+
+/*****************************************************************************\
+ *	SLURM PHP HOSTLIST FUNCTIONS
+\*****************************************************************************/
+
+PHP_FUNCTION(slurm_hostlist_to_array)
+{
+	long lngth = 0;
+	char *host_list = NULL;
+	hostlist_t hl = NULL;
+	int hl_length = 0;
+	int i=0;
+
+	if (zend_parse_parameters(ZEND_NUM_ARGS()TSRMLS_CC, "s|d",
+				 &host_list, &lngth) == FAILURE) {
+		RETURN_LONG(-3);
+	}
+
+	if ((host_list == NULL) || !strcmp(host_list, "")) {
+		RETURN_LONG(-3);
+	}
+
+	hl = slurm_hostlist_create(host_list);
+	hl_length = slurm_hostlist_count(hl);
+
+	if (hl_length==0) {
+		RETURN_LONG(-2);
+	}
+
+	array_init(return_value);
+	for (i=0; i<hl_length; i++) {
+		char *name = slurm_hostlist_shift(hl);
+		add_next_index_string(return_value, name, 1);
+		free(name);
+	}
+}
+
+
+PHP_FUNCTION(slurm_array_to_hostlist)
+{
+	zval *node_arr = NULL, **data;
+	hostlist_t hl = NULL;
+	HashTable *arr_hash;
+	HashPosition pointer;
+	int arr_length = 0;
+	char *buf;
+
+	if (zend_parse_parameters(ZEND_NUM_ARGS()TSRMLS_CC, "a",
+				 &node_arr) == FAILURE) {
+		RETURN_LONG(-3);
+	}
+
+	if (node_arr == NULL) {
+		RETURN_LONG(-3);
+	}
+
+	arr_hash = Z_ARRVAL_P(node_arr);
+	arr_length = zend_hash_num_elements(arr_hash);
+
+	if (arr_length==0) {
+		RETURN_LONG(-2);
+	}
+
+	hl = slurm_hostlist_create(NULL);
+	for (zend_hash_internal_pointer_reset_ex(arr_hash, &pointer);
+	     zend_hash_get_current_data_ex(arr_hash, (void**) &data,
+					   &pointer) == SUCCESS;
+		zend_hash_move_forward_ex(arr_hash, &pointer)) {
+		if (Z_TYPE_PP(data) == IS_STRING) {
+			slurm_hostlist_push_host(hl,Z_STRVAL_PP(data));
+		}
+	}
+
+	array_init(return_value);
+	buf = slurm_hostlist_ranged_string_xmalloc(hl);
+	_zend_add_valid_assoc_string(return_value, "HOSTLIST", buf);
+	xfree(buf);
+}
+
+
+/*****************************************************************************\
+ *	SLURM PARTITION READ FUNCTIONS
+\*****************************************************************************/
+
+PHP_FUNCTION(slurm_print_partition_names)
+{
+	int err = SLURM_SUCCESS;
+	int i;
+	partition_info_msg_t *prt_ptr = NULL;
+
+	err = slurm_load_partitions((time_t) NULL, &prt_ptr, 0);
+
+	if (err) {
+		RETURN_LONG(-2);
+	}
+
+	array_init(return_value);
+	for (i = 0; i < prt_ptr->record_count; i++) {
+		add_next_index_string(return_value,
+				      prt_ptr->partition_array[i].name, 1);
+	}
+
+	slurm_free_partition_info_msg(prt_ptr);
+
+	if (i == 0) {
+		RETURN_LONG(-1);
+	}
+}
+
+
+PHP_FUNCTION(slurm_get_specific_partition_info)
+{
+	long lngth = 0;
+	int err = SLURM_SUCCESS;
+	partition_info_msg_t *prt_ptr = NULL;
+	partition_info_t *prt_data = NULL;
+	char *name = NULL;
+	char *tmp = NULL;
+	int i = 0;
+	int y = 0;
+
+	if (zend_parse_parameters(ZEND_NUM_ARGS()TSRMLS_CC, "s|d", &name,
+				  &lngth) == FAILURE) {
+		RETURN_LONG(-3);
+	}
+
+	if ((name == NULL) || !strcmp(name, "")) {
+		RETURN_LONG(-3);
+	}
+
+	err = slurm_load_partitions((time_t) NULL, &prt_ptr, 0);
+
+	if (err) {
+		RETURN_LONG(-2);
+	}
+
+	if (prt_ptr->record_count != 0) {
+		for (i = 0; i < prt_ptr->record_count; i++) {
+			if (strcmp(prt_ptr->partition_array->name, name) == 0) {
+				prt_data = &prt_ptr->partition_array[i];
+				tmp = slurm_sprint_partition_info(prt_data, 1);
+				array_init(return_value);
+				_parse_assoc_array(tmp, "= ", return_value);
+				y++;
+				break;
+			}
+		}
+	}
+
+	slurm_free_partition_info_msg(prt_ptr);
+
+	if (y == 0) {
+		RETURN_LONG(-1);
+	}
+}
+
+
+PHP_FUNCTION(slurm_get_partition_node_names)
+{
+	char *prt_name = NULL;
+	long lngth = 0;
+	int err = SLURM_SUCCESS;
+	partition_info_msg_t *prt_ptr = NULL;
+	partition_info_t *prt_data = NULL;
+	int i = 0;
+	int y = 0;
+
+	if (zend_parse_parameters(ZEND_NUM_ARGS()TSRMLS_CC, "s|d", &prt_name,
+				  &lngth) == FAILURE) {
+		RETURN_LONG(-3);
+	}
+
+	if ((prt_name == NULL) || (strcmp(prt_name,"")==0)) {
+		RETURN_LONG(-3);
+	}
+
+	err = slurm_load_partitions((time_t) NULL, &prt_ptr, 0);
+
+	if (err)
+		RETURN_LONG(-2);
+
+	if (prt_ptr->record_count != 0) {
+		for (i = 0; i < prt_ptr->record_count; i++) {
+			if (!strcmp(prt_ptr->partition_array->name, prt_name)) {
+				prt_data = &prt_ptr->partition_array[i];
+				array_init(return_value);
+				add_next_index_string(
+					return_value, prt_data->nodes, 1);
+				y++;
+				break;
+			}
+		}
+	}
+
+	slurm_free_partition_info_msg(prt_ptr);
+
+	if (y == 0)
+		RETURN_LONG(-1);
+}
+
+
+/*****************************************************************************\
+ *	SLURM NODE CONFIGURATION READ FUNCTIONS
+\*****************************************************************************/
+
+PHP_FUNCTION(slurm_get_node_names)
+{
+	int err = SLURM_SUCCESS;
+	int i = 0;
+	node_info_msg_t *node_ptr = NULL;
+
+	err = slurm_load_node((time_t) NULL, &node_ptr, 0);
+	if (err) {
+		RETURN_LONG(-2);
+	}
+
+	if (node_ptr->record_count > 0) {
+		array_init(return_value);
+		for (i = 0; i < node_ptr->record_count; i++) {
+			add_next_index_string(
+				return_value, node_ptr->node_array[i].name, 1);
+		}
+	}
+
+	slurm_free_node_info_msg(node_ptr);
+
+	if(i==0) {
+		RETURN_LONG(-1);
+	}
+}
+
+
+PHP_FUNCTION(slurm_get_node_elements)
 {
-    RETURN_STRING("Hello World\n", 1);
+	int err = SLURM_SUCCESS;
+	int i = 0;
+	node_info_msg_t *node_ptr;
+	zval *sub_arr = NULL;
+
+	err = slurm_load_node((time_t) NULL, &node_ptr, 0);
+	if (err) {
+		RETURN_LONG(-2);
+	}
+
+	if (node_ptr->record_count > 0) {
+		array_init(return_value);
+		for (i = 0; i < node_ptr->record_count; i++) {
+			ALLOC_INIT_ZVAL(sub_arr);
+			array_init(sub_arr);
+			_parse_node_pointer(sub_arr, &node_ptr->node_array[i]);
+			add_assoc_zval(return_value,
+				       node_ptr->node_array[i].name,
+				       sub_arr);
+		}
+	}
+
+	slurm_free_node_info_msg(node_ptr);
+
+	if(i==0) {
+		RETURN_LONG(-1);
+	}
 }
 
-PHP_FUNCTION(print_partitions)
+
+PHP_FUNCTION(slurm_get_node_element_by_name)
 {
-	List sinfo_list = NULL;
-	int error_code = SLURM_SUCCESS;
-	uint16_t show_flags = 0;
-	static partition_info_msg_t *new_part_ptr;
-	printf("hey\n");
-	slurm_info("got here!");
-	printf("hey\n");
-	error_code = slurm_load_partitions((time_t) NULL, &new_part_ptr,
-					   show_flags);
-	if (error_code) {
-		error("slurm_load_part");
-		RETURN_INT(error_code);
+	int err = SLURM_SUCCESS;
+	int i = 0,y = 0;
+	node_info_msg_t *node_ptr;
+	char *node_name = NULL;
+	long lngth;
+	zval *sub_arr = NULL;
+
+	if (zend_parse_parameters(ZEND_NUM_ARGS()TSRMLS_CC, "s|d", &node_name,
+				  &lngth) == FAILURE) {
+		RETURN_LONG(-3);
+	}
+
+	if ((node_name == NULL) || (strcmp(node_name,"")==0)) {
+		RETURN_LONG(-3);
+	}
+
+	err = slurm_load_node((time_t) NULL, &node_ptr, 0);
+	if (err) {
+		RETURN_LONG(-2);
+	}
+
+	array_init(return_value);
+
+	for (i = 0; i < node_ptr->record_count; i++) {
+		if (strcmp(node_ptr->node_array->name, node_name) == 0) {
+			y++;
+			ALLOC_INIT_ZVAL(sub_arr);
+			array_init(sub_arr);
+			_parse_node_pointer(sub_arr, &node_ptr->node_array[i]);
+			add_assoc_zval(return_value, node_name,
+				       sub_arr);
+			break;
+		}
 	}
 
-//	sinfo_list = list_create(_sinfo_list_delete);
-			
-	RETURN_INT(error_code);
+	slurm_free_node_info_msg(node_ptr);
+
+	if (y == 0) {
+		RETURN_LONG(-1);
+	}
+}
+
+
+PHP_FUNCTION(slurm_get_node_state_by_name)
+{
+	int err = SLURM_SUCCESS;
+	int i = 0,y = 0;
+	node_info_msg_t *node_ptr;
+	char *node_name = NULL;
+	long lngth;
+
+	if (zend_parse_parameters(ZEND_NUM_ARGS()TSRMLS_CC, "s|d", &node_name,
+				  &lngth) == FAILURE) {
+		RETURN_LONG(-3);
+	}
+
+	if ((node_name == NULL) || (strcmp(node_name,"")==0)) {
+		RETURN_LONG(-3);
+	}
+
+	err = slurm_load_node((time_t) NULL, &node_ptr, 0);
+	if (err) {
+		RETURN_LONG(-2);
+	}
+
+	for (i = 0; i < node_ptr->record_count; i++) {
+		if (strcmp(node_ptr->node_array->name, node_name) == 0) {
+			y++;
+			RETURN_LONG(node_ptr->node_array[i].node_state);
+			break;
+		}
+	}
+
+	slurm_free_node_info_msg(node_ptr);
+
+	if (i == 0) {
+		RETURN_LONG(-1);
+	}
+
+	if (y==0) {
+		RETURN_LONG(-1);
+	}
+}
+
+
+PHP_FUNCTION(slurm_get_node_states)
+{
+	int err = SLURM_SUCCESS;
+	int i = 0;
+	node_info_msg_t *node_ptr;
+
+	err = slurm_load_node((time_t) NULL, &node_ptr, 0);
+	if (err) {
+		RETURN_LONG(-2);
+	}
+
+	array_init(return_value);
+	for (i = 0; i < node_ptr->record_count; i++) {
+		add_next_index_long(return_value,
+				    node_ptr->node_array[i].node_state);
+	}
+
+	slurm_free_node_info_msg(node_ptr);
+
+	if (i == 0) {
+		RETURN_LONG(-1);
+	}
+}
+
+
+/*****************************************************************************\
+ *	SLURM CONFIGURATION READ FUNCTIONS
+\*****************************************************************************/
+
+PHP_FUNCTION(slurm_get_control_configuration_keys)
+{
+	int err = SLURM_SUCCESS;
+	slurm_ctl_conf_t *ctrl_conf_ptr;
+	List lst;
+	ListIterator iter = NULL;
+	key_pair_t *k_p;
+
+	err = slurm_load_ctl_conf((time_t) NULL, &ctrl_conf_ptr);
+	if (err) {
+		RETURN_LONG(-2);
+	}
+
+	lst = slurm_ctl_conf_2_key_pairs(ctrl_conf_ptr);
+	if (!lst) {
+		RETURN_LONG(-1);
+	}
+
+	iter = slurm_list_iterator_create(lst);
+	array_init(return_value);
+	while ((k_p = slurm_list_next(iter))) {
+		add_next_index_string(return_value, k_p->name, 1);
+	}
+
+	slurm_free_ctl_conf(ctrl_conf_ptr);
+}
+
+
+PHP_FUNCTION(slurm_get_control_configuration_values)
+{
+	int err = SLURM_SUCCESS;
+	slurm_ctl_conf_t *ctrl_conf_ptr;
+	List lst;
+	ListIterator iter = NULL;
+	key_pair_t *k_p;
+
+	err = slurm_load_ctl_conf((time_t) NULL, &ctrl_conf_ptr);
+	if (err) {
+		RETURN_LONG(-2);
+	}
+
+	lst = slurm_ctl_conf_2_key_pairs(ctrl_conf_ptr);
+	if (!lst) {
+		RETURN_LONG(-1);
+	}
+
+	iter = slurm_list_iterator_create(lst);
+	array_init(return_value);
+	while ((k_p = slurm_list_next(iter))) {
+		if (k_p->value==NULL) {
+			add_next_index_null(return_value);
+		} else {
+			add_next_index_string(return_value, k_p->value, 1);
+		}
+	}
+
+	slurm_free_ctl_conf(ctrl_conf_ptr);
+}
+
+
+/*****************************************************************************\
+ *	SLURM JOB READ FUNCTIONS
+\*****************************************************************************/
+
+PHP_FUNCTION(slurm_load_job_information)
+{
+	int err = SLURM_SUCCESS;
+	int i = 0;
+	job_info_msg_t *job_ptr;
+	zval *sub_arr = NULL;
+	char *tmp;
+
+	err = slurm_load_jobs((time_t) NULL, &job_ptr, 0);
+	if (err) {
+		RETURN_LONG(-2);
+	}
+
+	array_init(return_value);
+	for (i = 0; i < job_ptr->record_count; i++) {
+		ALLOC_INIT_ZVAL(sub_arr);
+		array_init(sub_arr);
+		_parse_assoc_array(slurm_sprint_job_info(
+					   &job_ptr->job_array[i], 1),
+				   "= ", sub_arr);
+		tmp = slurm_xstrdup_printf("%u", job_ptr->job_array[i].job_id);
+		add_assoc_zval(return_value, tmp, sub_arr);
+		xfree(tmp);
+	}
+
+	slurm_free_job_info_msg(job_ptr);
+
+	if (i == 0) {
+		RETURN_LONG(-1);
+	}
+}
+
+
+PHP_FUNCTION(slurm_load_partition_jobs)
+{
+	int err = SLURM_SUCCESS;
+	int i = 0;
+	job_info_msg_t *job_ptr;
+	zval *sub_arr = NULL;
+	char *tmp;
+	char *pname = NULL;
+	long lngth;
+	long checker = 0;
+
+	if (zend_parse_parameters(ZEND_NUM_ARGS()TSRMLS_CC, "s|d", &pname,
+				  &lngth) == FAILURE) {
+		RETURN_LONG(-3);
+	}
+
+	if ((pname == NULL) || !strcmp(pname,"")) {
+		RETURN_LONG(-3);
+	}
+
+	err = slurm_load_jobs((time_t) NULL, &job_ptr, 0);
+	if (err) {
+		RETURN_LONG(-2);
+	}
+
+	array_init(return_value);
+	for (i = 0; i < job_ptr->record_count; i++) {
+		if (!strcmp(job_ptr->job_array->partition, pname)) {
+			checker++;
+			ALLOC_INIT_ZVAL(sub_arr);
+			array_init(sub_arr);
+			_parse_assoc_array(slurm_sprint_job_info(
+						   &job_ptr->job_array[i], 1),
+					   "= ", sub_arr);
+			tmp = slurm_xstrdup_printf(
+				"%u", job_ptr->job_array[i].job_id);
+			add_assoc_zval(return_value, tmp, sub_arr);
+			xfree(tmp);
+		}
+	}
+
+	slurm_free_job_info_msg(job_ptr);
+
+	if (i == 0) {
+		RETURN_LONG(-1);
+	}
+
+	if (checker==0)	{
+		RETURN_LONG(-1);
+	}
 }
diff --git a/contribs/phpext/slurm_php/slurm_php.h b/contribs/phpext/slurm_php/slurm_php.h
index d3be22f8c..3b1261d0f 100644
--- a/contribs/phpext/slurm_php/slurm_php.h
+++ b/contribs/phpext/slurm_php/slurm_php.h
@@ -1,52 +1,383 @@
 /*****************************************************************************\
  *  slurm_php.h - php interface to slurm.
  *
- *  $Id: slurm_php.h 13061 2008-01-22 21:23:56Z da $
  *****************************************************************************
- *  Copyright (C) 2004-2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
- *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
- *  Written by Danny Auble <da@llnl.gov>
- *  
- *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Copyright (C) 2011 - Trinity Centre for High Performance Computing
+ *  Copyright (C) 2011 - Trinity College Dublin
+ *  Written By : Vermeulen Peter <HoWest><Belgium>
+ *
+ *  This file is part of php-slurm, a resource management program.
  *  Please also read the included file: DISCLAIMER.
- *  
- *  SLURM is free software; you can redistribute it and/or modify it under
+ *
+ *  php-slurm is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
  *  Software Foundation; either version 2 of the License, or (at your option)
  *  any later version.
  *
- *  In addition, as a special exception, the copyright holders give permission 
+ *  In addition, as a special exception, the copyright holders give permission
  *  to link the code of portions of this program with the OpenSSL library under
- *  certain conditions as described in each individual source file, and 
- *  distribute linked combinations including the two. You must obey the GNU 
- *  General Public License in all respects for all of the code used other than 
- *  OpenSSL. If you modify file(s) with this exception, you may extend this 
- *  exception to your version of the file(s), but you are not obligated to do 
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
  *  so. If you do not wish to do so, delete this exception statement from your
- *  version.  If you delete this exception statement from all source files in 
+ *  version.  If you delete this exception statement from all source files in
  *  the program, then also delete it here.
- *  
- *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *
+ *  php-slurm is distributed in the hope that it will be useful, but WITHOUT ANY
  *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
  *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
  *  details.
- *  
+ *
  *  You should have received a copy of the GNU General Public License along
- *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  with php-slurm; if not, write to the Free Software Foundation, Inc.,
  *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
 \*****************************************************************************/
+
 #ifndef SLURM_PHP_H
 #define SLURM_PHP_H 1
 
-#define SLURM_PHP_VERSION "1.0"
+#define SLURM_PHP_VERSION "1.0.1"
 #define SLURM_PHP_EXTNAME "slurm"
+/*
+ * Adjust this value to change the format of the returned string
+ * values.
+ *
+ * For more information on formatting options :
+ * http://www.java2s.com/Tutorial/C/0460__time.h/strftime.htm
+ */
+#define TIME_FORMAT_STRING "%c"
 
-PHP_FUNCTION(hello_world);
-PHP_FUNCTION(print_partitions);
+#include <php.h>
+#include <slurm/slurm.h>
+#include <slurm/slurmdb.h>
+
+#include <time.h>
+#include <string.h>
+#include <stdlib.h>
+
+#include "src/common/xmalloc.h"
 
 extern zend_module_entry slurm_php_module_entry;
+
+/*****************************************************************************\
+ *	TYPEDEFS
+\*****************************************************************************/
+
+typedef struct key_value {
+	char *name;		/* key */
+	char *value;		/* value */
+} key_pair_t;
+
+/* define functions needed to avoid warnings (they are defined in
+ * src/common/xstring.h)  If you can figure out a way to make it so we
+ * don't have to make these declarations that would be awesome.  I
+ * didn't have time to spend on it when I was working on it. -da
+ */
+
+/*
+** strdup which uses xmalloc routines
+*/
+char *slurm_xstrdup(const char *str);
+
+/*
+** strdup formatted which uses xmalloc routines
+*/
+char *slurm_xstrdup_printf(const char *fmt, ...)
+  __attribute__ ((format (printf, 1, 2)));
+
+
+/*****************************************************************************\
+ *	SLURM PHP HOSTLIST FUNCTIONS
+\*****************************************************************************/
+
+/*
+ * slurm_hostlist_to_array - converts a hostlist string to
+ * a numerically indexed array.
+ *
+ * IN host_list - string value containing the hostlist
+ * RET numerically indexed array containing the names of the nodes
+ */
+PHP_FUNCTION(slurm_hostlist_to_array);
+
+/*
+ * slurm_array_to_hostlist - convert an array of nodenames into a hostlist
+ * 	string
+ *
+ * IN node_arr - Numerically indexed array containing a nodename on each index
+ * RET String variable containing the hostlist string
+ */
+PHP_FUNCTION(slurm_array_to_hostlist);
+
+
+/*****************************************************************************\
+ *	SLURM STATUS FUNCTIONS
+\*****************************************************************************/
+
+/*
+ * slurm_ping - Issues the slurm interface to return the status of the slurm
+ *	primary and secondary controller
+ *
+ * RET associative array containing the status ( status = 0 if online, = -1 if
+ *	offline ) of both controllers
+ * NOTE : the error codes and their meaning are described in the section
+ * 	labelled EXTRA
+ */
+PHP_FUNCTION(slurm_ping);
+
+/*
+ * slurm_slurmd_status - Issues the slurm interface to return the
+ *	status of the slave daemon ( running on this machine )
+ *
+ * RET associative array containing the status or a negative long variable
+ *	containing an error code
+ * NOTE : the error codes and their meaning are described in the section
+ * 	labelled EXTRA
+ */
+PHP_FUNCTION(slurm_slurmd_status);
+
+/*
+ * slurm_version - Returns the slurm version number in the requested format
+ *
+ * IN option - long/integer value linking to the formatting of the version
+ *	number
+ * RET long value containing the specific formatted version number a numeric
+ *	array containing the version number or a negative long variable
+ *	containing an error code.
+ * NOTE : the possible cases and their meaning are described in the section
+ * 	labelled EXTRA
+ */
+PHP_FUNCTION(slurm_version);
+
+
+/*****************************************************************************\
+ *	SLURM PARTITION READ FUNCTIONS
+\*****************************************************************************/
+
+/*
+ * slurm_print_partition_names - Creates and returns a numerically
+ *	indexed array containing the names of the partitions
+ *
+ * RET numerically indexed array containing the partitionnames or a
+ *	negative long variable containing an error code NOTE : the
+ *	error codes and their meaning are described in the section
+ *	labelled EXTRA
+ */
+PHP_FUNCTION(slurm_print_partition_names);
+/*
+ * slurm_get_specific_partition_info - Searches for the requested
+ *	partition and if found it returns an associative array
+ *	containing the information about this specific partition
+ *
+ * IN name - a string variable containing the partitionname
+ * OPTIONAL IN lngth - a long variable containing the length of the
+ *      partitionname
+ * RET an associative array containing the information about a
+ *	specific partition, or a negative long value containing an
+ *	error code
+ * NOTE : the error codes and their meaning are described in the
+ *	section labelled EXTRA
+ */
+PHP_FUNCTION(slurm_get_specific_partition_info);
+
+/*
+ * slurm_get_partition_node_names - Searches for the requested partition and
+ *	if found it parses the nodes into a numerically indexed array, which is
+ *	then returned to the calling function.
+ *
+ * IN name - a string variable containing the partitionname
+ *
+ * OPTIONAL IN lngth - a long variable containing the length of the
+ * partitionname
+ *
+ * RET a numerically indexed array containing the names of all the
+ *	nodes connected to this partition, or a negative long value
+ *	containing an error code
+ *
+ * NOTE : the error codes and their meaning are described in the
+ *	section labelled EXTRA
+ */
+PHP_FUNCTION(slurm_get_partition_node_names);
+
+
+/*****************************************************************************\
+ *	SLURM NODE CONFIGURATION READ FUNCTIONS
+\*****************************************************************************/
+
+/*
+ * slurm_get_node_names - Creates and returns a numerically index array
+ *	containing the nodenames.
+ *
+ * RET a numerically indexed array containing the requested nodenames,
+ *	or a negative long value containing an error code
+ * NOTE : the error codes and their meaning are described in the
+ *	section labelled EXTRA
+ */
+PHP_FUNCTION(slurm_get_node_names);
+
+/*
+ * slurm_get_node_elements - Creates and returns an associative array
+ *	containing all the nodes indexed by nodename and as value an
+ *	associative array containing their information.
+ *
+ * RET an associative array containing the nodes as keys and their
+ *	information as value, or a long value containing an error code
+ * NOTE : the error codes and their meaning are described in the
+ *	section labelled EXTRA
+ */
+PHP_FUNCTION(slurm_get_node_elements);
+
+/*
+ * slurm_get_node_element_by_name - Searches for the requested node
+ *	and if found it parses its information into an associative
+ *	array, which is then returned to the calling function.
+ *
+ * IN name - a string variable containing the nodename
+ * OPTIONAL IN lngth - a long variable containing the length of the nodename
+ * RET an associative array containing the requested information or a
+ *	long value containing an error code
+ * NOTE : the error codes and their meaning are described in the
+ *	section labelled EXTRA
+ */
+PHP_FUNCTION(slurm_get_node_element_by_name);
+
+/*
+ * slurm_get_node_state_by_name - Searches for the requested node and
+ *	if found it returns the state of that node
+ *
+ * IN name - a string variable containing the nodename
+ * OPTIONAL IN lngth - a long variable containing the length of the nodename
+ * RET a long value containing the state of the node [0-7] or a
+ *	negative long value containing the error code
+ * NOTE : the error codes and their meaning are described in the
+ *	section labelled EXTRA
+ */
+PHP_FUNCTION(slurm_get_node_state_by_name);
+
+/*
+ * slurm_get_node_states - Creates a numerically indexed array
+ *	containing the state of each node ( only the state ! ) as a
+ *	long value. This function could be used to create a summary of
+ *	the node states without having to do a lot of processing ( or
+ *	having to deal with overlapping nodes between partitions ).
+ *
+ * RET a numerically indexed array containing node states
+ */
+PHP_FUNCTION(slurm_get_node_states);
+
+
+/*****************************************************************************\
+ *	SLURM CONFIGURATION READ FUNCTIONS
+\*****************************************************************************/
+
+/*
+ * Due to the configuration being quite large, i decided to create 2 functions
+ * to return the keys and values separately. ( to prevent a buffer overflow )
+ */
+
+/*
+ * slurm_get_control_configuration_keys - Retrieves the configuration
+ *	from the slurm daemon and parses it into a numerically indexed
+ *	array containing the keys that link to the values ( the values
+ *	are retrieved by the slurm_get_control_configuration_values
+ *	function )
+ *
+ * RET a numerically indexed array containing keys that describe the
+ *	values of the configuration of the slurm daemon, or a long
+ *	value containing an error code
+ *
+ *  NOTE : the error codes and their meaning are described in the
+ *	section labelled EXTRA
+ */
+PHP_FUNCTION(slurm_get_control_configuration_keys);
+
+/*
+ * slurm_get_control_configuration_values - Retrieves the
+ *	configuration from the slurm daemon and parses it into a
+ *	numerically indexed array containing the values that link to the
+ *	keys ( the keys are retrieved by the
+ *	slurm_get_control_configuration_keys function )
+ *
+ * RET a numerically indexed array containing the values of the
+ *	configuration of the slurm daemon, or a long value containing
+ *	an error code
+ * NOTE : the error codes and their meaning are described in the
+ *	section labelled EXTRA
+ */
+PHP_FUNCTION(slurm_get_control_configuration_values);
+
+
+/*****************************************************************************\
+ *	SLURM JOB READ FUNCTIONS
+\*****************************************************************************/
+
+/*
+ * slurm_load_job_information - Loads the information of all the jobs,
+ *	parses it and returns the values as an associative array where
+ *	each key is the job id linking to an associative array with
+ *	the information of the job
+ *
+ * RET an associative array containing the information of all jobs, or
+ *	a long value containing an error code.
+ *
+ * NOTE : the error codes and their meaning are described in the
+ *	section labelled EXTRA
+ */
+PHP_FUNCTION(slurm_load_job_information);
+
+/*
+ * slurm_load_partition_jobs - Retrieve the information of all the
+ *	jobs running on a single partition.
+ *
+ * IN pname - The partition name as a string value
+ * OPTIONAL IN lngth - a long variable containing the length of the
+ * partitionname
+ * RET an associative array containing the information of all the jobs
+ *	running on this partition. Or a long value containing an error
+ *	code
+ * NOTE : the error codes and their meaning are described in the
+ *	section labelled EXTRA
+ */
+PHP_FUNCTION(slurm_load_partition_jobs);
+
+
+/*****************************************************************************\
+ *	EXTRA
+ *****************************************************************************
+ *
+ *	[ERROR CODES]
+ *
+ *		-3	:	no/incorrect variables were passed on
+ *		-2	:	An error occurred whilst trying to communicate
+ *				with the daemon
+ *		-1	:	Your query produced no results
+ *
+ *	[VERSION FORMATTING OPTIONS]
+ *
+ *		0	:	major of the version number
+ *		1	:	minor of the version number
+ *		2	:	micro of the version number
+ *		default	:	full version number
+ *
+ *		[EXPLANATION]
+ *
+ *			Consider the version number 2.2.3,
+ *			if we were to split this into an array
+ *			where the "." sign is the delimiter
+ *			we would receive the following
+ *
+ *				[2]	=>	MAJOR
+ *				[2]	=>	MINOR
+ *				[3]	=>	MICRO
+ *
+ *			When requesting the major you would
+ *			only receive the major, when requesting
+ *			the full version you would receive the array
+ *			as depicted above.
+ *
+\*****************************************************************************/
+
 #define phpext_slurm_php_ptr &slurm_php_module_entry
 
 #endif
diff --git a/contribs/phpext/slurm_php/tests/slurm_array_to_hostlist_basic.phpt b/contribs/phpext/slurm_php/tests/slurm_array_to_hostlist_basic.phpt
new file mode 100644
index 000000000..37920d235
--- /dev/null
+++ b/contribs/phpext/slurm_php/tests/slurm_array_to_hostlist_basic.phpt
@@ -0,0 +1,29 @@
+--TEST--
+Test function slurm_array_to_hostlist() by calling it with its expected arguments
+--CREDIT--
+Jimmy Tang <jtang@tchpc.tcd.ie>
+--SKIPIF--
+<?php
+extension_loaded("slurm") or die("skip slurm extension not loaded\n");
+function_exists("slurm_array_to_hostlist") or die("skip function slurm_array_to_hostlist unavailable");
+?>
+--FILE--
+<?php
+
+echo "*** Test by calling method or function with its expected arguments ***\n";
+
+$hosts=array();
+
+array_push($hosts, "host01");
+array_push($hosts, "host02");
+array_push($hosts, "another-host02");
+
+var_dump(slurm_array_to_hostlist($hosts));
+
+?>
+--EXPECT--
+*** Test by calling method or function with its expected arguments ***
+array(1) {
+  ["HOSTLIST"]=>
+  string(26) "host[01-02],another-host02"
+}
diff --git a/contribs/phpext/slurm_php/tests/slurm_array_to_hostlist_error.phpt b/contribs/phpext/slurm_php/tests/slurm_array_to_hostlist_error.phpt
new file mode 100644
index 000000000..d0c96bf43
--- /dev/null
+++ b/contribs/phpext/slurm_php/tests/slurm_array_to_hostlist_error.phpt
@@ -0,0 +1,30 @@
+--TEST--
+Test function slurm_array_to_hostlist() by calling it more than or less than its expected arguments
+--CREDIT--
+Jimmy Tang <jtang@tchpc.tcd.ie>
+--SKIPIF--
+<?php
+extension_loaded("slurm") or die("skip slurm extension not loaded\n");
+function_exists("slurm_array_to_hostlist") or die("skip function slurm_array_to_hostlist unavailable");
+?>
+--FILE--
+<?php
+
+echo "*** Test by calling method or function with incorrect numbers of arguments ***\n";
+
+$extra_arg = array();
+
+$ret = slurm_array_to_hostlist( $extra_arg );
+if ($ret < 0)
+	echo "! ret $ret < 0";
+
+/* this needs to be implemented better */
+/*
+$ret = slurm_array_to_hostlist(  );
+if ($ret < 0)
+	echo "! ret $ret < 0";
+*/
+?>
+--EXPECTF--
+*** Test by calling method or function with incorrect numbers of arguments ***
+! ret -2 < 0
diff --git a/contribs/phpext/slurm_php/tests/slurm_get_control_configuration_keys_basic.phpt b/contribs/phpext/slurm_php/tests/slurm_get_control_configuration_keys_basic.phpt
new file mode 100644
index 000000000..aa4e90609
--- /dev/null
+++ b/contribs/phpext/slurm_php/tests/slurm_get_control_configuration_keys_basic.phpt
@@ -0,0 +1,35 @@
+--TEST--
+Test function slurm_get_control_configuration_keys() by calling it with its expected arguments
+--CREDIT--
+Peter Vermeulen <nmb.peterv@gmail.com>
+--SKIPIF--
+<?php
+extension_loaded("slurm") or die("skip slurm extension not loaded\n");
+function_exists("slurm_get_control_configuration_keys") or die("skip function slurm_get_control_configuration_keys unavailable");
+?>
+--FILE--
+<?php
+echo "*** Test by calling method or function with its expected arguments ***\n";
+
+$config_keys_arr = slurm_get_control_configuration_keys();
+
+if(is_array($config_keys_arr)){
+	if(count($config_keys_arr)==0) {
+		$config_keys_arr = -1;
+	}
+}
+
+if((gettype($config_keys_arr)=="array") && ($config_keys_arr != NULL)) {
+	echo "! slurm_get_control_configuration_keys	:	SUCCESS";
+} else if($config_keys_arr == -3) {
+	echo "[SLURM:ERROR] -3 : Faulty variables ( or no variables ) where passed on";
+} else if($config_keys_arr == -2) {
+	echo "[SLURM:ERROR] -2 : Daemons not online";
+} else if($config_keys_arr == -1) {
+	echo "[SLURM:ERROR] -1 : No configuration data was found on your system";
+}
+
+?>
+--EXPECT--
+*** Test by calling method or function with its expected arguments ***
+! slurm_get_control_configuration_keys	:	SUCCESS
diff --git a/contribs/phpext/slurm_php/tests/slurm_get_control_configuration_values_basic.phpt b/contribs/phpext/slurm_php/tests/slurm_get_control_configuration_values_basic.phpt
new file mode 100644
index 000000000..0da73ef46
--- /dev/null
+++ b/contribs/phpext/slurm_php/tests/slurm_get_control_configuration_values_basic.phpt
@@ -0,0 +1,39 @@
+--TEST--
+Test function slurm_get_control_configuration_values() by calling it with its expected arguments
+--CREDIT--
+Peter Vermeulen <nmb_pv@hotmail.com>
+--SKIPIF--
+<?php
+extension_loaded("slurm") or die("skip slurm extension not loaded\n");
+function_exists("slurm_get_control_configuration_values") or die("skip function slurm_get_control_configuration_values unavailable");
+?>
+--FILE--
+<?php
+echo "*** Test by calling method or function with its expected arguments ***\n";
+
+
+$config_values_arr = slurm_get_control_configuration_values();
+
+if(is_array($config_values_arr)){
+	if(count($config_values_arr)==0) {
+		$config_values_arr = -1;
+	}
+}
+
+if((gettype($config_values_arr)=="array") && ($config_values_arr != NULL)) {
+	echo "! slurm_get_control_configuration_values	:	SUCCESS";
+} else if($config_values_arr == -3) {
+	echo "[SLURM:ERROR] -3 : Faulty variables ( or no variables ) where passed on";
+} else if($config_values_arr == -2) {
+	echo "[SLURM:ERROR] -2 : Daemons not online";
+} else if($config_values_arr == -1) {
+	echo "[SLURM:ERROR] -1 : No configuration data was found on your system";
+}  else {
+	echo "[SLURM:ERROR] -4 : ?Unknown?";
+}
+
+
+?>
+--EXPECT--
+*** Test by calling method or function with its expected arguments ***
+! slurm_get_control_configuration_values	:	SUCCESS
diff --git a/contribs/phpext/slurm_php/tests/slurm_get_node_element_by_name_basic.phpt b/contribs/phpext/slurm_php/tests/slurm_get_node_element_by_name_basic.phpt
new file mode 100644
index 000000000..8b8602217
--- /dev/null
+++ b/contribs/phpext/slurm_php/tests/slurm_get_node_element_by_name_basic.phpt
@@ -0,0 +1,34 @@
+--TEST--
+Test function slurm_get_node_element_by_name() by calling it with its expected arguments
+--CREDIT--
+Peter Vermeulen <nmb_pv@hotmail.com>
+--SKIPIF--
+<?php
+extension_loaded("slurm") or die("skip slurm extension not loaded\n");
+function_exists("slurm_get_node_element_by_name") or die("skip function slurm_get_node_element_by_name() unavailable");
+?>
+--FILE--
+<?php
+echo "*** Test by calling method or function without any arguments ***\n";
+
+$node_info = slurm_get_node_element_by_name(5);
+if($node_info == -1) {
+	echo "[SLURM:ERROR] -1 : No node by that name was found on your system\n";
+}
+
+$node_info = slurm_get_node_element_by_name(NULL);
+if($node_info == -3) {
+	echo "[SLURM:ERROR] -3 : Faulty variables ( or no variables ) where passed on\n";
+}
+
+$node_info = slurm_get_node_element_by_name('');
+if($node_info == -3) {
+	echo "[SLURM:ERROR] -3 : Faulty variables ( or no variables ) where passed on\n";
+}
+
+?>
+--EXPECT--
+*** Test by calling method or function without any arguments ***
+[SLURM:ERROR] -1 : No node by that name was found on your system
+[SLURM:ERROR] -3 : Faulty variables ( or no variables ) where passed on
+[SLURM:ERROR] -3 : Faulty variables ( or no variables ) where passed on
diff --git a/contribs/phpext/slurm_php/tests/slurm_get_node_element_by_name_error.phpt b/contribs/phpext/slurm_php/tests/slurm_get_node_element_by_name_error.phpt
new file mode 100644
index 000000000..86e464550
--- /dev/null
+++ b/contribs/phpext/slurm_php/tests/slurm_get_node_element_by_name_error.phpt
@@ -0,0 +1,24 @@
+--TEST--
+Test function slurm_get_node_element_by_name() by calling it with its expected arguments
+--CREDIT--
+Peter Vermeulen <nmb_pv@hotmail.com>
+--SKIPIF--
+<?php
+extension_loaded("slurm") or die("skip slurm extension not loaded\n");
+function_exists("slurm_get_node_element_by_name") or die("skip function slurm_get_node_element_by_name() unavailable");
+?>
+--FILE--
+<?php
+echo "*** Test by calling method or function with its expected arguments ***\n";
+
+$node_info = slurm_get_node_element_by_name("ez79e8ezaeze46aze1ze3aer8rz7r897azr798r64f654ff65ds4f56dsf4dc13xcx2wc1wc31c");
+if((gettype($node_info)!="integer") && ($node_info != NULL)) {
+	echo "! slurm_get_node_element_by_name('ez79e8ezaeze46aze1ze3aer8rz7r897azr798r64f654ff65ds4f56dsf4dc13xcx2wc1wc31c')	:	SUCCESS";
+} else if($node_info == -1) {
+	echo "[SLURM:ERROR] -1 : No node by that name was found on your system";
+}
+
+?>
+--EXPECT--
+*** Test by calling method or function with its expected arguments ***
+[SLURM:ERROR] -1 : No node by that name was found on your system
diff --git a/contribs/phpext/slurm_php/tests/slurm_get_node_elements_basic.phpt b/contribs/phpext/slurm_php/tests/slurm_get_node_elements_basic.phpt
new file mode 100644
index 000000000..d1c089e7f
--- /dev/null
+++ b/contribs/phpext/slurm_php/tests/slurm_get_node_elements_basic.phpt
@@ -0,0 +1,33 @@
+--TEST--
+Test function slurm_get_node_elements() by calling it with its expected arguments
+--CREDIT--
+Peter Vermeulen <nmb_pv@hotmail.com>
+--SKIPIF--
+<?php
+extension_loaded("slurm") or die("skip slurm extension not loaded\n");
+function_exists("slurm_get_node_elements") or die("skip function slurm_get_node_elements() unavailable");
+?>
+--FILE--
+<?php
+echo "*** Test by calling method or function with its expected arguments ***\n";
+
+$node_arr = slurm_get_node_elements();
+
+if(is_array($node_arr)){
+	if(count($node_arr)==0) {
+		$node_arr = -1;
+	}
+}
+
+if((gettype($node_arr)!="integer") && ($node_arr != NULL)) {
+	echo "! slurm_get_node_elements()	:	SUCCESS";
+} else if($node_arr == -2) {
+	echo "[SLURM:ERROR] -2 : Daemons not online";
+} else if($node_arr == -1) {
+	echo "[SLURM:ERROR] -1 : No nodes could be found on your system";
+}
+
+?>
+--EXPECT--
+*** Test by calling method or function with its expected arguments ***
+! slurm_get_node_elements()	:	SUCCESS
diff --git a/contribs/phpext/slurm_php/tests/slurm_get_node_names_basic.phpt b/contribs/phpext/slurm_php/tests/slurm_get_node_names_basic.phpt
new file mode 100644
index 000000000..c7d1af7d4
--- /dev/null
+++ b/contribs/phpext/slurm_php/tests/slurm_get_node_names_basic.phpt
@@ -0,0 +1,29 @@
+--TEST--
+Test function slurm_get_node_names() by calling it with its expected arguments
+--FILE--
+<?php
+
+echo "*** Test by calling method or function with its expected arguments ***\n" ;
+
+$nameArray = slurm_get_node_names();
+
+if(is_array($nameArray)){
+	if(count($nameArray)==0) {
+		$nameArray = -1;
+	}
+}
+
+if((gettype($nameArray)=="array") && ($nameArray != NULL)) {
+	echo "! slurm_get_node_names	:	SUCCESS";
+} else if($nameArray == -3) {
+	echo "[SLURM:ERROR] -3 : Faulty variables ( or no variables ) where passed on";
+} else if($nameArray == -2) {
+	echo "[SLURM:ERROR] -2 : Daemons not online";
+} else if($nameArray == -1) {
+	echo "[SLURM:ERROR] -1 : No nodes reside on your system";
+}
+
+?>
+--EXPECT--
+*** Test by calling method or function with its expected arguments ***
+! slurm_get_node_names	:	SUCCESS
diff --git a/contribs/phpext/slurm_php/tests/slurm_get_node_state_by_name_basic.phpt b/contribs/phpext/slurm_php/tests/slurm_get_node_state_by_name_basic.phpt
new file mode 100644
index 000000000..79826d996
--- /dev/null
+++ b/contribs/phpext/slurm_php/tests/slurm_get_node_state_by_name_basic.phpt
@@ -0,0 +1,41 @@
+--TEST--
+Test function slurm_get_node_state_by_name() by calling it with its expected arguments
+--CREDIT--
+Peter Vermeulen <nmb_pv@hotmail.com>
+--SKIPIF--
+<?php
+extension_loaded("slurm") or die("skip slurm extension not loaded\n");
+function_exists("slurm_get_node_state_by_name") or die("skip function slurm_get_node_state_by_name() unavailable");
+?>
+--FILE--
+<?php
+echo "*** Test by calling method or function with faulty arguments ***\n";
+
+$state = slurm_get_node_state_by_name(5);
+if($state == -1) {
+	echo "[SLURM:ERROR] -1 : No node by that name was found on your system\n";
+} else if($state == -2) {
+	echo "[SLURM:ERROR] -2 : Daemons not online";
+	exit();
+}
+
+$state = slurm_get_node_state_by_name(NULL);
+if($state == -3) {
+	echo "[SLURM:ERROR] -3 : Faulty variables ( or no variables ) where passed on\n";
+} else if($state == -2) {
+	echo "[SLURM:ERROR] -2 : Daemons not online";
+}
+
+$state = slurm_get_node_state_by_name('');
+if($state == -3) {
+	echo "[SLURM:ERROR] -3 : Faulty variables ( or no variables ) where passed on\n";
+} else if($state == -2) {
+	echo "[SLURM:ERROR] -2 : Daemons not online";
+}
+
+?>
+--EXPECT--
+*** Test by calling method or function with faulty arguments ***
+[SLURM:ERROR] -1 : No node by that name was found on your system
+[SLURM:ERROR] -3 : Faulty variables ( or no variables ) where passed on
+[SLURM:ERROR] -3 : Faulty variables ( or no variables ) where passed on
diff --git a/contribs/phpext/slurm_php/tests/slurm_get_node_state_by_name_error.phpt b/contribs/phpext/slurm_php/tests/slurm_get_node_state_by_name_error.phpt
new file mode 100644
index 000000000..f0b5f015b
--- /dev/null
+++ b/contribs/phpext/slurm_php/tests/slurm_get_node_state_by_name_error.phpt
@@ -0,0 +1,24 @@
+--TEST--
+Test function slurm_get_node_state_by_name() by calling it with its expected arguments
+--CREDIT--
+Peter Vermeulen <nmb_pv@hotmail.com>
+--SKIPIF--
+<?php
+extension_loaded("slurm") or die("skip slurm extension not loaded\n");
+function_exists("slurm_get_node_state_by_name") or die("skip function slurm_get_node_state_by_name() unavailable");
+?>
+--FILE--
+<?php
+echo "*** Test by calling method or function with faulty arguments ***\n";
+
+$state = slurm_get_node_state_by_name("ez79e8ezaeze46aze1ze3aer8rz7r897azr798r64f654ff65ds4f56dsf4dc13xcx2wc1wc31c");
+if($state == -1) {
+	echo "[SLURM:ERROR] -1 : No node by that name was found on your system\n";
+} else if($state == -2) {
+	echo "[SLURM:ERROR] -2 : Daemons not online";
+}
+
+?>
+--EXPECT--
+*** Test by calling method or function with faulty arguments ***
+[SLURM:ERROR] -1 : No node by that name was found on your system
diff --git a/contribs/phpext/slurm_php/tests/slurm_get_node_states_basic.phpt b/contribs/phpext/slurm_php/tests/slurm_get_node_states_basic.phpt
new file mode 100644
index 000000000..a5a70dac2
--- /dev/null
+++ b/contribs/phpext/slurm_php/tests/slurm_get_node_states_basic.phpt
@@ -0,0 +1,28 @@
+--TEST--
+Test function slurm_get_node_states() by calling it with its expected arguments
+--CREDIT--
+Peter Vermeulen <nmb_pv@hotmail.com>
+--SKIPIF--
+<?php
+extension_loaded("slurm") or die("skip slurm extension not loaded\n");
+function_exists("slurm_get_node_states") or die("skip function slurm_get_node_states() unavailable");
+?>
+--FILE--
+<?php
+echo "*** Test by calling method or function with correct arguments ***\n";
+
+$state = slurm_get_node_states();
+
+if(is_array($state)) {
+	if(count($state)==0) {
+		echo "[SLURM:ERROR] -1 : No nodes found on your system\n";
+	} else {
+		echo "[SLURM:SUCCESS] : slurm_get_node_states() succesfully returned it's data";
+	}
+} else if($state == -2) {
+	echo "[SLURM:ERROR] -2 : Daemons not online";	
+}
+?>
+--EXPECT--
+*** Test by calling method or function with correct arguments ***
+[SLURM:SUCCESS] : slurm_get_node_states() succesfully returned it's data
diff --git a/contribs/phpext/slurm_php/tests/slurm_get_partition_node_names_basic.phpt b/contribs/phpext/slurm_php/tests/slurm_get_partition_node_names_basic.phpt
new file mode 100644
index 000000000..658f0a010
--- /dev/null
+++ b/contribs/phpext/slurm_php/tests/slurm_get_partition_node_names_basic.phpt
@@ -0,0 +1,17 @@
+--TEST--
+Test function slurm_get_partition_node_names() by calling it with its expected arguments
+--FILE--
+<?php
+
+
+echo "*** Test by calling method or function with its expected arguments ***\n";
+
+$ret = slurm_get_partition_node_names("debug");
+
+if ($ret)
+	echo "! slurm_get_partition_node_names ok";
+
+?>
+--EXPECT--
+*** Test by calling method or function with its expected arguments ***
+! slurm_get_partition_node_names ok
diff --git a/contribs/phpext/slurm_php/tests/slurm_get_partition_node_names_error.phpt b/contribs/phpext/slurm_php/tests/slurm_get_partition_node_names_error.phpt
new file mode 100644
index 000000000..ba3c51e59
--- /dev/null
+++ b/contribs/phpext/slurm_php/tests/slurm_get_partition_node_names_error.phpt
@@ -0,0 +1,30 @@
+--TEST--
+Test function slurm_get_partition_node_names() by calling it with its expected arguments
+--CREDIT--
+Peter Vermeulen <nmb_pv@hotmail.com>
+--SKIPIF--
+<?php
+extension_loaded("slurm") or die("skip slurm extension not loaded\n");
+function_exists("slurm_get_partition_node_names") or die("skip function slurm_get_partition_node_names() unavailable");
+?>
+--FILE--
+<?php
+echo "*** Test by calling method or function with its expected arguments ***\n";
+
+$part_info_arr = slurm_get_partition_node_names("ez79e8ezaeze46aze1ze3aer8rz7r897azr798r64f654ff65ds4f56dsf4dc13xcx2wc1wc31c");
+if((gettype($part_info_arr)!="integer") && ($part_info_arr != NULL)) {
+	echo "! slurm_get_partition_node_names('ez79e8ezaeze46aze1ze3aer8rz7r897azr798r64f654ff65ds4f56dsf4dc13xcx2wc1wc31c')	:	SUCCESS";
+} else if($part_info_arr == -3) {
+	echo "[SLURM:ERROR] -3 : Faulty variables ( or no variables ) where passed on";
+} else if($part_info_arr == -2) {
+	echo "[SLURM:ERROR] -2 : Daemons not online";
+} else if($part_info_arr == -1) {
+	echo "[SLURM:ERROR] -1 : No partition by that name was found on your system";
+} else {
+	echo "[SLURM:ERROR] -4 : ?Unknown?";
+}
+
+?>
+--EXPECT--
+*** Test by calling method or function with its expected arguments ***
+[SLURM:ERROR] -1 : No partition by that name was found on your system
diff --git a/contribs/phpext/slurm_php/tests/slurm_get_specific_partition_info_basic.phpt b/contribs/phpext/slurm_php/tests/slurm_get_specific_partition_info_basic.phpt
new file mode 100644
index 000000000..9d94e9fc7
--- /dev/null
+++ b/contribs/phpext/slurm_php/tests/slurm_get_specific_partition_info_basic.phpt
@@ -0,0 +1,37 @@
+--TEST--
+Test function slurm_get_specific_partition_info() by calling it with its expected arguments
+--CREDIT--
+Jimmy Tang <jtang@tchpc.tcd.ie>
+--SKIPIF--
+<?php
+extension_loaded("slurm") or die("skip slurm extension not loaded\n");
+function_exists("slurm_get_specific_partition_info") or die("skip function slurm_get_specific_partition_info unavailable");
+function_exists("slurm_print_partition_names") or die("skip function slurm_print_partition_names unavailable");
+?>
+--FILE--
+<?php
+echo "*** Test by calling method or function with its expected arguments ***\n";
+
+$arr = slurm_print_partition_names();
+if(is_array($arr)) {
+	if(count($arr)!=0) {
+		$partition = slurm_get_specific_partition_info($arr[0]);
+		if(is_array($partition)) {
+			echo "[SLURM:SUCCESS]";
+		} else if($partition == -3) {
+			echo "[SLURM:ERROR] -3 : Faulty variables ( or no variables ) where passed on";
+		} else if($partition == -2) {
+			echo "[SLURM:ERROR] -2 : Daemons not online";
+		} else if($partition == -1) {
+			echo "[SLURM:ERROR] -1 : No partition by that name was found on your system";
+		}
+	} else {
+		echo "! No partitions available !";
+	}
+} else {
+	echo "[SLURM:ERROR] CODE=".$arr."| while trying to retreive partition names";
+}
+?>
+--EXPECTF--
+*** Test by calling method or function with its expected arguments ***
+[SLURM:SUCCESS]
diff --git a/contribs/phpext/slurm_php/tests/slurm_get_specific_partition_info_error.phpt b/contribs/phpext/slurm_php/tests/slurm_get_specific_partition_info_error.phpt
new file mode 100644
index 000000000..319de78f9
--- /dev/null
+++ b/contribs/phpext/slurm_php/tests/slurm_get_specific_partition_info_error.phpt
@@ -0,0 +1,30 @@
+--TEST--
+Test function slurm_get_specific_partition_info() by calling it with its expected arguments
+--CREDIT--
+Peter Vermeulen <nmb_pv@hotmail.com>
+--SKIPIF--
+<?php
+extension_loaded("slurm") or die("skip slurm extension not loaded\n");
+function_exists("slurm_get_specific_partition_info") or die("skip function slurm_get_specific_partition_info() unavailable");
+?>
+--FILE--
+<?php
+echo "*** Test by calling method or function with its expected arguments ***\n";
+
+$part_info_arr = slurm_get_specific_partition_info("ez79e8ezaeze46aze1ze3aer8rz7r897azr798r64f654ff65ds4f56dsf4dc13xcx2wc1wc31c");
+if((gettype($part_info_arr)!="integer") && ($part_info_arr != NULL)) {
+	echo "! slurm_get_specific_partition_info('ez79e8ezaeze46aze1ze3aer8rz7r897azr798r64f654ff65ds4f56dsf4dc13xcx2wc1wc31c')	:	SUCCESS";
+} else if($part_info_arr == -3) {
+	echo "[SLURM:ERROR] -3 : Faulty variables ( or no variables ) where passed on";
+} else if($part_info_arr == -2) {
+	echo "[SLURM:ERROR] -2 : Daemons not online";
+} else if($part_info_arr == -1) {
+	echo "[SLURM:ERROR] -1 : No partition by that name was found on your system";
+} else {
+	echo "[SLURM:ERROR] -4 : ?Unknown?";
+}
+
+?>
+--EXPECT--
+*** Test by calling method or function with its expected arguments ***
+[SLURM:ERROR] -1 : No partition by that name was found on your system
diff --git a/contribs/phpext/slurm_php/tests/slurm_hostlist_to_array_basic.phpt b/contribs/phpext/slurm_php/tests/slurm_hostlist_to_array_basic.phpt
new file mode 100644
index 000000000..b8b864f28
--- /dev/null
+++ b/contribs/phpext/slurm_php/tests/slurm_hostlist_to_array_basic.phpt
@@ -0,0 +1,27 @@
+--TEST--
+Test function slurm_hostlist_to_array() by calling it with its expected arguments
+--CREDIT--
+Jimmy Tang <jtang@tchpc.tcd.ie>
+--SKIPIF--
+<?php
+extension_loaded("slurm") or die("skip slurm extension not loaded\n");
+function_exists("slurm_hostlist_to_array") or die("skip function slurm_hostlist_to_array unavailable");
+?>
+--FILE--
+<?php
+echo "*** Test by calling method or function with its expected arguments ***\n";
+
+$hosts = "host[01-02],another-host02";
+var_dump(slurm_hostlist_to_array($hosts));
+
+?>
+--EXPECT--
+*** Test by calling method or function with its expected arguments ***
+array(3) {
+  [0]=>
+  string(6) "host01"
+  [1]=>
+  string(6) "host02"
+  [2]=>
+  string(14) "another-host02"
+}
diff --git a/contribs/phpext/slurm_php/tests/slurm_hostlist_to_array_error.phpt b/contribs/phpext/slurm_php/tests/slurm_hostlist_to_array_error.phpt
new file mode 100644
index 000000000..229711604
--- /dev/null
+++ b/contribs/phpext/slurm_php/tests/slurm_hostlist_to_array_error.phpt
@@ -0,0 +1,24 @@
+--TEST--
+Test function slurm_hostlist_to_array() by calling it more than or less than its expected arguments
+--CREDIT--
+Jimmy Tang <jtang@tchpc.tcd.ie>
+--SKIPIF--
+<?php
+extension_loaded("slurm") or die("skip slurm extension not loaded\n");
+function_exists("slurm_hostlist_to_array") or die("skip function slurm_hostlist_to_array unavailable");
+?>
+--FILE--
+<?php
+
+echo "*** Test by calling method or function with incorrect numbers of arguments ***\n";
+
+$extra_arg = NULL;
+
+var_dump(slurm_hostlist_to_array( $extra_arg ) );
+
+// var_dump(slurm_hostlist_to_array(  ) );
+
+?>
+--EXPECTF--
+*** Test by calling method or function with incorrect numbers of arguments ***
+int(-3)
diff --git a/contribs/phpext/slurm_php/tests/slurm_load_job_information_basic.phpt b/contribs/phpext/slurm_php/tests/slurm_load_job_information_basic.phpt
new file mode 100644
index 000000000..3b9ae7ed6
--- /dev/null
+++ b/contribs/phpext/slurm_php/tests/slurm_load_job_information_basic.phpt
@@ -0,0 +1,32 @@
+--TEST--
+Test function slurm_load_job_information() by calling it with its expected arguments
+--CREDIT--
+Peter Vermeulen <nmb_pv@hotmail.com>
+--SKIPIF--
+<?php
+extension_loaded("slurm") or die("skip slurm extension not loaded\n");
+function_exists("slurm_load_job_information") or die("skip function slurm_load_job_information unavailable");
+?>
+--FILE--
+<?php
+
+
+echo "*** Test by calling method or function with its expected arguments ***\n";
+
+$jobArr = slurm_load_job_information();
+if((gettype($jobArr)=="array") && ($jobArr != NULL)) {
+	echo "! slurm_load_job_information	:	SUCCESS";
+} else if($jobArr == -3) {
+	echo "[SLURM:ERROR] -3 : Faulty variables ( or no variables ) where passed on";
+} else if($jobArr == -2) {
+	echo "[SLURM:ERROR] -2 : Daemons not online";
+} else if($jobArr == -1) {
+	echo "! slurm_load_job_information	:	SUCCESS";
+}  else {
+	echo "[SLURM:ERROR] -4 : ?Unknown?";
+}
+
+?>
+--EXPECT--
+*** Test by calling method or function with its expected arguments ***
+! slurm_load_job_information	:	SUCCESS
diff --git a/contribs/phpext/slurm_php/tests/slurm_load_partition_jobs_basic.phpt b/contribs/phpext/slurm_php/tests/slurm_load_partition_jobs_basic.phpt
new file mode 100644
index 000000000..2614d8226
--- /dev/null
+++ b/contribs/phpext/slurm_php/tests/slurm_load_partition_jobs_basic.phpt
@@ -0,0 +1,36 @@
+--TEST--
+Test function slurm_load_partition_jobs() by calling it with its expected arguments
+--CREDIT--
+Peter Vermeulen <nmb.peterv@gmail.com>
+--SKIPIF--
+<?php
+extension_loaded("slurm") or die("skip slurm extension not loaded\n");
+function_exists("slurm_load_partition_jobs") or die("skip function slurm_load_partition_jobs() unavailable");
+?>
+--FILE--
+<?php
+echo "*** Test by calling method or function with correct arguments ***\n";
+
+$arr = slurm_print_partition_names();
+if(is_array($arr)) {
+	if(count($arr)!=0) {
+		$partition = slurm_load_partition_jobs($arr[0]);
+		if(is_array($partition)) {
+			echo "[SLURM:SUCCESS]";
+		} else if($partition == -3) {
+			echo "[SLURM:ERROR] -3 : Faulty variables ( or no variables ) where passed on";
+		} else if($partition == -2) {
+			echo "[SLURM:ERROR] -2 : Daemons not online";
+		} else if($partition == -1) {
+			echo "[SLURM:SUCCESS]";
+		}
+	} else {
+		echo "! No partitions available !";
+	}
+} else {
+	echo "[SLURM:ERROR] CODE=".$arr."| while trying to retreive partition names";
+}
+?>
+--EXPECTF--
+*** Test by calling method or function with correct arguments ***
+[SLURM:SUCCESS]
diff --git a/contribs/phpext/slurm_php/tests/slurm_load_partition_jobs_error.phpt b/contribs/phpext/slurm_php/tests/slurm_load_partition_jobs_error.phpt
new file mode 100644
index 000000000..2f2b5f8cf
--- /dev/null
+++ b/contribs/phpext/slurm_php/tests/slurm_load_partition_jobs_error.phpt
@@ -0,0 +1,34 @@
+--TEST--
+Test function slurm_load_partition_jobs() by calling it with its expected arguments
+--CREDIT--
+Peter Vermeulen <nmb_pv@hotmail.com>
+--SKIPIF--
+<?php
+extension_loaded("slurm") or die("skip slurm extension not loaded\n");
+function_exists("slurm_load_partition_jobs") or die("skip function slurm_load_partition_jobs() unavailable");
+?>
+--FILE--
+<?php
+echo "*** Test by calling method or function with faulty arguments ***\n";
+
+$node_info = slurm_load_partition_jobs(5);
+if($node_info == -1) {
+	echo "[SLURM:ERROR] -1 : No jobs where found for a partition by that name\n";
+}
+
+$node_info = slurm_load_partition_jobs(NULL);
+if($node_info == -3) {
+	echo "[SLURM:ERROR] -3 : Faulty variables ( or no variables ) where passed on\n";
+}
+
+$node_info = slurm_load_partition_jobs('');
+if($node_info == -3) {
+	echo "[SLURM:ERROR] -3 : Faulty variables ( or no variables ) where passed on\n";
+}
+
+?>
+--EXPECT--
+*** Test by calling method or function with faulty arguments ***
+[SLURM:ERROR] -1 : No jobs where found for a partition by that name
+[SLURM:ERROR] -3 : Faulty variables ( or no variables ) where passed on
+[SLURM:ERROR] -3 : Faulty variables ( or no variables ) where passed on
diff --git a/contribs/phpext/slurm_php/tests/slurm_ping_basic.phpt b/contribs/phpext/slurm_php/tests/slurm_ping_basic.phpt
new file mode 100644
index 000000000..0f525f8fb
--- /dev/null
+++ b/contribs/phpext/slurm_php/tests/slurm_ping_basic.phpt
@@ -0,0 +1,14 @@
+--TEST--
+Test function slurm_ping() by calling it with its expected arguments
+--SKIPIF--
+<?php
+extension_loaded("slurm") or die("skip slurm extension not loaded\n");
+function_exists("slurm_ping") or die("skip function slurm_ping unavailable");
+?>
+--FILE--
+<?php
+$value=slurm_ping();
+var_dump($value["Prim. Controller"]);
+?>
+--EXPECT--
+int(0)
diff --git a/contribs/phpext/slurm_php/tests/slurm_ping_error.phpt b/contribs/phpext/slurm_php/tests/slurm_ping_error.phpt
new file mode 100644
index 000000000..4561f69d2
--- /dev/null
+++ b/contribs/phpext/slurm_php/tests/slurm_ping_error.phpt
@@ -0,0 +1,35 @@
+--TEST--
+Test function slurm_ping() by calling it more than or less than its expected arguments
+--CREDIT--
+Jimmy Tang <jtang@tchpc.tcd.ie>
+--SKIPIF--
+<?php
+extension_loaded("slurm") or die("skip slurm extension not loaded\n");
+function_exists("slurm_ping") or die("skip function slurm_ping unavailable");
+?>
+--FILE--
+<?php
+
+echo "*** Test by calling method or function with incorrect numbers of arguments ***\n";
+
+$extra_arg = NULL;
+
+$ping = slurm_ping( $extra_arg );
+if ( $ping["Prim. Controller"] == 0 )
+	echo "! slurm_ping $ping == 0 ok\n";
+if ( $ping["Sec. Controller"] == -1 )
+	echo "! slurm_ping $ping == -1 ok\n";
+
+$ping = slurm_ping( );
+if ( $ping["Prim. Controller"] == 0 )
+	echo "! slurm_ping $ping == 0 ok\n";
+if ( $ping["Sec. Controller"] == -1 )
+	echo "! slurm_ping $ping == -1 ok\n";
+
+?>
+--EXPECTF--
+*** Test by calling method or function with incorrect numbers of arguments ***
+! slurm_ping Array == 0 ok
+! slurm_ping Array == -1 ok
+! slurm_ping Array == 0 ok
+! slurm_ping Array == -1 ok
diff --git a/contribs/phpext/slurm_php/tests/slurm_print_partition_names_basic.phpt b/contribs/phpext/slurm_php/tests/slurm_print_partition_names_basic.phpt
new file mode 100644
index 000000000..bfd843c1a
--- /dev/null
+++ b/contribs/phpext/slurm_php/tests/slurm_print_partition_names_basic.phpt
@@ -0,0 +1,33 @@
+--TEST--
+Test function slurm_print_partition_names() by calling it with its expected arguments
+--CREDIT--
+Jimmy Tang <jtang@tchpc.tcd.ie>
+--SKIPIF--
+<?php
+extension_loaded("slurm") or die("skip slurm extension not loaded\n");
+function_exists("slurm_print_partition_names") or die("skip function slurm_print_partition_names unavailable");
+?>
+--FILE--
+<?php
+
+
+echo "*** Test by calling method or function with its expected arguments ***\n";
+
+$nameArray = slurm_print_partition_names();
+if((gettype($nameArray)=="array") && ($nameArray != NULL)) {
+	echo "! slurm_print_partition_names	:	SUCCESS";
+} else if($nameArray == -3) {
+	echo "[SLURM:ERROR] -3 : Faulty variables ( or no variables ) where passed on";
+} else if($nameArray == -2) {
+	echo "[SLURM:ERROR] -2 : Daemons not online";
+} else if($nameArray == -1) {
+	echo "[SLURM:ERROR] -1 : No partitions were found on the system";
+}  else {
+	echo "[SLURM:ERROR] -4 : ?Unknown?";
+}
+
+
+?>
+--EXPECT--
+*** Test by calling method or function with its expected arguments ***
+! slurm_print_partition_names	:	SUCCESS
diff --git a/contribs/phpext/slurm_php/tests/slurm_slurmd_status_basic.phpt b/contribs/phpext/slurm_php/tests/slurm_slurmd_status_basic.phpt
new file mode 100644
index 000000000..0c6744642
--- /dev/null
+++ b/contribs/phpext/slurm_php/tests/slurm_slurmd_status_basic.phpt
@@ -0,0 +1,26 @@
+--TEST--
+Test function slurm_slurmd_status() by calling it with its expected arguments
+--CREDIT--
+Peter Vermeulen <nmb_pv@hotmail.com>
+--SKIPIF--
+<?php
+extension_loaded("slurm") or die("skip slurm extension not loaded\n");
+function_exists("slurm_slurmd_status") or die("skip function slurm_slurmd_status() unavailable");
+?>
+--FILE--
+<?php
+echo "*** Test by calling method or function with its expected arguments ***\n";
+
+$slurm_status = slurm_slurmd_status();
+if((gettype($slurm_status)!="integer") && ($slurm_status != NULL)) {
+	echo "! slurm_slurmd_status()	:	SUCCESS";
+} else if($slurm_status == -2) {
+	echo "[SLURM:ERROR] -2 : status couldn't be loaded, this is a sign that the daemons are offline"."\n\n"."Please put the slurmd and slurmctld daemons online";
+}  else {
+	echo "[SLURM:ERROR] -4 : ?Unknown?";
+}
+
+?>
+--EXPECT--
+*** Test by calling method or function with its expected arguments ***
+! slurm_slurmd_status()	:	SUCCESS
diff --git a/contribs/phpext/slurm_php/tests/slurm_version_basic.phpt b/contribs/phpext/slurm_php/tests/slurm_version_basic.phpt
new file mode 100644
index 000000000..c499e3a01
--- /dev/null
+++ b/contribs/phpext/slurm_php/tests/slurm_version_basic.phpt
@@ -0,0 +1,30 @@
+--TEST--
+Test function slurm_version() by calling it with its expected arguments
+--CREDIT--
+Jimmy Tang <jtang@tchpc.tcd.ie>
+Peter Vermeulen <nmb.peterv@gmail.com>
+--SKIPIF--
+<?php
+extension_loaded("slurm") or die("skip slurm extension not loaded\n");
+function_exists("slurm_version") or die("skip function slurm_version unavailable");
+?>
+--FILE--
+<?php
+
+echo "*** Test by calling method or function with its expected arguments ***\n";
+
+$ver = slurm_version(0);
+if((gettype($ver)=="integer") && ($ver != NULL) && ($ver>0)) {
+	echo "! slurm_version	:	SUCCESS";
+} else if($ver == -3) {
+	echo "[SLURM:ERROR] -3 : Faulty variables ( or no variables ) where passed on";
+} else if($ver == -2) {
+	echo "[SLURM:ERROR] -2 : Daemons not online";
+} else if($ver == -1) {
+	echo "[SLURM:ERROR] -1 : No version was found on the system";
+}
+
+?>
+--EXPECT--
+*** Test by calling method or function with its expected arguments ***
+! slurm_version	:	SUCCESS
diff --git a/contribs/sjobexit/Makefile.in b/contribs/sjobexit/Makefile.in
index 5d893af50..7bedf60f9 100644
--- a/contribs/sjobexit/Makefile.in
+++ b/contribs/sjobexit/Makefile.in
@@ -64,6 +64,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -74,6 +75,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -118,7 +120,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -155,6 +160,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -212,6 +218,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -247,6 +254,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/contribs/sjobexit/sjobexitmod.pl b/contribs/sjobexit/sjobexitmod.pl
index cfd055bd5..5fb0d235b 100755
--- a/contribs/sjobexit/sjobexitmod.pl
+++ b/contribs/sjobexit/sjobexitmod.pl
@@ -36,7 +36,7 @@ my (
 #
 # Format for listing job.
 #
-my $list_format = "JobID,Account,NNodes,NodeList,State,ExitCode,DerivedExitCode,DerivedExitStr";
+my $list_format = "JobID,Account,NNodes,NodeList,State,ExitCode,DerivedExitCode,Comment";
 
 
 #
@@ -128,7 +128,7 @@ sub getoptions
 #
 	$execute_line = "sacctmgr -i modify job jobid=$jobid set";
 
-	$execute_line .= " DerivedExitStr=\"$reason\"" if ($reason);
+	$execute_line .= " Comment=\"$reason\""        if ($reason);
 	$execute_line .= " DerivedExitCode=$code"      if ($code);
 	$execute_line .= " Cluster=$cluster"           if ($cluster);
 
@@ -163,7 +163,7 @@ sub usage
         $base [-man]
 
 	-e <exit code>		Modify the derived exit code to new value.
-	-r <reason string>	Modify the derived exit string to new value.
+	-r <reason string>	Modify the job's comment field to new value.
 	-c <cluster>		Name of cluster (optional).
 	-l 			List information for a completed job.
 	-h 			Show usage.
@@ -215,11 +215,11 @@ B<sjobexitmod> - Modifies a completed job in the slurmdbd
  sjobexitmod is a wrapper which effectively does the same operation as using the
  sacct utility to modify certain aspects of a completed job.
 
-	sacctmgr -i modify job jobid=1286 set DerivedExitCode=1 DerivedExitStr="code error"
+	sacctmgr -i modify job jobid=1286 set DerivedExitCode=1 Comment="code error"
 
  or to list certain aspects of a completed job.
 
-	sacct -o jobid,derivedexitcode,derivedexitstr,cluster
+	sacct -o jobid,derivedexitcode,comment,cluster
 
 =head1 OPTIONS
 
@@ -247,7 +247,7 @@ List selected attributes of a completed job.
 
 =item B<-r> I<reason_string>
 
-The reason (DerivedEixtStr) for job termination.
+The reason (Comment) for job termination.
 
 =item B<JobId>
 
diff --git a/contribs/sjstat b/contribs/sjstat
index b4e3cfeb1..6fee05efb 100755
--- a/contribs/sjstat
+++ b/contribs/sjstat
@@ -9,36 +9,36 @@
 #  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 #  Written by Phil Eckert <eckert21@llnl.gov>.
 #  CODE-OCEC-09-009. All rights reserved.
-#  
+#
 #  This file is part of SLURM, a resource management program.
-#  For details, see <https://computing.llnl.gov/linux/slurm/>.
+#  For details, see <http://www.schedmd.com/slurmdocs/>.
 #  Please also read the included file: DISCLAIMER.
-#  
+#
 #  SLURM is free software; you can redistribute it and/or modify it under
 #  the terms of the GNU General Public License as published by the Free
 #  Software Foundation; either version 2 of the License, or (at your option)
 #  any later version.
 #
-#  In addition, as a special exception, the copyright holders give permission 
+#  In addition, as a special exception, the copyright holders give permission
 #  to link the code of portions of this program with the OpenSSL library under
-#  certain conditions as described in each individual source file, and 
-#  distribute linked combinations including the two. You must obey the GNU 
-#  General Public License in all respects for all of the code used other than 
-#  OpenSSL. If you modify file(s) with this exception, you may extend this 
-#  exception to your version of the file(s), but you are not obligated to do 
+#  certain conditions as described in each individual source file, and
+#  distribute linked combinations including the two. You must obey the GNU
+#  General Public License in all respects for all of the code used other than
+#  OpenSSL. If you modify file(s) with this exception, you may extend this
+#  exception to your version of the file(s), but you are not obligated to do
 #  so. If you do not wish to do so, delete this exception statement from your
-#  version.  If you delete this exception statement from all source files in 
+#  version.  If you delete this exception statement from all source files in
 #  the program, then also delete it here.
-#  
+#
 #  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
 #  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 #  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 #  details.
-#  
+#
 #  You should have received a copy of the GNU General Public License along
 #  with SLURM; if not, write to the Free Software Foundation, Inc.,
 #  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
-#  
+#
 #  Based off code with permission copyright 2006, 2007 Cluster Resources, Inc.
 ###############################################################################
 
@@ -69,6 +69,16 @@ use autouse 'Pod::Usage' => qw(pod2usage);
 	my ($help, $man, $pool, $running, $verbose);
 	my (%MaxNodes, %MaxTime);
 
+#
+#	Check SLURM status.
+#
+	isslurmup();
+
+#
+#	See if bluegene system.
+#
+	my $bglflag = 1  if (`scontrol show config | grep -i bluegene`);
+
 #
 #	Get user options.
 #
@@ -98,6 +108,25 @@ use autouse 'Pod::Usage' => qw(pod2usage);
 	exit;
 
 
+#
+# Do usable for bluegene
+#
+sub Usable
+{
+	my ($tot, $out) = @_;
+
+	$tot *= 1024.0 if ($tot =~ /K/);
+	$out *= 1024.0 if ($out =~ /K/);
+
+	my $usable =  $tot - $out;
+	if ($usable > 1024.0) {
+		$usable  /=  1024.0;
+		$usable .= 'K';
+	}
+
+	return($usable);
+}
+	
 #
 # Get the SLURM partitions information.
 #
@@ -109,7 +138,7 @@ sub do_sinfo
 #
 #	Get the partition and node info.
 #
-	my $options = "\"%9P %6m %.4c %.16F %f\"";
+	my $options = "\"%9P %6m %.4c %.22F %f\"";
 
 	my $ct = 0;
 	my @sin = `sinfo -e -o $options`;
@@ -128,11 +157,13 @@ sub do_sinfo
 			$s_idle[$ct]   = $fields[1];
 			$s_out[$ct]    = $fields[2];
 			$s_total[$ct]  = $fields[3];
+
+		if ($bglflag) {
+			$s_usable[$ct] = Usable($s_total[$ct], $s_out[$ct]);
+		} else {
 			$s_usable[$ct] = $s_total[$ct] - $s_out[$ct];
-#
-#			Handle "k" factor for Blue Gene.
-#
-			$s_usable[$ct] .= 'K' if ($s_total[$ct] =~ /K/);
+		}
+
 		$s_feat[$ct] = ($line[4] .= " ");
 		$s_feat[$ct] =~ s/\(null\)//g;
 		$ct++;
@@ -140,10 +171,10 @@ sub do_sinfo
 
 	printf("\nScheduling pool data:\n");
 	if ($verbose) {
-		printf("------------------------------------------------------------------------------\n");
-		printf("                           Total  Usable   Free   Node   Time  Other          \n");
-		printf("Pool         Memory  Cpus  Nodes   Nodes  Nodes  Limit  Limit  traits         \n");
-		printf("------------------------------------------------------------------------------\n");
+		printf("----------------------------------------------------------------------------------\n");
+		printf("                           Total  Usable   Free   Node   Time      Other          \n");
+		printf("Pool         Memory  Cpus  Nodes   Nodes  Nodes  Limit  Limit      traits         \n");
+		printf("----------------------------------------------------------------------------------\n");
 	} else {
 		printf("-------------------------------------------------------------\n");
 		printf("Pool        Memory  Cpus  Total Usable   Free  Other Traits  \n");
@@ -154,17 +185,15 @@ sub do_sinfo
 		if ($verbose) {
 			my $p = $s_part[$i];
 			$p =~ s/\*//;
-			printf("%-9s  %6dMb %5d %6s %7s %6s %6s %6s  %-s\n",
+			printf("%-9s  %6dMb %5s %6s %7s %6s %6s %10s  %-s\n",
 				$s_part[$i], $s_mem[$i], $s_cpu[$i],
-				$s_total[$i],
-				$s_total[$i] - $s_out[$i], 
+				$s_total[$i], $s_usable[$i], 
 				$s_idle[$i], $MaxNodes{$p},
 				$MaxTime{$p}, $s_feat[$i]);
 		} else {
-			printf("%-9s %6dMb %5d %6s %6s %6s  %-s\n",
+			printf("%-9s %6dMb %5s %6s %6s %6s  %-s\n",
 				$s_part[$i], $s_mem[$i], $s_cpu[$i],
-				$s_total[$i],
-				$s_total[$i] - $s_out[$i], 
+				$s_total[$i], $s_usable[$i],
 				$s_idle[$i], $s_feat[$i]);
 		}
 	}
@@ -189,10 +218,10 @@ sub do_squeue
 	my $rval = system("scontrol show config | grep cons_res >> /dev/null");
 	if ($rval) {
         	$type = "Nodes";
-		$options =  "\"%8i  %8u %.6D %2t %.11S %.12l  %.9P %.11M  %1000R\"";
+		$options =  "\"%8i  %8u %.6D %2t %S %.12l  %.9P %.11M  %1000R\"";
 	} else {
         	$type = "Procs"; 
-		$options =  "\"%8i  %8u %.6C %2t %.11S %.12l  %.9P %.11M  %1000R\"";
+		$options =  "\"%8i  %8u %.6C %2t %S %.12l  %.9P %.11M  %1000R\"";
 	}
 
 #
@@ -211,6 +240,8 @@ sub do_squeue
 		$s_user[$ct]   = $line[1];
 		$s_nodes[$ct]  = $line[2];
 		$s_status[$ct] = $line[3];
+		$line[4] =~ s/^.....//;
+		$line[4] = "N/A" if ($line[3] =~ /PD/);
 		$s_begin[$ct]  = $line[4];
 		$s_limit[$ct]  = $line[5];
 		if ($line[5] eq "UNLIMITED") {
@@ -233,10 +264,10 @@ sub do_squeue
 	printf("Running job data:\n");
 
 	if ($verbose) {
-		printf("------------------------------------------------------------------------------------------------\n");
-		printf("                                                 Time        Time         Time                  \n");
-		printf("JobID    User      $type Pool      Status        Used       Limit      Started  Master/Other    \n");
-		printf("------------------------------------------------------------------------------------------------\n");
+		printf("---------------------------------------------------------------------------------------------------\n");
+		printf("                                                 Time        Time            Time                  \n");
+		printf("JobID    User      $type Pool      Status        Used       Limit         Started  Master/Other    \n");
+		printf("---------------------------------------------------------------------------------------------------\n");
 	} else {
 		printf("----------------------------------------------------------------------\n");
 		printf("JobID    User      $type Pool      Status        Used  Master/Other   \n");
@@ -245,7 +276,7 @@ sub do_squeue
 
 	for (my $i = 0; $i < $ct; $i++) {
 		if ($verbose) {
-			printf("%-8s %-8s %6s %-9s %-7s %10s %11s  %11s  %.12s\n",
+			printf("%-8s %-8s %6s %-9s %-7s %10s %11s  %14s  %.12s\n",
 				$s_job[$i], $s_user[$i], $s_nodes[$i],
 				$s_pool[$i], $s_status[$i],
 				$s_used[$i], $s_limit[$i], $s_begin[$i],
@@ -277,13 +308,12 @@ sub do_scontrol_part
 	foreach my $tmp (@scon) {
 		chomp $tmp;
 		my @line = split(' ',$tmp);
-		($part) = ($tmp =~ m/PartitionName=(\S+)\s+/) if ($tmp =~ /PartitionName=/);
+		($part) = ($tmp =~ m/PartitionName=(\S+)/) if ($tmp =~ /PartitionName=/);
 
 		($MaxTime{$part})  = ($tmp =~ m/MaxTime=(\S+)\s+/)  if ($tmp =~ /MaxTime=/);
 		($MaxNodes{$part}) = ($tmp =~ m/MaxNodes=(\S+)\s+/) if ($tmp =~ /MaxNodes=/);
 		$MaxTime{$part}  =~ s/UNLIMITED/UNLIM/ if ($MaxTime{$part});
 		$MaxNodes{$part} =~ s/UNLIMITED/UNLIM/ if ($MaxNodes{$part}); 
-
 	}
 
 	return;
@@ -380,12 +410,26 @@ sub usage
 }
 
 
+#
+# Determine if SLURM is available.
+#
+sub isslurmup
+{
+	my $out = `scontrol show part 2>&1`;
+	if ($?) {
+		printf("\n SLURM is not communicating.\n\n");
+		exit(1);
+	}
+
+	return;
+}
+
 
 __END__
 
 =head1 NAME
 
-B<sjstat> - List attributes of jobs under SLURM control
+B<sjstat> - List attributes of jobs under the SLURM control
 
 =head1 SYNOPSIS
 
@@ -393,7 +437,7 @@ B<sjstat> [B<-h> ] [B<-c>] [B<-r> ] [B<-v>]
 
 =head1 DESCRIPTION
 
-The B<sjstat> command is used to display statistics of jobs under control of SLURM. 
+The B<sjstat> command is used to display statistics of jobs under control of SLURM.
 The output is designed to give information on the resource usage and availablilty,
 as well as information about jobs that are currently active on the machine. This output
 is built using the SLURM utilities, sinfo, squeue and scontrol, the man pages for these
@@ -462,7 +506,7 @@ The following is a basic request for status.
 
      The Running job data contains information pertaining to the:
 
- 	JobID		either the SLURM job id
+ 	JobID		the SLURM job id
  	User		owner of the job
  	Nodes		nodes required, or in use by the job
 			(Note: On cpu scheduled machines, this field
@@ -495,10 +539,11 @@ The following is a basic request for status.
      pbatch*     15000Mb     8   1072    1070    174  UNLIM  UNLIM  (null)
 
      Running job data:
-     ----------------------------------------------------------------------------------------------
-     JobID    User      Nodes  Pool        Status        Used        Limit        Start  Master/Other
-     ----------------------------------------------------------------------------------------------
-     395      sam         200  pbatch      PD            0:00        30:00          N/A  (JobHeld)
+     ---------------------------------------------------------------------------------------------------
+                                                      Time        Time            Time                  
+     JobID    User      Nodes Pool      Status        Used       Limit         Started  Master/Other    
+     ---------------------------------------------------------------------------------------------------
+     38562    tom           4 pbatch    PD            0:00     1:00:00  01-14T18:11:22  (JobHeld)
 
      The added fields to the "Scheduling pool data" are:
 
@@ -510,11 +555,8 @@ The following is a basic request for status.
  	Limit		Time limit of job.
  	Start		Start time of job.
 
-=head1 AUTHOR
-
-Written by Philip D. Eckert
-
 =head1 REPORTING BUGS
 
 Report bugs to <eckert2@llnl.gov>
 
+=cut
diff --git a/contribs/slurmdb-direct/Makefile.in b/contribs/slurmdb-direct/Makefile.in
index e93edb9c1..5a97f4599 100644
--- a/contribs/slurmdb-direct/Makefile.in
+++ b/contribs/slurmdb-direct/Makefile.in
@@ -64,6 +64,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -74,6 +75,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -118,7 +120,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -155,6 +160,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -212,6 +218,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -247,6 +254,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/contribs/time_login.c b/contribs/time_login.c
index 4c5321793..b63c86e8a 100644
--- a/contribs/time_login.c
+++ b/contribs/time_login.c
@@ -17,7 +17,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/contribs/torque/Makefile.in b/contribs/torque/Makefile.in
index 688a626c5..c1e2ba77d 100644
--- a/contribs/torque/Makefile.in
+++ b/contribs/torque/Makefile.in
@@ -64,6 +64,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -74,6 +75,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -118,7 +120,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -155,6 +160,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -212,6 +218,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -247,6 +254,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/contribs/torque/mpiexec.pl b/contribs/torque/mpiexec.pl
index 9724a5316..9f6791b53 100755
--- a/contribs/torque/mpiexec.pl
+++ b/contribs/torque/mpiexec.pl
@@ -11,7 +11,7 @@
 #  CODE-OCEC-09-009. All rights reserved.
 #
 #  This file is part of SLURM, a resource management program.
-#  For details, see <https://computing.llnl.gov/linux/slurm/>.
+#  For details, see <http://www.schedmd.com/slurmdocs/>.
 #  Please also read the included file: DISCLAIMER.
 #
 #  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/contribs/torque/pbsnodes.pl b/contribs/torque/pbsnodes.pl
index 7c958a265..98b242cbc 100755
--- a/contribs/torque/pbsnodes.pl
+++ b/contribs/torque/pbsnodes.pl
@@ -11,7 +11,7 @@
 #  CODE-OCEC-09-009. All rights reserved.
 #
 #  This file is part of SLURM, a resource management program.
-#  For details, see <https://computing.llnl.gov/linux/slurm/>.
+#  For details, see <http://www.schedmd.com/slurmdocs/>.
 #  Please also read the included file: DISCLAIMER.
 #
 #  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/contribs/torque/qdel.pl b/contribs/torque/qdel.pl
index 27167d839..410ac48e1 100755
--- a/contribs/torque/qdel.pl
+++ b/contribs/torque/qdel.pl
@@ -11,7 +11,7 @@
 #  CODE-OCEC-09-009. All rights reserved.
 #
 #  This file is part of SLURM, a resource management program.
-#  For details, see <https://computing.llnl.gov/linux/slurm/>.
+#  For details, see <http://www.schedmd.com/slurmdocs/>.
 #  Please also read the included file: DISCLAIMER.
 #
 #  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/contribs/torque/qhold.pl b/contribs/torque/qhold.pl
index 41c61dc94..4edb55edf 100755
--- a/contribs/torque/qhold.pl
+++ b/contribs/torque/qhold.pl
@@ -12,7 +12,7 @@
 #  CODE-OCEC-09-009. All rights reserved.
 #
 #  This file is part of SLURM, a resource management program.
-#  For details, see <https://computing.llnl.gov/linux/slurm/>.
+#  For details, see <http://www.schedmd.com/slurmdocs/>.
 #  Please also read the included file: DISCLAIMER.
 #
 #  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/contribs/torque/qrls.pl b/contribs/torque/qrls.pl
index 851df7013..9f2e32780 100755
--- a/contribs/torque/qrls.pl
+++ b/contribs/torque/qrls.pl
@@ -11,7 +11,7 @@
 #  CODE-OCEC-09-009. All rights reserved.
 #
 #  This file is part of SLURM, a resource management program.
-#  For details, see <https://computing.llnl.gov/linux/slurm/>.
+#  For details, see <http://www.schedmd.com/slurmdocs/>.
 #  Please also read the included file: DISCLAIMER.
 #
 #  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/contribs/torque/qstat.pl b/contribs/torque/qstat.pl
index 2a62d86f0..9c1c4733e 100755
--- a/contribs/torque/qstat.pl
+++ b/contribs/torque/qstat.pl
@@ -11,7 +11,7 @@
 #  CODE-OCEC-09-009. All rights reserved.
 #
 #  This file is part of SLURM, a resource management program.
-#  For details, see <https://computing.llnl.gov/linux/slurm/>.
+#  For details, see <http://www.schedmd.com/slurmdocs/>.
 #  Please also read the included file: DISCLAIMER.
 #
 #  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/contribs/torque/qsub.pl b/contribs/torque/qsub.pl
index 6aa8ad92b..366b3928a 100755
--- a/contribs/torque/qsub.pl
+++ b/contribs/torque/qsub.pl
@@ -11,7 +11,7 @@
 #  CODE-OCEC-09-009. All rights reserved.
 #
 #  This file is part of SLURM, a resource management program.
-#  For details, see <https://computing.llnl.gov/linux/slurm/>.
+#  For details, see <http://www.schedmd.com/slurmdocs/>.
 #  Please also read the included file: DISCLAIMER.
 #
 #  SLURM is free software; you can redistribute it and/or modify it under
@@ -186,7 +186,11 @@ if($res_opts{walltime}) {
 $command .= " --tmp=$res_opts{file}" if $res_opts{file};
 $command .= " --mem=$res_opts{mem}" if $res_opts{mem};
 $command .= " --nice=$res_opts{nice}" if $res_opts{nice};
-
+# Cray-specific options
+$command .= " -n$res_opts{mppwidth}"		    if $res_opts{mppwidth};
+$command .= " -w$res_opts{mppnodes}"		    if $res_opts{mppnodes};
+$command .= " --cpus-per-task=$res_opts{mppdepth}"  if $res_opts{mppdepth};
+$command .= " --ntasks-per-node=$res_opts{mppnppn}" if $res_opts{mppnppn};
 
 $command .= " --begin=$start_time" if $start_time;
 $command .= " --account=$account" if $account;
@@ -225,6 +229,12 @@ sub parse_resource_list {
 		   'pvmem' => "",
 		   'software' => "",
 		   'vmem' => "",
+		   # Cray-specific resources
+		   'mppwidth' => "",
+		   'mppdepth' => "",
+		   'mppnppn' => "",
+		   'mppmem' => "",
+		   'mppnodes' => "",
 		   'walltime' => ""
 		   );
 	my @keys = keys(%opt);
@@ -238,7 +248,9 @@ sub parse_resource_list {
 		$opt{cput} = get_minutes($opt{cput});
 	}
 
-	if($opt{mem}) {
+	if($opt{mppmem}) {
+		$opt{mem} = convert_mb_format($opt{mppmem});
+	} elsif($opt{mem}) {
 		$opt{mem} = convert_mb_format($opt{mem});
 	}
 
diff --git a/contribs/web_apps/chart_stats.cgi b/contribs/web_apps/chart_stats.cgi
index 496e0c87a..22ffbf99f 100755
--- a/contribs/web_apps/chart_stats.cgi
+++ b/contribs/web_apps/chart_stats.cgi
@@ -15,7 +15,7 @@
 #  CODE-OCEC-09-009. All rights reserved.
 #
 #  This file is part of SLURM, a resource management program.
-#  For details, see <https://computing.llnl.gov/linux/slurm/>.
+#  For details, see <http://www.schedmd.com/slurmdocs/>.
 #  Please also read the included file: DISCLAIMER.
 #
 #  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/doc/Makefile.in b/doc/Makefile.in
index 3b257d907..91ae1e510 100644
--- a/doc/Makefile.in
+++ b/doc/Makefile.in
@@ -60,6 +60,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -70,6 +71,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -131,7 +133,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -168,6 +173,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -225,6 +231,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -260,6 +267,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/doc/html/Makefile.am b/doc/html/Makefile.am
index 8128ed38f..1cbc421ea 100644
--- a/doc/html/Makefile.am
+++ b/doc/html/Makefile.am
@@ -1,7 +1,45 @@
 
 htmldir = ${prefix}/share/doc/@PACKAGE@-@VERSION@/html
 
+if HAVE_MAN2HTML
+man_html = \
+	../man/man1/sacct.html \
+	../man/man1/sacctmgr.html \
+	../man/man1/salloc.html \
+	../man/man1/sattach.html \
+	../man/man1/sbatch.html \
+	../man/man1/sbcast.html \
+	../man/man1/scancel.html \
+	../man/man1/scontrol.html \
+	../man/man1/sinfo.html \
+	../man/man1/smap.html \
+	../man/man1/sprio.html \
+	../man/man1/squeue.html \
+	../man/man1/sreport.html \
+	../man/man1/srun.html \
+	../man/man1/srun_cr.html \
+	../man/man1/sshare.html \
+	../man/man1/sstat.html \
+	../man/man1/strigger.html \
+	../man/man1/sview.html \
+	../man/man5/bluegene.conf.html \
+	../man/man5/cgroup.conf.html \
+	../man/man5/gres.conf.html \
+	../man/man5/slurm.conf.html \
+	../man/man5/slurmdbd.conf.html \
+	../man/man5/topology.conf.html \
+	../man/man5/wiki.conf.html \
+	../man/man8/slurmctld.html \
+	../man/man8/slurmd.html \
+	../man/man8/slurmdbd.html \
+	../man/man8/slurmstepd.html \
+	../man/man8/spank.html
+else
+	man_html =
+endif
+
 generated_html = \
+	${man_html} \
 	accounting.html \
 	accounting_storageplugins.html \
 	api.html \
@@ -12,25 +50,31 @@ generated_html = \
 	checkpoint_plugins.html \
 	cons_res.html \
 	cons_res_share.html \
+	cpu_management.html \
 	cray.html \
 	crypto_plugins.html \
+	disclaimer.html \
 	dist_plane.html \
 	documentation.html \
 	download.html \
 	faq.html \
 	gang_scheduling.html \
 	gres.html \
+	gres_design.html \
 	gres_plugins.html \
 	help.html \
 	high_throughput.html \
 	ibm.html \
 	jobacct_gatherplugins.html \
 	job_exit_code.html \
+	job_launch.html \
 	job_submit_plugins.html \
 	jobcompplugins.html \
 	mail.html \
+	man_index.html \
 	maui.html \
 	mc_support.html \
+	meetings.html \
 	moab.html \
 	mpi_guide.html \
 	mpiplugins.html \
@@ -53,8 +97,12 @@ generated_html = \
 	reservations.html \
 	resource_limits.html \
 	schedplugins.html \
+	select_design.html \
 	selectplugins.html \
 	slurm.html \
+	slurm_ug_agenda.html \
+	slurm_ug_cfp.html \
+	slurm_ug_registration.html \
 	sun_const.html \
 	switchplugins.html \
 	taskplugins.html \
@@ -68,10 +116,11 @@ html_DATA = \
 	${generated_html} \
 	allocation_pies.gif \
 	arch.gif \
-	configurator.html \
 	coding_style.pdf \
+	configurator.html \
 	entities.gif \
 	example_usage.gif \
+	linuxstyles.css \
 	lll.gif \
 	mc_support.gif \
 	plane_ex1.gif \
@@ -87,7 +136,6 @@ html_DATA = \
 	sponsors.gif \
 	topo_ex1.gif \
 	topo_ex2.gif \
-	linuxstyles.css \
 	usage_pies.gif
 
 MOSTLYCLEANFILES = ${generated_html}
@@ -98,3 +146,12 @@ SUFFIXES = .html
 
 .shtml.html:
 	`dirname $<`/shtml2html.py $<
+
+if HAVE_MAN2HTML
+.1.html:
+	`dirname $<`/../man2html.py $(srcdir)/header.txt $(srcdir)/footer.txt $<
+.5.html:
+	`dirname $<`/../man2html.py $(srcdir)/header.txt $(srcdir)/footer.txt $<
+.8.html:
+	`dirname $<`/../man2html.py $(srcdir)/header.txt $(srcdir)/footer.txt $<
+endif
diff --git a/doc/html/Makefile.in b/doc/html/Makefile.in
index 8b8d75c88..9cd84b348 100644
--- a/doc/html/Makefile.in
+++ b/doc/html/Makefile.in
@@ -62,6 +62,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -72,6 +73,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -116,7 +118,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -153,6 +158,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -210,6 +216,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -245,6 +252,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
@@ -297,7 +305,41 @@ target_vendor = @target_vendor@
 top_build_prefix = @top_build_prefix@
 top_builddir = @top_builddir@
 top_srcdir = @top_srcdir@
+@HAVE_MAN2HTML_TRUE@man_html = \
+@HAVE_MAN2HTML_TRUE@	../man/man1/sacct.html \
+@HAVE_MAN2HTML_TRUE@	../man/man1/sacctmgr.html \
+@HAVE_MAN2HTML_TRUE@	../man/man1/salloc.html \
+@HAVE_MAN2HTML_TRUE@	../man/man1/sattach.html \
+@HAVE_MAN2HTML_TRUE@	../man/man1/sbatch.html \
+@HAVE_MAN2HTML_TRUE@	../man/man1/sbcast.html \
+@HAVE_MAN2HTML_TRUE@	../man/man1/scancel.html \
+@HAVE_MAN2HTML_TRUE@	../man/man1/scontrol.html \
+@HAVE_MAN2HTML_TRUE@	../man/man1/sinfo.html \
+@HAVE_MAN2HTML_TRUE@	../man/man1/smap.html \
+@HAVE_MAN2HTML_TRUE@	../man/man1/sprio.html \
+@HAVE_MAN2HTML_TRUE@	../man/man1/squeue.html \
+@HAVE_MAN2HTML_TRUE@	../man/man1/sreport.html \
+@HAVE_MAN2HTML_TRUE@	../man/man1/srun.html \
+@HAVE_MAN2HTML_TRUE@	../man/man1/srun_cr.html \
+@HAVE_MAN2HTML_TRUE@	../man/man1/sshare.html \
+@HAVE_MAN2HTML_TRUE@	../man/man1/sstat.html \
+@HAVE_MAN2HTML_TRUE@	../man/man1/strigger.html \
+@HAVE_MAN2HTML_TRUE@	../man/man1/sview.html \
+@HAVE_MAN2HTML_TRUE@	../man/man5/bluegene.conf.html \
+@HAVE_MAN2HTML_TRUE@	../man/man5/cgroup.conf.html \
+@HAVE_MAN2HTML_TRUE@	../man/man5/gres.conf.html \
+@HAVE_MAN2HTML_TRUE@	../man/man5/slurm.conf.html \
+@HAVE_MAN2HTML_TRUE@	../man/man5/slurmdbd.conf.html \
+@HAVE_MAN2HTML_TRUE@	../man/man5/topology.conf.html \
+@HAVE_MAN2HTML_TRUE@	../man/man5/wiki.conf.html \
+@HAVE_MAN2HTML_TRUE@	../man/man8/slurmctld.html \
+@HAVE_MAN2HTML_TRUE@	../man/man8/slurmd.html \
+@HAVE_MAN2HTML_TRUE@	../man/man8/slurmdbd.html \
+@HAVE_MAN2HTML_TRUE@	../man/man8/slurmstepd.html \
+@HAVE_MAN2HTML_TRUE@	../man/man8/spank.html
+
 generated_html = \
+	${man_html} \
 	accounting.html \
 	accounting_storageplugins.html \
 	api.html \
@@ -308,25 +350,31 @@ generated_html = \
 	checkpoint_plugins.html \
 	cons_res.html \
 	cons_res_share.html \
+	cpu_management.html \
 	cray.html \
 	crypto_plugins.html \
+	disclaimer.html \
 	dist_plane.html \
 	documentation.html \
 	download.html \
 	faq.html \
 	gang_scheduling.html \
 	gres.html \
+	gres_design.html \
 	gres_plugins.html \
 	help.html \
 	high_throughput.html \
 	ibm.html \
 	jobacct_gatherplugins.html \
 	job_exit_code.html \
+	job_launch.html \
 	job_submit_plugins.html \
 	jobcompplugins.html \
 	mail.html \
+	man_index.html \
 	maui.html \
 	mc_support.html \
+	meetings.html \
 	moab.html \
 	mpi_guide.html \
 	mpiplugins.html \
@@ -349,8 +397,12 @@ generated_html = \
 	reservations.html \
 	resource_limits.html \
 	schedplugins.html \
+	select_design.html \
 	selectplugins.html \
 	slurm.html \
+	slurm_ug_agenda.html \
+	slurm_ug_cfp.html \
+	slurm_ug_registration.html \
 	sun_const.html \
 	switchplugins.html \
 	taskplugins.html \
@@ -364,10 +416,11 @@ html_DATA = \
 	${generated_html} \
 	allocation_pies.gif \
 	arch.gif \
-	configurator.html \
 	coding_style.pdf \
+	configurator.html \
 	entities.gif \
 	example_usage.gif \
+	linuxstyles.css \
 	lll.gif \
 	mc_support.gif \
 	plane_ex1.gif \
@@ -383,7 +436,6 @@ html_DATA = \
 	sponsors.gif \
 	topo_ex1.gif \
 	topo_ex2.gif \
-	linuxstyles.css \
 	usage_pies.gif
 
 MOSTLYCLEANFILES = ${generated_html}
@@ -392,7 +444,7 @@ SUFFIXES = .html
 all: all-am
 
 .SUFFIXES:
-.SUFFIXES: .html .shtml
+.SUFFIXES: .html .1 .5 .8 .shtml
 $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am  $(am__configure_deps)
 	@for dep in $?; do \
 	  case '$(am__configure_deps)' in \
@@ -601,10 +653,18 @@ uninstall-am: uninstall-htmlDATA
 	mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \
 	uninstall uninstall-am uninstall-htmlDATA
 
+@HAVE_MAN2HTML_FALSE@	man_html =
 
 .shtml.html:
 	`dirname $<`/shtml2html.py $<
 
+@HAVE_MAN2HTML_TRUE@.1.html:
+@HAVE_MAN2HTML_TRUE@	`dirname $<`/../man2html.py $(srcdir)/header.txt $(srcdir)/footer.txt $<
+@HAVE_MAN2HTML_TRUE@.5.html:
+@HAVE_MAN2HTML_TRUE@	`dirname $<`/../man2html.py $(srcdir)/header.txt $(srcdir)/footer.txt $<
+@HAVE_MAN2HTML_TRUE@.8.html:
+@HAVE_MAN2HTML_TRUE@	`dirname $<`/../man2html.py $(srcdir)/header.txt $(srcdir)/footer.txt $<
+
 # Tell versions [3.59,3.63) of GNU make to not export all variables.
 # Otherwise a system limit (for SysV at least) may be exceeded.
 .NOEXPORT:
diff --git a/doc/html/accounting.shtml b/doc/html/accounting.shtml
index 887f4c2d0..3391be7e5 100644
--- a/doc/html/accounting.shtml
+++ b/doc/html/accounting.shtml
@@ -1,10 +1,6 @@
 <!--#include virtual="header.txt"-->
 
-<h1>Accounting</h1>
-
-<p>NOTE: This documents accounting features available in SLURM version
-1.3, which are far more extensive than those available in previous
-releases.</p>
+<h1>Accounting and Resource Limits</h1>
 
 <p>SLURM can be configured to collect accounting information for every
 job and job step executed.
@@ -386,7 +382,7 @@ make sure the StorageUser is given permissions in MySQL to do so.
 As the <i>mysql</i> user grant privileges to that user using a
 command such as:</p>
 
-<p>GRANT ALL ON StorageLoc.* TO 'StorageUser'@'StorageHost';
+<p>GRANT ALL ON StorageLoc.* TO 'StorageUser'@'StorageHost';<br>
 (The ticks are needed)</p>
 
 <p>(You need to be root to do this. Also in the info for password
@@ -394,7 +390,7 @@ usage there is a line that starts with '->'. This a continuation
 prompt since the previous mysql statement did not end with a ';'. It
 assumes that you wish to input more info.)</p>
 
-<p>live example:</p>
+<p>Live example:</p>
 
 <pre>
 mysql@snowflake:~$ mysql
@@ -664,8 +660,10 @@ If a user has a limit set SLURM will read in those,
 if not we will refer to the account associated with the job.
 If the account doesn't have the limit set we will refer to
 the cluster's limits.
-If the cluster doesn't have the limit set no limit will be enforced.
-<p>All of the above entities can include limits as described below...
+If the cluster doesn't have the limit set no limit will be enforced.</p>
+
+<p>All of the above entities can include limits as described below and
+in the <a href="resource_limits.html">Resource Limits</a> document.</p>
 
 <ul>
 
@@ -673,7 +671,6 @@ If the cluster doesn't have the limit set no limit will be enforced.
   Essentially this is the amount of claim this association and it's
   children have to the above system. Can also be the string "parent",
   this means that the parent association is used for fairshare.
-  </li>
 </li>
 
 <li><b>GrpCPUMins=</b> A hard limit of cpu minutes to be used by jobs
@@ -780,7 +777,7 @@ as deleted.
 If an entity has existed for less than 1 day, the entity will be removed
 completely. This is meant to clean up after typographic errors.</p>
 
-<p style="text-align: center;">Last modified 27 January 2010</p>
+<p style="text-align: center;">Last modified 10 June 2010</p>
 
 <!--#include virtual="footer.txt"-->
 
diff --git a/doc/html/accounting_storageplugins.shtml b/doc/html/accounting_storageplugins.shtml
index 1a6d17975..296451b82 100644
--- a/doc/html/accounting_storageplugins.shtml
+++ b/doc/html/accounting_storageplugins.shtml
@@ -303,7 +303,7 @@ modified on success, or<br>
 List acct_storage_p_modify_job(void *db_conn, uint32_t uid,
 acct_job_modify_cond_t *job_cond, acct_job_rec_t *job)
 <p style="margin-left:.2in"><b>Description</b>:<br>
-Used to modify two fields (the derived exit code and derived exit string) of an
+Used to modify two fields (the derived exit code and the comment string) of an
 existing job in the storage type.  Can only modify one job at a time.
 <p style="margin-left:.2in"><b>Arguments</b>: <br>
 <span class="commandline">db_conn</span> (input) connection to
@@ -784,6 +784,24 @@ running on the host is grabbed from the connection.<br>
 <span class="commandline">SLURM_SUCCESS</span> on success, or<br>
 <span class="commandline">SLURM_ERROR</span> on failure.
 
+<p class="commandline">
+int clusteracct_storage_p_fini_ctld(void *db_conn, char *ip,
+uint16_t port, char *cluster_nodes)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+Used when a controller is turned off to tell the storage type the
+  slurmctld has gone away.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">db_conn</span> (input) connection to
+the storage type.<br>
+<span class="commandline">ip</span> (input) ip of connected slurmctld.<br>
+<span class="commandline">port</span> (input) port on host cluster is
+running on the host is grabbed from the connection.<br>
+<span class="commandline">cluster_nodes</span> (input) name of all
+nodes currently on the cluster.<br>
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+<span class="commandline">SLURM_SUCCESS</span> on success, or<br>
+<span class="commandline">SLURM_ERROR</span> on failure.
+
 <p class="commandline">
 int jobacct_storage_p_job_start(void *db_conn, struct job_record *job_ptr)
 <p style="margin-left:.2in"><b>Description</b>:<br>
diff --git a/doc/html/big_sys.shtml b/doc/html/big_sys.shtml
index a33f7d5ea..d4866ebe2 100644
--- a/doc/html/big_sys.shtml
+++ b/doc/html/big_sys.shtml
@@ -72,7 +72,7 @@ on data transmission.</p>
 on each compute node and use it for scheduling purposes, this entails
 extra overhead.
 Optimize performance by specifying the expected configuration using
-the available parameters (<i>RealMemory</i>, <i>Procs</i>, and
+the available parameters (<i>RealMemory</i>, <i>CPUs</i>, and
 <i>TmpDisk</i>).
 If the node is found to contain less resources than configured,
 it will be marked DOWN and not used.
diff --git a/doc/html/bluegene.shtml b/doc/html/bluegene.shtml
index d9e7d8252..4ac6fbf28 100644
--- a/doc/html/bluegene.shtml
+++ b/doc/html/bluegene.shtml
@@ -6,40 +6,45 @@
 
 <p>This document describes the unique features of SLURM on the
 <a href="http://www.research.ibm.com/bluegene/">IBM BlueGene</a> systems.
-You should be familiar with the SLURM's mode of operation on Linux clusters
+You should be familiar with SLURM's mode of operation on Linux clusters
 before studying the relatively few differences in BlueGene operation
 described in this document.</p>
 
 <p>BlueGene systems have several unique features making for a few
 differences in how SLURM operates there.
-The BlueGene system consists of one or more <i>base partitions</i> or
-<i>midplanes</i> connected in a three-dimensional torus.
-Each <i>base partition</i> consists of 512 <i>c-nodes</i> each containing two processors;
-one designed primarily for computations and the other primarily for managing communications.
-The <i>c-nodes</i> can execute only one process and thus are unable to execute both
-the user's jobs and SLURM's <i>slurmd</i> daemon.
-Thus the <i>slurmd</i> daemon executes on one of the BlueGene <i>Front End Nodes</i>.
-This single <i>slurmd</i> daemon provides (almost) all of the normal SLURM services
+BlueGene systems consists of one or more <i>base partitions</i> or
+<i>midplanes</i> connected in a three-dimensional (BlueGene/L and BlueGene/P
+systems) or five-dimensional (BlueGene/Q) torus.
+Each <i>base partition</i> typically includes 512 <i>c-nodes</i> or compute
+nodes each containing two or more cores;
+one core is typically designed primarily for managing communications while the
+other cores are used primarily for computations.
+Each <i>c-node</i> can execute only one process and thus are unable to execute
+both the user's application plus SLURM's <i>slurmd</i> daemon.
+Thus the <i>slurmd</i> daemon(s) executes on one or more of the BlueGene
+<i>Front End Nodes</i>.
+The <i>slurmd</i> daemons provide (almost) all of the normal SLURM services
 for every <i>base partition</i> on the system. </p>
 
 <p>Internally SLURM treats each <i>base partition</i> as one node with
-1024 processors, which keeps the number of entities being managed reasonable.
+a processor count equal to the number of cores on the base partition, which
+keeps the number of entities being managed by SLURM more reasonable.
 Since the current BlueGene software can sub-allocate a <i>base partition</i>
-into blocks of 32 and/or 128 <i>c-nodes</i>, more than one user job can execute
-on each <i>base partition</i> (subject to system administrator configuration).
+into smaller blocks, more than one user job can execute on each <i>base
+partition</i> (subject to system administrator configuration). In the case of
+BlueGene/Q systems, more than one user job can also execute in each block.
 To effectively utilize this environment, SLURM tools present the user with
-the view that each <i>c-nodes</i> is a separate node, so allocation requests
-and status information use <i>c-node</i> counts (this is a new feature in
-SLURM version 1.1).
+the view that each <i>c-node</i> is a separate node, so allocation requests
+and status information use <i>c-node</i> counts.
 Since the <i>c-node</i> count can be very large, the suffix "k" can be used
-to represent multiples of 1024 (e.g. "2k" is equivalent to "2048").</p>
+to represent multiples of 1024 or "m" for multiples of 1,048,576 (1024 x 1024).
+For example, "2k" is equivalent to "2048".</p>
 
 <h2>User Tools</h2>
 
-<p>The normal set of SLURM user tools: sbatch, scancel, sinfo, squeue, and scontrol
-provide all of the expected services except support for job steps.
-SLURM performs resource allocation for the job, but initiation of tasks is performed
-using the <i>mpirun</i> command. SLURM has no concept of a job step on BlueGene.
+<p>The normal set of SLURM user tools: sbatch, scancel, sinfo, squeue, and
+scontrol provide all of the expected services except support for job steps,
+which is detailed later.
 Seven new sbatch options are available:
 <i>--geometry</i> (specify job size in each dimension),
 <i>--no-rotate</i> (disable rotation of geometry),
@@ -52,25 +57,25 @@ Seven new sbatch options are available:
 <i>--ramdisk-image</i> (specify alternative ramdisk image for bluegene block.  Default if not set, BGL only.)
 The <i>--nodes</i> option with a minimum and (optionally) maximum node count continues
 to be available.
-
 Note that this is a c-node count.</p>
 
-<p>To reiterate: sbatch is used to submit a job script,
-but mpirun is used to launch the parallel tasks.
-Note that a SLURM batch job's default stdout and stderr file names are generated
-using the SLURM job ID.
-When the SLURM control daemon is restarted, SLURM job ID values can be repeated,
-therefore it is recommended that batch jobs explicitly specify unique names for
-stdout and stderr files using the srun options <i>--output</i> and <i>--error</i>
-respectively.
-While the salloc command may be used to create an interactive SLURM job,
-it will be the responsibility of the user to insure that the <i>bgblock</i>
-is ready for use before initiating any mpirun commands.
-SLURM will assume this responsibility for batch jobs.
-The script that you submit to SLURM can contain multiple invocations of mpirun as
-well as any desired commands for pre- and post-processing.
+<h3>Task Launch on BlueGene/Q only</h3>
+
+<p>Use SLURM's srun command to launch tasks (srun is a wrapper for IBM's
+<i>runjob</i> command).
+SLURM job step information, including accounting, functions as expected.</p>
+
+<h3>Task Launch on BlueGene/L and BlueGene/P only</h3>
+
+<p>SLURM performs resource allocation for the job, but initiation of tasks is
+performed using the <i>mpirun</i> command. SLURM has no concept of a job step
+on BlueGene/L or BlueGene/P systems.
+To reiterate: salloc or sbatch are used to create a job allocation, but
+<i>mpirun</i> is used to launch the parallel tasks.
+The script that you submit to SLURM can contain multiple invocations of mpirun
+as well as any desired commands for pre- and post-processing.
 The mpirun command will get its <i>bgblock</i> information from the
-<i>MPIRUN_PARTITION</i> as set by SLURM. A sample script is shown below.
+<i>MPIRUN_PARTITION</i> as set by SLURM. A sample script is shown below.</p>
 <pre>
 #!/bin/bash
 # pre-processing
@@ -80,51 +85,50 @@ mpirun -exec /home/user/prog -cwd /home/user -args 123
 mpirun -exec /home/user/prog -cwd /home/user -args 124
 # post-processing
 date
-</pre></p>
-
-<h3><a name="naming">Naming Convensions</a></h3>
-<p>The naming of base partitions includes a three-digit suffix representing the its
-coordinates in the X, Y and Z dimensions with a zero origin.
-For example, "bg012" represents the base partition whose coordinate is at X=0, Y=1 and Z=2.  In a system
-configured with <i>small blocks</i> (any block less than a full base partition) there will be divisions
-into the base partition notation.  For example, if there were 64 psets in the
-configuration, bg012[0-15] represents
-the first quarter or first 16 ionodes of a midplane.  In BlueGene/L
-this would be 128 c-node block.  To represent the first nodecard in the
-second quarter or ionodes 16-19 the notation would be bg012[16-19], or
-a 32 c-node block.
-Since jobs must allocate consecutive base partitions in all three dimensions, we have developed
-an abbreviated format for describing the base partitions in one of these three-dimensional blocks.
-The base partition has a prefix determined from the system which is followed by the end-points
-of the block enclosed in square-brackets and separated by an "x".
-For example, "bg[620x731]" is used to represent the eight base partitions enclosed in a block
-with end-points and bg620 and bg731 (bg620, bg621, bg630, bg631, bg720, bg721,
-bg730 and bg731).</p></a>
-
-<p>
-<b>IMPORTANT:</b> SLURM version 1.2 or higher can handle a bluegene system of
-sizes up to 36x36x36.  To try to keep with the 'three-digit suffix
-representing the its coordinates in the X, Y and Z dimensions with a
-zero origin', we now support A-Z as valid numbers.  This makes it so
-the prefix <b>must always be lower case</b>, and any letters in the
-three-digit suffix <b> must always be upper case</b>.  This schema
-should be used in your slurm.conf file and in your bluegene.conf file
-if you put a prefix there even though it is not necessary there.  This
-schema should also be used to specify midplanes or locations in
-configure mode of smap.
+</pre>
 
+<h3><a name="naming">Naming Conventions</a></h3>
+<p>The naming of base partitions includes a numeric suffix representing its
+coordinates with a zero origin. The suffix contains three digits on BlueGene/L
+and BlueGene/P systems, while four digits are required for the BlueGene/Q
+systems. For example, "bgp012" represents the base partition whose coordinate
+is at X=0, Y=1 and Z=2.
+SLURM uses an abbreviated format for describing base partitions in which the
+end-points of the block enclosed are in square-brackets and separated by an "x".
+For example, "bgp[620x731]" is used to represent the eight base partitions
+enclosed in a block with end-points of bgp620 and bgp731 (bgp620, bgp621,
+bgp630, bgp631, bgp720, bgp721, bgp730 and bgp731).</p>
+
+<p><b>IMPORTANT:</b> SLURM can support up to 36 elements in each
+BlueGene dimension by supporting "A-Z" as valid numbers. SLURM requires the
+prefix to be lower case and any letters in the suffix must always be upper
+case. This schema must be used in both the slurm.conf and bluegene.conf
+configuration files when specifying midplane/node names (the prefix is
+optional). This schema should also be used to specify midplanes or locations
+in configure mode of smap:
 <br>
-valid: bgl[000xC44] bgl000 bglZZZ
+valid: bgl[000xC44], bgl000, bglZZZ
 <br>
-invalid: BGL[000xC44] BglC00 bglb00 Bglzzz
+invalid: BGL[000xC44], BglC00, bglb00, Bglzzz
 </p>
 
-<p>One new tool provided is <i>smap</i>.
-As of SLURM version 1.2, <i>sview</i> is
-another new tool offering even more viewing and configuring options.
-Smap is aware of system topography and provides a map of what base partitions
-are allocated to jobs, partitions, etc.
-See the smap man page for details.
+<p>In a system configured with <i>small blocks</i> (any block less
+than a full base partition) there will be divisions in the base partition
+notation. On BlueGene/L and BlueGene/P systems, the base partition name may
+be followed by a square bracket enclosing ID numbers of the IO nodes associated
+with the block. For example, if there are 64 psets in a BlueGene/L
+configuration, "bgl012[0-15]" represents the first quarter or first 16 IO nodes
+of a midplane.  In BlueGene/L this would be a 128 c-node block.  To represent
+the first nodecard in the second quarter or IO nodes 16-19, the notation would
+be "bgl012[16-19]", or a 32 c-node block. On BlueGene/Q systems, the specific
+c-nodes would be identified in square brackets using their five digit
+coordinates. For example "bgq0123[00000x11111]" would represent the 32 c-nodes
+in midplane "bgq0123" having coordinates (within that midplane) from zero to
+one in each of the five dimensions.</p>
+
+<p>Two topology-aware graphical user interfaces are provided: <i>smap</i> and
+<i>sview</i> (<i>sview</i> provides more viewing and configuring options).
+See each command's man page for details.
 A sample of smap output is provided below showing the location of five jobs.
 Note the format of the list of base partitions allocated to each job.
 Also note that idle (unassigned) base partitions are indicated by a period.
@@ -226,14 +230,17 @@ building SLURM. </p>
 <p>The slurmctld daemon should execute on the system's service node.
 If an optional backup daemon is used, it must be in some location where
 it is capable of executing Bridge APIs.
-One slurmd daemon should be configured to execute on one of the front end nodes.
-That one slurmd daemon represents communications channel for every base partition.
-You can use the scontrol command to drain individual nodes as desired and
-return them to service. </p>
-
-<p>The <i>slurm.conf</i> (configuration) file needs to have the value of <i>InactiveLimit</i>
-set to zero or not specified (it defaults to a value of zero).
-This is because there are no job steps and we don't want to purge jobs prematurely.
+The slurmd daemons execute the user scripts and there must be at least one
+front end node configured for this purpose. Multiple front end nodes may be
+configured for slurmd use to improve performance and fault tolerance.
+Each slurmd can execute jobs for every base partition and the work will be
+distributed among the slurmd daemons to balance the workload.
+You can use the scontrol command to drain individual compute nodes as desired
+and return them to service.</p>
+
+<p>The <i>slurm.conf</i> (configuration) file needs to have the value of
+<i>InactiveLimit</i> set to zero or not specified (it defaults to a value of zero).
+This is because if there are no job steps, we don't want to purge jobs prematurely.
 The value of <i>SelectType</i> must be set to "select/bluegene" in order to have
 node selection performed using a system aware of the system's topography
 and interfaces.
@@ -248,7 +255,7 @@ that serves this function and calls the supplied program <i>sbin/slurm_epilog</i
 The prolog and epilog programs are used to insure proper synchronization
 between the slurmctld daemon, the user job, and MMCS.
 A multitude of other functions may also be placed into the prolog and
-epilog as desired (e.g. enabling/disabling user logins, puring file systmes,
+epilog as desired (e.g. enabling/disabling user logins, purging file systems,
 etc.).  Sample prolog and epilog scripts follow. </p>
 
 <pre>
@@ -257,8 +264,9 @@ etc.).  Sample prolog and epilog scripts follow. </p>
 #
 # Wait for bgblock to be ready for this job's use
 /usr/sbin/slurm_prolog
+</pre>
 
-
+<pre>
 #!/bin/bash
 # Sample BlueGene Epilog script
 #
@@ -294,31 +302,47 @@ the scontrol reconfig command. </p>
 
 <p>SLURM node and partition descriptions should make use of the
 <a href="#naming">naming</a> conventions described above. For example,
-"NodeName=bg[000x733] NodeAddr=frontend0 NodeHostname=frontend0 Procs=1024"
+"NodeName=bg[000x733] CPUs=1024"
 is used in <i>slurm.conf</i> to define a BlueGene system with 128 midplanes
-in an 8 by 4 by 4 matrix.
+in an 8 by 4 by 4 matrix and each midplane is configured with 1024 processors
+(cores).
 The node name prefix of "bg" defined by NodeName can be anything you want,
 but needs to be consistent throughout the <i>slurm.conf</i> file.
-Note that the values of both NodeAddr and NodeHostname for all
-128 base partitions is the name of the front-end node executing
-the slurmd daemon.
 No computer is actually expected to a hostname of "bg000" and no
-attempt will be made to route message traffic to this address. </p>
+attempt will be made to route message traffic to this address.</p>
+
+<p>Front end nodes used for executing the slurmd daemons must also be defined
+in the <i>slurm.conf</i> file.
+It is recommended that at least two front end nodes be dedicated to use by
+the slurmd daemons for fault tolerance.
+For example:
+"FrontendName=frontend[00-03] State=UNKNOWN"
+is used to define four front end nodes for running slurmd daemons.</p>
 
-<p>While users are unable to initiate SLURM job steps on BlueGene systems,
-this restriction does not apply to user root or <i>SlurmUser</i>.
-Be advised that the one slurmd supporting all nodes is unable to manage a
-large number of job steps, so this ability should be used only to verify normal
-SLURM operation.
+<pre>
+# Portion of slurm.conf for BlueGene system
+InactiveLimit=0
+SelectType=select/bluegene
+Prolog=/usr/sbin/prolog
+Epilog=/usr/sbin/epilog
+#
+FrontendName=frontend[00-01] State=UNKNOWN
+NodeName=bg[000x733] CPUs=1024 State=UNKNOWN
+</pre>
+
+<p>While users are unable to initiate SLURM job steps on BlueGene/L or BlueGene/P
+systems, this restriction does not apply to user root or <i>SlurmUser</i>.
+Be advised that the slurmd daemon is unable to manage a large number of job
+steps, so this ability should be used only to verify normal SLURM operation.
 If large numbers of job steps are initiated by slurmd, expect the daemon to
 fail due to lack of memory or other resources.
-It is best to minimize other work on the front-end node executing slurmd
+It is best to minimize other work on the front end nodes executing slurmd
 so as to maximize its performance and minimize other risk factors.</p>
 
 <a name="bluegene-conf"><h2>Bluegene.conf File Creation</h2></a>
 <p>In addition to the normal <i>slurm.conf</i> file, a new
 <i>bluegene.conf</i> configuration file is required with information pertinent
-to the sytem.
+to the system.
 Put <i>bluegene.conf</i> into the SLURM configuration directory with
 <i>slurm.conf</i>.
 A sample file is installed in <i>bluegene.conf.example</i>.
@@ -326,7 +350,7 @@ System administrators should use the <i>smap</i> tool to build appropriate
 configuration file for static partitioning.
 Note that <i>smap -Dc</i> can be run without the SLURM daemons
 active to establish the initial configuration.
-Note that the defined bgblocks may not overlap (except for the
+Note that the bgblocks defined using smap may not overlap (except for the
 full-system bgblock, which is implicitly created).
 See the smap man page for more information.</p>
 
@@ -371,7 +395,7 @@ the name prefix in addition to the numeric coordinates.</p>
 Dynamic partitioning was developed primarily for smaller BlueGene systems,
 but can be used on larger systems.
 Dynamic partitioning may introduce fragmentation of resources.
-This fragementaiton may be severe since SLURM will run a job anywhere
+This fragmentation may be severe since SLURM will run a job anywhere
 resources are available with little thought of the future.
 As with overlap partitioning, <b>use dynamic partitioning with
 caution!</b>
@@ -383,17 +407,17 @@ for this mode.</p>
 <p>Blocks can be freed or set in an error state with scontrol,
 (i.e. "<i>scontrol update BlockName=RMP0 state=error</i>").
 This will end any job on the block and set the state of the block to ERROR
-making it so no job will run on the block.  To set it back to a useable
-state set the state to free (i.e.
+making it so no job will run on the block.  To set it back to a usable
+state, set the state to free (i.e.
 "<i>scontrol update BlockName=RMP0 state=free</i>").
 
 <p>Alternatively, if only part of a base partition needs to be put
 into an error state which isn't already in a block of the size you
-need, you can set a set of ionodes into an error state with scontrol,
+need, you can set a collection of IO nodes into an error state using scontrol
 (i.e. "<i>scontrol update subbpname=bg000[0-3] state=error</i>").
 This will end any job on the nodes listed, create a block there, and set
 the state of the block to ERROR making it so no job will run on the
-block.  To set it back to a useable state set the state to free (i.e.
+block.  To set it back to a usable state set the state to free (i.e.
 "<i>scontrol update BlockName=RMP0 state=free</i>" or
  "<i>scontrol update subbpname=bg000[0-3] state=free</i>"). This is
  helpful to allow other jobs to run on the unaffected nodes in
@@ -411,14 +435,19 @@ file (i.e. <i>BasePartitionNodeCnt=512</i> and <i>NodeCardNodeCnt=32</i>).</p>
 
 <p>Note that the <i>Numpsets</i> values defined in
 <i>bluegene.conf</i> is used only when SLURM creates bgblocks this
-determines if the system is IO rich or not.  For most bluegene/L
+determines if the system is IO rich or not.  For most BlueGene/L
 systems this value is either 8 (for IO poor systems) or 64 (for IO rich
-systems).
-<p>The <i>Images</i> can change during job start based on input from
-the user.
+systems).</p>
+
+<p>The <i>Images</i> file specifications identify which images are used when
+booting a bgblock and the valid images are different for each BlueGene system
+type (e.g. L, P and Q). Their values can change during job allocation based on
+input from the user.
 If you change the bgblock layout, then slurmctld and slurmd should
-both be cold-started (e.g. <b>/etc/init.d/slurm startclean</b>).
-If you wish to modify the <i>Numpsets</i> values
+both be cold-started (without preserving any state information,
+"/etc/init.d/slurm startclean").</p>
+
+<p>If you wish to modify the <i>Numpsets</i> values
 for existing bgblocks, either modify them manually or destroy the bgblocks
 and let SLURM recreate them.
 Note that in addition to the bgblocks defined in <i>bluegene.conf</i>, an
@@ -429,7 +458,7 @@ bgblocks.
 A sample <i>bluegene.conf</i> file is shown below.
 <pre>
 ###############################################################################
-# Global specifications for BlueGene system
+# Global specifications for a BlueGene/L system
 #
 # BlrtsImage:           BlrtsImage used for creation of all bgblocks.
 # LinuxImage:           LinuxImage used for creation of all bgblocks.
@@ -531,7 +560,7 @@ BridgeAPIVerbose=0
 # volume = 1x1x1 = 1
 BPs=[000x000] Type=TORUS                            # 1x1x1 =  1 midplane
 BPs=[001x001] Type=SMALL 32CNBlocks=4 128CNBlocks=3 # 1x1x1 = 4-Nodecard sized
-                                                    # cnode blocks 3-Base
+                                                    # c-node blocks 3-Base
                                                     # Partition Quarter sized
                                                     # c-node blocks
 
@@ -539,8 +568,8 @@ BPs=[001x001] Type=SMALL 32CNBlocks=4 128CNBlocks=3 # 1x1x1 = 4-Nodecard sized
 
 <p>The above <i>bluegene.conf</i> file defines multiple bgblocks to be
 created in a single midplane (see the "SMALL" option).
-Using this mechanism, up to 32 independent jobs each consisting of 1
-  32 cnodes can be executed
+Using this mechanism, up to 32 independent jobs each consisting of
+32 c-nodes can be executed
 simultaneously on a one-rack BlueGene system.
 If defining bgblocks of <i>Type=SMALL</i>, the SLURM partition
 containing them as defined in <i>slurm.conf</i> must have the
@@ -552,9 +581,10 @@ scheduler performance.
 As in all SLURM configuration files, parameters and values
 are case insensitive.</p>
 
-<p> With a BlueGene/P system the image names are different.  The
-  correct image names are CnloadImage, MloaderImage, and IoloadImage.
-  You can also use alternate images just the same as discribed above.
+<p>The valid image names on a BlueGene/P system are CnloadImage, MloaderImage,
+and IoloadImage. The only image name on BlueGene/Q systems is MloaderImage.
+Alternate images may be specified as described above for all BlueGene system
+types.</p>
 
 <p>One more thing is required to support SLURM interactions with
 the DB2 database (at least as of the time this was written).
@@ -601,9 +631,9 @@ repeated reboots and the likely failure of user jobs.
 A system administrator should address the problem before returning
 the base partitions to service.</p>
 
-<p>If you cold-start slurmctld (<b>/etc/init.d/slurm startclean</b>
-or <b>slurmctld -c</b>) it is recommended that you also cold-start
-the slurmd at the same time.
+<p>If the slurmctld daemon is cold-started (<b>/etc/init.d/slurm startclean</b>
+or <b>slurmctld -c</b>) it is recommended that the slurmd daemon(s) be
+cold-started at the same time.
 Failure to do so may result in errors being reported by both slurmd
 and slurmctld due to bgblocks that previously existed being deleted.</p>
 
@@ -614,20 +644,32 @@ Run <i>sfree --help</i> for more information.</p>
 
 <h4>Resource Reservations</h4>
 
-<p><b>This reservation mechanism for less than an entire midplane is still
-under development.</b></p>
+<p>SLURM's advance reservation mechanism can accept a node count specification
+as input rather than identification of specific nodes/midplanes. In that case,
+SLURM may reserve nodes/midplanes which may not be formed into an appropriate
+bgblock. Work is planned for SLURM version 2.4 to remedy this problem. Until
+that time, identifying the specific nodes/midplanes to be included in an
+advanced reservation may be necessary.</p>
 
 <p>SLURM's advance reservation mechanism is designed to reserve resources
 at the level of whole nodes, which on a BlueGene systems would represent
 whole midplanes. In order to support advanced reservations with a finer
 grained resolution, you can configure one license per c-node on the system
-and reserve c-nodes instead of entire midplanes. For example, in slurm.conf
-specify something of this sort: "<i>Licenses=cnode*512</i>". Then create an
-advanced reservation with a command like this:<br>
-"<i>scontrol create reservation licenses="cnode*32" starttime=now duration=30:00 users=joe</i>".</p>
+and reserve c-nodes instead of entire midplanes. Note that reserved licenses
+are treated somewhat differently than reserved nodes. When nodes are reserved
+then jobs using that reservation can use only those nodes. Reserved licenses
+can only be used by jobs associated with that reservation, but licenses not
+explicitly reserved are available to any job.</p>
+
+<p>For example, in <i>slurm.conf</i> specify something of this sort:
+"<i>Licenses=cnode*512</i>". Then create an advanced reservation with a
+command like this:<br>
+"<i>scontrol create reservation licenses="cnode*32" starttime=now duration=30:00 users=joe</i>".<br>
+Jobs run in this reservation will then have <b>at least</b> 32 c-nodes
+available for their use, but could use more given an appropriate workload.</p>
 
 <p>There is also a job_submit/cnode plugin available for use that will
-automatically set a job's license specification to match his c-node request
+automatically set a job's license specification to match its c-node request
 (i.e. a command like<br>
 "<i>sbatch -N32 my.sh</i>" would automatically be translated to<br>
 "<i>sbatch -N32 --licenses=cnode*32 my.sh</i>" by the slurmctld daemon.
@@ -639,7 +681,7 @@ Enable this plugin in the slurm.conf configuration file with the option
 <p>All of the testing and debugging guidance provided in
 <a href="quickstart_admin.html"> Quick Start Administrator Guide</a>
 apply to BlueGene systems.
-One can start the <i>slurmctld</i> and <i>slurmd</i> in the foreground
+One can start the <i>slurmctld</i> and <i>slurmd</i> daemons in the foreground
 with extensive debugging to establish basic functionality.
 Once running in production, the configured <i>SlurmctldLog</i> and
 <i>SlurmdLog</i> files will provide historical system information.
@@ -659,18 +701,22 @@ Run <b>configure</b> with the <b>--enable-bgl-emulation</b> option.
 This will define "HAVE_BG", "HAVE_BGL", and "HAVE_FRONT_END" in the
 config.h file.
 You can also emulate a BlueGene/P system with
-  the <b>--enable-bgp-emulation</b> option.
+the <b>--enable-bgp-emulation</b> option.
 This will define "HAVE_BG", "HAVE_BGP", and "HAVE_FRONT_END" in the
 config.h file.
+You can also emulate a BlueGene/Q system using
+the <b>--enable-bgq-emulation</b> option.
+This will define "HAVE_BG", "HAVE_BGQ", and "HAVE_FRONT_END" in the
+config.h file.
 Then execute <b>make</b> normally.
 These variables will build the code as if it were running
 on an actual BlueGene computer, but avoid making calls to the
-Bridge libary (that is controlled by the variable "HAVE_BG_FILES",
+Bridge library (that is controlled by the variable "HAVE_BG_FILES",
 which is left undefined). You can use this to test configurations,
 scheduling logic, etc. </p>
 
 <p class="footer"><a href="#top">top</a></p>
 
-<p style="text-align:center;">Last modified 17 March 2009</p>
+<p style="text-align:center;">Last modified 16 August 2011</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/bull.jpg b/doc/html/bull.jpg
new file mode 100755
index 0000000000000000000000000000000000000000..dec0f75669e2c85cfa9b17e50f6edc79982864e6
GIT binary patch
literal 1555
zcmY*Tdpy$%9R1DDFof;yVTPeG6S8HR%A-8T9g|SHy*w%(lO~T|*Q1AO&B~^m63P@G
zqP0>n`ZT4CJ~2Y*f#lJm<(a76b^qvoI_IBrzUT8f=c{_98UTy}eCfUb1OfmMH33yW
zZ~}m7X(14XMo1$h3V}jl5J+Pr8f${Ypv*9EEXMdJ&1~=(ECGwg;8$AM5J@z9dz3ku
z=Hy7VSz%9tz;tz&=^G#o4KXBBv?+;1a-fn{l1NmtTK)#d-`p>wdIZ3AfOSA72(bb*
z;1CcFQ9S{00H6Uve!lt(S`etVCa9qfeT)DN2nYe8x_VI9e>p@01T^7VP=uv6c|E_w
z$~$Bi`#@IiwL5=o@Cx0OBq$L#%F));sM|=)asnkfS*ldm7_0k#p8z158lXBcgF^rf
z4Rz=LP%pqO$(o1|RUe=as$)0^2Rs0IO3*#Dz)$za5?3-xObMe~uuVrTl+}fW75bS7
zj+@P$$?(ZEmptP46Z7JTY}w09TiH!_pQ(*<>Nu}=gu;jw_XpS4q9*8z_^>8aiOiC4
zm&XyFiPG{nU;nhOXS-)Qp{T99=KcMesCSW-YmYmiywIW(OA+Q--@2Nr`kE!pNgnea
z$0W&fB^+B_+mg%@4!50w!d^9jsSl`GKjK`^RH?w3(;_K+4CuTi#Gu>=9dEmb+xGM|
zcE}u>Lo%Hde0pQ~q^qool6NTNhw0Hz=E_j!^p)hI-L9lJ&S|jNglKFiP7>t{W{^<?
z@o3%nNcEEroafOz57rTspC~o|*br%RBGlF5z!%MoH||8+_F{ox@<+|KC*H}C^)<Ek
z4xW4xP`Mmwz)u{!AGkB8{ZQ78Wz>V=vDkrL&BGK!apkPzJKlta4mRCj$BD_?&vaUO
z_(<W-Hx+F-*GJmL@~q1%yin9B<7ij&$nyi!a3%w}39W2TN;oS--JNn=E#yUDhbZO#
zZD*|nmi^OWAr@zkJWn&QFb6vshjXOjda3pj?0aYUp}3W7OxHPO6|}qj<*KS-kucBO
zKYyQ+@PIWuqXJ69y3aK5H^)%ONq9g|ge%EQIh#KHU^-O=$j1u=cYbW&;kp0?w;@A3
zH=eTFGtZtD9g#9-id$|eyMId0tTOxXp%L3FA_ITxAca0@9ZXv~P&O#zf*-2a$X?z2
z95CUWJqaT;heh*g@9liq<5kRf1-@wUR&NClKl#>z7R*)zHQDq9NcTUp-tzLk%beoE
zi#4mb-{Lozd~6+ksF;hVAlNK6bU4w=|G8UsmT+GJk<60T?^m$vA&(N8n57GLZNBtN
zrFhxGN#;lY79X+K{<Rm0-Q|9w9$!H}`SB`j5J<vG*CiSH@sInJRvk%IxLwY++_Epw
zmZrbSug-t`X>fJ53ScF;C#P(kxo#F;V^tI%e)6$S_rPk})34`D-!BALlsvr0x|FQ~
ze%Pyk%&uiO8=mh7_h_^m-9>U)^r?g1tt`fF$hdmF;E*Wy>#=lr7$&m!_TACxa1~Hg
zwo|b@4rjr2pH7UqjYk@=4pvq&aeB4rj0uMXOJ=oxtQ4d*KcltLsLVxlYgrs#<c!H_
zk?`iX*M906;dN)veh4)_#|{(Yj<`mx@l<XES%C(HrRk44FA~F3*_Te%NFNCF9ya}X
z@UKF-gnM8#2l*xN+vi;kZ@9TWIZMKFB9ON;jXu^!bqZnbqL;Xw{v4t3&2T$h&Fcdj
zVibP;RgD}8gB;sY;5Eii5l(alU2IR%LTrZu?9EyFbY-pCi*=sG4ttRy^rfqD3!~_%
zt+uJBW%*M`ab0E_eK5C<hF?a^am$#YAC}K4b~G_uxEXWlDqxI1E1c+IWal<q5GnH`
zt`El!4u=s0kqyXK<&4ZTan|)mLE*K1{4KiwxSO=TINt|*U9kJ_*N#&Z6iYuQHp@Jq
zcdW@LDxvh=DS6E7v;JE|d4cEr&BDElDG!F!FFT!T@AsasRF8z-w^<W8(p$#=kZyR?
Y&JLlMW*;=SDVOKo(oPQLa8$kj0&FyzB>(^b

literal 0
HcmV?d00001

diff --git a/doc/html/checkpoint_plugins.shtml b/doc/html/checkpoint_plugins.shtml
index 6275148d2..2e21ec7a5 100644
--- a/doc/html/checkpoint_plugins.shtml
+++ b/doc/html/checkpoint_plugins.shtml
@@ -22,7 +22,6 @@ We recommend, for example:</p>
 Berkeley Lab Checkpoint/Restart (BLCR)</a></li>
 <li><b>none</b>&#151;No job checkpoint.</li>
 <li><b>ompi</b>&#151;OpenMPI checkpoint (requires OpenMPI version 1.3 or higher).</li>
-<li><b>xlch</b>&#151;XLCH</li>
 </ul></p>
 
 <p>The <span class="commandline">plugin_name</span> and
diff --git a/doc/html/configurator.html.in b/doc/html/configurator.html.in
index 4e41421cf..0296d4af6 100644
--- a/doc/html/configurator.html.in
+++ b/doc/html/configurator.html.in
@@ -1,11 +1,11 @@
 <!--
 Copyright (C) 2005-2007 The Regents of the University of California.
-Copyright (C) 2008-2010 Lawrence Livermore National Security.
+Copyright (C) 2008-2011 Lawrence Livermore National Security.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 Written by Morris Jette <jette1@llnl.gov> and Danny Auble <da@llnl.gov>
 
 This file is part of SLURM, a resource management program.
-For details, see <https://computing.llnl.gov/linux/slurm/>.
+For details, see <http://www.schedmd.com/slurmdocs/>.
 
 SLURM is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
@@ -39,27 +39,6 @@ function get_field2(name,form)
   return ""
 }
 
-function get_accounting_storage_type_field(gather, form_storage)
-{
-  for (var i=0; i < form_storage.length; i++)
-  {
-    if (form_storage[i].checked)
-    {
-      if(form_storage[i].value == "none" && !(gather == "none"))
-      {
-	return "filetxt"
-      }
-      else if(!(form_storage[i].value == "none") && gather == "none")
-      {
-	return "none"
-      }
-      else {
-	return form_storage[i].value
-      }
-    }
-  }
-}
-
 function get_radio_field_skipfirst(name,form)
 {
   for (var i=1; i < form.length; i++)
@@ -152,6 +131,7 @@ function displayfile()
    get_field("Epilog",document.config.epilog) + "<br>" +
    "#PrologSlurmctld= <br>" +
    "#FirstJobId=1 <br>" +
+   "#MaxJobId=999999 <br>" +
    "#GresTypes= <br>" +
    "#GroupUpdateForce=0 <br>" +
    "#GroupUpdateTime=600 <br>" +
@@ -165,6 +145,7 @@ function displayfile()
    "#Licenses=foo*4,bar <br>" +
    "#MailProg=/bin/mail <br>" +
    "#MaxJobCount=5000 <br>" +
+   "#MaxStepCount=40000 <br>" +
    "#MaxTasksPerNode=128 <br>" +
    "MpiDefault=" + get_radio_value(document.config.mpi_default) + "<br>" +
    "#MpiParams=ports=#-# <br>" +
@@ -253,8 +234,9 @@ function displayfile()
    get_field("AccountingStorageLoc",document.config.accounting_storage_loc) + "<br>" +
    get_field("AccountingStoragePass",document.config.accounting_storage_pass) + "<br>" +
    get_field("AccountingStoragePort",document.config.accounting_storage_port) + "<br>" +
-   "AccountingStorageType=accounting_storage/" + get_accounting_storage_type_field(get_radio_value(document.config.job_acct_gather_type), document.config.accounting_storage_type) + "<br>" +
+   "AccountingStorageType=accounting_storage/" + get_radio_value(document.config.accounting_storage_type) + "<br>" +
    get_field("AccountingStorageUser",document.config.accounting_storage_user) + "<br>" +
+   get_field("AccountingStoreJobComment",document.config.acctng_store_job_comment) + "<br>" +
    get_field("ClusterName",document.config.cluster_name) + "<br>" +
    "#DebugFlags= <br>" +
    get_field("JobCompHost",document.config.job_comp_host) + "<br>" +
@@ -288,7 +270,7 @@ function displayfile()
    "# COMPUTE NODES <br>" +
    "NodeName=" + document.config.node_name.value +
    get_field2(" NodeAddr",document.config.node_addr) +
-   get_field2(" Procs",document.config.procs) +
+   get_field2(" CPUs",document.config.procs) +
    get_field2(" RealMemory",document.config.memory) +
    get_field2(" Sockets",document.config.sockets) +
    get_field2(" CoresPerSocket",document.config.cores_per_socket) +
@@ -350,9 +332,7 @@ file. It will appear on your web browser. Save the file in text format
 as <I>slurm.conf</I> for use by SLURM.
 
 <P>For more information about SLURM, see
-<A HREF="https://computing.llnl.gov/linux/slurm/">https://computing.llnl.gov/linux/slurm/</A>
-<P>
-<A HREF="https://www.llnl.gov/disclaimer.html"><B>Privacy and legal notice</B></A>
+<A HREF="http://www.schedmd.com/slurmdocs/slurm.html">http://www.schedmd.com/slurmdocs/slurm.html</A>
 
 <H2>Control Machines</H2>
 Define the hostname of the computer on which the SLURM controller and
@@ -360,7 +340,7 @@ optional backup controller will execute. You can also specify addresses
 of these computers if desired (defaults to their hostnames).
 The IP addresses can be either numeric IP addresses or names.
 Hostname values should should not be the fully qualified domain
-name (e.g. use <I>linux</I> rather than <I>linux.llnl.gov</I>).
+name (e.g. use <I>tux</I> rather than <I>tux.abc.com</I>).
 <P>
 <input type="text" name="control_machine" value="linux0"> <B>ControlMachine</B>:
 Master Controller Hostname
@@ -404,19 +384,19 @@ Name of the one partition to be created
 Maximum time limit of jobs in minutes or INFINITE
 <P>
 The following parameters describe a node's configuration.
-Set a value for <B>Procs</B>.
+Set a value for <B>CPUs</B>.
 The other parameters are optional, but provide more control over scheduled resources:
 <P>
-<input type="text" name="procs" value="1"> <B>Procs</B>: Count of processors
+<input type="text" name="procs" value="1"> <B>CPUs</B>: Count of processors
 on each compute node.
-If Procs is omitted, it will be inferred from:
+If CPUs is omitted, it will be inferred from:
 Sockets, CoresPerSocket, and ThreadsPerCore.
 <P>
 <input type="text" name="sockets" value="">
 <B>Sockets</B>:
 Number of physical processor sockets/chips on the node.
 If Sockets is omitted, it will be inferred from:
-Procs, CoresPerSocket, and ThreadsPerCore.
+CPUs, CoresPerSocket, and ThreadsPerCore.
 <P>
 <input type="text" name="cores_per_socket" value="">
 <B>CoresPerSocket</B>:
@@ -584,7 +564,7 @@ with a job step.<BR>
 Select one value for <B>ProctrackType</B>:<BR>
 <input type="radio" name="proctrack_type" value="aix"> <B>AIX</B>: Use AIX kernel
 extension, recommended for AIX systems<BR>
-<input type="radio" name="proctrack_type" value="cgroup"> <B>Cgroup</B>: Use
+<input type="radio" name="proctrack_type" value="cgroup"> <B>Cgroup</B>:  Use
 Linux <i>cgroups</i> to create a job container and track processes.
 Build a <i>cgroup.conf</i> file as well<BR>
 <input type="radio" name="proctrack_type" value="pgid" checked> <B>Pgid</B>: Use Unix
@@ -661,7 +641,10 @@ Select one value for <B>TaskPlugin</B>:<BR>
 <input type="radio" name="task_plugin" value="none" checked> <B>None</B>: No task launch actions<BR>
 <input type="radio" name="task_plugin" value="affinity"> <B>Affinity</B>:
 CPU affinity support
-(see srun man pages for the --cpu_bind, --mem_bind, and -E options)
+(see srun man pages for the --cpu_bind, --mem_bind, and -E options)<BR>
+<input type="radio" name="task_plugin" value="cgroup"> <B>Cgroup</B>:
+Allocated resources constraints enforcement using Linux Control Groups
+(see cgroup.conf man page)
 <DL><DL>
 <DT><B>TaskPluginParam</B> (As used by <I>TaskPlugin=Affinity</I> only):
 <DT><input type="radio" name="task_plugin_param" value="Cpusets">
@@ -720,7 +703,8 @@ log goes to syslog, string "%h" in name gets replaced with hostname)
 <P>
 
 <H2>Job Completion Logging</H2>
-Define the job completion logging mechanism to be used.<BR>
+Define the job completion logging mechanism to be used. SlurmDBD and None are
+recommended. The PGSQL plugin is not fully supported. <BR>
 Select one value for <B>JobCompType</B>:<BR>
 <input type="radio" name="job_comp_type" value="none" checked> <B>None</B>:
 No job completion logging<BR>
@@ -778,7 +762,7 @@ save the data from many Slurm managed clusters into a common database<BR>
 <input type="radio" name="accounting_storage_type" value="mysql"> <B>MySQL</B>:
 Write job accounting to a MySQL database<BR>
 <input type="radio" name="accounting_storage_type" value="pgsql"> <B>PGSQL</B>:
-Write job accounting to a PostreSQL database<BR>
+Write job accounting to a PostgreSQL database (not fully supported)<BR>
 <input type="radio" name="accounting_storage_type" value="slurmdbd"> <B>SlurmDBD</B>:
 Write job accounting to Slurm DBD (database daemon) which can securely
 save the data from many Slurm managed clusters into a common database<BR>
@@ -798,6 +782,8 @@ Password we are to use to talk to the database for Job Accounting.
 In the case of SlurmDBD, this will be an alternate socket name for use with a Munge
 daemon providing enterprise-wide authentication (while the default Munge socket
 would provide cluster-wide authentication only).<br>
+<input type="text" name="acctng_store_job_comment" value="YES"> <B>AccountingStoreJobComment</B>:
+Set to NO to prevent the job comment field from being saved to the database<br>
 <input type="text" name="cluster_name" value="cluster"> <B>ClusterName</B>:
 Name to be recorded in database for jobs from this cluster.
 This is important if a single database is used to record information
@@ -857,6 +843,6 @@ before terminating all remaining tasks. A value of zero indicates unlimited wait
 <P>
 </FORM>
 <HR>
-<P class="footer">LLNL-WEB-402631<BR>
-Last modified 25 August 2010</P>
+<a href="disclaimer.html" target="_blank" class="privacy">Legal Notices</a><br>
+Last modified 8 August 2011</P>
 </BODY>
diff --git a/doc/html/cpu_management.shtml b/doc/html/cpu_management.shtml
new file mode 100644
index 000000000..7d57f1148
--- /dev/null
+++ b/doc/html/cpu_management.shtml
@@ -0,0 +1,3543 @@
+<!--#include virtual="header.txt"-->
+
+<h1> CPU Management User and Administrator Guide</h1>
+<a name="Overview"></a>
+<h2>Overview</h2>
+<p>The purpose of this guide is to assist SLURM users and administrators in selecting configuration options 
+and composing command lines to manage the use of CPU resources by jobs, steps and tasks. The document 
+is divided into the following sections:</p>
+<ul>
+<li><a href="#Overview">Overview</a></li>
+<li><a href="#Section1">CPU Management Steps performed by SLURM</a></li>
+<li><a href="#Section2">Getting Information about CPU usage by Jobs/Steps/Tasks</a></li>
+<li><a href="#Section3">CPU Management and SLURM Accounting</a></li>
+<li><a href="#Section4">CPU Management Examples</a></li>
+</ul>
+
+<p>CPU Management through user commands is constrained by the configuration parameters
+chosen by the SLURM administrator. The interactions between different CPU management options are complex 
+and often difficult to predict. Some experimentation may be required to discover the exact combination 
+of options needed to produce a desired outcome. Users and administrators should refer to the man pages 
+for <a href="slurm.conf.html">slurm.conf</a>, <a href="cgroup.conf.html">cgroup.conf</a>,
+<a href="salloc.html">salloc</a>, 
+<a href="sbatch.html">sbatch</a> and <a href="srun.html">srun</a> for detailed explanations of each 
+option. The following html documents may also be useful:</p>
+
+<p>
+<a href="cons_res.html">Consumable Resources in SLURM</a><br>
+<a href="cons_res_share.html">Sharing Consumable Resources</a><br>
+<a href="mc_support.html">Support for Multi-core/Multi-thread 
+Architectures</a><br>
+<a href="dist_plane.html">Plane distribution</a></p>
+
+<p>This document describes SLURM CPU management for conventional Linux clusters only.  For
+information on Cray and IBM BlueGene systems, please refer to the appropriate documents.</p>
+<p>The information and examples in this document have been verified on SLURM version 2.3.0. Some
+information may not be valid for previous SLURM versions.</p><br>
+<a name="Section1"></a>
+<h2>CPU Management Steps performed by SLURM</h2>
+<p>SLURM uses four basic steps to manage CPU resources for a job/step:</p>
+<ul>
+<li><a href="#Step1">Step 1: Selection of Nodes</a>
+</li><li><a href="#Step2">Step 2: Allocation of CPUs from the selected Nodes</a>
+</li><li><a href="#Step3">Step 3: Distribution of Tasks to the selected Nodes</a>
+</li><li><a href="#Step4">Step 4: Optional Distribution and Binding of Tasks to CPUs within a Node 
+</a>
+</li></ul>
+<a name="Step1"></a>
+<h3>Step 1: Selection of Nodes</h3>
+<p>In Step 1, SLURM selects the set of nodes from which CPU resources are to be allocated to a job or 
+job step.  Node selection is therefore influenced by many of the configuration and command line options
+that control the allocation of CPUs (Step 2 below).
+If <font face="Courier New, monospace">
+SelectType=select/linear</font> is configured, all resources on the selected nodes will be allocated
+to the job/step. If <font face="Courier New, monospace">SelectType=select/cons_res</font> is configured, 
+individual sockets, cores and threads may be allocated from the selected nodes as 
+<a href="cons_res.html">consumable resources</a>. The consumable resource type is defined by 
+<font face="Courier New, monospace">SelectTypeParameters.</font>
+<br>
+<br>
+Step 1 is performed by slurmctld and the select plugin.
+<br>
+</p><center>
+	<table style="page-break-inside: avoid; font-family: Arial,Helvetica,sans-serif;" border="1" bordercolor="#000000" cellpadding="6" cellspacing="0" width="100%">
+                <caption style="font-size: 8pt"><b>slurm.conf options that control Step 1</b></caption>
+		<colgroup><col width="20%">
+		<col width="20%">
+		<col width="60%">
+		</colgroup><tbody><tr>
+			<td bgcolor="#e0e0e0" height="18" width="20%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>slurm.conf
+				parameter</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" height="18" width="20%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Possible values</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="60%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Description</b></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="slurm.conf.html#OPT_NodeName">NodeName</a></font></font></p>
+			</td>
+			<td height="18" width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">&lt;name of the node&gt;<br><br>
+				Plus additional parameters. See man page for details.</font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">Defines
+				a node. This includes the number and layout of sockets, cores,
+				threads and processors (logical CPUs) on the node.</font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="slurm.conf.html#OPT_PartitionName">PartitionName</a></font></font></p>
+			</td>
+			<td height="18" width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">&lt;name of the partition&gt;<br><br>
+				Plus additional parameters. See man page for details.</font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">Defines
+				a partition. Several parameters of the partition definition
+				affect the selection of nodes (e.g., Nodes,
+				Shared, MaxNodes)</font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="slurm.conf.html#OPT_FastSchedule">FastSchedule</a></font></font></p>
+			</td>
+			<td height="18" width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1"><font face="Courier New, monospace">0 | 1 | 2</font></font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">Controls
+				how the information in a node definition is used.</font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="slurm.conf.html#OPT_SelectType">SelectType</a></font></font></p>
+			</td>
+			<td height="18" width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">
+                               <font face="Courier New, monospace">select/linear | select/cons_res</font></font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">Controls
+				whether CPU resources are allocated to jobs and job steps in
+				units of whole nodes or as consumable resources (sockets, cores
+				or threads).</font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="17" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="slurm.conf.html#OPT_SelectTypeParameters">SelectTypeParameters</a></font></font></p>
+			</td>
+			<td height="18" width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1"><font face="Courier New, monospace">CR_CPU | CR_CPU_Memory | CR_Core |
+CR_Core_Memory | CR_Socket | CR_Socket_Memory</font><br><br>Plus additional options.  See man page for details.</font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">Defines
+				the consumable resource type and controls other aspects of CPU
+				resource allocation by the select plugin.</font></p>
+			</td>
+		</tr>
+	</tbody></table>
+</center> 
+<br>
+<center>
+	<table style="page-break-inside: avoid; font-family: Arial,Helvetica,sans-serif;" border="1" bordercolor="#000000" cellpadding="6" cellspacing="0" width="100%">
+                <caption style="font-size: 8pt"><b>srun/salloc/sbatch command line options that control Step 1</b></caption>
+		<colgroup><col width="20%">
+		<col width="20%">
+		<col width="60%">
+		</colgroup><tbody><tr>
+			<td bgcolor="#e0e0e0" height="17" width="20%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">
+                    <b style="font-family: Arial, Helvetica, sans-serif">Command
+				line option</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="20%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">
+                    <b style="font-family: Arial, Helvetica, sans-serif">Possible values</b></font></p>
+			</td><td bgcolor="#e0e0e0" width="60%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">
+                    <b style="font-family: Arial, Helvetica, sans-serif">Description</b></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="srun.html#OPT_extra-node-info">-B, --extra-node-info</a>
+				</font></font></p>
+			</td>
+			<td width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">
+				&lt;sockets[:cores[:threads]]&gt;</font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">Restricts
+				node selection to nodes with a specified layout of sockets, cores
+				and threads.</font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="srun.html#OPT_constraint">-C, --constraint</a></font></font></p>
+			</td>
+			<td width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">&lt;list&gt;
+				</font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font face="Arial, sans-serif"><font style="font-size: 8pt" size="1">Restricts
+				node selection to nodes with specified attributes</font></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="srun.html#OPT_contiguous">--contiguous</a></font></font></p>
+			</td>
+			<td width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">N/A</font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font face="Arial, sans-serif"><font style="font-size: 8pt" size="1">Restricts
+				node selection to contiguous nodes</font></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="srun.html#OPT_cores-per-socket">--cores-per-socket</a></font></font></p>
+			</td>
+			<td width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">&lt;cores&gt;</font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font face="Arial, sans-serif"><font style="font-size: 8pt" size="1">Restricts
+				node selection to nodes with at least the specified number of cores per socket</font></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="srun.html#OPT_cpus-per-task">-c, --cpus-per-task</a></font></font></p>
+			</td>
+			<td width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">&lt;ncpus&gt;</font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font face="Arial, sans-serif"><font style="font-size: 8pt" size="1">Controls
+				the number of CPUs allocated per task</font></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="srun.html#OPT_exclusive">--exclusive</a></font></font></p>
+			</td>
+			<td width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">N/A</font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font face="Arial, sans-serif"><font style="font-size: 8pt" size="1">Prevents
+				sharing of allocated nodes with other jobs. Suballocates CPUs to job steps.</font></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="salloc.html#OPT_nodefile">-F, --nodefile</a></font></font></p>
+			</td>
+			<td width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">&lt;node file&gt;</font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font face="Arial, sans-serif"><font style="font-size: 8pt" size="1">File
+				containing a list of specific nodes to be selected for the job (salloc and sbatch only)</font></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="srun.html#OPT_hint">--hint</a></font></font></p>
+			</td>
+			<td width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">compute_bound |
+                                memory_bound | [no]multithread</font></font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font face="Arial, sans-serif"><font style="font-size: 8pt" size="1">Additional
+				controls on allocation of CPU resources</font></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="srun.html#OPT_mincpus">--mincpus</a></font></font></p>
+			</td>
+			<td width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">&lt;n&gt;</font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font face="Arial, sans-serif"><font style="font-size: 8pt" size="1">Controls
+				the minimum number of CPUs allocated per node</font></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="srun.html#OPT_nodes">-N, --nodes</a></font></font></p>
+			</td>
+			<td width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">
+				&lt;minnodes[-maxnodes]&gt;</font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font face="Arial, sans-serif"><font style="font-size: 8pt" size="1">Controls
+				the minimum/maximum number of nodes allocated to the job</font></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="srun.html#OPT_ntasks">-n, --ntasks</a></font></font></p>
+			</td>
+			<td width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">&lt;number&gt;</font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font face="Arial, sans-serif"><font style="font-size: 8pt" size="1">Controls
+				the number of tasks to be created for the job</font></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="srun.html#OPT_ntasks-per-core">--ntasks-per-core</a></font></font></p>
+			</td>
+			<td width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">&lt;number&gt;</font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font face="Arial, sans-serif"><font style="font-size: 8pt" size="1">Controls
+				the maximum number of tasks per allocated core</font></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="srun.html#OPT_ntasks-per-socket">--ntasks-per-socket</a></font></font></p>
+			</td>
+			<td width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">&lt;number&gt;</font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font face="Arial, sans-serif"><font style="font-size: 8pt" size="1">Controls
+				the maximum number of tasks per allocated socket</font></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="srun.html#OPT_ntasks-per-node">--ntasks-per-node</a></font></font></p>
+			</td>
+			<td width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">&lt;number&gt;</font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font face="Arial, sans-serif"><font style="font-size: 8pt" size="1">Controls
+				the maximum number of tasks per allocated node</font></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="srun.html#OPT_overcommit">-O, --overcommit</a></font></font></p>
+			</td>
+			<td width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">N/A</font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font face="Arial, sans-serif"><font style="font-size: 8pt" size="1">Allows
+				fewer CPUs to be allocated than the number of tasks</font></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="srun.html#OPT_partition">-p, --partition</a></font></font></p>
+			</td>
+			<td width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">
+				&lt;partition_names&gt;</font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font face="Arial, sans-serif"><font style="font-size: 8pt" size="1">Controls
+				which partition is used for the job</font></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="srun.html#OPT_share">-s, --share</a></font></font></p>
+			</td>
+			<td width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">N/A</font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font face="Arial, sans-serif"><font style="font-size: 8pt" size="1">Allows
+				sharing of allocated nodes with other jobs</font></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="srun.html#OPT_sockets-per-node">--sockets-per-node</a></font></font></p>
+			</td>
+			<td width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">&lt;sockets&gt;</font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font face="Arial, sans-serif"><font style="font-size: 8pt" size="1">Restricts
+				node selection to nodes with at least the specified number of sockets</font></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="srun.html#OPT_threads-per-core">--threads-per-core</a></font></font></p>
+			</td>
+			<td width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">&lt;threads&gt;</font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font face="Arial, sans-serif"><font style="font-size: 8pt" size="1">Restricts
+				node selection to nodes with at least the specified number of threads per core</font></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="srun.html#OPT_nodelist">-w, --nodelist</a></font></font></p>
+			</td>
+			<td width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">
+				&lt;host1,host2,... or filename&gt;</font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font face="Arial, sans-serif"><font style="font-size: 8pt" size="1">List
+				of specific nodes to be allocated to the job</font></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="srun.html#OPT_exclude">-x, --exclude</a></font></font></p>
+			</td>
+			<td width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">
+                                &lt;host1,host2,... or filename&gt;</font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font face="Arial, sans-serif"><font style="font-size: 8pt" size="1">List
+				of specific nodes to be excluded from allocation to the job</font></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="17" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="srun.html#OPT_no-allocate">-Z, --no-allocate</a></font></font></p>
+			</td>
+			<td width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">N/A</font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font face="Arial, sans-serif"><font style="font-size: 8pt" size="1">Bypass
+				normal allocation (privileged option available to users
+				&#8220;SlurmUser&#8221; and &#8220;root&#8221; only)</font></font></p>
+			</td>
+		</tr>
+	</tbody></table>
+</center>
+<br>
+<a name="Step2"></a>
+<h3>Step 2: Allocation of CPUs from the selected Nodes</h3>
+<p>In Step 2, SLURM allocates CPU resources to a job/step from the set of nodes selected 
+in Step 1. CPU allocation is therefore influenced by the configuration and command line options
+that relate to node selection.
+If <font face="Courier New, monospace">
+SelectType=select/linear</font> is configured, all resources on the selected nodes will be allocated
+to the job/step. If <font face="Courier New, monospace">SelectType=select/cons_res</font> is configured, 
+individual sockets, cores and threads may be allocated from the selected nodes as 
+<a href="cons_res.html">consumable resources</a>. The consumable resource type is defined by 
+<font face="Courier New, monospace">SelectTypeParameters.</font>
+<br>
+</p><p>When using <font face="Courier New, monospace">SelectType=select/cons_res</font>, 
+the default allocation method across nodes is block allocation (allocate all available CPUs in 
+a node before using another node). The default allocation method within a node is cyclic 
+allocation (allocate available CPUs in a round-robin fashion across the sockets within a node). 
+Users may override the default behavior using the appropriate command 
+line options described below.  The choice of allocation methods may influence which specific
+CPUs are allocated to the job/step.
+<br><br>
+Step 2 is performed by slurmctld and the select plugin.
+<br>
+</p><center>
+	<table style="page-break-inside: avoid; font-family: Arial,Helvetica,sans-serif;" border="1" bordercolor="#000000" cellpadding="6" cellspacing="0" width="100%">
+                <caption style="font-size: 8pt"><b>slurm.conf options that control Step 2</b></caption>
+		<colgroup><col width="20%">
+		<col width="20%">
+		<col width="60%">
+		</colgroup><tbody><tr>
+			<td bgcolor="#e0e0e0" height="18" width="20%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>slurm.conf
+				parameter</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" height="18" width="20%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Possible values</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="60%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Description</b></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="slurm.conf.html#OPT_NodeName">NodeName</a></font></font></p>
+			</td>
+			<td height="18" width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">&lt;name of the node&gt;<br><br>
+				Plus additional parameters. See man page for details.</font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">Defines
+				a node. This includes the number and layout of sockets, cores,
+				threads and processors (logical CPUs) on the node.</font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="slurm.conf.html#OPT_PartitionName">PartitionName</a></font></font></p>
+			</td>
+			<td height="18" width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">&lt;name of the partition&gt;<br><br>
+				Plus additional parameters. See man page for details.</font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">Defines
+				a partition. Several parameters of the partition definition
+				affect the allocation of CPU resources to jobs (e.g., Nodes,
+				Shared, MaxNodes)</font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="slurm.conf.html#OPT_FastSchedule">FastSchedule</a></font></font></p>
+			</td>
+			<td height="18" width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1"><font face="Courier New, monospace">0 | 1 | 2</font></font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">Controls
+				how the information in a node definition is used.</font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="slurm.conf.html#OPT_SelectType">SelectType</a></font></font></p>
+			</td>
+			<td height="18" width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">
+                               <font face="Courier New, monospace">select/linear | select/cons_res</font></font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">Controls
+				whether CPU resources are allocated to jobs and job steps in
+				units of whole nodes or as consumable resources (sockets, cores
+				or threads).</font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="17" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="slurm.conf.html#OPT_SelectTypeParameters">SelectTypeParameters</a></font></font></p>
+			</td>
+			<td height="18" width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1"><font face="Courier New, monospace">CR_CPU | CR_CPU_Memory | CR_Core |
+CR_Core_Memory | CR_Socket | CR_Socket_Memory</font><br><br>Plus additional options.  See man page for details.</font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">Defines
+				the consumable resource type and controls other aspects of CPU
+				resource allocation by the select plugin.</font></p>
+			</td>
+		</tr>
+	</tbody></table>
+</center> 
+<br>
+<center>
+	<table style="page-break-inside: avoid; font-family: Arial,Helvetica,sans-serif;" border="1" bordercolor="#000000" cellpadding="6" cellspacing="0" width="100%">
+                <caption style="font-size: 8pt"><b>srun/salloc/sbatch command line options that control Step 2</b></caption>
+		<colgroup><col width="20%">
+		<col width="20%">
+		<col width="60%">
+		</colgroup><tbody><tr>
+			<td bgcolor="#e0e0e0" height="17" width="20%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">
+                    <b style="font-family: Arial, Helvetica, sans-serif">Command
+				line option</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="20%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">
+                    <b style="font-family: Arial, Helvetica, sans-serif">Possible values</b></font></p>
+			</td><td bgcolor="#e0e0e0" width="60%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">
+                    <b style="font-family: Arial, Helvetica, sans-serif">Description</b></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="srun.html#OPT_extra-node-info">-B, --extra-node-info</a>
+				</font></font></p>
+			</td>
+			<td width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">
+				&lt;sockets[:cores[:threads]]&gt;</font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">Restricts
+				node selection to nodes with a specified layout of sockets, cores
+				and threads.</font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="srun.html#OPT_constraint">-C, --constraint</a></font></font></p>
+			</td>
+			<td width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">&lt;list&gt;
+				</font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font face="Arial, sans-serif"><font style="font-size: 8pt" size="1">Restricts
+				node selection to nodes with specified attributes</font></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="srun.html#OPT_contiguous">--contiguous</a></font></font></p>
+			</td>
+			<td width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">N/A</font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font face="Arial, sans-serif"><font style="font-size: 8pt" size="1">Restricts
+				node selection to contiguous nodes</font></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="srun.html#OPT_cores-per-socket">--cores-per-socket</a></font></font></p>
+			</td>
+			<td width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">&lt;cores&gt;</font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font face="Arial, sans-serif"><font style="font-size: 8pt" size="1">Restricts
+				node selection to nodes with at least the specified number of cores per socket</font></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="srun.html#OPT_cpus-per-task">-c, --cpus-per-task</a></font></font></p>
+			</td>
+			<td width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">&lt;ncpus&gt;</font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font face="Arial, sans-serif"><font style="font-size: 8pt" size="1">Controls
+				the number of CPUs allocated per task</font></font></p>
+			</td>
+		</tr>
+				<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="srun.html#OPT_distribution">--distribution, -m</a></font></font></p>
+			</td>
+			<td width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1"><font face="Courier New, monospace">
+				block|cyclic<br>|arbitrary|plane=&lt;options&gt;[:block|cyclic]</font></font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">The second specified distribution (after the ":")
+				can be used to override the default allocation method within nodes</font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="srun.html#OPT_exclusive">--exclusive</a></font></font></p>
+			</td>
+			<td width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">N/A</font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font face="Arial, sans-serif"><font style="font-size: 8pt" size="1">Prevents
+				sharing of allocated nodes with other jobs</font></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="salloc.html#OPT_nodefile">-F, --nodefile</a></font></font></p>
+			</td>
+			<td width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">&lt;node file&gt;</font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font face="Arial, sans-serif"><font style="font-size: 8pt" size="1">File
+				containing a list of specific nodes to be selected for the job (salloc and sbatch only)</font></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="srun.html#OPT_hint">--hint</a></font></font></p>
+			</td>
+			<td width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">compute_bound |
+                                memory_bound | [no]multithread</font></font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font face="Arial, sans-serif"><font style="font-size: 8pt" size="1">Additional
+				controls on allocation of CPU resources</font></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="srun.html#OPT_mincpus">--mincpus</a></font></font></p>
+			</td>
+			<td width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">&lt;n&gt;</font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font face="Arial, sans-serif"><font style="font-size: 8pt" size="1">Controls
+				the minimum number of CPUs allocated per node</font></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="srun.html#OPT_nodes">-N, --nodes</a></font></font></p>
+			</td>
+			<td width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">
+				&lt;minnodes[-maxnodes]&gt;</font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font face="Arial, sans-serif"><font style="font-size: 8pt" size="1">Controls
+				the minimum/maximum number of nodes allocated to the job</font></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="srun.html#OPT_ntasks">-n, --ntasks</a></font></font></p>
+			</td>
+			<td width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">&lt;number&gt;</font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font face="Arial, sans-serif"><font style="font-size: 8pt" size="1">Controls
+				the number of tasks to be created for the job</font></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="srun.html#OPT_ntasks-per-core">--ntasks-per-core</a></font></font></p>
+			</td>
+			<td width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">&lt;number&gt;</font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font face="Arial, sans-serif"><font style="font-size: 8pt" size="1">Controls
+				the maximum number of tasks per allocated core</font></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="srun.html#OPT_ntasks-per-socket">--ntasks-per-socket</a></font></font></p>
+			</td>
+			<td width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">&lt;number&gt;</font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font face="Arial, sans-serif"><font style="font-size: 8pt" size="1">Controls
+				the maximum number of tasks per allocated socket</font></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="srun.html#OPT_ntasks-per-node">--ntasks-per-node</a></font></font></p>
+			</td>
+			<td width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">&lt;number&gt;</font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font face="Arial, sans-serif"><font style="font-size: 8pt" size="1">Controls
+				the maximum number of tasks per allocated node</font></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="srun.html#OPT_overcommit">-O, --overcommit</a></font></font></p>
+			</td>
+			<td width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">N/A</font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font face="Arial, sans-serif"><font style="font-size: 8pt" size="1">Allows
+				fewer CPUs to be allocated than the number of tasks</font></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="srun.html#OPT_partition">-p, --partition</a></font></font></p>
+			</td>
+			<td width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">
+				&lt;partition_names&gt;</font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font face="Arial, sans-serif"><font style="font-size: 8pt" size="1">Controls
+				which partition is used for the job</font></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="srun.html#OPT_share">-s, --share</a></font></font></p>
+			</td>
+			<td width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">N/A</font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font face="Arial, sans-serif"><font style="font-size: 8pt" size="1">Allows
+				sharing of allocated nodes with other jobs</font></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="srun.html#OPT_sockets-per-node">--sockets-per-node</a></font></font></p>
+			</td>
+			<td width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">&lt;sockets&gt;</font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font face="Arial, sans-serif"><font style="font-size: 8pt" size="1">Restricts
+				node selection to nodes with at least the specified number of sockets</font></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="srun.html#OPT_threads-per-core">--threads-per-core</a></font></font></p>
+			</td>
+			<td width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">&lt;threads&gt;</font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font face="Arial, sans-serif"><font style="font-size: 8pt" size="1">Restricts
+				node selection to nodes with at least the specified number of threads per core</font></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="srun.html#OPT_nodelist">-w, --nodelist</a></font></font></p>
+			</td>
+			<td width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">
+				&lt;host1,host2,... or filename&gt;</font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font face="Arial, sans-serif"><font style="font-size: 8pt" size="1">List
+				of specific nodes to be allocated to the job</font></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="srun.html#OPT_exclude">-x, --exclude</a></font></font></p>
+			</td>
+			<td width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">
+                                &lt;host1,host2,... or filename&gt;</font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font face="Arial, sans-serif"><font style="font-size: 8pt" size="1">List
+				of specific nodes to be excluded from allocation to the job</font></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="17" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="srun.html#OPT_no-allocate">-Z, --no-allocate</a></font></font></p>
+			</td>
+			<td width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">N/A</font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font face="Arial, sans-serif"><font style="font-size: 8pt" size="1">Bypass
+				normal allocation (privileged option available to users
+				&#8220;SlurmUser&#8221; and &#8220;root&#8221; only)</font></font></p>
+			</td>
+		</tr>
+	</tbody></table>
+</center>
+<br>
+<a name="Step3"></a>
+<h3>Step 3: Distribution of Tasks to the selected Nodes</h3>
+<p>In Step 3, SLURM distributes tasks to the nodes that were selected for 
+the job/step in Step 1. Each task is distributed to only one node, but more than one 
+task may be distributed to each node.  Unless overcommitment of CPUs to tasks is
+specified for the job, the number of tasks distributed to a node is
+constrained by the number of CPUs allocated on the node and the number of CPUs per
+task. If consumable resources is configured, or resource sharing is allowed, tasks from 
+more than one job/step may run on the same node concurrently.  
+<br><br>
+Step 3 is performed by slurmctld.
+<br>
+</p><center>
+	<table style="page-break-inside: avoid; font-family: Arial,Helvetica,sans-serif;" border="1" bordercolor="#000000" cellpadding="6" cellspacing="0" width="100%">
+                <caption style="font-size: 8pt"><b>slurm.conf options that control Step 3</b></caption>
+		<colgroup><col width="20%">
+		<col width="20%">
+		<col width="60%">
+		</colgroup><tbody><tr>
+			<td bgcolor="#e0e0e0" height="18" width="20%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>slurm.conf
+				parameter</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" height="18" width="20%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Possible values</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="60%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Description</b></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="slurm.conf.html#OPT_MaxTasksPerNode">MaxTasksPerNode</a></font></font></p>
+			</td>
+			<td height="18" width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">&lt;number&gt;</font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">
+				Controls the maximum number of tasks that a job step can spawn on a single node
+				</font></p>
+			</td>
+		</tr>
+	</tbody></table>
+</center>
+<br>
+<center>
+	<table style="page-break-inside: avoid; font-family: Arial,Helvetica,sans-serif;" border="1" bordercolor="#000000" cellpadding="6" cellspacing="0" width="100%">
+                <caption style="font-size: 8pt"><b>srun/salloc/sbatch command line options that control Step 3</b></caption>
+		<colgroup><col width="20%">
+		<col width="20%">
+		<col width="60%">
+		</colgroup><tbody><tr>
+			<td bgcolor="#e0e0e0" height="17" width="20%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">
+                    <b style="font-family: Arial, Helvetica, sans-serif">Command
+				line option</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="20%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">
+                    <b style="font-family: Arial, Helvetica, sans-serif">Possible values</b></font></p>
+			</td><td bgcolor="#e0e0e0" width="60%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">
+                    <b style="font-family: Arial, Helvetica, sans-serif">Description</b></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="srun.html#OPT_distribution">--distribution, -m</a></font></font></p>
+			</td>
+			<td width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1"><font face="Courier New, monospace">
+				block|cyclic<br>|arbitrary|plane=&lt;options&gt;[:block|cyclic]</font></font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">The first specified distribution (before the ":")
+				controls the sequence in which tasks are distributed to each of the selected nodes. Note that 
+				this option does not affect the number of tasks distributed to each node, but only the sequence of 
+				distribution.</font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="srun.html#OPT_ntasks-per-core">--ntasks-per-core</a></font></font></p>
+			</td>
+			<td width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">&lt;number&gt;
+				</font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font face="Arial, sans-serif"><font style="font-size: 8pt" size="1">
+				Controls the maximum number of tasks per allocated core</font></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="srun.html#OPT_ntasks-per-socket">--ntasks-per-socket</a></font></font></p>
+			</td>
+			<td width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">&lt;number&gt;</font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font face="Arial, sans-serif"><font style="font-size: 8pt" size="1">
+				Controls the maximum number of tasks per allocated socket</font></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="srun.html#OPT_ntasks-per-node">--ntasks-per-node</a></font></font></p>
+			</td>
+			<td width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">&lt;number&gt;</font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font face="Arial, sans-serif"><font style="font-size: 8pt" size="1">
+				Controls the maximum number of tasks per allocated node</font></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="srun.html#OPT_relative">-r, --relative</a></font></font></p>
+			</td>
+			<td width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">N/A</font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font face="Arial, sans-serif"><font style="font-size: 8pt" size="1">Controls
+				which node is used for a job step</font></font></p>
+			</td>
+		</tr>
+	</tbody></table>
+</center>
+<br>
+<a name="Step4"></a>
+<h3>Step 4: Optional Distribution and Binding of Tasks to CPUs within a Node</h3>
+<p>In optional Step 4, SLURM distributes and binds each task to a specified subset of 
+the allocated CPUs on the node to which the task was distributed in Step 3. Different 
+tasks distributed to the same node may be bound to the same subset of CPUs or to 
+different subsets. This step is known as task affinity or task/CPU binding.
+<br><br>
+Step 4 is performed by slurmd and the task plugin.
+<br>
+</p><center>
+	<table style="page-break-inside: avoid; font-family: Arial,Helvetica,sans-serif;" border="1" bordercolor="#000000" cellpadding="6" cellspacing="0" width="100%">
+                <caption style="font-size: 8pt"><b>slurm.conf options that control Step 4</b></caption>
+		<colgroup><col width="20%">
+		<col width="20%">
+		<col width="60%">
+		</colgroup><tbody><tr>
+			<td bgcolor="#e0e0e0" height="18" width="20%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>slurm.conf
+				parameter</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" height="18" width="20%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Possible values</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="60%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Description</b></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="slurm.conf.html#OPT_TaskPlugin">TaskPlugin</a></font></font></p>
+			</td>
+			<td height="18" width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1"><font face="Courier New, monospace">
+				task/none | task/affinity | task/cgroup</font></font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">
+				Controls whether this step is enabled and which task plugin to use
+				</font></p>
+			</td>
+		</tr>
+				<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="slurm.conf.html#OPT_TaskPluginParam">TaskPluginParam</a></font></font></p>
+			</td>
+			<td height="18" width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">See man page</font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">
+				For task/affinity, controls the binding unit (sockets, cores or threads) and the 
+                                binding method (sched or cpusets)</font></p>
+			</td>
+		</tr>
+	</tbody></table>
+</center>
+<br>
+<center>
+	<table style="page-break-inside: avoid; font-family: Arial,Helvetica,sans-serif;" border="1" bordercolor="#000000" cellpadding="6" cellspacing="0" width="100%">
+                <caption style="font-size: 8pt"><b>cgroup.conf options that control Step 4 (task/cgroup plugin only)</b></caption>
+		<colgroup><col width="20%">
+		<col width="20%">
+		<col width="60%">
+		</colgroup><tbody><tr>
+			<td bgcolor="#e0e0e0" height="18" width="20%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>cgroup.conf
+				parameter</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" height="18" width="20%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Possible values</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="60%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Description</b></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="cgroup.conf.html">ConstrainCores</a></font></font></p>
+			</td>
+			<td height="18" width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1"><font face="Courier New, monospace">
+				yes|no</font></font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">
+				Controls whether jobs are constrained to their allocated CPUs
+				</font></p>
+			</td>
+		</tr>
+				<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="cgroup.conf.html">TaskAffinity</a></font></font></p>
+			</td>
+			<td height="18" width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1"><font face="Courier New, monospace">
+				yes|no</font></font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">
+				Controls whether task-to-CPU binding is enabled</font></p>
+			</td>
+		</tr>
+	</tbody></table>
+</center>
+<br>
+<center>
+	<table style="page-break-inside: avoid; font-family: Arial,Helvetica,sans-serif;" border="1" bordercolor="#000000" cellpadding="6" cellspacing="0" width="100%">
+                <caption style="font-size: 8pt"><b>srun/salloc/sbatch command line options that control Step 4</b></caption>
+		<colgroup><col width="20%">
+		<col width="20%">
+		<col width="60%">
+		</colgroup><tbody><tr>
+			<td bgcolor="#e0e0e0" height="17" width="20%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">
+                    <b style="font-family: Arial, Helvetica, sans-serif">Command
+				line option</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="20%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">
+                    <b style="font-family: Arial, Helvetica, sans-serif">Possible values</b></font></p>
+			</td><td bgcolor="#e0e0e0" width="60%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">
+                    <b style="font-family: Arial, Helvetica, sans-serif">Description</b></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="srun.html#OPT_cpu_bind">--cpu_bind</a></font></font></p>
+			</td>
+			<td width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">
+				See man page</font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">Controls binding of tasks to CPUs</font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="srun.html#OPT_ntasks-per-core">--ntasks-per-core</a></font></font></p>
+			</td>
+			<td width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">&lt;number&gt;
+				</font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font face="Arial, sans-serif"><font style="font-size: 8pt" size="1">
+				Controls the maximum number of tasks per allocated core</font></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td height="18" width="20%">
+				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">
+				<a href="srun.html#OPT_distribution">--distribution, -m</a></font></font></p>
+			</td>
+			<td width="20%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1"><font face="Courier New, monospace">
+				block|cyclic<br>|arbitrary|plane=&lt;options&gt;[:block|cyclic]</font></font></p>
+			</td>
+			<td width="60%">
+				<p align="LEFT"><font face="Arial, sans-serif"><font style="font-size: 8pt" size="1">
+				The second specified distribution (after the ":") controls the sequence in which tasks are 
+				distributed to allocated CPUs within a node for binding of tasks to CPUs</font></font></p>
+			</td>
+		</tr>
+
+	</tbody></table>
+</center>
+<br><br>
+<h2>Additional Notes on CPU Management Steps</h2>
+<p>For consumable resources, it is important for users to understand the difference between 
+cpu allocation (Step 2) and task affinity/binding (Step 4).  Exclusive (unshared) allocation 
+of CPUs as consumable resources limits the number of jobs/steps/tasks that 
+can use a node concurrently.  But it does not limit the set of CPUs on the node that each 
+task distributed to the node can use.  Unless some form of CPU/task binding is used 
+(e.g., a task or spank plugin), all tasks distributed to a node can use all of 
+the CPUs on the node, including CPUs not allocated to their job/step.  This may have 
+unexpected adverse effects on performance, since it allows one job to use CPUs allocated 
+exclusively to another job.  For this reason, it may not be advisable to configure 
+consumable resources without also configuring task affinity.  Note that task affinity 
+can also be useful when select/linear (whole node allocation) is configured, to improve 
+performance by restricting each task to a particular socket or other subset of CPU 
+resources on a node.</p>
+<br><br>
+<a name="Section2"></a>
+<h2>Getting Information about CPU usage by Jobs/Steps/Tasks</h2>
+<p>There is no easy way to generate a comprehensive set of CPU management information 
+for a job/step (allocation, distribution and binding). However, several 
+commands/options provide limited information about CPU usage.
+</p>
+<center>
+	<table style="page-break-inside: avoid; font-family: Arial,Helvetica,sans-serif;" border="1" bordercolor="#000000" cellpadding="3" cellspacing="0" width="100%">
+		<tbody><tr>
+			<td bgcolor="#e0e0e0" width="30%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Command/Option</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="70%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Information</b></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td width="30%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">
+scontrol show job option: 
+<font face="Courier New, monospace">--details</font></font></p>
+			</td>
+			<td width="70%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">
+This option provides a list of the nodes selected for the job and the CPU ids allocated to the job on each 
+node. Note that the CPU ids reported by this command are SLURM abstract CPU ids, not Linux/hardware CPU ids 
+(as reported by, for example, /proc/cpuinfo).
+</font></p>
+			</td>
+		</tr>
+		<tr>
+			<td width="30%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">
+Linux command: <font face="Courier New, monospace">env</font></font></p>
+			</td>
+			<td width="70%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">
+Many SLURM environment variables provide information related to node and CPU usage:
+<br><br>
+<font face="Courier New, monospace">
+SLURM_JOB_CPUS_PER_NODE<br>
+SLURM_CPUS_PER_TASK<br>
+SLURM_CPU_BIND<br>
+SLURM_DISTRIBUTION<br>
+SLURM_NODELIST<br>
+SLURM_TASKS_PER_NODE<br>
+SLURM_STEP_NODELIST<br>
+SLURM_STEP_NUM_NODES<br>
+SLURM_STEP_NUM_TASKS<br>
+SLURM_STEP_TASKS_PER_NODE<br>
+SLURM_NNODES<br>
+SLURM_NTASKS<br>
+SLURM_NPROCS<br>
+SLURM_CPUS_ON_NODE<br>
+SLURM_NODEID<br>
+SLURMD_NODENAME<br>
+</font>
+</font></p>
+			</td>
+		</tr>
+		<tr>
+			<td width="30%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">
+srun/salloc/sbatch option: 
+<font face="Courier New, monospace">--cpu_bind=verbose</font></font></p>
+			</td>
+			<td width="70%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">
+This option provides a list of the CPU masks used by task affinity to bind tasks to CPUs. 
+Note that the CPU ids represented by these masks are Linux/hardware CPU ids, not SLURM 
+abstract CPU ids as reported by scontrol, etc.
+</font></p>
+			</td>
+		</tr>
+		<tr>
+			<td width="30%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">
+srun/salloc/sbatch option: 
+<font face="Courier New, monospace">-l</font></font></p>
+			</td>
+			<td width="70%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">
+This option adds the task id as a prefix to each line of output from a task sent to stdout/stderr. 
+This can be useful for distinguishing node-related and CPU-related information by task id 
+for multi-task jobs/steps.
+</font></p>
+			</td>
+		</tr>
+		<tr>
+			<td width="30%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">
+Linux command:<br> 
+<font face="Courier New, monospace">cat /proc/&lt;pid&gt;/status | grep Cpus_allowed_list</font></font></p>
+			</td>
+			<td width="70%">
+				<p align="LEFT"><font style="font-size: 8pt" size="1">
+Given a task's pid (or "self" if the command is executed by the task itself), this command 
+produces a list of the CPU ids bound to the task. This is the same information that is 
+provided by <font face="Courier New, monospace">--cpu_bind=verbose</font>, but in a more readable format.
+</font></p>
+			</td>
+		</tr>
+	</tbody></table>
+</center>
+<h3>A Note on CPU Numbering</h3>
+<p>The number and layout of logical CPUs known to SLURM is described in the node definitions in slurm.conf. This may
+differ from the physical CPU layout on the actual hardware.  For this reason, SLURM generates its own internal, or
+"abstract", CPU numbers.  These numbers may not match the physical, or "machine", CPU numbers known to Linux.
+A utility to convert between SLURM abstract CPU numbers and physical machine CPU numbers is provided by SLURM.  See
+module src/slurmd/slurmd/get_mach_stat.c for details.</p>
+<br>
+<a name="Section3"></a>
+<h2>CPU Management and SLURM Accounting</h2>
+<p>CPU management by SLURM users is subject to limits imposed by SLURM Accounting. Accounting limits may be applied on CPU
+usage at the level of users, groups and clusters. For details, see the sacctmgr man page.</p>
+<br>
+<a name="Section4"></a>
+<h2>CPU Management Examples</h2>
+<p>The following examples illustrate some scenarios for managing CPU
+resources using SLURM. Many additional scenarios are possible. In
+each example, it is assumed that all CPUs on each node are available
+for allocation.</p> 
+<ul>
+<li><a href="#Example">Example Node and Partition Configuration</a><br>
+</li><li><a href="#Example1">Example 1: Allocation of whole nodes</a><br>
+</li><li><a href="#Example2">Example 2: Simple allocation of cores as consumable resources</a>
+</li><li><a href="#Example3">Example 3: Consumable resources with balanced allocation across nodes</a>
+</li><li><a href="#Example4">Example 4: Consumable resources with minimization of resource fragmentation</a>
+</li><li><a href="#Example5">Example 5: Consumable resources with cyclic distribution of tasks to nodes</a>
+</li><li><a href="#Example6">Example 6: Consumable resources with default allocation and plane distribution of tasks to nodes</a>
+</li><li><a href="#Example7">Example 7: Consumable resources with overcommitment of CPUs to tasks</a>
+</li><li><a href="#Example8">Example 8: Consumable resources with resource sharing between jobs</a>
+</li><li><a href="#Example9">Example 9: Consumable resources on multithreaded node, allocating only one thread per core</a>
+</li><li><a href="#Example10">Example 10: Consumable resources with task affinity and core binding</a>
+</li><li><a href="#Example11">Example 11: Consumable resources with task affinity and socket binding, Case 1</a>
+</li><li><a href="#Example12">Example 12: Consumable resources with task affinity and socket binding, Case 2</a>
+</li><li><a href="#Example13">Example 13: Consumable resources with task affinity and socket binding, Case 3</a>
+</li><li><a href="#Example14">Example 14: Consumable resources with task affinity and customized allocation and distribution</a>
+</li><li><a href="#Example15">Example 15: Consumable resources with task affinity to optimize the performance of a multi-task, 
+multi-thread job</a>
+</li><li><a href="#Example16">Example 16: Consumable resources with task cgroup and core binding</a>
+</li></ul><br>
+<a name="Example"></a>
+<h3>Example Node and Partition Configuration</h3>
+<p>For these examples, the SLURM cluster contains the following nodes:</p>
+<center>
+	<table style="page-break-inside: avoid; font-family: Arial,Helvetica,sans-serif;" border="1" bordercolor="#000000" cellpadding="6" cellspacing="0" width="402">
+		<colgroup><col width="262">
+		<col width="20">
+		<col width="20">
+		<col width="20">
+		<col width="19">
+		</colgroup><tbody><tr>
+			<td bgcolor="#e0e0e0" width="262">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Nodename</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="20">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>n0</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="20">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>n1</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="20">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>n2</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="19">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>n3</b></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td bgcolor="#e0e0e0" width="262">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">Number
+				of Sockets</font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="20">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">2</font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="20">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">2</font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="20">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">2</font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="19">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">2</font></p>
+			</td>
+		</tr>
+		<tr>
+			<td bgcolor="#e0e0e0" height="25" width="262">
+				<p align="CENTER"><font color="#000000"><font style="font-size: 8pt" size="1">Number
+				of Cores per Socket</font></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="20">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">4</font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="20">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">4</font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="20">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">4</font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="19">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">4</font></p>
+			</td>
+		</tr>
+		<tr>
+			<td bgcolor="#e0e0e0" height="25" width="262">
+				<p align="CENTER"><font color="#000000"><font style="font-size: 8pt" size="1">Total
+				Number of Cores</font></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="20">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">8</font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="20">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">8</font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="20">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">8</font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="19">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">8</font></p>
+			</td>
+		</tr>
+		<tr>
+			<td bgcolor="#e0e0e0" height="25" width="262">
+				<p align="CENTER"><font color="#000000"><font style="font-size: 8pt" size="1">Number
+				of Threads (CPUs) per Core</font></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="20">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">1</font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="20">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">1</font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="20">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">1</font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="19">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">2</font></p>
+			</td>
+		</tr>
+		<tr>
+			<td bgcolor="#e0e0e0" height="24" width="262">
+				<p align="CENTER"><font color="#000000"><font style="font-size: 8pt" size="1">Total
+				Number of CPUs</font></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="20">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">8</font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="20">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">8</font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="20">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">8</font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="19">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">16</font></p>
+			</td>
+		</tr>
+	</tbody></table>
+</center>
+<p>And the following partitions:</p>
+<center>
+	<table style="page-break-inside: avoid; font-family: Arial,Helvetica,sans-serif;" border="1" bordercolor="#000000" cellpadding="6" cellspacing="0" width="301">
+		<colgroup><col width="109">
+		<col width="72">
+		<col width="82">
+		</colgroup><tbody><tr>
+			<td bgcolor="#e0e0e0" width="109">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>PartitionName</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="72">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>regnodes</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="82">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>hypernode</b></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td bgcolor="#e0e0e0" width="109">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">Nodes</font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="72">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">n0
+				 n1  n2</font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="82">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">n3</font></p>
+			</td>
+		</tr>
+		<tr>
+			<td bgcolor="#e0e0e0" height="24" width="109">
+				<p align="CENTER"><font color="#000000"><font style="font-size: 8pt" size="1">Default</font></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="72">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">YES</font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="82">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">-</font></p>
+			</td>
+		</tr>
+	</tbody></table>
+</center>
+<p>These entities are defined in slurm.conf as follows:</p>
+<pre>Nodename=n0 NodeAddr=node0 Sockets=2 CoresPerSocket=4 ThreadsPerCore=1 Procs=8 State=IDLE
+Nodename=n1 NodeAddr=node1 Sockets=2 CoresPerSocket=4 ThreadsPerCore=1 Procs=8 State=IDLE
+Nodename=n2 NodeAddr=node2 Sockets=2 CoresPerSocket=4 ThreadsPerCore=1 Procs=8 State=IDLE
+Nodename=n3 NodeAddr=node3 Sockets=2 CoresPerSocket=4 ThreadsPerCore=2 Procs=16 State=IDLE
+PartitionName=regnodes Nodes=n0,n1,n2 Shared=YES Default=YES State=UP 
+PartitionName=hypernode Nodes=n3 State=UP
+</pre>
+<br>
+<a name="Example1"></a>
+<h3>Example 1: Allocation of whole nodes</h3>
+<p>Allocate a minimum of two whole nodes to a job.</p>
+<p>slurm.conf options:</p>
+<pre>SelectType=select/linear
+</pre>
+<p>Command line:</p>
+<pre>srun --nodes=2 ...
+</pre>
+<p>Comments:</p>
+The <font face="Courier New, monospace">SelectType=select/linear</font>
+configuration option specifies allocation in units of whole nodes.
+The<font face="Courier New, monospace"> --nodes=2</font> srun option causes
+SLURM to allocate at least 2 nodes to the job.<p></p>
+<br>
+<a name="Example2"></a>
+<h3>Example 2: Simple allocation of cores as consumable resources</h3>
+<p>A job requires 6 CPUs (2 tasks and 3 CPUs per task with no overcommitment). Allocate the 6 CPUs as consumable resources 
+from a single node in the default partition.</p>
+<p>slurm.conf options:</p>
+<pre>SelectType=select/cons_res
+SelectTypeParameters=CR_Core
+</pre>
+<p>Command line:</p>
+<pre>srun --nodes=1-1 --ntasks=2 --cpus-per-task=3 ...
+</pre>
+<p>Comments:</p>
+<p>The <font face="Courier New, monospace">SelectType</font> and <font face="Courier New, monospace">SelectTypeParameters</font> configuration options define cores as consumable resources. 
+The <font face="Courier New, monospace">--nodes=1-1</font> srun option
+ restricts the job to a single node. The following table shows a possible pattern of allocation
+  for this job.
+</p>
+<center>
+	<table style="page-break-inside: avoid; font-family: Arial,Helvetica,sans-serif;" border="1" bordercolor="#000000" cellpadding="6" cellspacing="0" width="312">
+		<colgroup><col width="204">
+		<col width="20">
+		<col width="20">
+		<col width="19">
+		</colgroup><tbody><tr>
+			<td bgcolor="#e0e0e0" width="204">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Nodename</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="20">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>n0</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="20">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>n1</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="19">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>n2</b></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td bgcolor="#e0e0e0" height="25" width="204">
+				<p align="CENTER"><font color="#000000"><font style="font-size: 8pt" size="1"><b>Number
+				of Allocated CPUs</b></font></font></p>
+			</td>
+			<td width="20">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">6</font></p>
+			</td>
+			<td width="20">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">0</font></p>
+			</td>
+			<td width="19">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">0</font></p>
+			</td>
+		</tr>
+		<tr>
+			<td bgcolor="#e0e0e0" height="24" width="204">
+				<p align="CENTER"><font color="#000000"><font style="font-size: 8pt" size="1"><b>Number
+				of Tasks</b></font></font></p>
+			</td>
+			<td width="20">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">2</font></p>
+			</td>
+			<td width="20">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">0</font></p>
+			</td>
+			<td width="19">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">0</font></p>
+			</td>
+		</tr>
+	</tbody></table>
+</center>
+<br>
+<a name="Example3"></a>
+<h3>Example 3: Consumable resources with balanced allocation across nodes</h3>
+<p>A job requires 9 CPUs (3 tasks and 3 CPUs per task with no overcommitment). 
+Allocate 3 CPUs from each of the 3 nodes in the default partition.
+</p>
+<p>slurm.conf options:</p>
+<pre>SelectType=select/cons_res
+SelectTypeParameters=CR_Core
+</pre>
+<p>Command line:</p>
+<pre>srun --nodes=3-3 --ntasks=3 --cpus-per-task=3 ...
+</pre>
+<p>Comments:</p>
+<p>The options specify the following conditions for the job: 3 tasks, 3 unique CPUs
+ per task, using exactly 3 nodes. To satisfy these conditions, SLURM must
+  allocate 3 CPUs from each node. The following table shows the allocation
+   for this job.
+</p>
+<center>
+	<table style="page-break-inside: avoid; font-family: Arial,Helvetica,sans-serif;" border="1" bordercolor="#000000" cellpadding="6" cellspacing="0" width="312">
+		<colgroup><col width="204">
+		<col width="20">
+		<col width="20">
+		<col width="19">
+		</colgroup><tbody><tr>
+			<td bgcolor="#e0e0e0" width="204">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Nodename</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="20">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>n0</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="20">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>n1</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="19">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>n2</b></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td bgcolor="#e0e0e0" height="25" width="204">
+				<p align="CENTER"><font color="#000000"><font style="font-size: 8pt" size="1"><b>Number
+				of Allocated CPUs</b></font></font></p>
+			</td>
+			<td width="20">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">3</font></p>
+			</td>
+			<td width="20">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">3</font></p>
+			</td>
+			<td width="19">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">3</font></p>
+			</td>
+		</tr>
+		<tr>
+			<td bgcolor="#e0e0e0" height="24" width="204">
+				<p align="CENTER"><font color="#000000"><font style="font-size: 8pt" size="1"><b>Number
+				of Tasks</b></font></font></p>
+			</td>
+			<td width="20">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">1</font></p>
+			</td>
+			<td width="20">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">1</font></p>
+			</td>
+			<td width="19">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">1</font></p>
+			</td>
+		</tr>
+	</tbody></table>
+</center>
+<br>
+<a name="Example4"></a>
+<h3>Example 4: Consumable resources with minimization of resource fragmentation</h3>
+<p>A job requires 12 CPUs (12 tasks and 1 CPU per task with no overcommitment). Allocate 
+CPUs using the minimum number of nodes and the minimum number of sockets required for 
+the job in order to minimize fragmentation of allocated/unallocated CPUs in the cluster.
+</p>
+<p>slurm.conf options:</p>
+<pre>SelectType=select/cons_res
+SelectTypeParameters=CR_Core,CR_CORE_DEFAULT_DIST_BLOCK
+</pre>
+<p>Command line:</p>
+<pre>srun --ntasks=12 ...
+</pre>
+<p>Comments:</p>
+<p>The default allocation method across nodes is block. This minimizes the number of nodes
+ used for the job. The configuration option <font face="Courier New, monospace"> 
+ CR_CORE_DEFAULT_DIST_BLOCK</font> sets the default allocation method within a 
+ node to block. This minimizes the number of sockets used for the job within a node. 
+ The combination of these two methods causes SLURM to allocate the 12 CPUs using the 
+ minimum required number of nodes (2 nodes) and sockets (3 sockets). The following 
+ table shows a possible pattern of allocation for this job.
+</p>
+<center>
+	<table style="page-break-inside: avoid; font-family: Arial,Helvetica,sans-serif;" border="1" bordercolor="#000000" cellpadding="6" cellspacing="0" width="349">
+		<colgroup><col width="204">
+		<col width="10">
+		<col width="10">
+		<col width="10">
+		<col width="10">
+		<col width="10">
+		<col width="9">
+		</colgroup><tbody><tr>
+			<td bgcolor="#e0e0e0" width="204">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Nodename</b></font></p>
+			</td>
+			<td colspan="2" bgcolor="#e0e0e0" width="32">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>n0</b></font></p>
+			</td>
+			<td colspan="2" bgcolor="#e0e0e0" width="32">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>n1</b></font></p>
+			</td>
+			<td colspan="2" bgcolor="#e0e0e0" width="31">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>n2</b></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td bgcolor="#e0e0e0" width="204">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Socket id</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="10">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>0</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="10">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>1</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="10">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>0</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="10">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>1</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="10">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>0</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="9">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>1</b></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td bgcolor="#e0e0e0" width="204">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Number of
+				Allocated CPUs</b></font></p>
+			</td>
+			<td width="10">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">4</font></p>
+			</td>
+			<td width="10">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">4</font></p>
+			</td>
+			<td width="10">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">4</font></p>
+			</td>
+			<td width="10">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">0</font></p>
+			</td>
+			<td width="10">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">0</font></p>
+			</td>
+			<td width="9">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">0</font></p>
+			</td>
+		</tr>
+		<tr>
+			<td bgcolor="#e0e0e0" width="204">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Number of
+				Tasks</b></font></p>
+			</td>
+			<td colspan="2" width="32">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">8</font></p>
+			</td>
+			<td colspan="2" width="32">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">4</font></p>
+			</td>
+			<td colspan="2" width="31">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">0</font></p>
+			</td>
+		</tr>
+	</tbody></table>
+</center>
+<br>
+<a name="Example5"></a>
+<h3>Example 5: Consumable resources with cyclic distribution of tasks to nodes</h3>
+<p>A job requires 12 CPUs (6 tasks and 2 CPUs per task with no overcommitment). Allocate 
+6 CPUs from each of 2 nodes in the default partition. Distribute tasks to nodes cyclically.
+</p>
+<p>slurm.conf options:</p>
+<pre>SelectType=select/cons_res
+SelectTypeParameters=CR_Core
+</pre>
+<p>Command line:</p>
+<pre>srun --nodes=2-2 --ntasks-per-node=3 --distribution=cyclic <BR/>
+--ntasks=6 --cpus-per-task=2 ...
+</pre>
+<p>Comments:</p>
+<p>The options specify the following conditions for the job: 6 tasks, 2 unique CPUs per task, 
+using exactly 2 nodes, and with 3 tasks per node. To satisfy these conditions, SLURM 
+must allocate 6 CPUs from each of 2 nodes. The <font face="Courier New, monospace">
+--distribution=cyclic</font> option causes the tasks to be distributed to the nodes in a 
+round-robin fashion. The following table shows a possible pattern of allocation and 
+distribution for this job.
+</p>
+<center>
+	<table style="page-break-inside: avoid; font-family: Arial,Helvetica,sans-serif;" border="1" bordercolor="#000000" cellpadding="6" cellspacing="0" width="418">
+		<colgroup><col width="310">
+		<col width="20">
+		<col width="20">
+		<col width="19">
+		</colgroup><tbody><tr>
+			<td bgcolor="#e0e0e0" width="310">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Nodename</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="20">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>n0</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="20">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>n1</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="19">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>n2</b></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td bgcolor="#e0e0e0" width="310">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Number of
+				Allocated CPUs</b></font></p>
+			</td>
+			<td width="20">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">6</font></p>
+			</td>
+			<td width="20">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">6</font></p>
+			</td>
+			<td width="19">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">0</font></p>
+			</td>
+		</tr>
+		<tr>
+			<td bgcolor="#e0e0e0" width="310">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Number of
+				Tasks</b></font></p>
+			</td>
+			<td width="20">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">3</font></p>
+			</td>
+			<td width="20">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">3</font></p>
+			</td>
+			<td width="19">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">0</font></p>
+			</td>
+		</tr>
+		<tr>
+			<td bgcolor="#e0e0e0" width="310">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Distribution
+				of Tasks to Nodes, by Task id</b></font></p>
+			</td>
+			<td width="20">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">
+				0<br>2<br>4</font></p></td>
+			<td width="20">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">
+				1<br>3<br>5</font></p></td>
+			<td width="19">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">-</font></p>
+			</td>
+		</tr>
+	</tbody></table>
+</center>
+<br>
+<a name="Example6"></a>
+<h3>Example 6: Consumable resources with default allocation and plane distribution of tasks to nodes</h3>
+<p>A job requires 16 CPUs (8 tasks and 2 CPUs per task with no overcommitment). 
+Use all 3 nodes in the default partition. Distribute tasks to each node in blocks of two in a round-robin fashion.
+</p>
+<p>slurm.conf options:</p>
+<pre>SelectType=select/cons_res
+SelectTypeParameters=CR_Core
+</pre>
+<p>Command line:</p>
+<pre>srun --nodes=3-3 --distribution=plane=2 --ntasks=8 --cpus-per-task=2 ...
+</pre>
+<p>Comments:</p>
+<p>The options specify the following conditions for the job: 8 tasks, 2 unique CPUs 
+per task, using all 3 nodes in the partition. To satisfy these conditions using 
+the default allocation method across nodes (block), SLURM allocates 8 CPUs from 
+the first node, 6 CPUs from the second node and 2 CPUs from the third node. 
+The <font face="Courier New, monospace">--distribution=plane=2</font> option causes SLURM 
+to distribute tasks in blocks of two to each of the nodes in a round-robin fashion,
+subject to the number of CPUs allocated on each node.  So, for example, only 1 task
+is distributed to the third node because only 2 CPUs were allocated on that node and
+each task requires 2 CPUs. The following table shows a possible pattern of allocation 
+and distribution for this job.
+</p>
+<center>
+	<table style="page-break-inside: avoid; font-family: Arial,Helvetica,sans-serif;" border="1" bordercolor="#000000" cellpadding="6" cellspacing="0" width="434">
+		<colgroup><col width="310">
+		<col width="28">
+		<col width="28">
+		<col width="19">
+		</colgroup><tbody><tr>
+			<td bgcolor="#e0e0e0" width="310">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Nodename</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="28">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>n0</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="28">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>n1</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="19">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>n2</b></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td bgcolor="#e0e0e0" width="310">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Number of
+				Allocated CPUs</b></font></p>
+			</td>
+			<td width="28">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">8</font></p>
+			</td>
+			<td width="28">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">6</font></p>
+			</td>
+			<td width="19">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">2</font></p>
+			</td>
+		</tr>
+		<tr>
+			<td bgcolor="#e0e0e0" width="310">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Number of
+				Tasks</b></font></p>
+			</td>
+			<td width="28">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">4</font></p>
+			</td>
+			<td width="28">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">3</font></p>
+			</td>
+			<td width="19">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">1</font></p>
+			</td>
+		</tr>
+		<tr>
+			<td bgcolor="#e0e0e0" width="310">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Distribution
+				of Tasks to Nodes, by Task id</b></font></p>
+			</td>
+			<td width="28">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">0
+				 1<br>5 6</font></p>
+			</td>
+			<td width="28">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">2
+				 3<br>7</font></p>
+			</td>
+			<td width="19">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">4<br> 
+				</font></p>
+			</td>
+		</tr>
+	</tbody></table>
+</center>
+<br>
+<a name="Example7"></a>
+<h3>Example 7: Consumable resources with overcommitment of CPUs to tasks</h3>
+<p>A job has 20 tasks. Run the job in a single node.
+</p>
+<p>slurm.conf options:</p>
+<pre>SelectType=select/cons_res
+SelectTypeParameters=CR_Core
+</pre>
+<p>Command line:</p>
+<pre>srun --nodes=1-1 --ntasks=20 --overcommit ...
+</pre>
+<p>Comments:</p>
+<p>The 
+<font face="Courier New, monospace">--overcommit</font> option allows the job to 
+run in only one node by overcommitting CPUs to tasks. The following table shows
+ a possible pattern of allocation and distribution for this job.
+</p>
+
+<center>
+	<table style="page-break-inside: avoid; font-family: Arial,Helvetica,sans-serif;" border="1" bordercolor="#000000" cellpadding="6" cellspacing="0" width="440">
+		<colgroup><col width="310">
+		<col width="42">
+		<col width="20">
+		<col width="19">
+		</colgroup><tbody><tr>
+			<td bgcolor="#e0e0e0" height="17" width="310">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Nodename</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="42">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>n0</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="20">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>n1</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="19">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>n2</b></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td bgcolor="#e0e0e0" height="18" width="310">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Number of
+				Allocated CPUs</b></font></p>
+			</td>
+			<td width="42">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">8</font></p>
+			</td>
+			<td width="20">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">0</font></p>
+			</td>
+			<td width="19">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">0</font></p>
+			</td>
+		</tr>
+		<tr>
+			<td bgcolor="#e0e0e0" height="18" width="310">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Number of
+				Tasks</b></font></p>
+			</td>
+			<td width="42">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">20</font></p>
+			</td>
+			<td width="20">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">0</font></p>
+			</td>
+			<td width="19">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">0</font></p>
+			</td>
+		</tr>
+		<tr>
+			<td bgcolor="#e0e0e0" height="17" width="310">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Distribution
+				of Tasks to Nodes, by Task id</b></font></p>
+			</td>
+			<td width="42">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">0
+				- 19</font></p>
+			</td>
+			<td width="20">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">-</font></p>
+			</td>
+			<td width="19">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">-</font></p>
+			</td>
+		</tr>
+	</tbody></table>
+</center>
+<br>
+<a name="Example8"></a>
+<h3>Example 8: Consumable resources with resource sharing between jobs</h3>
+<p>2 jobs each require 6 CPUs (6 tasks per job with no overcommitment). 
+Run both jobs simultaneously in a single node.
+</p>
+<p>slurm.conf options:</p>
+<pre>SelectType=select/cons_res
+SelectTypeParameters=CR_Core
+</pre>
+<p>Command line:</p>
+<pre>srun --nodes=1-1 --nodelist=n0 --ntasks=6 --share ...
+srun --nodes=1-1 --nodelist=n0 --ntasks=6 --share ...
+</pre>
+<p>Comments:</p>
+<p>The <font face="Courier New, monospace">--nodes=1-1</font> and <font face="Courier New, monospace">--nodelist=n0</font> 
+srun options together restrict both jobs to node n0. The 
+<font face="Courier New, monospace">Shared=YES</font> option in the partition definition plus 
+the <font face="Courier New, monospace">--share</font> srun option allows the two 
+jobs to share CPUs on the node.
+</p><br>
+<a name="Example9"></a>
+<h3>Example 9: Consumable resources on multithreaded node, allocating only one thread per core</h3>
+<p>A job requires 8 CPUs (8 tasks with no overcommitment). Run the job on node n3, 
+allocating only one thread per core.
+</p>
+<p>slurm.conf options:</p>
+<pre>SelectType=select/cons_res
+SelectTypeParameters=CR_CPU
+</pre>
+<p>Command line:</p>
+<pre>srun --partition=hypernode --ntasks=8 --hint=nomultithread ...
+</pre>
+<p>Comments:</p>
+<p>The <font face="Courier New, monospace">CR_CPU</font> configuration 
+option enables the allocation of only one thread per core. 
+The <font face="Courier New, monospace">--hint=nomultithread</font> 
+srun option causes SLURM to allocate only one thread from each core to 
+this job. The following table shows a possible pattern of allocation 
+for this job.
+</p>
+ <center>
+	<table style="page-break-inside: avoid; font-family: Arial,Helvetica,sans-serif;" border="1" bordercolor="#000000" cellpadding="3" cellspacing="0" width="591">
+		<colgroup><col width="210">
+		<col width="16">
+		<col width="16">
+		<col width="16">
+		<col width="16">
+		<col width="16">
+		<col width="16">
+		<col width="16">
+		<col width="16">
+		<col width="16">
+		<col width="16">
+		<col width="20">
+		<col width="20">
+		<col width="20">
+		<col width="20">
+		<col width="20">
+		<col width="19">
+		</colgroup><tbody><tr>
+			<td bgcolor="#e0e0e0" width="210">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Nodename</b></font></p>
+			</td>
+			<td colspan="16" bgcolor="#e0e0e0" width="367">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>n3</b></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td bgcolor="#e0e0e0" width="210">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Socket id</b></font></p>
+			</td>
+			<td colspan="8" bgcolor="#e0e0e0" width="170">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>0</b></font></p>
+			</td>
+			<td colspan="8" bgcolor="#e0e0e0" width="191">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>1</b></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td bgcolor="#e0e0e0" width="210">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Core id</b></font></p>
+			</td>
+			<td colspan="2" bgcolor="#e0e0e0" width="38">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>0</b></font></p>
+			</td>
+			<td colspan="2" bgcolor="#e0e0e0" width="38">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>1</b></font></p>
+			</td>
+			<td colspan="2" bgcolor="#e0e0e0" width="38">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>2</b></font></p>
+			</td>
+			<td colspan="2" bgcolor="#e0e0e0" width="38">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>3</b></font></p>
+			</td>
+			<td colspan="2" bgcolor="#e0e0e0" width="38">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>0</b></font></p>
+			</td>
+			<td colspan="2" bgcolor="#e0e0e0" width="45">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>1</b></font></p>
+			</td>
+			<td colspan="2" bgcolor="#e0e0e0" width="45">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>2</b></font></p>
+			</td>
+			<td colspan="2" bgcolor="#e0e0e0" width="44">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>3</b></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td bgcolor="#e0e0e0" width="210">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>CPU id</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="16">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>0</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="16">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>1</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="16">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>2</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="16">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>3</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="16">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>4</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="16">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>5</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="16">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>6</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="16">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>7</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="16">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>8</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="16">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>9</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="20">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>10</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="20">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>11</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="20">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>12</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="20">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>13</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="20">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>14</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="19">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>15</b></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td bgcolor="#e0e0e0" width="210">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Number of
+				Allocated CPUs</b></font></p>
+			</td>
+			<td colspan="8" width="170">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">4</font></p>
+			</td>
+			<td colspan="8" width="191">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">4</font></p>
+			</td>
+		</tr>
+		<tr>
+			<td bgcolor="#e0e0e0" width="210">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Allocated
+				CPU ids</b></font></p>
+			</td>
+			<td colspan="8" width="170">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">0
+				  2   4   6</font></p>
+			</td>
+			<td colspan="8" width="191">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">8
+				 10  12  14</font></p>
+			</td>
+		</tr>
+	</tbody></table>
+</center>
+<br>
+<a name="Example10"></a>
+<h3>Example 10: Consumable resources with task affinity and core binding</h3>
+<p>A job requires 6 CPUs (6 tasks with no overcommitment). Run the job in a 
+single node in the default partition. Apply core binding to each task.
+</p>
+<p>slurm.conf options:</p>
+<pre>SelectType=select/cons_res
+SelectTypeParameters=CR_Core
+TaskPlugin=task/affinity
+TaskPluginParam=sched
+</pre>
+<p>Command line:</p>
+<pre>srun --nodes=1-1 --ntasks=6 --cpu_bind=cores ...
+</pre>
+<p>Comments:</p>
+<p>Using the default allocation method within nodes (cyclic), SLURM allocates 
+3 CPUs on each socket of 1 node. Using the default distribution method 
+within nodes (cyclic), SLURM distributes and binds each task to an allocated 
+core in a round-robin fashion across the sockets. The following table shows 
+a possible pattern of allocation, distribution and binding for this job. 
+For example, task id 2 is bound to CPU id 1.
+</p><p>
+</p><center>
+	<table style="page-break-inside: avoid; font-family: Arial,Helvetica,sans-serif;" border="1" bordercolor="#000000" cellpadding="6" cellspacing="0" width="443">
+		<colgroup><col width="196">
+		<col width="47">
+		<col width="10">
+		<col width="10">
+		<col width="10">
+		<col width="10">
+		<col width="10">
+		<col width="10">
+		<col width="10">
+		<col width="9">
+		</colgroup><tbody><tr>
+			<td colspan="2" bgcolor="#e0e0e0" width="254">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Nodename</b></font></p>
+			</td>
+			<td colspan="8" bgcolor="#e0e0e0" width="163">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>n0</b></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td colspan="2" bgcolor="#e0e0e0" width="254">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Socket id</b></font></p>
+			</td>
+			<td colspan="4" bgcolor="#e0e0e0" width="76">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>0</b></font></p>
+			</td>
+			<td colspan="4" bgcolor="#e0e0e0" width="75">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>1</b></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td colspan="2" bgcolor="#e0e0e0" width="254">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Number of
+				Allocated CPUs</b></font></p>
+			</td>
+			<td colspan="4" width="76">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">3</font></p>
+			</td>
+			<td colspan="4" width="75">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">3</font></p>
+			</td>
+		</tr>
+		<tr>
+			<td colspan="2" bgcolor="#e0e0e0" width="254">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Allocated
+				CPU ids</b></font></p>
+			</td>
+			<td colspan="4" width="76">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">0 1 2</font></p>
+			</td>
+			<td colspan="4" width="75">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">4 5 6</font></p>
+			</td>
+		</tr>
+		<tr>
+			<td rowspan="2" bgcolor="#e0e0e0" width="196">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Binding of
+				Tasks to CPUs</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="47">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>CPU id</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="10">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>0</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="10">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>1</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="10">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>2</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="10">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>3</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="10">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>4</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="10">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>5</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="10">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>6</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="9">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>7</b></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td bgcolor="#e0e0e0" width="47">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Task id</b></font></p>
+			</td>
+			<td width="10">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">0</font></p>
+			</td>
+			<td width="10">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">2</font></p>
+			</td>
+			<td width="10">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">4</font></p>
+			</td>
+			<td width="10">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">-</font></p>
+			</td>
+			<td width="10">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">1</font></p>
+			</td>
+			<td width="10">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">3</font></p>
+			</td>
+			<td width="10">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">5</font></p>
+			</td>
+			<td width="9">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">-</font></p>
+			</td>
+		</tr>
+	</tbody></table>
+</center>
+<br>
+<a name="Example11"></a>
+<h3>Example 11: Consumable resources with task affinity and socket binding, Case 1</h3>
+<p>A job requires 6 CPUs (6 tasks with no overcommitment). Run the job in 
+a single node in the default partition. Apply socket binding to each task.
+</p>
+<p>slurm.conf options:</p>
+<pre>SelectType=select/cons_res
+SelectTypeParameters=CR_Core
+TaskPlugin=task/affinity
+TaskPluginParam=sched
+</pre>
+<p>Command line:</p>
+<pre>srun --nodes=1-1 --ntasks=6 --cpu_bind=sockets ...
+</pre>
+<p>Comments:</p>
+<p>Using the default allocation method within nodes (cyclic), SLURM allocates 3 
+CPUs on each socket of 1 node. Using the default distribution method within nodes 
+(cyclic), SLURM distributes and binds each task to all of the allocated CPUs in 
+one socket in a round-robin fashion across the sockets. The following table shows 
+a possible pattern of allocation, distribution and binding for this job. For 
+example, task ids 1, 3 and 5 are all bound to CPU ids 4, 5 and 6.
+</p>
+<center>
+	<table style="page-break-inside: avoid; font-family: Arial,Helvetica,sans-serif;" border="1" bordercolor="#000000" cellpadding="6" cellspacing="0" width="443">
+		<colgroup><col width="196">
+		<col width="47">
+		<col width="10">
+		<col width="10">
+		<col width="10">
+		<col width="10">
+		<col width="10">
+		<col width="10">
+		<col width="10">
+		<col width="9">
+		</colgroup><tbody><tr>
+			<td colspan="2" bgcolor="#e0e0e0" width="254">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Nodename</b></font></p>
+			</td>
+			<td colspan="8" bgcolor="#e0e0e0" width="163">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>n0</b></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td colspan="2" bgcolor="#e0e0e0" width="254">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Socket id</b></font></p>
+			</td>
+			<td colspan="4" bgcolor="#e0e0e0" width="76">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>0</b></font></p>
+			</td>
+			<td colspan="4" bgcolor="#e0e0e0" width="75">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>1</b></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td colspan="2" bgcolor="#e0e0e0" width="254">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Number of
+				Allocated CPUs</b></font></p>
+			</td>
+			<td colspan="4" width="76">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">3</font></p>
+			</td>
+			<td colspan="4" width="75">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">3</font></p>
+			</td>
+		</tr>
+		<tr>
+			<td colspan="2" bgcolor="#e0e0e0" width="254">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Allocated
+				CPU ids</b></font></p>
+			</td>
+			<td colspan="4" width="76">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">0 1 2</font></p>
+			</td>
+			<td colspan="4" width="75">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">4 5 6</font></p>
+			</td>
+		</tr>
+		<tr>
+			<td rowspan="2" bgcolor="#e0e0e0" width="196">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Binding of
+				Tasks to CPUs</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="47">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>CPU id</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="10">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>0</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="10">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>1</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="10">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>2</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="10">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>3</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="10">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>4</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="10">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>5</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="10">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>6</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="9">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>7</b></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td bgcolor="#e0e0e0" width="47">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Task ids</b></font></p>
+			</td>
+			<td colspan="3" width="54">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">0
+				 2  4</font></p>
+			</td>
+			<td width="10">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">-</font></p>
+			</td>
+			<td colspan="3" width="54">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">1
+				 3  5</font></p>
+			</td>
+			<td width="9">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">-</font></p>
+			</td>
+		</tr>
+	</tbody></table>
+</center>
+<br>
+<a name="Example12"></a>
+<h3>Example 12: Consumable resources with task affinity and socket binding, Case 2</h3>
+<p>A job requires 6 CPUs (2 tasks with 3 CPUs per task and no overcommitment). Run the job in 
+a single node in the default partition. Allocate cores using the block allocation method.
+Distribute cores using the block distribution method. Apply socket binding to each task.
+</p>
+<p>slurm.conf options:</p>
+<pre>SelectType=select/cons_res
+SelectTypeParameters=CR_Core,CR_CORE_DEFAULT_DIST_BLOCK
+TaskPlugin=task/affinity
+TaskPluginParam=sched
+</pre>
+<p>Command line:</p>
+<pre>srun --nodes=1-1 --ntasks=2 --cpus-per-task=3 --cpu_bind=sockets \
+--distribution=block:block ...
+</pre>
+<p>Comments:</p>
+<p>Using the block allocation method, SLURM allocates 4 
+CPUs on one socket and 2 CPUs on the other socket of one node. Using the block distribution method within  
+nodes, SLURM distributes 3 CPUs to each task.  Applying socket binding, SLURM binds each task to all
+allocated CPUs in all sockets in which the task has a distributed CPU. The following table shows 
+a possible pattern of allocation, distribution and binding for this job. In this example, using the
+block allocation method CPU ids 0-3 are allocated on socket id 0 and CPU ids 4-5 are allocated on 
+socket id 1.  Using the block distribution method, CPU ids 0-2 were distributed to task id 0, and CPU ids 
+3-5 were distributed to task id 1.  Applying socket binding, task id 0 is therefore bound to the allocated 
+CPUs on socket 0, and task id 1 is bound to the allocated CPUs on both sockets.
+</p>
+<center>
+	<table style="page-break-inside: avoid; font-family: Arial,Helvetica,sans-serif;" border="1" bordercolor="#000000" cellpadding="6" cellspacing="0" width="404">
+		<colgroup><col width="196">
+		<col width="47">
+		<col width="10">
+		<col width="10">
+		<col width="10">
+		<col width="10">
+		<col width="10">
+		<col width="10">
+		<col width="10">
+		<col width="9">
+		</colgroup><tbody><tr>
+			<td colspan="2" bgcolor="#e0e0e0" width="254">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Nodename</b></font></p>
+			</td>
+			<td colspan="8" bgcolor="#e0e0e0" width="150">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>n0</b></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td colspan="2" bgcolor="#e0e0e0" width="254">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Socket id</b></font></p>
+			</td>
+			<td colspan="4" bgcolor="#e0e0e0" width="75">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>0</b></font></p>
+			</td>
+			<td colspan="4" bgcolor="#e0e0e0" width="75">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>1</b></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td colspan="2" bgcolor="#e0e0e0" width="254">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Number of
+				Allocated CPUs</b></font></p>
+			</td>
+			<td colspan="4" width="75">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">4</font></p>
+			</td>
+			<td colspan="4" width="75">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">2</font></p>
+			</td>
+		</tr>
+		<tr>
+			<td colspan="2" bgcolor="#e0e0e0" width="254">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Allocated
+				CPU ids</b></font></p>
+			</td>
+			<td colspan="4" width="75">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">0 1 2 3</font></p>
+			</td>
+			<td colspan="4" width="75">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">4 5</font></p>
+			</td>
+		</tr>
+		<tr>
+			<td rowspan="2" bgcolor="#e0e0e0" width="196">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Binding of
+				Tasks to CPUs</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="54">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>CPU id</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="18">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>0</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="18">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>1</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="18">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>2</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="18">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>3</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="18">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>4</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="18">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>5</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="18">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>6</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="18">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>7</b></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td bgcolor="#e0e0e0" width="54">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Task ids</b></font></p>
+			</td>
+			<td colspan="4" width="54">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">0 1</font></p>
+			</td>
+			<td colspan="2" width="54">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">1</font></p>
+			</td>
+			<td colspan="2" width="54">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">-</font></p>
+			</td>
+		</tr>
+	</tbody></table>
+</center>
+<br>
+<a name="Example13"></a>
+<h3>Example 13: Consumable resources with task affinity and socket binding, Case 3</h3>
+<p>A job requires 6 CPUs (2 tasks with 3 CPUs per task and no overcommitment). Run the job in 
+a single node in the default partition. Allocate cores using the block allocation method.
+Distribute cores using the cyclic distribution method. Apply socket binding to each task.
+</p>
+<p>slurm.conf options:</p>
+<pre>SelectType=select/cons_res
+SelectTypeParameters=CR_Core,CR_CORE_DEFAULT_DIST_BLOCK
+TaskPlugin=task/affinity
+TaskPluginParam=sched
+</pre>
+<p>Command line:</p>
+<pre>srun --nodes=1-1 --ntasks=2 --cpus-per-task=3 --cpu_bind=sockets \
+--distribution=block:cyclic ...
+</pre>
+<p>Comments:</p>
+<p>Using the block allocation method, SLURM allocates 4 
+CPUs on one socket and 2 CPUs on the other socket of one node. Using the cyclic distribution method within  
+nodes, SLURM distributes 3 CPUs to each task.  Applying socket binding, SLURM binds each task to all
+allocated CPUs in all sockets in which the task has a distributed CPU. The following table shows 
+a possible pattern of allocation, distribution and binding for this job. In this example, using the
+block allocation method CPU ids 0-3 are allocated on socket id 0 and CPU ids 4-5 are allocated on 
+socket id 1.  Using the cyclic distribution method, CPU ids 0, 1 and 4 were distributed to task id 0, and CPU ids 
+2, 3 and 5 were distributed to task id 1.  Applying socket binding, both tasks are therefore bound to the 
+allocated CPUs on both sockets.
+</p>
+<center>
+	<table style="page-break-inside: avoid; font-family: Arial,Helvetica,sans-serif;" border="1" bordercolor="#000000" cellpadding="6" cellspacing="0" width="404">
+		<colgroup><col width="196">
+		<col width="47">
+		<col width="10">
+		<col width="10">
+		<col width="10">
+		<col width="10">
+		<col width="10">
+		<col width="10">
+		<col width="10">
+		<col width="10">
+		</colgroup><tbody><tr>
+			<td colspan="2" bgcolor="#e0e0e0" width="254">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Nodename</b></font></p>
+			</td>
+			<td colspan="8" bgcolor="#e0e0e0" width="150">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>n0</b></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td colspan="2" bgcolor="#e0e0e0" width="254">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Socket id</b></font></p>
+			</td>
+			<td colspan="4" bgcolor="#e0e0e0" width="75">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>0</b></font></p>
+			</td>
+			<td colspan="4" bgcolor="#e0e0e0" width="75">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>1</b></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td colspan="2" bgcolor="#e0e0e0" width="254">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Number of
+				Allocated CPUs</b></font></p>
+			</td>
+			<td colspan="4" width="75">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">4</font></p>
+			</td>
+			<td colspan="4" width="75">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">2</font></p>
+			</td>
+		</tr>
+		<tr>
+			<td colspan="2" bgcolor="#e0e0e0" width="254">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Allocated
+				CPU ids</b></font></p>
+			</td>
+			<td colspan="4" width="75">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">0 1 2 3</font></p>
+			</td>
+			<td colspan="4" width="75">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">4 5</font></p>
+			</td>
+		</tr>
+		<tr>
+			<td rowspan="2" bgcolor="#e0e0e0" width="196">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Binding of
+				Tasks to CPUs</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="58">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>CPU id</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="18">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>0</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="18">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>1</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="18">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>2</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="18">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>3</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="18">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>4</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="18">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>5</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="18">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>6</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="18">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>7</b></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td bgcolor="#e0e0e0" width="58">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Task ids</b></font></p>
+			</td>
+			<td colspan="4" width="54">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">0 1</font></p>
+			</td>
+			<td colspan="2" width="54">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">0 1</font></p>
+			</td>
+			<td colspan="2" width="54">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">-</font></p>
+			</td>
+		</tr>
+	</tbody></table>
+</center>
+<br>
+<a name="Example14"></a>
+<h3>Example 14: Consumable resources with task affinity and customized allocation and distribution</h3>
+<p>A job requires 18 CPUs (18 tasks with no overcommitment). Run the job in the 
+default partition. Allocate 6 CPUs on each node using block allocation within 
+nodes. Use cyclic distribution of tasks to nodes and block distribution of 
+tasks for CPU binding.
+</p>
+<p>slurm.conf options:</p>
+<pre>SelectType=select/cons_res
+SelectTypeParameters=CR_Core,CR_CORE_DEFAULT_DIST_BLOCK
+TaskPlugin=task/affinity
+TaskPluginParam=sched
+</pre>
+<p>Command line:</p>
+<pre>srun --nodes=3-3 --ntasks=18 --ntasks-per-node=6 \
+--distribution=cyclic:block --cpu_bind=cores ...
+</pre>
+<p>Comments:</p>
+<p>This example shows the use of task affinity with customized allocation of CPUs and 
+distribution of tasks across nodes and within nodes for binding. The srun options 
+specify the following conditions for the job: 18 tasks, 1 unique CPU per task, using 
+all 3 nodes in the partition, with 6 tasks per node. 
+The <font face="Courier New, monospace">CR_CORE_DEFAULT_DIST_BLOCK</font> 
+configuration option specifies block allocation within nodes. To satisfy these 
+conditions, SLURM allocates 6 CPUs on each node, with 4 CPUs allocated on one socket 
+and 2 CPUs on the other socket. The <font face="Courier New, monospace">
+--distribution=cyclic:block</font> option specifies cyclic distribution of 
+tasks to nodes and block distribution of tasks to CPUs within nodes for binding. 
+The following table shows a possible pattern of allocation, distribution and binding 
+for this job. For example, task id 10 is bound to CPU id 3 on node n1.
+</p>
+<center>
+	<table style="page-break-inside: avoid; font-family: Arial,Helvetica,sans-serif;" border="1" bordercolor="#000000" cellpadding="1" cellspacing="0" width="100%">
+		<tbody><tr>
+			<td colspan="2" bgcolor="#e0e0e0" width="28%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Nodename</b></font></p>
+			</td>
+			<td colspan="8" bgcolor="#e0e0e0" width="24%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>n0</b></font></p>
+			</td>
+			<td colspan="8" bgcolor="#e0e0e0" width="24%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>n1</b></font></p>
+			</td>
+			<td colspan="8" bgcolor="#e0e0e0" width="24%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>n2</b></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td colspan="2" bgcolor="#e0e0e0" width="28%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Socket id</b></font></p>
+			</td>
+			<td colspan="4" bgcolor="#e0e0e0" width="12%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>0</b></font></p>
+			</td>
+			<td colspan="4" bgcolor="#e0e0e0" width="12%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>1</b></font></p>
+			</td>
+			<td colspan="4" bgcolor="#e0e0e0" width="12%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>0</b></font></p>
+			</td>
+			<td colspan="4" bgcolor="#e0e0e0" width="12%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>1</b></font></p>
+			</td>
+			<td colspan="4" bgcolor="#e0e0e0" width="12%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>0</b></font></p>
+			</td>
+			<td colspan="4" bgcolor="#e0e0e0" width="12%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>1</b></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td colspan="2" bgcolor="#e0e0e0" width="28%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Number of
+				Allocated CPUs</b></font></p>
+			</td>
+			<td colspan="4" width="12%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">4</font></p>
+			</td>
+			<td colspan="4" width="12%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">2</font></p>
+			</td>
+			<td colspan="4" width="12%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">4</font></p>
+			</td>
+			<td colspan="4" width="12%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">2</font></p>
+			</td>
+			<td colspan="4" width="12%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">4</font></p>
+			</td>
+			<td colspan="4" width="12%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">2</font></p>
+			</td>
+		</tr>
+		<tr>
+			<td colspan="2" bgcolor="#e0e0e0" width="28%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Allocated
+				CPU ids</b></font></p>
+			</td>
+			<td colspan="8" width="24%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">0 1 2 3 4 5</font></p>
+			</td>
+			<td colspan="8" width="24%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">0 1 2 3 4 5</font></p>
+			</td>
+			<td colspan="8" width="24%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">0 1 2 3 4 5</font></p>
+			</td>
+		</tr>
+		<tr>
+			<td colspan="2" bgcolor="#e0e0e0" width="28%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Number of
+				Tasks</b></font></p>
+			</td>
+			<td colspan="8" width="24%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">6</font></p>
+			</td>
+			<td colspan="8" width="24%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">6</font></p>
+			</td>
+			<td colspan="8" width="24%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">6</font></p>
+			</td>
+		</tr>
+		<tr>
+			<td colspan="2" bgcolor="#e0e0e0" width="28%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Distribution
+				of Tasks to Nodes, by Task id</b></font></p>
+			</td>
+			<td colspan="8" width="24%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">0<br>
+                        3<br>
+                        6<br>
+                        9<br>
+                        12<br>
+                        15
+                        </font></p>
+			</td>
+			<td colspan="8" width="24%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">1<br>
+                        4<br>
+                        7<br>
+                        10<br>
+                        13<br>
+                        16
+                        </font></p>
+			</td>
+			<td colspan="8" width="24%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">2<br>
+                        5<br>
+                        8<br>
+                        11<br>
+                        14<br>
+                        17
+                         </font></p>
+			</td>
+		</tr>
+		<tr>
+			<td rowspan="2" bgcolor="#e0e0e0" width="15%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Binding of
+				Tasks to CPUs</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="9%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>CPU id</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>0</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>1</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>2</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>3</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>4</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>5</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>6</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>7</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>0</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>1</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>2</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>3</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>4</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>5</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>6</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>7</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>0</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>1</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>2</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>3</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>4</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>5</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>6</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>7</b></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td bgcolor="#e0e0e0" width="9%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Task id</b></font></p>
+			</td>
+			<td width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">0</font></p>
+			</td>
+			<td width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">3</font></p>
+			</td>
+			<td width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">6</font></p>
+			</td>
+			<td width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">9</font></p>
+			</td>
+			<td width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">12</font></p>
+			</td>
+			<td width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">15</font></p>
+			</td>
+			<td width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">-</font></p>
+			</td>
+			<td width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">-</font></p>
+			</td>
+			<td width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">1</font></p>
+			</td>
+			<td width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">4</font></p>
+			</td>
+			<td width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">7</font></p>
+			</td>
+			<td width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">10</font></p>
+			</td>
+			<td width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">13</font></p>
+			</td>
+			<td width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">16</font></p>
+			</td>
+			<td width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">-</font></p>
+			</td>
+			<td width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">-</font></p>
+			</td>
+			<td width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">2</font></p>
+			</td>
+			<td width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">5</font></p>
+			</td>
+			<td width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">8</font></p>
+			</td>
+			<td width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">11</font></p>
+			</td>
+			<td width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">14</font></p>
+			</td>
+			<td width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">17</font></p>
+			</td>
+			<td width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">-</font></p>
+			</td>
+			<td width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">-</font></p>
+			</td>
+		</tr>
+	</tbody></table>
+</center>
+<br>
+<a name="Example15"></a>
+<h3>Example 15: Consumable resources with task affinity to optimize the performance of a multi-task, 
+multi-thread job</h3>
+<p>A job requires 9 CPUs (3 tasks and 3 CPUs per task with no overcommitment). Run 
+the job in the default partition, managing the CPUs to optimize the performance 
+of the job.</p>
+<p>slurm.conf options:</p>
+<pre>SelectType=select/cons_res
+SelectTypeParameters=CR_Core,CR_CORE_DEFAULT_DIST_BLOCK
+TaskPlugin=task/affinity
+TaskPluginParam=sched
+</pre>
+<p>Command line:</p>
+<pre>srun --ntasks=3 --cpus-per-task=3 --ntasks-per-node=1 --cpu_bind=cores ...
+</pre>
+<p>Comments:</p>
+<p>To optimize the performance of this job, the user wishes to allocate 3 CPUs from each of 
+3 sockets and bind each task to the 3 CPUs in a single socket. The 
+<font face="Courier New, monospace">SelectTypeParameters</font> configuration option specifies 
+a consumable resource type of cores and block allocation within nodes. The 
+<font face="Courier New, monospace">TaskPlugin</font> 
+<font face="Courier New, monospace">and TaskPluginParam</font> 
+configuration options enable task affinity. The srun options specify the following 
+conditions for the job: 3 tasks, with 3 unique CPUs per task, with 1 task per node. To satisfy 
+these conditions, SLURM allocates 3 CPUs from one socket in each of the 3 nodes in the default partition. The 
+<font face="Courier New, monospace">--cpu_bind=cores</font> option causes SLURM to bind 
+each task to the 3 allocated CPUs on the node to which it is distributed. The 
+following table shows a possible pattern of allocation, distribution and binding 
+for this job. For example, task id 2 is bound to CPU ids 0, 1 and 2 on socket id 0 of node n2.
+</p>
+
+<center>
+	<table style="page-break-inside: avoid; font-family: Arial,Helvetica,sans-serif;" border="1" bordercolor="#000000" cellpadding="1" cellspacing="0" width="100%">
+		<tbody><tr>
+			<td colspan="2" bgcolor="#e0e0e0" width="28%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Nodename</b></font></p>
+			</td>
+			<td colspan="8" bgcolor="#e0e0e0" width="24%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>n0</b></font></p>
+			</td>
+			<td colspan="8" bgcolor="#e0e0e0" width="24%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>n1</b></font></p>
+			</td>
+			<td colspan="8" bgcolor="#e0e0e0" width="24%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>n2</b></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td colspan="2" bgcolor="#e0e0e0" width="28%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Socket id</b></font></p>
+			</td>
+			<td colspan="4" bgcolor="#e0e0e0" width="12%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>0</b></font></p>
+			</td>
+			<td colspan="4" bgcolor="#e0e0e0" width="12%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>1</b></font></p>
+			</td>
+			<td colspan="4" bgcolor="#e0e0e0" width="12%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>0</b></font></p>
+			</td>
+			<td colspan="4" bgcolor="#e0e0e0" width="12%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>1</b></font></p>
+			</td>
+			<td colspan="4" bgcolor="#e0e0e0" width="12%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>0</b></font></p>
+			</td>
+			<td colspan="4" bgcolor="#e0e0e0" width="12%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>1</b></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td colspan="2" bgcolor="#e0e0e0" width="28%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Number of
+				Allocated CPUs</b></font></p>
+			</td>
+			<td colspan="4" width="12%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">3</font></p>
+			</td>
+			<td colspan="4" width="12%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">0</font></p>
+			</td>
+			<td colspan="4" width="12%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">3</font></p>
+			</td>
+			<td colspan="4" width="12%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">0</font></p>
+			</td>
+			<td colspan="4" width="12%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">3</font></p>
+			</td>
+			<td colspan="4" width="12%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">0</font></p>
+			</td>
+		</tr>
+		<tr>
+			<td colspan="2" bgcolor="#e0e0e0" width="28%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Allocated
+				CPU ids</b></font></p>
+			</td>
+			<td colspan="8" width="24%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">0 1 2</font></p>
+			</td>
+			<td colspan="8" width="24%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">0 1 2</font></p>
+			</td>
+			<td colspan="8" width="24%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">0 1 2</font></p>
+			</td>
+		</tr>
+		<tr>
+			<td colspan="2" bgcolor="#e0e0e0" width="28%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Number of
+				Tasks</b></font></p>
+			</td>
+			<td colspan="8" width="24%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">1</font></p>
+			</td>
+			<td colspan="8" width="24%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">1</font></p>
+			</td>
+			<td colspan="8" width="24%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">1</font></p>
+			</td>
+		</tr>
+		<tr>
+			<td colspan="2" bgcolor="#e0e0e0" width="28%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Distribution
+				of Tasks to Nodes, by Task id</b></font></p>
+			</td>
+			<td colspan="8" width="24%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">0</font></p>
+			</td>
+			<td colspan="8" width="24%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">1</font></p>
+			</td>
+			<td colspan="8" width="24%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">2</font></p>
+			</td>
+		</tr>
+		<tr>
+			<td rowspan="2" bgcolor="#e0e0e0" width="15%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Binding of
+				Tasks to CPUs</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="9%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>CPU id</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>0</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>1</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>2</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>3</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>4</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>5</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>6</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>7</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>0</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>1</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>2</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>3</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>4</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>5</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>6</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>7</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>0</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>1</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>2</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>3</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>4</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>5</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>6</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="3%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>7</b></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td bgcolor="#e0e0e0" width="9%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Task id</b></font></p>
+			</td>
+			<td colspan="3" width="9%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">0</font></p>
+			</td>
+			<td colspan="5" width="15%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">-</font></p>
+			</td>
+			<td colspan="3" width="9%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">1</font></p>
+			</td>
+			<td colspan="5" width="15%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">-</font></p>
+			</td>
+			<td colspan="3" width="9%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">2</font></p>
+			</td>
+			<td colspan="5" width="15%">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">-</font></p>
+			</td>
+		</tr>
+	</tbody></table>
+</center>
+<br>
+<a name="Example16"></a>
+<h3>Example 16: Consumable resources with task cgroup and core binding</h3>
+<p>A job requires 6 CPUs (6 tasks with no overcommitment). Run the job in a 
+single node in the default partition. Apply core binding to each task using the task/cgroup plugin.
+</p>
+<p>slurm.conf options:</p>
+<pre>SelectType=select/cons_res
+SelectTypeParameters=CR_Core
+TaskPlugin=task/cgroup
+</pre>
+<p>cgroup.conf options:</p>
+<pre>ConstrainCores=yes
+TaskAffinity=yes
+</pre>
+<p>Command line:</p>
+<pre>srun --nodes=1-1 --ntasks=6 --cpu_bind=cores ...
+</pre>
+<p>Comments:</p>
+<p>The task/cgroup plugin currently supports only the block method for
+allocating cores within nodes and distributing tasks to CPUs for binding.
+The following table shows a possible pattern of allocation, distribution 
+and binding for this job. For example, task id 2 is bound to CPU id 2.
+</p><p>
+</p><center>
+	<table style="page-break-inside: avoid; font-family: Arial,Helvetica,sans-serif;" border="1" bordercolor="#000000" cellpadding="6" cellspacing="0" width="443">
+		<colgroup><col width="196">
+		<col width="47">
+		<col width="10">
+		<col width="10">
+		<col width="10">
+		<col width="10">
+		<col width="10">
+		<col width="10">
+		<col width="10">
+		<col width="9">
+		</colgroup><tbody><tr>
+			<td colspan="2" bgcolor="#e0e0e0" width="254">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Nodename</b></font></p>
+			</td>
+			<td colspan="8" bgcolor="#e0e0e0" width="163">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>n0</b></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td colspan="2" bgcolor="#e0e0e0" width="254">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Socket id</b></font></p>
+			</td>
+			<td colspan="4" bgcolor="#e0e0e0" width="76">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>0</b></font></p>
+			</td>
+			<td colspan="4" bgcolor="#e0e0e0" width="75">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>1</b></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td colspan="2" bgcolor="#e0e0e0" width="254">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Number of
+				Allocated CPUs</b></font></p>
+			</td>
+			<td colspan="4" width="76">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">4</font></p>
+			</td>
+			<td colspan="4" width="75">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">2</font></p>
+			</td>
+		</tr>
+		<tr>
+			<td colspan="2" bgcolor="#e0e0e0" width="254">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Allocated
+				CPU ids</b></font></p>
+			</td>
+			<td colspan="4" width="76">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">0 1 2 3</font></p>
+			</td>
+			<td colspan="4" width="75">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">4 5</font></p>
+			</td>
+		</tr>
+		<tr>
+			<td rowspan="2" bgcolor="#e0e0e0" width="196">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Binding of
+				Tasks to CPUs</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="47">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>CPU id</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="10">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>0</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="10">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>1</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="10">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>2</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="10">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>3</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="10">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>4</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="10">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>5</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="10">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>6</b></font></p>
+			</td>
+			<td bgcolor="#e0e0e0" width="9">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>7</b></font></p>
+			</td>
+		</tr>
+		<tr>
+			<td bgcolor="#e0e0e0" width="47">
+				<p align="CENTER"><font style="font-size: 8pt" size="1"><b>Task id</b></font></p>
+			</td>
+			<td width="10">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">0</font></p>
+			</td>
+			<td width="10">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">1</font></p>
+			</td>
+			<td width="10">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">2</font></p>
+			</td>
+			<td width="10">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">3</font></p>
+			</td>
+			<td width="10">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">4</font></p>
+			</td>
+			<td width="10">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">5</font></p>
+			</td>
+			<td width="10">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">-</font></p>
+			</td>
+			<td width="9">
+				<p align="CENTER"><font style="font-size: 8pt" size="1">-</font></p>
+			</td>
+		</tr>
+	</tbody></table>
+</center>
+
+<p class="footer"><a href="#top">top</a></p>
+
+<p style="text-align:center;">Last modified 26 September 2011</p>
+
+<!--#include virtual="footer.txt"-->
diff --git a/doc/html/cray.shtml b/doc/html/cray.shtml
index 0a1265fc4..b10b5515e 100644
--- a/doc/html/cray.shtml
+++ b/doc/html/cray.shtml
@@ -2,144 +2,637 @@
 
 <h1>SLURM User and Administrator Guide for Cray systems</h1>
 
-<b>NOTE: As of January 2009, the SLURM interface to Cray systems is incomplete.</b>
-
 <h2>User Guide</h2>
 
-<p>This document describes the unique features of SLURM on
-Cray computers.
+<p>This document describes the unique features of SLURM on Cray computers.
 You should be familiar with the SLURM's mode of operation on Linux clusters
-before studying the relatively few differences in Cray system
-operation described in this document.</p>
-
-<p>SLURM's primary mode of operation is designed for use on clusters with
-nodes configured in a one-dimensional space.
-Minor changes were required for the <i>smap</i> and <i>sview</i> tools
-to map nodes in a three-dimensional space.
-Some changes are also desirable to optimize job placement in three-dimensional
-space.</p>
-
-<p>SLURM has added an interface to Cray's Application Level Placement Scheduler
-(ALPS). The ALPS <i>aprun</i> command must used for task launch rather than SLURM's
-<i>srun</i> command. You should create a resource reservation using SLURM's
-<i>salloc</i> or <i>sbatch</i> command and execute <i>aprun</i> from within
-that allocation. <//p>
+before studying the differences in Cray system operation described in this
+document.</p>
+
+<p>SLURM version 2.3 is designed to operate as a job scheduler over Cray's
+Application Level Placement Scheduler (ALPS).
+Use SLURM's <i>sbatch</i> or <i>salloc</i> commands to create a resource
+allocation in ALPS.
+Then use ALPS' <i>aprun</i> command to launch parallel jobs within the resource
+allocation.
+The resource allocation is terminated once the batch script or the
+<i>salloc</i> command terminates.
+Alternately there is an <i>aprun</i> wrapper distributed with SLURM in
+<i>contribs/cray/srun</i> which will translate <i>srun</i> options
+into the equivalent <i>aprun</i> options. This wrapper will also execute
+<i>salloc</i> as needed to create a job allocation in which to run the
+<i>aprun</i> command. The <i>srun</i> script contains two new options:
+<i>--man</i> will print a summary of the options including notes about which
+<i>srun</i> options are not supported and <i>--alps=</i> which can be used
+to specify <i>aprun</i> options which lack an equivalent within <i>srun</i>.
+For example, <i>srun --alps="-a xt" -n 4 a.out</i>.
+Since <i>aprun</i> is used to launch tasks (the equivalent of a SLURM
+job step), the job steps will not be visible using SLURM commands.
+Other than SLURM's <i>srun</i> command being replaced by <i>aprun</i>
+and the job steps not being visible, all other SLURM commands will operate
+as expected. Note that in order to build and install the aprun wrapper
+described above, execute "configure" with the <i>--with-srun2aprun</i>
+option or add <i>%_with_srun2aprun  1</i> to your <i>~/.rpmmacros</i> file.</p>
+
+<h3>Node naming and node geometry on Cray XT/XE systems</h3>
+<p>SLURM node names will be of the form "nid#####" where "#####" is a five-digit sequence number.
+   Other information available about the node is its XYZ coordinate in the node's <i>NodeAddr</i>
+   field and its component label in the <i>NodeHostName</i> field.
+   The format of the component label is "c#-#c#s#n#" where the "#" fields represent in order:
+   cabinet, row, cage, blade or slot, and node.
+   For example "c0-1c2s5n3" is cabinet 0, row 1, cage 2, slot 5 and node 3.</p>
+
+<p>Cray XT/XE systems come with a 3D torus by default. On smaller systems the cabling in X dimension is
+   omitted, resulting in a two-dimensional torus (1 x Y x Z). On Gemini/XE systems, pairs of adjacent nodes
+   (nodes 0/1 and 2/3 on each blade) share one network interface each. This causes the same Y coordinate to
+   be  assigned to those nodes, so that the number of distinct torus coordinates is half the number of total
+   nodes.</p>
+<p>The SLURM <i>smap</i> and <i>sview</i> tools can visualize node torus positions. Clicking on a particular
+   node shows its <i>NodeAddr</i> field, which is its (X,Y,Z) torus coordinate base-36 encoded as a 3-character
+   string. For example, a NodeAddr of '07A' corresponds to the coordinates X = 0, Y = 7, Z = 10.
+   The <i>NodeAddr</i> of a node can also be shown using 'scontrol show node nid#####'.</p>
+
+<p>Please note that the sbatch/salloc options "<i>--geometry</i>" and "<i>--no-rotate</i>" are BlueGene-specific
+   and have no impact on Cray systems. Topological node placement depends on what Cray makes available via the
+   ALPS_NIDORDER configuration option (see below).</p>
+
+<h3>Specifying thread depth</h3>
+<p>For threaded applications, use the <i>--cpus-per-task</i>/<i>-c</i> parameter of sbatch/salloc to set
+   the thread depth per node. This corresponds to mppdepth in PBS and to the aprun -d parameter. Please
+   note that SLURM does not set the OMP_NUM_THREADS environment variable. Hence, if an application spawns
+   4 threads, an example script would look like</p>
+<pre>
+ #SBATCH --comment="illustrate the use of thread depth and OMP_NUM_THREADS"
+ #SBATCH --ntasks=3
+ #SBATCH -c 4
+ export OMP_NUM_THREADS=4
+ aprun -n 3 -d $OMP_NUM_THREADS ./my_exe
+</pre>
+
+<h3>Specifying number of tasks per node</h3>
+<p>SLURM uses the same default as ALPS, assigning each task to a single core/CPU. In order to
+   make more resources available per task, you can reduce the number of processing elements
+   per node (<i>aprun -N</i> parameter, <i>mppnppn</i> in PBS) with the
+   <i>--ntasks-per-node</i> option of <i>sbatch/salloc</i>.
+   This is in particular necessary when tasks require more memory than the per-CPU default.</p>
+
+<h3>Specifying per-task memory</h3>
+<p>In Cray terminology, a task is also called a "processing element" (PE), hence below we
+   refer to the per-task memory and "per-PE" memory interchangeably. The per-PE memory
+   requested through the batch system corresponds to the <i>aprun -m</i> parameter.</p>
+
+<p>Due to the implicit default assumption that 1 task runs per core/CPU, the default memory
+   available per task is the <i>per-CPU share</i> of node_memory / number_of_cores. For
+   example, on a XT5 system with 16000MB per 12-core node, the per-CPU share is 1333MB.</p>
+
+<p>If nothing else is specified, the <i>--mem</i> option to sbatch/salloc can only be used to
+   <i>reduce</i> the per-PE memory below the per-CPU share. This is also the only way that
+   the <i>--mem-per-cpu</i> option can be applied (besides, the <i>--mem-per-cpu</i> option
+   is ignored if the user forgets to set --ntasks/-n).
+   Thus, the preferred way of specifying  memory is the more general <i>--mem</i> option.</p>
+
+<p>To <i>increase</i> the per-PE memory settable via the <i>--mem</i> option requires making
+   more per-task resources available using the <i>--ntasks-per-node</i> option to sbatch/salloc.
+   This allows <i>--mem</i> to request up to node_memory / ntasks_per_node MegaBytes.</p>
+
+<p>When <i>--ntasks-per-node</i> is 1, the entire node memory may be requested by the application.
+   Setting <i>--ntasks-per-node</i> to the number of cores per node yields the default per-CPU share
+   minimum value.</p>
+
+<p>For all cases in between these extremes, set --mem=per_task_memory and</p>
+<pre>
+   --ntasks-per-node=floor(node_memory / per_task_memory)
+</pre>
+<p>whenever per_task_memory needs to be larger than the per-CPU share.</p>
+
+<p><b>Example:</b> An application with 64 tasks needs 7500MB per task on a cluster with 32000MB and 24 cores
+   per node.  Hence  ntasks_per_node = floor(32000/7500) = 4.</p>
+<pre>
+    #SBATCH --comment="requesting 7500MB per task on 32000MB/24-core nodes"
+    #SBATCH --ntasks=64
+    #SBATCH --ntasks-per-node=4
+    #SBATCH --mem=7500
+</pre>
+<p>If you would like to fine-tune the memory limit of your application, you can set the same parameters in
+   a salloc session and then check directly, using</p>
+<pre>
+    apstat -rvv -R $BASIL_RESERVATION_ID
+</pre>
+<p>to see how much memory has been requested.</p>
+
+<h3>Using aprun -B</h3>
+<p>CLE 3.x allows a nice <i>aprun</i> shortcut via the <i>-B</i> option, which
+   reuses all the batch system parameters (<i>--ntasks, --ntasks-per-node,
+   --cpus-per-task, --mem</i>) at application launch, as if the corresponding
+   (<i>-n, -N, -d, -m</i>) parameters had been set; see the aprun(1) manpage
+   on CLE 3.x systems for details.</p>
+
+<h3>Node ordering options</h3>
+<p>SLURM honours the node ordering policy set for Cray's Application Level Placement Scheduler (ALPS). Node 
+   ordering is a configurable system option (ALPS_NIDORDER in /etc/sysconfig/alps). The current
+   setting is reported by '<i>apstat -svv</i>'  (look for the line starting with "nid ordering option") and
+   can not be changed at  runtime. The resulting, effective node ordering is revealed by '<i>apstat -no</i>'
+   (if no special node ordering has been configured, 'apstat -no' shows the
+   same order as '<i>apstat -n</i>').</p>
+
+<p>SLURM uses exactly the same order as '<i>apstat -no</i>' when selecting
+   nodes for a job. With the <i>--contiguous</i> option to <i>sbatch/salloc</i>
+   you can request a contiguous (relative to the current ALPS nid ordering) set
+   of nodes. Note that on a busy system there is typically more fragmentation,
+   hence it may take longer (or even prove impossible) to allocate contiguous
+   sets of a larger size.</p>
+
+<p>Cray/ALPS node ordering is a topic of ongoing work, some information can be found in the CUG-2010 paper
+   "<i>ALPS, Topology, and Performance</i>" by Carl Albing and Mark Baker.</p>
 
 <h2>Administrator Guide</h2>
 
-<h3>Cray/ALPS configuration</h3>
-
-<p>Node names must have a three-digit suffix describing their
-zero-origin position in the X-, Y- and Z-dimension respectively (e.g.
-"tux000" for X=0, Y=0, Z=0; "tux123" for X=1, Y=2, Z=3).
-Rectangular prisms of nodes can be specified in SLURM commands and
-configuration files using the system name prefix with the end-points
-enclosed in square brackets and separated by an "x".
-For example "tux[620x731]" is used to represent the eight nodes in a
-block with endpoints at "tux620" and "tux731" (tux620, tux621, tux630,
-tux631, tux720, tux721, tux730, tux731).
-<b>NOTE:</b> We anticipate that Cray will provide node coordinate
-information via the ALPS interface in the future, which may result
-in a more flexible node naming convention.</p>
-
-<p>In ALPS, configure each node to be scheduled using SLURM as type
-BATCH.</p>
-
-<h3>SLURM configuration</h3>
-
-<p>Four variables must be defined in the <i>config.h</i> file:
-<i>APBASIL_LOC</i> (location of the <i>apbasil</i> command),
-<i>HAVE_FRONT_END</i>, <i>HAVE_CRAY</i> and <i>HAVE_3D</i>.
-The <i>apbasil</i> command should automatically be found.
-If that is not the case, please notify us of its location on your system
-and we will add that to the search paths tested at configure time.
-The other variable definitions can be initiated in several different
-ways depending upon how SLURM is being built.
-<ol>
-<li>Execute the <i>configure</i> command with the option
-<i>--enable-cray-xt</i> <b>OR</b></li>
-<li>Execute the <i>rpmbuild</i> command with the option
-<i>--with cray_xt</i> <b>OR</b></li>
-<li>Add <i>%with_cray_xt 1</i> to your <i>~/.rpmmacros</i> file.</li>
-</ol></p>
-
-<p>One <i>slurmd</i> will be used to run all of the batch jobs on
-the system. It is from here that users will execute <i>aprun</i>
-commands to launch tasks.
+<h3>Install supporting rpms</h3>
+
+<p>The build requires a few -devel RPMs listed below. You can obtain these from
+SuSe/Novell.
+<ul>
+<li>CLE 2.x uses SuSe SLES 10 packages (rpms may be on the normal isos)</li>
+<li>CLE 3.x uses Suse SLES 11 packages (rpms are on the SDK isos, there
+are two SDK iso files for SDK)</li>
+</ul></p>
+
+<p>You can check by logging onto the boot node and running</p>
+<pre>
+boot: # xtopview
+default: # rpm -qa
+</pre>
+
+<p>The list of packages that should be installed is:</p>
+<ul>
+<li>expat-2.0.xxx</li>
+<li>libexpat-devel-2.0.xxx</li>
+<li>cray-MySQL-devel-enterprise-5.0.64 (this should be on the Cray iso)</li>
+</ul>
+
+<p>For example, loading MySQL can be done like this:</p>
+<pre>
+smw: # mkdir mnt
+smw: # mount -o loop,ro xe-sles11sp1-trunk.201107070231a03.iso mnt
+smw: # find mnt -name cray-MySQL-devel-enterprise\*
+mnt/craydist/xt-packages/cray-MySQL-devel-enterprise-5.0.64.1.0000.2899.19.2.x86_64.rpm
+smw: # scp mnt/craydist/xt-packages/cray-MySQL-devel-enterprise-5.0.64.1.0000.2899.19.2.x86_64.rpm root@boot:/rr/current/software/
+</pre>
+
+<p>Then switch to boot node and run:</p>
+<pre>
+boot: # xtopview
+default: # rpm -ivh /software/cray-MySQL-devel-enterprise-5.0.64.1.0000.2899.19.2.x86_64.rpm
+default: # exit
+</pre>
+
+<p>All Cray-specific PrgEnv and compiler modules should be removed and root
+privileges will be required to install these files.</p>
+
+<h3>Create a build root</h3>
+
+<p>The build is done on a normal service node, where you like
+(e.g. <i>/ufs/slurm/build</i> would work).
+Most scripts check for the environment variable LIBROOT. 
+You can either edit the scripts or export this variable. Easiest way:</p>
+
+<pre>
+login: # export LIBROOT=/ufs/slurm/build
+login: # mkdir -vp $LIBROOT
+login: # cd $LIBROOT
+</pre>
+
+<h3>Install SLURM modulefile</h3>
+
+<p>This file is distributed as part the SLURM tar-ball in
+<i>contribs/cray/opt_modulefiles_slurm</i>. Install it as
+<i>/opt/modulefiles/slurm</i> (or anywhere else in your module path).
+It means that you can use Munge as soon as it is built.</p>
+<pre>
+login: # scp ~/slurm/contribs/cray/opt_modulefiles_slurm root@boot:/rr/current/software/
+</pre>
+
+<h3>Build and install Munge</h3>
+
+<p>Note the Munge installation process on Cray systems differs
+somewhat from that described in the 
+<a href="http://code.google.com/p/munge/wiki/InstallationGuide">
+MUNGE Installation Guide</a>.</p>
+
+<p>Munge is the authentication daemon and needed by SLURM. Download
+munge-0.5.10.tar.bz2 or newer from
+<a href="http://code.google.com/p/munge/downloads/list">
+http://code.google.com/p/munge/downloads/list</a>. This is how one
+can build on a login node and install it.</p>
+<pre>
+login: # cd $LIBROOT
+login: # cp ~/slurm/contribs/cray/munge_build_script.sh $LIBROOT
+login: # mkdir -p ${LIBROOT}/munge/zip
+login: # curl -O http://munge.googlecode.com/files/munge-0.5.10.tar.bz2
+login: # cp munge-0.5.10.tar.bz2 ${LIBROOT}/munge/zip
+login: # chmod u+x ${LIBROOT}/munge_build_script.sh
+login: # ${LIBROOT}/munge_build_script.sh
+(generates lots of output and generates a tar-ball called
+$LIBROOT/munge_build-.*YYYY-MM-DD.tar.gz)
+login: # scp munge_build-2011-07-12.tar.gz root@boot:/rr/current/software
+</pre>
+
+<p>Install the tar-ball on the boot node and build an encryption
+key file by executing:
+<pre>
+boot: # xtopview
+default: # tar -zxvf $LIBROOT/munge_build-*.tar.gz -C /rr/current /
+default: # dd if=/dev/urandom bs=1 count=1024 >/opt/slurm/munge/etc/munge.key
+default: # chmod go-rxw /opt/slurm/munge/etc/munge.key
+default: # exit
+</pre>
+
+<h3>Configure Munge</h3>
+
+<p>The following steps apply to each login node and the sdb, where
+<ul>
+<li>The <i>slurmd</i> or <i>slurmctld</i> daemon will run and/or</li>
+<li>Users will be submitting jobs</li>
+</ul></p>
+
+<pre>
+login: # mkdir --mode=0711 -vp /var/lib/munge
+login: # mkdir --mode=0700 -vp /var/log/munge
+login: # mkdir --mode=0755 -vp /var/run/munge
+login: # module load slurm
+</pre>
+<pre>
+sdb: # mkdir --mode=0711 -vp /var/lib/munge
+sdb: # mkdir --mode=0700 -vp /var/log/munge
+sdb: # mkdir --mode=0755 -vp /var/run/munge
+</pre>
+
+<p>Start the munge daemon and test it.</p>
+<pre>
+login: # munged --key-file /opt/slurm/munge/etc/munge.key
+login: # munge -n
+MUNGE:AwQDAAAEy341MRViY+LacxYlz+mchKk5NUAGrYLqKRUvYkrR+MJzHTgzSm1JALqJcunWGDU6k3vpveoDFLD7fLctee5+OoQ4dCeqyK8slfAFvF9DT5pccPg=:
+</pre>
+
+<p>When done, verify network connectivity by executing:
+<ul>
+<li><i>munge -n | ssh other-login-host /opt/slurm/munge/bin/unmunge</i></li>
+</ul>
+
+
+<p>If you decide to keep the installation, you may be interested in automating
+the process using an <i>init.d</i> script distributed with the Munge. This
+should be installed on all nodes running munge, e.g., 'xtopview -c login' and
+'xtopview -n sdbNodeID'
+</p>
+<pre>
+boot: # xtopview -c login
+login: # cp /software/etc_init_d_munge /etc/init.d/munge
+login: # chmod u+x /etc/init.d/munge
+login: # chkconfig munge on
+login: # exit
+boot: # xtopview -n 31
+node/31: # cp /software/etc_init_d_munge /etc/init.d/munge
+node/31: # chmod u+x /etc/init.d/munge
+node/31: # chkconfig munge on
+node/31: # exit
+</pre>
+
+<h3>Enable the Cray job service</h3>
+
+<p>This is a common dependency on Cray systems. ALPS relies on the Cray job service to
+   generate cluster-unique job container IDs (PAGG IDs). These identifiers are used by
+   ALPS to track running (aprun) job steps. The default (session IDs) is not unique
+   across multiple login nodes. This standard procedure is described in chapter 9 of
+   <a href="http://docs.cray.com/books/S-2393-30/">S-2393</a> and takes only two
+   steps, both to be done on all 'login' class nodes (xtopview -c login):</p>
+   <ul>
+	   <li>make sure that the /etc/init.d/job service is enabled (chkconfig) and started</li>
+	   <li>enable the pam_job.so module from /opt/cray/job/default in /etc/pam.d/common-session<br/>
+	   (NB: the default pam_job.so is very verbose, a simpler and quieter variant is provided
+		in contribs/cray.)</li>
+   </ul>
+<p>The latter step is required only if you would like to run interactive
+   <i>salloc</i> sessions.</p>
+<pre>
+boot: # xtopview -c login
+login: # chkconfig job on
+login: # emacs -nw /etc/pam.d/common-session
+(uncomment the pam_job.so line)
+session optional /opt/cray/job/default/lib64/security/pam_job.so
+login: # exit
+boot: # xtopview -n 31
+node/31:# chkconfig job on
+node/31:# emacs -nw /etc/pam.d/common-session
+(uncomment the pam_job.so line as shown above)
+</pre>
+
+<h3>Build and Configure SLURM</h3>
+
+<p>SLURM can be built and installed as on any other computer as described
+<a href="quickstart_admin.html">Quick Start Administrator Guide</a>.
+An example of building and installing SLURM version 2.3.0 is shown below.</p>
+
+<pre>
+login: # mkdir build && cd build
+login: # slurm/configure \
+  --prefix=/opt/slurm/2.3.0 \
+  --with-munge=/opt/slurm/munge/ \
+  --with-mysql_config=/opt/cray/MySQL/5.0.64-1.0000.2899.20.2.gem/bin \
+  --with-srun2aprun
+login: # make -j
+login: # mkdir install
+login: # make DESTDIR=/tmp/slurm/build/install install
+login: # make DESTDIR=/tmp/slurm/build/install install-contrib
+login: # cd install
+login: # tar czf slurm_opt.tar.gz opt
+login: # scp slurm_opt.tar.gz boot:/rr/current/software
+</pre>
+
+<pre>
+boot: # xtopview
+default: # tar xzf /software/slurm_opt.tar.gz -C /
+default: # cd /opt/slurm/
+default: # ln -s 2.3.0 default
+</pre>
+
+<p>When building SLURM's <i>slurm.conf</i> configuration file, use the
+<i>NodeName</i> parameter to specify all batch nodes to be scheduled.
+If nodes are defined in ALPS, but not defined in the <i>slurm.conf</i> file, a
+complete list of all batch nodes configured in ALPS will be logged by
+the <i>slurmctld</i> daemon when it starts.
+One would typically use this information to modify the <i>slurm.conf</i> file
+and restart the <i>slurmctld</i> daemon.
+Note that the <i>NodeAddr</i> and <i>NodeHostName</i> fields should not be
+configured, but will be set by SLURM using data from ALPS.
+<i>NodeAddr</i> will be set to the node's XYZ coordinate and be used by SLURM's
+<i>smap</i> and <i>sview</i> commands.
+<i>NodeHostName</i> will be set to the node's component label.
+The format of the component label is "c#-#c#s#n#" where the "#" fields
+represent in order: cabinet, row, cage, blade or slot, and node.
+For example "c0-1c2s5n3" is cabinet 0, row 1, cage 2, slot 5 and node 3.</p>
+
+<p>The <i>slurmd</i> daemons will not execute on the compute nodes, but will
+execute on one or more front end nodes.
+It is from here that batch scripts will execute <i>aprun</i> commands to
+launch tasks.
 This is specified in the <i>slurm.conf</i> file by using the
-<i>NodeName</i> field to identify the compute nodes and both the
-<i>NodeAddr</i> and <i>NodeHostname</i> fields to identify the
-computer when <i>slurmd</i> runs (normally some sort of front-end node)
+<i>FrontendName</i> and optionally the <i>FrontEndAddr</i> fields
 as seen in the examples below.</p>
 
-<p>Next you need to select from two options for the resource selection
-plugin (the <i>SelectType</i> option in SLURM's <i>slurm.conf</i> configuration
-file):
-<ol>
-<li><b>select/cons_res</b> - Performs a best-fit algorithm based upon a
-one-dimensional space to allocate whole nodes, sockets, or cores to jobs
-based upon other configuration parameters.</li>
-<li><b>select/linear</b> - Performs a best-fit algorithm based upon a
-one-dimensional space to allocate whole nodes to jobs.</li>
-</ol>
-
-<p>In order for <i>select/cons_res</i> or <i>select/linear</i> to
-allocate resources physically nearby in three-dimensional space, the
-nodes be specified in SLURM's <i>slurm.conf</i> configuration file in
-such a fashion that those nearby in <i>slurm.conf</i> (one-dimensional
-space) are also nearby in the physical three-dimensional space.
-If the definition of the nodes in SLURM's <i>slurm.conf</i> configuration
-file are listed on one line (e.g. <i>NodeName=tux[000x333]</i>),
-SLURM will automatically perform that conversion using a
-<a href="http://en.wikipedia.org/wiki/Hilbert_curve">Hilbert curve</a>.
-Otherwise you may construct your own node name ordering and list them
-one node per line in <i>slurm.conf</i>.
-Note that each node must be listed exactly once and consecutive
-nodes should be nearby in three-dimensional space.
-Also note that each node must be defined individually rather than using
-a hostlist expression in order to preserve the ordering (there is no
-problem using a hostlist expression in the partition specification after
-the nodes have already been defined).
-The open source code used by SLURM to generate the Hilbert curve is
-included in the distribution at <i>contribs/skilling.c</i> in the event
-that you wish to experiment with it to generate your own node ordering.
-Two examples of SLURM configuration files are shown below:</p>
-
-<pre>
-# slurm.conf for Cray XT system of size 4x4x4
-# Parameters removed here
-SelectType=select/linear
-NodeName=DEFAULT Procs=8 RealMemory=2048 State=Unknown
-NodeName=tux[000x333] NodeAddr=front_end NodeHostname=front_end
-PartitionName=debug Nodes=tux[000x333] Default=Yes State=UP
-</pre>
-
-<pre>
-# slurm.conf for Cray XT system of size 4x4x4
-# Parameters removed here
-SelectType=select/linear
-NodeName=DEFAULT Procs=8 RealMemory=2048 State=Unknown
-NodeName=tux000 NodeAddr=front_end NodeHostname=front_end
-NodeName=tux100 NodeAddr=front_end NodeHostname=front_end
-NodeName=tux110 NodeAddr=front_end NodeHostname=front_end
-NodeName=tux010 NodeAddr=front_end NodeHostname=front_end
-NodeName=tux011 NodeAddr=front_end NodeHostname=front_end
-NodeName=tux111 NodeAddr=front_end NodeHostname=front_end
-NodeName=tux101 NodeAddr=front_end NodeHostname=front_end
-NodeName=tux001 NodeAddr=front_end NodeHostname=front_end
-PartitionName=debug Nodes=tux[000x111] Default=Yes State=UP
-</pre>
-
-<p>In both of the examples above, the node names output by the
-<i>scontrol show nodes</i> will be ordered as defined (sequentially
-along the Hilbert curve or per the ordering in the <i>slurm.conf</i> file)
-rather than in numeric order (e.g. "tux001" follows "tux101" rather
-than "tux000").
-SLURM partitions should contain nodes which are defined sequentially
-by that ordering for optimal performance.</p>
+<p>Note that SLURM will by default kill running jobs when a node goes DOWN,
+while a DOWN node in ALPS only prevents new jobs from being scheduled on the
+node. To help avoid confusion, we recommend that <i>SlurmdTimeout</i> in the
+<i>slurm.conf</i> file be set to the same value as the <i>suspectend</i>
+parameter in ALPS' <i>nodehealth.conf</i> file.</p>
+
+<p>You need to specify the appropriate resource selection plugin (the
+<i>SelectType</i> option in SLURM's <i>slurm.conf</i> configuration file).
+Configure <i>SelectType</i> to <i>select/cray</i>. The <i>select/cray</i> 
+plugin provides an interface to ALPS plus issues calls to the
+<i>select/linear</i>, which selects resources for jobs using a best-fit
+algorithm to allocate whole nodes to jobs (rather than individual sockets,
+cores or threads).</p>
+
+<p>Note that the system topology is based upon information gathered from
+the ALPS database and is based upon the ALPS_NIDORDER configuration in
+<i>/etc/sysconfig/alps</i>. Excerpts of a <i>slurm.conf</i> file for
+use on Cray systems follow:</p>
+
+<pre>
+#---------------------------------------------------------------------
+# SLURM USER
+#---------------------------------------------------------------------
+# SLURM user on cray systems must be root
+# This requirement derives from Cray ALPS:
+# - ALPS reservations can only be created by the job owner or root
+#   (confirmation may be done by other non-privileged users)
+# - Freeing a reservation always requires root privileges
+SlurmUser=root
+
+#---------------------------------------------------------------------
+# PLUGINS
+#---------------------------------------------------------------------
+# Network topology (handled internally by ALPS)
+TopologyPlugin=topology/none
+
+# Scheduling
+SchedulerType=sched/backfill
+
+# Node selection: use the special-purpose "select/cray" plugin.
+# Internally this uses select/linear, i.e. nodes are always allocated
+# in units of nodes (other allocation is currently not possible, since
+# ALPS does not yet allow to run more than 1 executable on the same
+# node, see aprun(1), section LIMITATIONS).
+#
+# Add CR_memory as parameter to support --mem/--mem-per-cpu.
+SelectType=select/cray
+SelectTypeParameters=CR_Memory
+
+# Proctrack plugin: only/default option is proctrack/sgi_job
+# ALPS requires cluster-unique job container IDs and thus the /etc/init.d/job
+# service needs to be started on all slurmd and login nodes, as described in
+# S-2393, chapter 9. Due to this requirement, ProctrackType=proctrack/sgi_job
+# is the default on Cray and need not be specified explicitly.
+
+#---------------------------------------------------------------------
+# PATHS
+#---------------------------------------------------------------------
+SlurmdSpoolDir=/ufs/slurm/spool
+StateSaveLocation=/ufs/slurm/spool/state
+
+# main logfile
+SlurmctldLogFile=/ufs/slurm/log/slurmctld.log
+# slurmd logfiles (using %h for hostname)
+SlurmdLogFile=/ufs/slurm/log/%h.log
+
+# PIDs
+SlurmctldPidFile=/var/run/slurmctld.pid
+SlurmdPidFile=/var/run/slurmd.pid
+
+#---------------------------------------------------------------------
+# COMPUTE NODES
+#---------------------------------------------------------------------
+# Return DOWN nodes to service when e.g. slurmd has been unresponsive
+ReturnToService=1
+
+# Configure the suspectend parameter in ALPS' nodehealth.conf file to the same
+# value as SlurmdTimeout for consistent behavior (e.g. "suspectend: 600")
+SlurmdTimeout=600
+
+# Controls how a node's configuration specifications in slurm.conf are
+# used.
+# 0 - use hardware configuration (must agree with slurm.conf)
+# 1 - use slurm.conf, nodes with fewer resources are marked DOWN
+# 2 - use slurm.conf, but do not mark nodes down as in (1)
+FastSchedule=2
+
+# Per-node configuration for PALU AMD G34 dual-socket "Magny Cours"
+# Compute Nodes. We deviate from slurm's idea of a physical socket
+# here, since the Magny Cours hosts two NUMA nodes each, which is
+# also visible in the ALPS inventory (4 Segments per node, each
+# containing 6 'Processors'/Cores).
+NodeName=DEFAULT Sockets=4 CoresPerSocket=6 ThreadsPerCore=1
+NodeName=DEFAULT RealMemory=32000 State=UNKNOWN
+
+# List the nodes of the compute partition below (service nodes are not
+# allowed to appear)
+NodeName=nid00[002-013,018-159,162-173,178-189]
+
+# Frontend nodes: these should not be available to user logins, but
+#                 have all filesystems mounted that are also 
+#                 available on a login node (/scratch, /home, ...).
+FrontendName=palu[7-9]
+
+#---------------------------------------------------------------------
+# ENFORCING LIMITS
+#---------------------------------------------------------------------
+# Enforce the use of associations: {associations, limits, wckeys}
+AccountingStorageEnforce=limits
+
+# Do not propagate any resource limits from the user's environment to
+# the slurmd
+PropagateResourceLimits=NONE
+
+#---------------------------------------------------------------------
+# Resource limits for memory allocation:
+# * the Def/Max 'PerCPU' and 'PerNode' variants are mutually exclusive;
+# * use the 'PerNode' variant for both default and maximum value, since
+#   - slurm will automatically adjust this value depending on
+#     --ntasks-per-node
+#   - if using a higher per-cpu value than possible, salloc will just
+#     block.
+#--------------------------------------------------------------------
+# XXX replace both values below with your values from 'xtprocadmin -A'
+DefMemPerNode=32000
+MaxMemPerNode=32000
+
+#---------------------------------------------------------------------
+# PARTITIONS
+#---------------------------------------------------------------------
+# defaults common to all partitions
+PartitionName=DEFAULT Nodes=nid00[002-013,018-159,162-173,178-189]
+PartitionName=DEFAULT MaxNodes=178
+PartitionName=DEFAULT Shared=EXCLUSIVE State=UP DefaultTime=60
+
+# "User Support" partition with a higher priority
+PartitionName=usup Hidden=YES Priority=10 MaxTime=720 AllowGroups=staff
+
+# normal partition available to all users
+PartitionName=day Default=YES Priority=1 MaxTime=01:00:00
+</pre>
+
+<p>SLURM supports an optional <i>cray.conf</i> file containing Cray-specific
+configuration parameters. <b>This file is NOT needed for production systems</b>,
+but is provided for advanced configurations. If used, <i>cray.conf</i> must be
+located in the same directory as the <i>slurm.conf</i> file. Configuration
+parameters supported by <i>cray.conf</i> are listed below.</p>
+
+<p><dl>
+<dt><b>apbasil</b></dt>
+<dd>Fully qualified pathname to the apbasil command.
+The default value is <i>/usr/bin/apbasil</i>.</dd>
+<dt><b>apkill</b></dt>
+<dd>Fully qualified pathname to the apkill command.
+The default value is <i>/usr/bin/apkill</i>.</dd>
+<dt><b>SDBdb</b></dt>
+<dd>Name of the ALPS database.
+The default value is <i>XTAdmin</i>.</dd>
+<dt><b>SDBhost</b></dt>
+<dd>Hostname of the database server.
+The default value is based upon the contents of the 'my.cnf' file used to
+store default database access information and that defaults to user 'sdb'.</dd>
+<dt><b>SDBpass</b></dt>
+<dd>Password used to access the ALPS database.
+The default value is based upon the contents of the 'my.cnf' file used to
+store default database access information and that defaults to user 'basic'.</dd>
+<dt><b>SDBport</b></dt>
+<dd>Port used to access the ALPS database.
+The default value is 0.</dd>
+<dt><b>SDBuser</b></dt>
+<dd>Name of user used to access the ALPS database.
+The default value is based upon the contents of the 'my.cnf' file used to
+store default database access information and that defaults to user 'basic'.</dd>
+</dl></p>
+
+<pre>
+# Example cray.conf file
+apbasil=/opt/alps_simulator_40_r6768/apbasil.sh
+SDBhost=localhost
+SDBuser=alps_user
+SDBdb=XT5istanbul
+</pre>
+
+<p>One additional configuration script can be used to insure that the slurmd
+daemons execute with the highest resource limits possible, overriding default
+limits on Suse systems. Depending upon what resource limits are propagated
+from the user's environment, lower limits may apply to user jobs, but this
+script will insure that higher limits are possible. Copy the file
+<i>contribs/cray/etc_sysconfig_slurm</i> into <i>/etc/sysconfig/slurm</i>
+for these limits to take effect. This script is executed from
+<i>/etc/init.d/slurm</i>, which is typically executed to start the SLURM
+daemons. An excerpt of <i>contribs/cray/etc_sysconfig_slurm</i>is shown
+below.</p>
+
+<pre>
+#
+# /etc/sysconfig/slurm for Cray XT/XE systems
+#
+# Cray is SuSe-based, which means that ulimits from
+# /etc/security/limits.conf will get picked up any time SLURM is
+# restarted e.g. via pdsh/ssh. Since SLURM respects configured limits,
+# this can mean that for instance batch jobs get killed as a result
+# of configuring CPU time limits. Set sane start limits here.
+#
+# Values were taken from pam-1.1.2 Debian package
+ulimit -t unlimited	# max amount of CPU time in seconds
+ulimit -d unlimited	# max size of a process's data segment in KB
+</pre>
+
+<p>SLURM's <i>init.d</i> script should also be installed to automatically
+start SLURM daemons when nodes boot as shown below. Be sure to edit the script
+as appropriate to reference the proper file location (modify the variable
+<i>PREFIX</i>).
+
+<pre>
+login: # scp /home/crayadm/ben/slurm/etc/init.d.slurm boot:/rr/current/software/
+</pre>
+
+<p>Now create the needed directories for logs and state files then start the
+daemons on the sdb and login nodes as shown below.</p>
+
+<pre>
+sdb: # mkdir -p /ufs/slurm/log
+sdb: # mkdir -p /ufs/slurm/spool
+sdb: # /etc/init.d/slurm start
+</pre>
+
+<pre>
+login: # /etc/init.d/slurm start
+</pre>
+
+<h3>Srun wrapper configuration</h3>
+
+<p>The <i>srun</i> wrapper to <i>aprun</i> might require modification to run
+as desired. Specifically the <i>$aprun</i> variable could be set to the
+absolute pathname of that executable file. Without that modification, the
+<i>aprun</i> command executed will depend upon the user's search path.</p>
+
+<p>In order to debug the <i>srun</i> wrapper, uncomment the line</p>
+<pre>
+print "comment=$command\n"
+</pre>
+<p>If the <i>srun</i> wrapper is executed from
+within an existing SLURM job allocation (i.e. within <i>salloc</i> or an
+<i>sbatch</i> script), then it just executes the <i>aprun</i> command with
+appropriate options. If executed without an allocation, the wrapper executes
+<i>salloc</i>, which then executes the <i>srun</i> wrapper again. This second
+execution of the <i>srun</i> wrapper is required in order to process environment
+variables that are set by the <i>salloc</i> command based upon the resource
+allocation.</p>
 
 <p class="footer"><a href="#top">top</a></p>
 
-<p style="text-align:center;">Last modified 9 January 2009</p></td>
+<p style="text-align:center;">Last modified 1 August 2011</p></td>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/disclaimer.shtml b/doc/html/disclaimer.shtml
new file mode 100644
index 000000000..e4e9325d3
--- /dev/null
+++ b/doc/html/disclaimer.shtml
@@ -0,0 +1,90 @@
+<!--#include virtual="header.txt"-->
+
+<h1>Legal Notices</h1>
+
+<p>SLURM is free software; you can redistribute it and/or modify it under
+the terms of the <a href="http://www.gnu.org/licenses/gpl.html">GNU General
+Public License</a> as published by the <a href="http://www.fsf.org/">Free
+Software Foundation</a>; either version 2 of the License, or (at your option)
+any later version.</p>
+
+<p>SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE.  See the <a href="http://www.gnu.org/licenses/gpl.html">
+GNU General Public License</a> for more details.</p>
+
+<h1>NO WARRANTY</h1>
+<p>The following is an excerpt from the GNU General Public License.</p>
+
+<p>BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.</p>
+
+<p>IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.</p>
+
+<h1>Copyrights</h1>
+
+<p>SLURM represents the collaborative efforts of roughly 100 people representing
+roughly 40 different organizations world-wide. A current list of contributors
+can be found at the <a href="team.html">SLURM Team</a> web page.</p>
+
+<p>While many organizations contributed code and/or documentation without
+including a copyright notice, the following copyright notices are found in
+SLURM's code and documentation files:<br>
+Copyright (C) 2011 Trinity Centre for High Performance Computing<br>
+Copyright (C) 2010-2011 SchedMD LLC<br>
+Copyright (C) 2009 CEA/DAM/DIF<br>
+Copyright (C) 2009-2011 Centro Svizzero di Calcolo Scientifico (CSCS)<br>
+Copyright (C) 2008-2011 Lawrence Livermore National Security<br>
+Copyright (C) 2008 Vijay Ramasubramanian<br>
+Copyright (C) 2007-2008 Red Hat, Inc.<br>
+Copyright (C) 2007-2009 National University of Defense Technology, China<br>
+Copyright (C) 2007-2011 Bull<br>
+Copyright (C) 2005-2008 Hewlett-Packard Development Company, L.P.<br>
+Copyright (C) 2004-2009, Marcus Holland-Moritz<br>
+Copyright (C) 2002-2007 The Regents of the University of California<br>
+Copyright (C) 2002-2003 Linux NetworX<br>
+Copyright (C) 2002 University of Chicago<br>
+Copyright (C) 2001 Paul Marquess<br>
+Copyright (C) 2000 Markus Friedl<br>
+Copyright (C) 1999 Kenneth Albanowski<br>
+Copyright (C) 1998 Todd C. Miller &lt;Todd.Miller@courtesan.com&gt;<br>
+Copyright (C) 1996-2003 Maximum Entropy Data Consultants Ltd,<br>
+Copyright (C) 1995 Tatu Ylonen &lt;ylo@cs.hut.fi&gt;, Espoo, Finland<br>
+Copyright (C) 1989-1994, 1996-1999, 2001 Free Software Foundation, Inc.</p>
+
+<p>Much of the work was performed under the auspices of the U.S. Department of
+Energy by Lawrence Livermore National Laboratory under Contract DE-AC52-07NA27344.
+This work was sponsored by an agency of the United States government. 
+Neither the United States Government nor Lawrence Livermore National 
+Security, LLC, nor any of their employees, makes any warranty, express 
+or implied, or assumes any liability or responsibility for the accuracy, 
+completeness, or usefulness of any information, apparatus, product, or 
+process disclosed, or represents that its use would not infringe privately
+owned rights. References herein to any specific commercial products, process, 
+or services by trade names, trademark, manufacturer or otherwise does not
+necessarily constitute or imply its endorsement, recommendation, or
+favoring by the United States Government or the Lawrence Livermore National
+Security, LLC. The views and opinions of authors expressed herein do not 
+necessarily state or reflect those of the United States government or 
+Lawrence Livermore National Security, LLC, and shall not be used for 
+advertising or product endorsement purposes.</p>
+
+<p style="text-align: center;">Last modified 27 June 2010</p>
+
+<!--#include virtual="footer.txt"-->
+
diff --git a/doc/html/dist_plane.shtml b/doc/html/dist_plane.shtml
index 57747d5b5..7f0bfca64 100644
--- a/doc/html/dist_plane.shtml
+++ b/doc/html/dist_plane.shtml
@@ -17,7 +17,7 @@ task list is: 0, 1, 2, 3, 4, ..., 19, 20.
 <p>On <u>One (1)</u> node: <i>srun -N 1-1 -n 21 -m plane=4 <...></i>.
 
 <p>The distribution results in a plane distribution with plane_size 21.
-Even thought the user specified a plane_size of 4 the final plane
+Even though the user specified a plane_size of 4 the final plane
 distribution results in a plane_size of 21.
 
 <p>
@@ -114,7 +114,7 @@ task list is: 0, 1, 2, 3, 4, ..., 19, 20.
 
 <p>On <u>One (1)</u> node:
 <i>srun -N 1-1 -n 21 -m plane=4 --cpu_bind=core <...></i>.
-Even thought the user specified a plane_size of 4 the final plane
+Even though the user specified a plane_size of 4 the final plane
 distribution results in a plane distribution with plane_size=8.
 
 <p>
diff --git a/doc/html/documentation.shtml b/doc/html/documentation.shtml
index 22bdc38fb..efe406c9b 100644
--- a/doc/html/documentation.shtml
+++ b/doc/html/documentation.shtml
@@ -7,6 +7,8 @@ Also see <a href="publications.html">Publications and Presentations</a>.
 <h2>SLURM Users</h2>
 <ul>
 <li><a href="quickstart.html">Quick Start User Guide</a></li>
+<li><a href="man_index.html">Man Pages</a></li>
+<li><a href="cpu_management.html">CPU Management User and Administrator Guide</a></li>
 <li><a href="mpi_guide.html">MPI Use Guide</a></li>
 <li><a href="mc_support.html">Support for Multi-core/Multi-threaded Architectures</a></li>
 <li><a href="multi_cluster.html">Multi-Cluster Operation</a></li>
@@ -23,6 +25,7 @@ Also see <a href="publications.html">Publications and Presentations</a>.
 <h2>SLURM Administrators</h2>
 <ul>
 <li><a href="quickstart_admin.html">Quick Start Administrator Guide</a></li>
+<li><a href="cpu_management.html">CPU Management User and Administrator Guide</a></li>
 <li><a href="configurator.html">Configuration Tool</a></li>
 <li><a href="troubleshoot.html">Troubleshooting Guide</a></li>
 <li><a href="big_sys.html">Large Cluster Administration Guide</a></li>
@@ -32,7 +35,7 @@ Also see <a href="publications.html">Publications and Presentations</a>.
 <ul>
 <li><a href="cons_res.html">Consumable Resources Guide</a></li>
 <li><a href="gang_scheduling.html">Gang Scheduling</a></li>
-<li><a href="gres.html">Generic Resource (Gres) Scheduling</a></li>
+<li><a href="gres.html">Generic Resource (GRES) Scheduling</a></li>
 <li><a href="high_throughput.html">High Throughput Computing Guide</a></li>
 <li><a href="priority_multifactor.html">Multifactor Job Priority</a></li>
 <li><a href="preempt.html">Preemption</a></li>
@@ -61,6 +64,12 @@ Also see <a href="publications.html">Publications and Presentations</a>.
 <ul>
 <li><a href="programmer_guide.html">Programmer Guide</a></li>
 <li><a href="api.html">Application Programmer Interface (API) Guide</a></li>
+<li>Design Information</li>
+<ul>
+<li><a href="gres_design.html">Generic Resource (GRES) Design Guide</a></li>
+<li><a href="job_launch.html">Job Launch Design Guide</a></li>
+<li><a href="select_design.html">Select Plugin Design Guide</a></li>
+</ul>
 <li><a href="plugins.html">Plugin Programmer Guide</a></li>
 <li>Plugin Interface Details</li>
 <ul>
@@ -84,6 +93,6 @@ Also see <a href="publications.html">Publications and Presentations</a>.
 </li>
 </ul>
 
-<p style="text-align:center;">Last modified 27 August 2010</p>
+<p style="text-align:center;">Last modified 31 May 2011</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/download.shtml b/doc/html/download.shtml
index c46ac4bae..5b06b50d3 100644
--- a/doc/html/download.shtml
+++ b/doc/html/download.shtml
@@ -1,15 +1,28 @@
 <!--#include virtual="header.txt"-->
 
 <h1>Download</h1>
-<p>
-SLURM source can be downloaded from
-<a href="http://sourceforge.net/projects/slurm/">
-http://sourceforge.net/projects/slurm/</a><br>
+<p>SLURM source can be downloaded from
+<a href="http://www.schedmd.com/#repos">
+http://www.schedmd.com/#repos</a><br>
 SLURM has also been packaged for
 <a href="http://packages.debian.org/src:slurm-llnl">Debian</a> and
 <a href="http://packages.ubuntu.com/src:slurm-llnl">Ubuntu</a>
 (both named <i>slurm-llnl</i>).</p>
 
+<p>A <a href="http://www.bsc.es/plantillaA.php?cat_id=705">SLURM simulator</a>
+is available to assess various scheduling policies.
+Under simulation jobs are not actually executed. Instead a job execution trace
+from a real system or a synthetic trace are used.</p>
+
+<!--
+SLURM interface to PHP
+https://github.com/jcftang/slurm/commits/php-slurm
+http://thammuz.tchpc.tcd.ie/mirrors/php-slurm/1.0/
+Development by Peter Vermeulen with help from staff of
+Trinity Centre for High Performance Computing,
+Trinity College Dublin, Ireland.
+-->
+
 <p>Related software available from various sources include:
 <ul>
 
@@ -40,7 +53,7 @@ for the application to manage Kerberos V credentials.</li>
 See our <a href="accounting.html">Accounting</a> web page for more information.</li>
 <ul>
 <li><a href="http://www.mysql.com/">MySQL</a> (recommended)</li>
-<li><a href="http://www.postgresql.org/">PostgreSQL</a></li>
+<li><a href="http://www.postgresql.org/">PostgreSQL</a> (Not fully functional)</li>
 </ul><br>
 
 <li><b>Debuggers</b> and debugging tools</li>
@@ -68,7 +81,7 @@ Download it from <a href="http://www.openssl.org/">http://www.openssl.org/</a>.<
 is an implementation of <a href="http://www.gridforum.org/">Open Grid Forum</a>
 <a href="http://www.drmaa.org/">DRMAA 1.0</a> (Distributed Resource Management Application API)
 <a href="http://www.ogf.org/documents/GFD.133.pdf">specification</a> for submission
-and control of jobs to <href="https://computing.llnl.gov/linux/slurm/">SLURM</a>.
+and control of jobs to <a href="http://www.schedmd.com/slurmdocs/">SLURM</a>.
 Using DRMAA, grid applications builders, portal developers and ISVs can use 
 the same high-level API to link their software with different cluster/resource
 management systems.</li><br>
@@ -88,8 +101,8 @@ the <b>qsnetlibs</b> development libraries from
 plugin also requires the <b>libelanhosts</b> library and
 a corresponding /etc/elanhosts configuration file, used to map
 hostnames to Elan IDs. The libelanhosts source is available from
-<a href="https://sourceforge.net/projects/slurm/">
-https://sourceforge.net/projects/slurm/</a>.
+<a href="http://www.schedmd.com/download/extras/libelanhosts-0.9-1.tgz">
+http://www.schedmd.com/download/extras/libelanhosts-0.9-1.tgz</a>.
 </ul><br>
 
 <li><b>I/O Watchdog</b><br>
@@ -118,15 +131,15 @@ http://io-watchdog.googlecode.com/files/io-watchdog-0.6.tar.bz2</a></li><br>
 <li><a href="http://www.quadrics.com/">Quadrics MPI</a></li>
 </ul><br>
 
-<li><b>PAM Modules (pam_slurm)</b><br>
+<li><b>PAM Module (pam_slurm)</b><br>
 Pluggable Authentication Module (PAM) for restricting access to compute nodes
 where SLURM performs resource management. Access to the node is restricted to
 user root and users who have been allocated resources on that node.
 NOTE: pam_slurm is included within the SLURM distribution for version 2.1
 or higher.
 For earlier SLURM versions, pam_slurm is available for download from<br>
-<a href="https://sourceforge.net/projects/slurm/">
-https://sourceforge.net/projects/slurm/</a><br>
+<a href="http://www.schedmd.com/download/extras/pam_slurm-1.6.tar.bz2">
+http://www.schedmd.com/download/extras/pam_slurm-1.6.tar.bz2</a><br>
 SLURM's PAM module has also been packaged for
 <a href="http://packages.debian.org/src:libpam-slurm">Debian</a> and
 <a href="http://packages.ubuntu.com/src:libpam-slurm">Ubuntu</a>
@@ -176,20 +189,6 @@ The current source for the plugins can be checked out of the subversion
 repository with the following command:<br>
 <i>svn checkout http://slurm-spank-plugins.googlecode.com/svn/trunk/ slurm-plugins</i></li><br>
 
-<li><b>PAM Module (pam_slurm)</b><br>
-Pluggable Authentication Module (PAM) for restricting access to compute nodes
-where SLURM performs resource management. Access to the node is restricted to
-user root and users who have been allocated resources on that node.
-NOTE: pam_slurm is included within the SLURM distribution for version 2.1
-or higher.
-For earlier SLURM versions, pam_slurm is available for download from<br>
-<a href="https://sourceforge.net/projects/slurm/">
-https://sourceforge.net/projects/slurm/</a><br>
-SLURM's PAM module has also been packaged for
-<a href="http://packages.debian.org/src:libpam-slurm">Debian</a> and
-<a href="http://packages.ubuntu.com/src:libpam-slurm">Ubuntu</a>
-(both named <i>libpam-slurm</i>).</li><br>
-
 <li><b>Sqlog</b><br>
 A set of scripts that leverages SLURM's job completion logging facility
 in provide information about what jobs were running at any point in the
@@ -204,6 +203,6 @@ Portable Linux Processor Affinity (PLPA)</a></li>
 
 </ul>
 
-<p style="text-align:center;">Last modified 20 December 2010</p>
+<p style="text-align:center;">Last modified 24 May 2011</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/faq.shtml b/doc/html/faq.shtml
index 1aac9d08e..03ad05bd3 100644
--- a/doc/html/faq.shtml
+++ b/doc/html/faq.shtml
@@ -44,6 +44,8 @@
 the <i>slurm-dev</i> mailing list?</a></li>
 <li><a href="#job_size">Can I change my job's size after it has started
 running?</a></li>
+<li><a href="#mpi_symbols">Why is my MPICH2 or MVAPICH2 job not running with
+SLURM? Why does the DAKOTA program not run with SLURM?</a></li>
 </ol>
 
 <h2>For Administrators</h2>
@@ -86,7 +88,7 @@ running?</a></li>
   core files?</a></li>
 <li><a href="#limit_propagation">Is resource limit propagation
   useful on a homogeneous cluster?</a></li>
-<li<a href="#clock">Do I need to maintain synchronized clocks
+<li><a href="#clock">Do I need to maintain synchronized clocks
   on the cluster?</a></li>
 <li><a href="#cred_invalid">Why are &quot;Invalid job credential&quot; errors
   generated?</a></li>
@@ -300,9 +302,9 @@ You can use the scontrol show command to check if these conditions apply.</p>
 <li>Job: ExcNodeList=NULL</li>
 <li>Job: Contiguous=0</li>
 <li>Job: Features=NULL</li>
-<li>Job: MinProcs, MinMemory, and MinTmpDisk satisfied by all nodes in
+<li>Job: MinCPUs, MinMemory, and MinTmpDisk satisfied by all nodes in
 the partition</li>
-<li>Job: MinProcs or MinNodes not to exceed partition's MaxNodes</li>
+<li>Job: MinCPUs or MinNodes not to exceed partition's MaxNodes</li>
 </ul>
 <p>If the partitions specifications differ from those listed above,
 no jobs in that partition will be scheduled by the backfills scheduler.
@@ -646,9 +648,22 @@ http://groups.google.com/group/slurm-devel</a></p>
 
 <p><a name="job_size"><b>24. Can I change my job's size after it has started
 running?</b></a><br>
-Beginning with Slurm version 2.2 it is possible to decrease a job's size after
-it has started, but it is not currently possible to increase a job's size.
-Use the <i>scontrol</i> command to change a job's size either by specifying
+Support to decrease the size of a running job was added to SLURM version 2.2.
+The ability to increase the size of a running job was added to SLURM version 2.3.
+While the size of a pending job may be changed with few restrictions, several
+significant restrictions apply to changing the size of a running job as noted
+below:
+<ol>
+<li>Support is not available on BlueGene or Cray system due to limitations
+in the software underlying SLURM.</li>
+<li>Job(s) changing size must not be in a suspended state, including jobs
+suspended for gang scheduling. The jobs must be in a state of pending or
+running. We plan to modify the gang scheduling logic in the future to
+concurrently schedule a job to be used for expanding another job and the
+job to be expanded.</li>
+</ol></p>
+
+<p>Use the <i>scontrol</i> command to change a job's size either by specifying
 a new node count (<i>NumNodes=</i>) for the job or identify the specific nodes
 (<i>NodeList=</i>) that you want the job to retain. 
 Any job steps running on the nodes which are reliquished by the job will be
@@ -658,11 +673,13 @@ containing information about the job's environment will no longer be valid and
 should either be removed or altered (e.g. SLURM_NNODES, SLURM_NODELIST and
 SLURM_NPROCS).
 The <i>scontrol</i> command will generate a script that can be executed to
-reset local environment variables
+reset local environment variables.
 You must retain the SLURM_JOBID environment variable in order for the
 <i>srun</i> command to gather information about the job's current state and
-specify the desired node and/or task count in subsequent <i>srun</i> invocations. 
-An example is shown below.
+specify the desired node and/or task count in subsequent <i>srun</i> invocations.
+A new accounting record is generated when a job is resized showing the job to
+have been resubmitted and restarted at the new size.
+An example is shown below.</p>
 <pre>
 #!/bin/bash
 srun my_big_job
@@ -672,6 +689,114 @@ srun -N2 my_small_job
 rm slurm_job_${SLURM_JOBID}_resize.*
 </pre>
 
+<p><b>Increasing a job's size</b><br>
+Directly increasing the size of a running job would adversely affect the
+scheduling of pending jobs.
+For the sake of fairness in job scheduling, expanding a running job requires
+the user to submit a new job, but specify the option
+<i>--dependency=expand:&lt;jobid&gt;</i>.
+This option tells SLURM that the job, when scheduled, can be used to expand
+the specified jobid.
+Other job options would be used to identify the required resources
+(e.g. task count, node count, node features, etc.).
+This new job's time limit will be automatically set to reflect the end time of
+the job being expanded.
+This new job's generic resources specification will be automatically set
+equal to that of the job being merged to. This is due to the current SLURM
+restriction of all nodes associated with a job needing to have the same
+generic resource specification (i.e. a job can not have one GPU on one
+node and two GPUs on another node), although this restriction may be removed
+in the future. This restriction can pose some problems when both jobs can be
+allocated resources on the same node, in which case the generic resources
+allocated to the new job will be released. If the jobs are allocated resources
+on different nodes, the generic resources associated with the resulting job
+allocation after the merge will be consistent as expected.
+Any licenses associated with the new job will be added to those available in
+the job being merged to.
+Note that partition and Quality Of Service (QOS) limits will be applied
+independently to the new job allocation so the expanded job may exceed size
+limits configured for an individual job.</p>
+
+<p>After the new job is allocated resources, merge that job's allocation
+into that of the original job by executing:<br>
+<i>scontrol update jobid=&lt;jobid&gt; NumNodes=0</i><br>
+The <i>jobid</i> above is that of the job to relinquish its resources.
+To provide more control over when the job expansion occurs, the resources are
+not merged into the original job until explicitly requested.
+These resources will be transferred to the original job and the scontrol
+command will generate a script to reset variables in the second
+job's environment to reflect its modified resource allocation (which would
+be no resources).
+One would normally exit this second job at this point, since it has no
+associated resources.
+In order to generate a script to modify the environment variables for the
+expanded job, execute:<br>
+<i>scontrol update jobid=&lt;jobid&gt; NumNodes=ALL</i><br>
+Then execute the script generated.
+Note that this command does not change the original job's size, but only
+generates the script to change its environment variables.
+Until the environment variables are modified (e.g. the job's node count,
+CPU count, hostlist, etc.), any srun command will only consider the resources
+in the original resource allocation.
+Note that the original job may have active job steps at the time of its
+expansion, but they will not be affected by the change.
+An example of the procedure is shown below in which the original job
+allocation waits until the second resource allocation request can be
+satisfied. The job requesting additional resources could also use the sbatch
+command and permit the original job to continue execution at its initial size.
+Note that the development of additional user tools to manage SLURM resource
+allocations is planned in the future to make this process both simpler and
+more flexible.</p>
+
+<pre>
+$ salloc -N4 bash
+salloc: Granted job allocation 65542
+$ srun hostname
+icrm1
+icrm2
+icrm3
+icrm4
+
+$ salloc -N4 --dependency=expand:$SLURM_JOBID bash
+salloc: Granted job allocation 65543
+$ scontrol update jobid=$SLURM_JOBID NumNodes=0
+To reset SLURM environment variables, execute
+  For bash or sh shells:  . ./slurm_job_65543_resize.sh
+  For csh shells:         source ./slurm_job_65543_resize.csh
+$ exit
+exit
+salloc: Relinquishing job allocation 65543
+
+$ scontrol update jobid=$SLURM_JOBID NumNodes=ALL
+To reset SLURM environment variables, execute
+  For bash or sh shells:  . ./slurm_job_65542_resize.sh
+  For csh shells:         source ./slurm_job_65542_resize.csh
+$ . ./slurm_job_${SLURM_JOBID}_resize.sh
+
+$ srun hostname
+icrm1
+icrm2
+icrm3
+icrm4
+icrm5
+icrm6
+icrm7
+icrm8
+$ exit
+exit
+salloc: Relinquishing job allocation 65542
+</pre>
+
+<p><a name="mpi_symbols"><b>25. Why is my MPICH2 or MVAPICH2 job not running with
+SLURM? Why does the DAKOTA program not run with SLURM?</b></a><br>
+The SLURM library used to support MPICH2 or MVAPICH2 references a variety of
+symbols. If those symbols resolve to functions or variables in your program
+rather than the appropriate library, the application will fail. In the case of
+<a href="http://dakota.sandia.gov">DAKOTA</a>, it contains a function named
+<b>regcomp</b>, which will get used rather than the POSIX regex functions.
+Rename DAKOTA's function and references from regcomp to something else to make
+it work properly.</p>
+
 <p class="footer"><a href="#top">top</a></p>
 
 
@@ -953,7 +1078,7 @@ $ squeue
 resources than physically exist on the node?</b></a><br>
 Yes in SLURM version 1.2 or higher.
 In the <i>slurm.conf</i> file, set <i>FastSchedule=2</i> and specify
-any desired node resource specifications (<i>Procs</i>, <i>Sockets</i>,
+any desired node resource specifications (<i>CPUs</i>, <i>Sockets</i>,
 <i>CoresPerSocket</i>, <i>ThreadsPerCore</i>, and/or <i>TmpDisk</i>).
 SLURM will use the resource specification for each node that is
 given in <i>slurm.conf</i> and will not check these specifications
@@ -1269,7 +1394,7 @@ address instead of the correct address and make it so the
 communication doesn't work.  Solution is to either remove this line or
 set a different nodeaddr that is known by your other nodes.</p>
 
-<p><a name="stop_sched"><b>38. How can I stop SLURM from scheduling jobs?</b></a></br>
+<p><a name="stop_sched"><b>39. How can I stop SLURM from scheduling jobs?</b></a></br>
 You can stop SLURM from scheduling jobs on a per partition basis by setting
 that partition's state to DOWN. Set its state UP to resume scheduling.
 For example:
@@ -1278,7 +1403,7 @@ $ scontrol update PartitionName=foo State=DOWN
 $ scontrol update PartitionName=bar State=UP
 </pre></p>
 
-<p><a name="scontrol_multi_jobs"><b>39. Can I update multiple jobs with a
+<p><a name="scontrol_multi_jobs"><b>40. Can I update multiple jobs with a
 single <i>scontrol</i> command?</b></a></br>
 No, but you can probably use <i>squeue</i> to build the script taking
 advantage of its filtering and formatting options. For example:
@@ -1286,7 +1411,7 @@ advantage of its filtering and formatting options. For example:
 $ squeue -tpd -h -o "scontrol update jobid=%i priority=1000" >my.script
 </pre></p>
 
-<p><a name="amazon_ec2"><b>40. Can SLURM be used to run jobs on 
+<p><a name="amazon_ec2"><b>41. Can SLURM be used to run jobs on 
 Amazon's EC2?</b></a></br>
 <p>Yes, here is a description of use SLURM use with 
 <a href="http://aws.amazon.com/ec2/">Amazon's EC2</a> courtesy of 
@@ -1310,7 +1435,7 @@ which I then copy over the /usr/local on the first instance and NFS export to
 all other instances.  This way I have persistent home directories and a very
 simple first-login script that configures the virtual cluster for me.</p>
 
-<p><a name="core_dump"><b>41. If a SLURM daemon core dumps, where can I find the
+<p><a name="core_dump"><b>42. If a SLURM daemon core dumps, where can I find the
 core file?</b></a></br>
 <p>For <i>slurmctld</i> the core file will be in the same directory as its
 log files (<i>SlurmctldLogFile</i>) iif configured using an fully qualified
@@ -1326,7 +1451,7 @@ Otherwise it will be found in directory used for saving state
 occurs. It will either be in spawned job's working directory on the same 
 location as that described above for the <i>slurmd</i> daemon.</p>
 
-<p><a name="totalview"><b>42. How can TotalView be configured to operate with
+<p><a name="totalview"><b>43. How can TotalView be configured to operate with
 SLURM?</b></a></br>
 <p>The following lines should also be added to the global <i>.tvdrc</i> file
 for TotalView to operate with SLURM:
@@ -1344,6 +1469,6 @@ dset TV::parallel_configs {
 </pre></p>
 <p class="footer"><a href="#top">top</a></p>
 
-<p style="text-align:center;">Last modified 6 January 2011</p>
+<p style="text-align:center;">Last modified 4 September 2011</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/footer.txt b/doc/html/footer.txt
index bee6ae12e..35ec10de6 100644
--- a/doc/html/footer.txt
+++ b/doc/html/footer.txt
@@ -1,32 +1,15 @@
 </div> <!-- closes "content" -->
 
 <div id="footer">
-<div id="left">&nbsp;&nbsp;<span class="ucrlnum">LLNL-WEB-411573 |</span> <a href="https://www.llnl.gov/disclaimer.html" target="_blank" class="privacy">Privacy &amp; Legal Notice</a></div>
-<div id="right"><span class="ucrlnum">18 July 2008&nbsp;&nbsp;</span></div>
+<div id="left">&nbsp;&nbsp;<a href="disclaimer.html" target="_blank" class="privacy">Legal Notices</a></div>
+<div id="right"><span class="ucrlnum">27 June 2011&nbsp;&nbsp;</span></div>
 </div>
 
 <div id="footer2">
-<div id="left2"><img src="sponsors.gif" width="129" height="30" border="0" usemap="#Map2"></div>
-<div id="center2"><a href="https://www.llnl.gov/" target="_blank" class="footer">Lawrence Livermore National Laboratory</a><br />
-<span class="smalltextblue">7000 East Avenue &#8226; Livermore, CA 94550</span></div>
-<div id="right2"><span class="smalltextblue">Operated by
-Lawrence Livermore National Security, LLC, for the</span>
-<a href="http://www.energy.gov/" target="_blank" class="footer">
-Department of Energy's</a><br />
-<a href="http://www.nnsa.doe.gov/" target="_blank" class="footer">
-National Nuclear Security Administration</a></div>
 <div style="clear:both;"></div>
 </div>
 
 </div> <!-- closes "container" -->
 
-<map name="Map">
-<area shape="rect" coords="571,1,799,15" href="https://www.llnl.gov/">
-</map>
-<map name="Map2">
-<area shape="rect" coords="1,1,92,30" href="http://www.nnsa.doe.gov/" target="_blank" alt="NNSA logo links to the NNSA Web site">
-<area shape="rect" coords="98,1,132,30" href="http://www.energy.gov/" target="_blank" alt="Department of Energy logo links to the DOE Web site">
-</map>
-
 </body>
 </html>
diff --git a/doc/html/gang_scheduling.shtml b/doc/html/gang_scheduling.shtml
index 8a7431454..b52a9590b 100644
--- a/doc/html/gang_scheduling.shtml
+++ b/doc/html/gang_scheduling.shtml
@@ -98,6 +98,9 @@ how many jobs can share a resource (FORCE[:max_share]). By default the
 max_share value is 4. To allow up to 6 jobs from this partition to be
 allocated to a common resource, set <I>Shared=FORCE:6</I>. To only let 2 jobs
 timeslice on the same resources, set <I>Shared=FORCE:2</I>.
+NOTE: Gang scheduling is performed independently for each partition, so
+configuring partitions with overlapping nodes and gang scheduling is generally
+not recommended.
 </LI>
 </UL>
 <P>
@@ -522,6 +525,6 @@ For now this idea could be experimented with by disabling memory support in
 the selector and submitting appropriately sized jobs.
 </P>
 
-<p style="text-align:center;">Last modified 11 August 2009</p>
+<p style="text-align:center;">Last modified 24 June 2011</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/gres.shtml b/doc/html/gres.shtml
index b2dc9da88..40555e432 100644
--- a/doc/html/gres.shtml
+++ b/doc/html/gres.shtml
@@ -1,6 +1,6 @@
 <!--#include virtual="header.txt"-->
 
-<h1>Generic Resource (Gres) Scheduling</h1>
+<h1>Generic Resource (GRES) Scheduling</h1>
 
 <P>Beginning in SLURM version 2.2 generic resource (Gres) scheduling is
 supported through a flexible plugin mechanism. Support is initially provided
@@ -100,11 +100,14 @@ for use by other jobs.</P>
 
 <P>Job steps can be allocated generic resources from those allocated to the
 job using the <I>--gres</I> option with the <I>srun</I> command as described
-above. By default a job step will have access to all generic resources
-allocated to the job, but lower values may be specified if running more than
-one job step concurrently. The job step will be allocated specific generic
-resources and those resources will not be available to other job steps. A
-simple example is shown below.</P>
+above. By default, a job step will be allocated none of the generic resources
+allocated to the job, but must explicitly request desired generic resources.
+This design choice was based upon a scenario where each job executes many
+job steps. If job steps were granted access to all generic resources by
+default, some job steps would need to explicitly specify zero generic resource
+counts, which we considered more confusing. The job step can be allocated
+specific generic resources and those resources will not be available to other
+job steps. A simple example is shown below.</P>
 
 <PRE>
 #!/bin/bash
@@ -123,8 +126,13 @@ wait
 <h2>GPU Management</h2>
 
 <P>In the case of SLURM's GRES plugin for GPUs, the environment variable
-CUDA_VISIBLE_DEVICES is set for each job steps to determine which GPUs are
-available for its use. CUDA version 3.1 (or higher) uses this environment
+CUDA_VISIBLE_DEVICES is set for each job step to determine which GPUs are
+available for its use on each node. This environment variable is only set
+when tasks are launched on a specific compute node (no global environment
+variable is set for the <i>salloc</i> command and the environment variable set
+for the <i>sbatch</i> command only reflects the GPUs allocated to that job
+on that node, node zero of the allocation).
+CUDA version 3.1 (or higher) uses this environment
 variable in order to run multiple jobs or job steps on a node with GPUs
 and insure that the resources assigned to each are unique. In the example
 above, the allocated node may have four or more graphics devices. In that
@@ -140,13 +148,7 @@ JobStep=1234.2 CUDA_VISIBLE_DEVICES=3
 <P>NOTE: Be sure to specify the <I>File</I> parameters in the <I>gres.conf</I>
 file and insure they are in the increasing numeric order.</P>
 <!-------------------------------------------------------------------------->
-<h2>Future</h2>
 
-<P>Our plans for the near future call for integrating SLURM GRES support with
-Linux <I>cgroups</I> in order to remove access to devices not allocated to
-a job or job step.</P>
-<!-------------------------------------------------------------------------->
-
-<p style="text-align: center;">Last modified 16 September 2010</p>
+<p style="text-align: center;">Last modified 1 August 2011</p>
 
 </body></html>
diff --git a/doc/html/gres_design.shtml b/doc/html/gres_design.shtml
new file mode 100644
index 000000000..51379d9c1
--- /dev/null
+++ b/doc/html/gres_design.shtml
@@ -0,0 +1,126 @@
+<!--#include virtual="header.txt"-->
+
+<h1><a name="top">Generic Resource (GRES) Design Guide</a></h1>
+
+<h2>Overview</h2>
+
+<p>Generic Resources (GRES) are resources associated with a specific node
+that can be allocated to jobs and steps. The most obvious example of
+GRES use would be GPUs. GRES are identified by a specific name and use an
+optional plugin to provide device-specific support. This document is meant
+to provide details about SLURM's implementation of GRES support including the
+relevant data structures. For an overview of GRES configuration and use, see
+<a href="gres.html">Generic Resource (GRES) Scheduling</a>. For details about
+the APIs provided by GRES plugins, see <a href="gres_plugins.html">
+SLURM Generic Resource (GRES) Plugin API</a>.</p>
+
+<h2>Data Structures</h2>
+
+<p>GRES are associated with SLURM nodes, jobs and job steps. You will find
+a string variable named <b>gres</b> in those data structures which
+is used to store the GRES configured on a node or required by a job or step
+(e.g. "gpu:2,nic:1"). This string is also visible to various SLURM commands
+viewing information about those data structures (e.g. "scontrol show job").
+There is a second variable associated with each of those data structures on
+the <b>slurmctld</b> daemon
+named <b>gres_list</b> that is intended for program use only. Each element
+in the list <b>gres_list</b> provides information about a specific GRES type
+(e.g. one data structure for "gpu" and a second structure with information
+about "nic"). The structures on <b>gres_list</b> contain an ID number
+(which is faster to compare than a string) plus a pointer to another structure.
+This second structure differs somewhat for nodes, jobs, and steps (see
+<b>gres_node_state_t</b>, <b>gres_job_state_t</b>, and <b>gres_step_state_t</b> in
+<b>src/common/gres.h</b> for details), but contains various counters and bitmaps.
+Since these data structures differ for various entity types, the functions
+used to work with them are also different. If no GRES are associated with a
+node, job or step, then both <b>gres</b> and <b>gres_list</b> will be NULL.</p>
+
+<pre>
+------------------------
+|   Job Information    |
+|----------------------|
+| gres = "gpu:2,nic:1" |
+|      gres_list       |
+------------------------
+           |
+           +---------------------------------
+           |                                |
+   ------------------               ------------------
+   | List Struct    |               | List Struct    |
+   |----------------|               |----------------|
+   | id = 123 (gpu) |               | id = 124 (nic) |
+   |   gres_data    |               |   gres_data    |
+   ------------------               ------------------
+           |                                |
+           |                              ....
+           |
+           |
+------------------------------------------------
+| gres_job_state_t                             |
+|----------------------------------------------|
+| gres_count = 2                               |
+| node_count = 3                               |
+| gres_bitmap(by node) = 0,1;                  |
+|                        2,3;                  |
+|                        0,2                   |
+| gres_count_allocated_to_steps(by node) = 1;  |
+|                                          1;  |
+|                                          1   |
+| gres_bitmap_allocated_to_steps(by node) = 0; |
+|                                           2; |
+|                                           0  |
+------------------------------------------------
+</pre>
+
+<h2>Mode of Operation</h2>
+
+<p>After the slurmd daemon reads the configuration files, it calls the function
+<b>node_config_load()</b> for each configured plugin. This can be used to
+validate the configuration, for example validate that the appropriate devices
+actually exist. If no GRES plugin exists for that resource type, the information
+in the configuration file is assumed correct. Each node's GRES information is
+reported by slurmd to the slurmctld daemon at node registration time.</p>
+
+<p>The slurmctld daemon maintains GRES information in the data structures
+described above for each node, including the number of configured and allocated
+resources. If those resources are identified with a specific device file
+rather than just a count, bitmaps are used to record which specific resources have
+been allocated to jobs.</p>
+
+<p>The slurmctld daemon's GRES information about jobs includes several arrays
+equal in length to the number of allocated nodes. The index into each of the
+arrays is the sequence number of the node in that job's allocation (e.g.
+the first element is node zero of the <b>job</b> allocation). The job step's
+GRES information is similar to that of a job including the design where the
+index into arrays is based upon the job's allocation. This means when a job
+step is allocated or terminates, the required bitmap operations are very
+easy to perform without computing different index values for job and step
+data structures.</p>
+
+<p>The most complex operation on the GRES data structures happens when a job
+changes size (has nodes added or removed). In that case, the array indexed by
+node index must be rebuilt, with records shifting as appropriate. Note that
+the current software is not compatible with having different GRES counts by
+node (a job can not have 2 GPUs on one node and 1 GPU on a second node),
+although that might be addressed at a later time.</p>
+
+<p>When a job or step is initiated, its credential includes allocated GRES information.
+This can be used by the slurmd daemon to associate those resources with that
+job. Our plan is to use the Linux cgroups logic to bind a job and/or its
+tasks with specific GRES devices, however that logic does not currently exist.
+What does exist today is a pair of plugin APIs, <b>job_set_env()</b> and
+<b>step_set_env()</b> which can be used to set environment variables for the
+program directing it to GRES which have been allocated for its use (the CUDA
+libraries base their GPU selection upon environment variables, so this logic
+should work for CUDA today if users do not attempt to manipulate the
+environment variables reserved for CUDA use).</p>
+
+<p>If you want to see how GRES logic is allocating resources, configure
+<b>DebugFlags=GRES</b> to log GRES state changes. Note the resulting output can
+be quite verbose, especially for larger clusters.</p>
+
+<p class="footer"><a href="#top">top</a></p>
+
+<p style="text-align:center;">Last modified 18 May 2011</p>
+
+<!--#include virtual="footer.txt"-->
diff --git a/doc/html/header.txt b/doc/html/header.txt
index 64cd41e80..98d497347 100644
--- a/doc/html/header.txt
+++ b/doc/html/header.txt
@@ -8,18 +8,7 @@
 <meta http-equiv="Pragma" content="no-cache">
 <meta http-equiv="keywords" content="Simple Linux Utility for Resource Management, SLURM, resource management,
 Linux clusters, high-performance computing, Livermore Computing">
-<meta name="LLNLRandR" content="LLNL-WEB-411573">
-<meta name="LLNLRandRdate" content="26 March 2009">
-<meta name="distribution" content="global">
 <meta name="description" content="Simple Linux Utility for Resource Management">
-<meta name="copyright"
-content="This document is copyrighted U.S.
-Department of Energy under Contract DE-AC52-07NA27344">
-<meta name="Author" content="Morris Jette">
-<meta name="email" content="jette1@llnl.gov">
-<meta name="Classification"
-content="DOE:DOE Web sites via organizational
-structure:Laboratories and Other Field Facilities">
 <title>Simple Linux Utility for Resource Management</title>
 <link href="linuxstyles.css" rel="stylesheet" type="text/css">
 <link href="slurmstyles.css" rel="stylesheet" type="text/css">
@@ -38,6 +27,7 @@ structure:Laboratories and Other Field Facilities">
 <h2>About</h2>
 <ul>
 	  <li><a href="overview.shtml" class="nav">Overview</a></li>
+	  <li><a href="meetings.shtml" class="nav">Meetings</a></li>
 	  <li><a href="news.shtml" class="nav">What's New</a></li>
 	  <li><a href="publications.shtml" class="nav">Publications</a></li>
 	  <li><a href="testimonials.shtml" class="nav">Testimonials</a></li>
diff --git a/doc/html/help.shtml b/doc/html/help.shtml
index 7b1cbfecd..431174fb1 100644
--- a/doc/html/help.shtml
+++ b/doc/html/help.shtml
@@ -17,12 +17,9 @@ their support staff.</li>
 </ol>
 
 <h1>Commercial Support</h1>
-<p>The following companies are known to provide commercial support for SLURM:</p>
-<ul>
-<li><a href="http://www.sched-md.com">SchedMD</a></li>
-<li><a href="http://www.hp.com">HP</a></li>
-<li><a href="http://www.bull.com">Bull</a></li>
-</ul>
-<p style="text-align:center;">Last modified 10 May 2010</p>
+<p>Several companies provide commercial support for SLURM including
+<a href="http://www.schedmd.com">SchedMD</a>.</p>
+
+<p style="text-align:center;">Last modified 30 June 2011</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/job_exit_code.shtml b/doc/html/job_exit_code.shtml
index 6e5028e64..5f7ed3d26 100644
--- a/doc/html/job_exit_code.shtml
+++ b/doc/html/job_exit_code.shtml
@@ -49,7 +49,7 @@ ExitCode field whose format mirrors the output of <b>scontrol</b> and
 <b>sview</b> described above.</p>
 
 
-<h1>Derived Exit Code and String</h1>
+<h1>Derived Exit Code and Comment String</h1>
 
 <p>After reading the above description of a job's exit code, one can
 imagine a scenario where a central task of a batch job fails but the
@@ -65,14 +65,16 @@ and sent to the database when the accounting_storage plugin is
 enabled.</p>
 
 <p>In addition to the derived exit code, the job record in the SLURM
-db contains an additional field known as the "derived exit string".
-This is initialized to NULL and can only be changed by the user.  A
-new option has been added to the <b>sacctmgr</b> command to provide
-the user the means to modify these two new fields of the job record.
-No other modification to the job record is allowed.  For those who
-prefer a simpler command specifically designed to view and modify the
-derived exit code/string, the <b>sjobexitmod</b> wrapper has been
-created (see below).</p>
+database contains a comment string.  This is initialized to the job's
+comment string (when AccountingStoreJobComment parameter in the
+slurm.conf is set) and can only be changed by the user.</p>
+
+<p>A new option has been added to the <b>sacctmgr</b> command to
+provide the user the means to modify these two fields of the job
+record.  No other modification to the job record is allowed.  For
+those who prefer a simpler command specifically designed to view and
+modify the derived exit code and comment string, the
+<b>sjobexitmod</b> wrapper has been created (see below).</p>
 
 <p>The user now has the means to annotate a job's exit code after it
 completes and provide a description of what failed.  This includes the
@@ -88,7 +90,7 @@ two new derived exit fields of the SLURM db's job record.
 
 <PRE>
 > sjobexitmod -l 123
-       JobID    Account   NNodes        NodeList      State ExitCode DerivedExitCode DerivedExitStr
+       JobID    Account   NNodes        NodeList      State ExitCode DerivedExitCode        Comment
 ------------ ---------- -------- --------------- ---------- -------- --------------- --------------
 123                  lc        1            tux0  COMPLETED      0:0             0:0
 </PRE>
@@ -101,7 +103,7 @@ If a change is desired, <b>sjobexitmod</b> can modify the derived fields:
  Modification of job 123 was successful.
 
 > sjobexitmod -l 123
-       JobID    Account   NNodes        NodeList      State ExitCode DerivedExitCode DerivedExitStr
+       JobID    Account   NNodes        NodeList      State ExitCode DerivedExitCode        Comment
 ------------ ---------- -------- --------------- ---------- -------- --------------- --------------
 123                  lc        1            tux0  COMPLETED      0:0            49:0  out of memory
 </PRE>
@@ -110,8 +112,8 @@ If a change is desired, <b>sjobexitmod</b> can modify the derived fields:
 exit fields:</p>
 
 <PRE>
-> sacct -X -j 123 -o JobID,NNodes,State,ExitCode,DerivedExitcode,DerivedExitStr
-       JobID   NNodes      State ExitCode DerivedExitCode DerivedExitStr
+> sacct -X -j 123 -o JobID,NNodes,State,ExitCode,DerivedExitcode,Comment
+       JobID   NNodes      State ExitCode DerivedExitCode        Comment
 ------------ -------- ---------- -------- --------------- --------------
 123                 1  COMPLETED      0:0            49:0  out of memory
 </PRE>
diff --git a/doc/html/job_launch.shtml b/doc/html/job_launch.shtml
new file mode 100644
index 000000000..df7edbf81
--- /dev/null
+++ b/doc/html/job_launch.shtml
@@ -0,0 +1,140 @@
+<!--#include virtual="header.txt"-->
+
+<h1><a name="top">Job Launch Design Guide</a></h1>
+
+<h2>Overview</h2>
+
+<p>This guide describes at a high level the processes which occur in order
+to initiate a job including the daemons and plugins involved in the process.
+It describes the process of job allocation, step allocation, task launch and
+job termination. The functionality of tens of thousands of lines of code
+has been distilled here to a couple of pages of text, so much detail is
+missing.</p>
+
+<h2>Job Allocation</h2>
+
+<p>The first step of the process is to create a job allocation, which is
+a claim on compute resources. A job allocation can be created using the
+<b>salloc</b>, <b>sbatch</b> or <b>srun</b> command. The <b>salloc</b> and
+<b>sbatch</b> commands create resource allocations while the <b>srun</b>
+command will create a resource allocation (if not already running within one)
+plus launch tasks. Each of these commands will fill in a data structure
+identifying the specifications of the job allocation requirement (e.g. node
+count, task count, etc.) based upon command line options and environment
+variables and send the RPC to the <b>slurmctld</b> daemon. If the new job
+request is the highest priority, the <b>slurmctld</b> daemon will attempt
+to select resources for it immediately, otherwise it will validate that the job
+request can be satisfied at some time and queue the request. In either case
+the request will receive a response almost immediately containing one of the
+following:</p>
+<ul>
+<li>A job ID and the resource allocation specification (nodes, cpus, etc.)</li>
+<li>A job ID and notification of the job being in a queued state OR</li>
+<li>An error code</li>
+</ul> 
+
+<p>The process of selecting resources for a job request involves multiple steps,
+some of which involve plugins. The process is as follows:</p>
+<ol>
+<li>Call <b>job_submit</b> plugins to modify the request as appropriate</li>
+<li>Validate that the options are valid for this user (e.g. valid partition
+name, valid limits, etc.)</li>
+<li>Determine if this job is the highest priority runnable job, if so then
+really try to allocate resources for it now, otherwise only validate that it
+could run if no other jobs existed</li>
+<li>Determine which nodes could be used for the job. If the feature
+specification uses an exclusive OR option, then multiple iterations of the
+selection process below will be required with disjoint sets of nodes</li>
+<li>Call the <b>select</b> plugin to select the best resources for the request</li>
+<li>The <b>select</b> plugin will consider network topology and the topology within
+a node (e.g. sockets, cores, and threads) to select the best resources for the
+job</li>
+<li>If the job can not be initiated using available resources and preemption
+support is configured, the <b>select</b> plugin will also determine if the job
+can be initiated after preempting lower priority jobs. If so then initiate
+preemption as needed to start the job</li>
+</ol>
+
+<h2>Step Allocation</h2>
+
+<p>The <b>srun</b> command is always used for job step creation. It fills in
+a job step request RPC using information from the command line and environment
+variables then sends that request to the <b>slurmctld</b> daemon. It is
+important to note that many of the <b>srun</b> options are intended for job
+allocation and are not supported by the job step request RPC (for example the
+socket, core and thread information is not supported). If a job step uses
+all of the resources allocated to the job then the lack of support for some
+options is not important. If one wants to execute multiple job steps using
+various subsets of resources allocated to the job, this shortcoming could
+prove problematic. It is also worth noting that the logic used to select
+resources for a job step is relatively simple and entirely contained within
+the <b>slurmctld</b> daemon code (the <b>select</b> plugin is not used for job
+steps). If the request can not be immediately satisfied due to a request for
+exclusive access to resources, the appropriate error message will be sent and
+the <b>srun</b> command will retry the request on a periodic basis.
+(NOTE: It would be desirable to queue the job step requests to support job step 
+dependencies and better performance in the initiation of job steps, but that 
+is not currently supported.)
+If the request can be satisfied, the response contains a digitally signed
+credential (by the <b>crypto</b> plugin) identifying the resources to be used.</p>
+
+<h2>Task Launch</h2>
+
+<p>The <b>srun</b> command builds a task launch request data structure
+including the credential, executable name, file names, etc. and sends it to
+the <b>slurmd</b> daemon on node zero of the job step allocation. The
+<b>slurmd</b> daemon validates the signature and forwards the request to the
+<b>slurmd</b> daemons on other nodes to launch tasks for that job step. The
+degree of fanout in this message forwarding is configurable using the
+<b>TreeWidth</b> parameter. Each <b>slurmd</b> daemon tests that the job has
+not been cancelled since the credential was issued (due to a possible race 
+condition) and spawns a <b>slurmstepd</b> program to manage the job step.
+Note that the <b>slurmctld</b> daemon is not directly involved in task
+launch in order to minimize the overhead on this critical resource.</p>
+
+<p>Each <b>slurmstepd</b> program executes a single job step.
+Besides the functions listed below, the <b>slurmstepd</b> program also
+executes several SPANK plugin functions at various times.</p>
+<ol>
+<li>Performs MPI setup (using the appropriate plugin)</li>
+<li>Calls the <b>switch</b> plugin to perform any needed network configuration</li>
+<li>Creates a container for the job step using a <b>proctrack</b> plugin</li>
+<li>Change user ID to that of the user</li>
+<li>Configures I/O for the tasks (either using files or a socket connection back
+to the <b>srun</b> command)</li>
+<li>Sets up environment variables for the tasks including many task-specific
+environment variables</li>
+<li>Fork/exec the tasks</li>
+</ol>
+
+<h2>Job Step Termination</h2>
+
+<p>There are several ways in which a job step or job can terminate, each with
+slight variation in the logic executed. The simplest case is if the tasks run
+to completion. The <b>srun</b> will note the termination of output from the
+tasks and notify the <b>slurmctld</b> daemon that the job step has completed.
+<b>slurmctld</b> will simply log the job step termination. The job step can
+also be explicitly cancelled by a user, reach the end of its time limit, etc.
+and those follow a sequence of steps very similar to that for job termination,
+which is described below.</p>
+
+<h2>Job Termination</h2>
+
+<p>Job termination can either be user initiated (e.g. <b>scancel</b> command) or system
+initiated (e.g. time limit reached). The termination ultimately requires
+the <b>slurmctld</b> daemon to notify the <b>slurmd</b> daemons on allocated
+nodes that the job is to be ended. The <b>slurmd</b> daemon does the following:
+<ol>
+<li>Send a SIGCONT and SIGTERM signal to any user tasks</li> 
+<li>Wait <b>KillWait</b> seconds if there are any user tasks</li>
+<li>Send a SIGKILL signal to any user tasks</li>
+<li>Wait for all tasks to complete</li>
+<li>Execute any <b>Epilog</b> program</li>
+<li>Send an epilog_complete RPC to the <b>slurmctld</b> daemon</li>
+</ol>
+
+<p class="footer"><a href="#top">top</a></p>
+
+<p style="text-align:center;">Last modified 18 May 2011</p>
+
+<!--#include virtual="footer.txt"-->
diff --git a/doc/html/job_submit_plugins.shtml b/doc/html/job_submit_plugins.shtml
index 65e07877f..80f99812a 100644
--- a/doc/html/job_submit_plugins.shtml
+++ b/doc/html/job_submit_plugins.shtml
@@ -23,12 +23,17 @@ The minor type can be any suitable name
 for the type of accounting package.
 We include samples in the SLURM distribution for
 <ul>
-<li><b>defaults</b>&#151;Set default values for job submission or modify requests.
-<li><b>logging</b>&#151;Log select job submission and modification parameters.
+<li><b>defaults</b>&#151;Set default values for job submission or modify
+requests.</li>
+<li><b>logging</b>&#151;Log select job submission and modification
+parameters.</li>
 <li><b>lua</b>&#151;Interface to <a href="http://www.lua.org">Lua</a> scripts
-implementing these functions (actually a slight variation of them).
+implementing these functions (actually a slight variation of them). Sample Lua
+scripts can be found with the SLURM distribution in the directory
+<i>contribs/lua</i>. The default installation location of the Lua scripts is
+the same location as the SLURM configuration file, <i>slurm.conf</i>.</li>
 <li><b>partition</b>&#151;Sets a job's default partition based upon job
-submission parameters and available partitions.
+submission parameters and available partitions.</li>
 </ul>
 <p>SLURM can be configured to use multiple job_submit plugins if desired.
 
@@ -79,7 +84,9 @@ be modified.<br>
 <h2>Lua Functions</h2>
 <p>The Lua functions differ slightly from those implemented in C for
 better ease of use. Sample Lua scripts can be found with the SLURM distribution
-in the directory <i>contribs/lua</i>.</p>
+in the directory <i>contribs/lua</i>. The default installation location of
+the Lua scripts is the same location as the SLURM configuration file,
+<i>slurm.conf</i>.</p>
 
 <p class="commandline">
 int job_submit(struct job_descriptor *job_desc, List part_list)
@@ -128,6 +135,6 @@ appropriate error message printed for that errno.
 releases of SLURM may revise this API.
 <p class="footer"><a href="#top">top</a>
 
-<p style="text-align:center;">Last modified 3 December 2010</p>
+<p style="text-align:center;">Last modified 29 April 2011</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/jobacct_gatherplugins.shtml b/doc/html/jobacct_gatherplugins.shtml
index ef31f4900..e2f619b7e 100644
--- a/doc/html/jobacct_gatherplugins.shtml
+++ b/doc/html/jobacct_gatherplugins.shtml
@@ -163,7 +163,7 @@ polling thread.
 <span class="commandline">SLURM_SUCCESS</span> on success, or<br>
 <span class="commandline">SLURM_ERROR</span> on failure.
 
-<p class="commandline">void jobacct_gather_p_suspend_poll()
+<p class="commandline">void jobacct_gather_p_suspend_poll(void)
 <p style="margin-left:.2in"><b>Description</b>:<br>
 jobacct_gather_p_suspend_poll() is called when the process is suspended.
 This causes the polling thread to halt until the process is resumed.
@@ -172,7 +172,7 @@ This causes the polling thread to halt until the process is resumed.
 <p style="margin-left:.2in"><b>Returns</b>: <br>
 <span class="commandline">none</span>
 
-<p class="commandline">void jobacct_gather_p_resume_poll()
+<p class="commandline">void jobacct_gather_p_resume_poll(void)
 <p style="margin-left:.2in"><b>Description</b>:<br>
 jobacct_gather_p_resume_poll() is called when the process is resumed.
 This causes the polling thread to resume operation.
@@ -181,14 +181,14 @@ This causes the polling thread to resume operation.
 <p style="margin-left:.2in"><b>Returns</b>:<br>
 <span class="commandline">none</span>
 
-<p class="commandline">int jobacct_gather_p_set_proctrack_container_id(uint32_t id)
+<p class="commandline">int jobacct_gather_p_set_proctrack_container_id(uint64_t cont_id)
 <p style="margin-left:.2in"><b>Description</b>:<br>
 jobacct_gather_p_set_proctrack_container_id() is called after the
 proctrack container id is known at the start of the slurmstepd,
 if using a proctrack plugin to track processes this will set the head
 of the process tree in the plugin.
 <p style="margin-left:.2in"><b>Arguments</b>: <br>
-<span class="commandline">id</span> (input) procktrack container id.
+<span class="commandline">cont_id</span> (input) proctrack container id.
 <p style="margin-left:.2in"><b>Returns</b>: <br>
 <span class="commandline">SLURM_SUCCESS</span> on success, or<br>
 <span class="commandline">SLURM_ERROR</span> on failure.
@@ -257,6 +257,6 @@ ability to implement a particular API version using the mechanism outlined
 for SLURM plugins.
 <p class="footer"><a href="#top">top</a>
 
-<p style="text-align:center;">Last modified 11 September 2007</p>
+<p style="text-align:center;">Last modified 15 April 2011</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/man_index.shtml b/doc/html/man_index.shtml
new file mode 100644
index 000000000..7b98c1f2f
--- /dev/null
+++ b/doc/html/man_index.shtml
@@ -0,0 +1,43 @@
+<!--#include virtual="header.txt"-->
+
+<h1>Man Pages</h1>
+
+<table border="1">
+<tr><td><a href="sacct.html">sacct</a></td><td>displays accounting data for all jobs and job steps in the SLURM job accounting log or SLURM database</td></tr>
+<tr><td><a href="sacctmgr.html">sacctmgr</a></td><td>Used to view and modify Slurm account information.</td></tr>
+<tr><td><a href="salloc.html">salloc</a></td><td>Obtain a SLURM job allocation (a set of nodes), execute a command, and then release the allocation when the command is finished.</td></tr>
+<tr><td><a href="sattach.html">sattach</a></td><td>Attach to a SLURM job step.</td></tr>
+<tr><td><a href="sbatch.html">sbatch</a></td><td>Submit a batch script to SLURM.</td></tr>
+<tr><td><a href="sbcast.html">sbcast</a></td><td>transmit a file to the nodes allocated to a SLURM job.</td></tr>
+<tr><td><a href="scancel.html">scancel</a></td><td>Used to signal jobs or job steps that are under the control of Slurm.</td></tr>
+<tr><td><a href="scontrol.html">scontrol</a></td><td>Used to view and modify Slurm configuration and state.</td></tr>
+<tr><td><a href="sinfo.html">sinfo</a></td><td>view information about SLURM nodes and partitions.</td></tr>
+<tr><td><a href="slurm.html">slurm</a></td><td>SLURM system overview.</td></tr>
+<tr><td><a href="smap.html">smap</a></td><td>graphically view information about SLURM jobs, partitions, and set configurations parameters.</td></tr>
+<tr><td><a href="sprio.html">sprio</a></td><td>view the factors that comprise a job's scheduling priority</td></tr>
+<tr><td><a href="squeue.html">squeue</a></td><td>view information about jobs located in the SLURM scheduling queue.</td></tr>
+<tr><td><a href="sreport.html">sreport</a></td><td>Generate reports from the slurm accounting data.</td></tr>
+<tr><td><a href="srun_cr.html">srun_cr</a></td><td>run parallel jobs with checkpoint/restart support</td></tr>
+<tr><td><a href="srun.html">srun</a></td><td>Run parallel jobs</td></tr>
+<tr><td><a href="sshare.html">sshare</a></td><td>Tool for listing the shares of associations to a cluster.</td></tr>
+<tr><td><a href="sstat.html">sstat</a></td><td>Display various status information of a running job/step.</td></tr>
+<tr><td><a href="strigger.html">strigger</a></td><td>Used to set, get or clear Slurm trigger information.</td></tr>
+<tr><td><a href="sview.html">sview</a></td><td>graphical user interface to view and modify SLURM state.</td></tr>
+<tr><td><a href="bluegene.conf.html">bluegene.conf</a></td><td>Slurm configuration file for BlueGene systems</td></tr>
+<tr><td><a href="cgroup.conf.html">cgroup.conf</a></td><td>Slurm configuration file for the cgroup support</td></tr>
+<tr><td><a href="gres.conf.html">gres.conf</a></td><td>Slurm configuration file for generic resource management.</td></tr>
+<tr><td><a href="slurm.conf.html">slurm.conf</a></td><td>Slurm configuration file</td></tr>
+<tr><td><a href="slurmdbd.conf.html">slurmdbd.conf</a></td><td>Slurm Database Daemon (SlurmDBD) configuration file</td></tr>
+<tr><td><a href="topology.conf.html">topology.conf</a></td><td>Slurm configuration file for defining the network topology</td></tr>
+<tr><td><a href="wiki.conf.html">wiki.conf</a></td><td>Slurm configuration file for wiki and wiki2 scheduler plugins</td></tr>
+<tr><td><a href="slurmctld.html">slurmctld</a></td><td>The central management daemon of Slurm.</td></tr>
+<tr><td><a href="slurmd.html">slurmd</a></td><td>The compute node daemon for SLURM.</td></tr>
+<tr><td><a href="slurmdbd.html">slurmdbd</a></td><td>Slurm Database Daemon.</td></tr>
+<tr><td><a href="slurmstepd.html">slurmstepd</a></td><td>The job step manager for SLURM.</td></tr>
+<tr><td><a href="spank.html">SPANK</a></td><td>SLURM Plug-in Architecture for Node and job (K)control</td></tr>
+</table>
+
+
+<p style="text-align:center;">Last modified 15 October 2010</p>
+
+<!--#include virtual="footer.txt"-->
diff --git a/doc/html/mc_support.shtml b/doc/html/mc_support.shtml
index 3d6f0cd4a..07768fa64 100644
--- a/doc/html/mc_support.shtml
+++ b/doc/html/mc_support.shtml
@@ -2,8 +2,6 @@
 
 <h1>Support for Multi-core/Multi-thread Architectures</h1>
 
-<b>Note:</b> This document describes features added to SLURM version 1.2.
-
 <h2>Contents</h2>
 <UL>
 <LI> <a href=#defs>Definitions</a>
@@ -13,7 +11,6 @@
 <LI> <a href=#config>Configuration settings in slurm.conf</a>
 </UL>
 
-<!-------------------------------------------------------------------------->
 <a name=defs>
 <h2>Definitions</h2></a>
 
@@ -43,7 +40,6 @@ on the associated logical processor.
 allowing a process to run on more than one logical processor.
 </dl>
 
-<!-------------------------------------------------------------------------->
 <a name=flags>
 <h2>Overview of new srun flags</h2></a>
 
@@ -845,9 +841,9 @@ JobId=20 UserId=(30352) GroupId=users(1051)
    JobState=RUNNING StartTime=09/25-17:17:30 EndTime=NONE
    NodeList=hydra[12-14] NodeListIndices=0,2,-1
    <u>AllocCPUs=1,2,1</u>
-   ReqProcs=4 ReqNodes=2 <u>ReqS:C:T=2:1:*</u>
+   NumCPUs=4 ReqNodes=2 <u>ReqS:C:T=2:1:*</u>
    Shared=0 Contiguous=0 CPUs/task=0
-   MinProcs=0 MinMemory=0 MinTmpDisk=0 Features=(null)
+   MinCPUs=0 MinMemory=0 MinTmpDisk=0 Features=(null)
    Dependency=0 Account=(null) Reason=None Network=(null)
    ReqNodeList=(null) ReqNodeListIndices=-1
    ExcNodeList=(null) ExcNodeListIndices=-1
@@ -903,7 +899,7 @@ NodeName parameter can be used in combination with FastSchedule:
 
 <PRE>
 FastSchedule=1
-NodeName=dualcore[01-16] Procs=4 CoresPerSocket=2 ThreadsPerCore=1
+NodeName=dualcore[01-16] CPUs=4 CoresPerSocket=2 ThreadsPerCore=1
 </PRE>
 
 <p>Below is a more complete description of the configuration possible
@@ -941,13 +937,13 @@ using NodeName:
 #
 #     "RealMemory" : Amount of real memory (in Megabytes)
 #
-#     "Procs"      : Number of logical processors on the node.
-#                    If Procs is omitted, it will be inferred from:
+#     "CPUs"       : Number of logical processors on the node.
+#                    If CPUs is omitted, it will be inferred from:
 #                           Sockets, CoresPerSocket, and ThreadsPerCore.
 #
 #     "Sockets"    : Number of physical processor sockets/chips on the node.
 #                    If Sockets is omitted, it will be inferred from:
-#                           Procs, CoresPerSocket, and ThreadsPerCore.
+#                           CPUs, CoresPerSocket, and ThreadsPerCore.
 #
 #     "CoresPerSocket"
 #                  : Number of cores in a single physical processor socket
@@ -972,12 +968,12 @@ using NodeName:
 #
 # Example Node configuration:
 #
-# NodeName=DEFAULT Procs=2 TmpDisk=64000 State=UNKNOWN
+# NodeName=DEFAULT CPUs=2 TmpDisk=64000 State=UNKNOWN
 # NodeName=host[0-25] NodeAddr=ehost[0-25] Weight=16
 # NodeName=host26     NodeAddr=ehost26     Weight=32 Feature=graphics_card
-# NodeName=dualcore01  Procs=4 CoresPerSocket=2 ThreadsPerCore=1
-# NodeName=dualcore02  Procs=4 Sockets=2 CoresPerSocket=2 ThreadsPerCore=1
-# NodeName=multicore03 Procs=64 Sockets=8 CoresPerSocket=4 ThreadsPerCore=2
+# NodeName=dualcore01  CPUs=4 CoresPerSocket=2 ThreadsPerCore=1
+# NodeName=dualcore02  CPUs=4 Sockets=2 CoresPerSocket=2 ThreadsPerCore=1
+# NodeName=multicore03 CPUs=64 Sockets=8 CoresPerSocket=4 ThreadsPerCore=2
 </PRE>
 
 <!-------------------------------------------------------------------------->
diff --git a/doc/html/meetings.shtml b/doc/html/meetings.shtml
new file mode 100644
index 000000000..7ee9c54b1
--- /dev/null
+++ b/doc/html/meetings.shtml
@@ -0,0 +1,20 @@
+<!--#include virtual="header.txt"-->
+
+<h1>Meetings</h1>
+
+<p><b>SLURM User Group Meeting 2011</b><br>
+September 22-23, 2011<br>
+Phoenix, Arizona, USA</p>
+
+<p><a href="slurm_ug_cfp.html">Call for submissions</a><br>
+<a href="slurm_ug_registration.html">Registration</a><br>
+<a href="slurm_ug_agenda.html">Agenda</a><br>
+</p>
+
+<!--<p><b>SLURM User Group Meeting 2010</b><br>
+October 5, 2010<br>
+Paris, France</p>-->
+
+<p style="text-align:center;">Last modified 23 May 2011</p>
+
+<!--#include virtual="footer.txt"-->
diff --git a/doc/html/mpi_guide.shtml b/doc/html/mpi_guide.shtml
index 40c74fdd4..e546f4a7b 100644
--- a/doc/html/mpi_guide.shtml
+++ b/doc/html/mpi_guide.shtml
@@ -4,7 +4,7 @@
 
 <p>MPI use depends upon the type of MPI being used.
 There are three fundamentally different modes of operation used
-by these various MPI implementation.
+by these various MPI implementations.
 <ol>
 <li>SLURM directly launches the tasks and performs initialization
 of communications (Quadrics MPI, MPICH2, MPICH-GM, MPICH-MX,
diff --git a/doc/html/news.shtml b/doc/html/news.shtml
index b64336261..5ebe074a1 100644
--- a/doc/html/news.shtml
+++ b/doc/html/news.shtml
@@ -6,7 +6,7 @@
 <ul>
 <li><a href="#21">SLURM Version 2.1, January 2010</a></li>
 <li><a href="#22">SLURM Version 2.2, December 2010</a></li>
-<li><a href="#23">SLURM Version 2.3, available May 2011</a></li>
+<li><a href="#23">SLURM Version 2.3, planned for Summer 2011</a></li>
 <li><a href="#24">SLURM Version 2.4 and beyond</a></li>
 <li><a href="#security">Security Patches</a></li>
 </ul>
@@ -54,7 +54,7 @@ several times that rate.</li>
 </ul>
 
 <h2><a name="23">Major Updates in SLURM Version 2.3</a></h2>
-<p>SLURM Version 2.3 release is planned in May 2011.
+<p>SLURM Version 2.3 release is planned for Summer 2011.
 Major enhancements currently planned include:
 <ul>
 <li>Support for Cray XT and XE computers (integration with ALPS/BASIL).</li>
@@ -63,6 +63,10 @@ Major enhancements currently planned include:
 BlueGene architectures, improves performance and fault tolerance).</li>
 <li>Support for Linux cgroup job containers including integration with
 generic resources.</li>
+<li> Resource reservations with a node count specification will select
+those nodes optimized for the system topology.</li>
+<li>Support for growing job allocations (support for shrinking jobs was added
+in version 2.2).</li>
 </ul>
 
 <h2><a name="24">Major Updates in SLURM Version 2.4 and beyond</a></h2>
@@ -71,10 +75,15 @@ have not been finalized. Anyone desiring to perform SLURM development should
 notify <a href="mailto:slurm-dev@lists.llnl.gov">slurm-dev@lists.llnl.gov</a>
 to coordinate activities. Future development plans includes:
 <ul>
+<li>Faster and more powerful job step management support  (e.g. step
+dependencies).</li>
+<li>Improved user support for fault-tolerance (e.g. "hot spare" resources).</li>
 <li>Integration with FlexLM license management.</li>
-<li>Numerous enhancements to advanced resource reservations.</li>
+<li>Numerous enhancements to advanced resource reservations (e.g. start or
+end the reservation early depending upon the workload).</li>
 <li>Add Kerberos credential support including credential forwarding
 and refresh.</li>
+<li>Improved support for provisioning and virtualization.</li> 
 <li>Provide a web-based SLURM administration tool.</li>
 </ul>
 
@@ -102,6 +111,6 @@ trojan library, then that library will be used by the SLURM daemon with
 unpredictable results. This was fixed in SLURM version 2.1.14.</li>
 </ul>
 
-<p style="text-align:center;">Last modified 3 January 2011</p>
+<p style="text-align:center;">Last modified 17 March 2011</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/overview.shtml b/doc/html/overview.shtml
index 373b5e69a..3809134bb 100644
--- a/doc/html/overview.shtml
+++ b/doc/html/overview.shtml
@@ -22,13 +22,6 @@ parallel jobs), backfill scheduling,
 and sophisticated <a href="priority_multifactor.html"> multifactor job
 prioritization</a> algorithms.
 
-<p>SLURM has been developed through the collaborative efforts of
-<a href="https://www.llnl.gov/">Lawrence Livermore National Laboratory (LLNL)</a>,
-<a href="http://www.hp.com/">Hewlett-Packard</a>,
-<a href="http://www.schedmd.com/">SchedMD</a>,
-<a href="http://www.bull.com/">Bull</a>,
-Linux NetworX and many other contributors.</p>
-
 <h2>Architecture</h2>
 <p>SLURM has a centralized manager, <b>slurmctld</b>, to monitor resources and
 work. There may also be a backup manager to assume those responsibilities in the
@@ -69,7 +62,7 @@ MySQL, PGSQL, SlurmDBD (Slurm Database Daemon) or none</li>
 <a href="http://www.theether.org/authd/">authd</a>,
 <a href="http://munge.googlecode.com/">munge</a>, or none (default).</li>
 
-<li><a href="checkpoint_plugins.html">Checkpoint</a>: AIX, OpenMPI, XLCH, or none.</li>
+<li><a href="checkpoint_plugins.html">Checkpoint</a>: AIX, BLCR, OpenMPI, or none.</li>
 
 <li><a href="crypto_plugins.html">Cryptography (Digital Signature Generation)</a>:
 <a href="http://munge.googlecode.com/">munge</a> (default) or
@@ -194,7 +187,7 @@ TmpFS=/tmp
 #
 # Node Configurations
 #
-NodeName=DEFAULT Procs=4 TmpDisk=16384 State=IDLE
+NodeName=DEFAULT CPUs=4 TmpDisk=16384 State=IDLE
 NodeName=lx[0001-0002] State=DRAINED
 NodeName=lx[0003-8000] RealMemory=2048 Weight=2
 NodeName=lx[8001-9999] RealMemory=4096 Weight=6 Feature=video
@@ -209,6 +202,6 @@ PartitionName=DEFAULT MaxTime=UNLIMITED MaxNodes=4096
 PartitionName=batch Nodes=lx[0041-9999]
 </pre>
 
-<p style="text-align:center;">Last modified 31 March 2009</p>
+<p style="text-align:center;">Last modified 5 May 2011</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/platforms.shtml b/doc/html/platforms.shtml
index 79cd6eaaa..55a888e04 100644
--- a/doc/html/platforms.shtml
+++ b/doc/html/platforms.shtml
@@ -13,11 +13,10 @@ added in version 2.1.</li>
 </ul>
 <h2>Interconnects</h2>
 <ul>
-<li><b>BlueGene</b>&#151;SLURM support for IBM's BlueGene/L and BlueGene/P
-systems has been thoroughly tested.</li>
-<li><b>Cray XT</b>&#151;Much of the infrastructure to support a Cray XT
-system is current in SLURM. The interface to ALPS/BASIL remains to be done.
-Please contact us if you would be interested in this work.</li>
+<li><b>BlueGene</b>&#151;SLURM support for IBM's BlueGene/L, BlueGene/P and
+BlueGene/Q systems has been thoroughly tested.</li>
+<li><b>Cray XT and XE</b>&#151;Operates as a scheduler on top of Cray's
+ALPS/BASIL software. Support added in SLURM version 2.3.</li>
 <li><b>Ethernet</b>&#151;Ethernet requires no special support from SLURM and has
 been thoroughly tested.</li>
 <li><b>IBM Federation</b>&#151;SLURM support for IBM's Federation Switch
@@ -31,6 +30,6 @@ for the three-dimensional torus interconnect.</li>
 <li><b>Other</b>&#151;SLURM ports to other systems will be gratefully accepted.</li>
 </ul>
 
-<p style="text-align:center;">Last modified 23 July 2009</p>
+<p style="text-align:center;">Last modified 8 April 2011</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/power_save.shtml b/doc/html/power_save.shtml
index 48e9a9e16..cab835d4b 100644
--- a/doc/html/power_save.shtml
+++ b/doc/html/power_save.shtml
@@ -130,7 +130,7 @@ scripts.</p>
 # Example SuspendProgram
 echo "`date` Suspend invoked $0 $*" >>/var/log/power_save.log
 hosts=`scontrol show hostnames $1`
-for host in "$hosts"
+for host in $hosts
 do
    sudo node_shutdown $host
 done
@@ -139,7 +139,7 @@ done
 # Example ResumeProgram
 echo "`date` Resume invoked $0 $*" >>/var/log/power_save.log
 hosts=`scontrol show hostnames $1`
-for host in "$hosts"
+for host in $hosts
 do
    sudo node_startup $host
 done
diff --git a/doc/html/preempt.shtml b/doc/html/preempt.shtml
index 3b9abb172..f404c4ce8 100644
--- a/doc/html/preempt.shtml
+++ b/doc/html/preempt.shtml
@@ -76,6 +76,16 @@ at job submission time to specify their memory requirements.
 parameters are not critical.
 </LI>
 <LI>
+<B>GraceTime</B>: Specifies a time period for a job to execute after
+it is selected to be preempted. This option can be specified by partition or
+QOS using the <I>slurm.conf</I> file or database respectively. This option is
+only honored if <I>PreemptMode=CANCEL</I>. The <I>GraceTime</I> is specified in
+seconds and the default value is zero, which results in no preemption delay.
+Once a job has been selected for preemption, its end time is set to the
+current time plus <I>GraceTime</I> and the mechanism used to terminate jobs
+upon reaching their time limit is used to cancel the job.
+</LI>
+<LI>
 <B>JobAcctGatherType and JobAcctGatherFrequency</B>: The "maximum data segment
 size" and "maximum virtual memory size" system limits will be configured for
 each job to ensure that the job does not exceed its requested amount of memory.
@@ -92,7 +102,7 @@ parameters are not critical.
 The PreemptMode can be specified on a system-wide basis or on a per-partition
 basis when <I>PreemptType=preempt/partition_prio</I>. Note that when specified 
 on a partition, a compatible mode must also be specified system-wide;
-specifically if a PreemptMode is set to SUSPEND for any partition or QOS, then
+specifically if a PreemptMode is set to SUSPEND for any partition, then
 the system-wide PreemptMode must include the GANG parameter so the module
 responsible for resuming jobs executes.
 Configure to <I>CANCEL</I>, <I>CHECKPOINT</I>,
@@ -109,7 +119,9 @@ Checkpointed jobs are not automatically restarted.
 jobs. Requeued jobs are permitted to be restarted on different resources.</LI>
 <LI>A value of <I>SUSPEND</I> will suspend and automatically resume the low
 priority jobs. The <I>SUSPEND</I> option must be used with the <I>GANG</I>
-option (e.g. "PreemptMode=SUSPEND,GANG").</LI>
+option (e.g. "PreemptMode=SUSPEND,GANG") and with
+<I>PreemptType=preempt/partition_prio</I> (the logic to suspend and resume
+jobs currently only has the data structures to support partitions).</LI>
 <LI>A value of <I>GANG</I> may be used with any of the above values and will
 execute a module responsible for resuming jobs previously suspended for either
 gang scheduling or job preemption with suspension.</LI>
@@ -126,8 +138,10 @@ can preempt jobs from lower priority partitions.</LI>
 <LI><I>preempt/qos</I> indicates that jobs from one Quality Of Service (QOS)
 can preempt jobs from a lower QOS. These jobs can be in the same partition
 or different partitions. PreemptMode must be set to CANCEL, CHECKPOINT,
-SUSPEND or REQUEUE. This option requires the use of a database identifying
-available QOS and their preemption rules. </LI>
+or REQUEUE. This option requires the use of a database identifying
+available QOS and their preemption rules. This option is not compatible with
+PreemptMode=OFF or PreemptMode=SUSPEND (i.e. preempted jobs must be removed
+from the resources).</LI>
 </UL>
 </LI>
 <LI>
@@ -377,6 +391,6 @@ order to support ideal placements such as this, which can quickly complicate
 the design. Any and all help is welcome here!
 </P>
 
-<p style="text-align:center;">Last modified 6 December 2010</p>
+<p style="text-align:center;">Last modified 16 May 2011</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/priority_multifactor.shtml b/doc/html/priority_multifactor.shtml
index 432b24936..24a83dc39 100644
--- a/doc/html/priority_multifactor.shtml
+++ b/doc/html/priority_multifactor.shtml
@@ -465,9 +465,17 @@ factor as it is currently configured.</P>
   use the PriorityDecayHalfLife option to avoid not having anything
   running on your cluster, but if your schema is set up to only allow
   certain amounts of time on your system this is the way to do it.
-  The unit is a time string (i.e. min, hr:min:00, days-hr:min:00, or
-  days-hr). The default value is not set (turned off).
-
+  Applicable only if PriorityType=priority/multifactor. The unit is a
+  time string (i.e. NONE, NOW, DAILY, WEEKLY).  The default is NONE.
+<ul>
+<li>NONE: Never clear historic usage. The default value.</li>
+<li>NOW: Clear the historic usage now. Executed at startup and reconfiguration time.</li>
+<li>DAILY: Cleared every day at midnight.</li>
+<li>WEEKLY: Cleared every week on Sunday at time 00:00.</li>
+<li>MONTHLY: Cleared on the first day of each month at time 00:00.</li>
+<li>QUARTERLY: Cleared on the first day of each quarter at time 00:00.</li>
+<li>YEARLY: Cleared on the first day of each year at time 00:00.</li>
+</ul>
 <DT> PriorityFavorSmall
 <DD> A boolean that sets the polarity of the job size factor.  The
   default setting is NO which results in larger node sizes having a
@@ -535,8 +543,8 @@ PriorityType=priority/multifactor
 # apply no decay
 PriorityDecayHalfLife=0
 
-# reset usage after 28 days
-PriorityUsageResetPeriod=28-0
+# reset usage after 1 month
+PriorityUsageResetPeriod=MONTHLY
 
 # The larger the job, the greater its job size priority.
 PriorityFavorSmall=NO
@@ -559,4 +567,3 @@ PriorityWeightQOS=0 # don't use the qos factor
 <p style="text-align:center;">Last modified 2 November 2010</p>
 
 <!--#include virtual="footer.txt"-->
-
diff --git a/doc/html/proctrack_plugins.shtml b/doc/html/proctrack_plugins.shtml
index a3c4a17e6..b3b09c558 100644
--- a/doc/html/proctrack_plugins.shtml
+++ b/doc/html/proctrack_plugins.shtml
@@ -22,10 +22,16 @@ of proctrack. We recommend, for example:</p>
 <li><b>aix</b>&#151;Perform process tracking on an AIX platform.
 NOTE: This requires a kernel extension that records
 ever process creation and termination.</li>
+<li><b>cgroup</b>&#151;Use Linux cgroups for process tracking.</li>
 <li><b>linuxproc</b>&#151;Perform process tracking based upon a scan
 of the Linux process table and use the parent process ID to determine
 what processes are members of a SLURM job. NOTE: This mechanism is
 not entirely reliable for process tracking.</li>
+<li><b>lua</b>&#151;Use site-defined <a href="http://www.lua.org">Lua</a>
+script for process tracking. Sample Lua scripts can be found with the
+SLURM distribution in the directory <i>contribs/lua</i>. The default
+installation location of the Lua scripts is the same location as the SLURM
+configuration file, <i>slurm.conf</i>.</li>
 <li><b>pgid</b>&#151;Use process group ID to determine
 what processes are members of a SLURM job. NOTE: This mechanism is
 not entirely reliable for process tracking.</li>
@@ -50,7 +56,7 @@ for an example implementation of a SLURM proctrack plugin.</p>
 <p class="footer"><a href="#top">top</a></p>
 
 <h2>Data Objects</h2>
-<p> The implementation must support a container id of type uint32_t.
+<p> The implementation must support a container id of type uint64_t.
 This container ID is maintained by the plugin directly in the slurmd
 job structure using the field named <i>cont_id</i>.</p>
 
@@ -70,10 +76,10 @@ Successful API calls are not required to reset errno to a known value.</p>
 <p>The following functions must appear. Functions which are not implemented should
 be stubbed.</p>
 
-<p class="commandline">int slurm_container_create (slurmd_job_t *job);</p>
+<p class="commandline">int slurm_container_plugin_create (slurmd_job_t *job);</p>
 <p style="margin-left:.2in"><b>Description</b>: Create a container.
 The container should be valid
-<span class="commandline">slurm_container_destroy()</span> is called.
+<span class="commandline">slurm_container_plugin_destroy()</span> is called.
 This function must put the container ID directory in the job structure's
 variable <i>cont_id</i>.</p>
 <p style="margin-left:.2in"><b>Argument</b>:
@@ -83,7 +89,7 @@ Pointer to a slurmd job structure.</p>
 the plugin should return SLURM_ERROR and set the errno to an appropriate value
 to indicate the reason for failure.</p>
 
-<p class="commandline">int slurm_container_add (slurmd_job_t *job, pid_t pid);</p>
+<p class="commandline">int slurm_container_plugin_add (slurmd_job_t *job, pid_t pid);</p>
 <p style="margin-left:.2in"><b>Description</b>: Add a specific process ID
 to a given job's container.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:<br>
@@ -95,7 +101,7 @@ The ID of the process to add to this job's container.</p>
 the plugin should return SLURM_ERROR and set the errno to an appropriate value
 to indicate the reason for failure.</p>
 
-<p class="commandline">int slurm_container_signal (uint32_t id, int signal);</p>
+<p class="commandline">int slurm_container_plugin_signal (uint64_t id, int signal);</p>
 <p style="margin-left:.2in"><b>Description</b>: Signal all processes in a given
 job's container.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:<br>
@@ -111,7 +117,7 @@ its errno to an appropriate value to indicate the reason for failure.</p>
 
 <p class="footer"><a href="#top">top</a></p>
 
-<p class="commandline">int slurm_container_destroy (uint32_t id);</p>
+<p class="commandline">int slurm_container_plugin_destroy (uint64_t id);</p>
 <p style="margin-left:.2in"><b>Description</b>: Destroy or  otherwise
 invalidate a job container.
 This does not imply the container is empty, just that it is no longer
@@ -123,7 +129,7 @@ Job container's ID.</p>
 the plugin should return SLURM_ERROR and set the errno to an appropriate value
 to indicate the reason for failure.</p>
 
-<p class="commandline">uint32_t slurm_container_find (pid_t pid);</p>
+<p class="commandline">uint64_t slurm_container_plugin_find (pid_t pid);</p>
 <p style="margin-left:.2in"><b>Description</b>:
 Given a process ID, return its job container ID.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:
@@ -132,14 +138,27 @@ A process ID.</p>
 <p style="margin-left:.2in"><b>Returns</b>: The job container ID
 with this process or zero if none is found.</p>
 
+<p class="commandline">uint32_t slurm_container_plugin_get_pids (uint64_t cont_id, pid_t **pids, int *npids);</p>
+<p style="margin-left:.2in"><b>Description</b>:
+Given a process container ID, fill in all the process IDs in the container.</p>
+<p style="margin-left:.2in"><b>Arguments</b>:
+<span class="commandline"> cont_id</span>&nbsp; &nbsp;&nbsp;(input)
+A container ID.</p>
+<span class="commandline"> pids</span>&nbsp; &nbsp;&nbsp;(output)
+Array of process IDs in the container.</p>
+<span class="commandline"> npids</span>&nbsp; &nbsp;&nbsp;(output)
+Count of process IDs in the container.</p>
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if
+  successful, SLURM_ERROR else.</p>
+
 <h2>Versioning</h2>
-<p> This document describes version 0 of the SLURM Process Tracking API.
+<p> This document describes version 91 of the SLURM Process Tracking API.
 Future releases of SLURM may revise this API. A process tracking plugin
 conveys its ability to implement a particular API version using the
 mechanism outlined for SLURM plugins.</p>
 
 <p class="footer"><a href="#top">top</a></p>
 
-<p style="text-align:center;">Last modified 6 June 2006</p>
+<p style="text-align:center;">Last modified 29 April 2011</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/publications.shtml b/doc/html/publications.shtml
index a89a4293b..4ada15bdc 100644
--- a/doc/html/publications.shtml
+++ b/doc/html/publications.shtml
@@ -7,8 +7,84 @@
 <h2>Presentations</h2>
 <ul>
 
+<li><b>Presentations from SLURM User Group Meeting, September 2011</b></li>
+<ul>
+
+<li><a href="slurm_ug_2011/Basic_Configuration_Usage.pdf">Basic
+    Configuration and Usage</a>,
+Rod Schultz, Groupe Bull</li>
+
+<li><a href="slurm_ug_2011/Advanced_Usage_Tutorial.pdf">SLURM:
+    Advanced Usage</a>,
+Rod Schultz, Groupe Bull</li>
+
+<li><a href="slurm_ug_2011/cons_res.pdf">CPU Management Allocation
+    and Binding</a>,
+Martin Perry, Groupe Bull
+</li>
+
+<li><a href="slurm_ug_2011/SLURM_HA_Tutorial.pdf">Configuring SLURM for HA</a>,
+David Egolf - Bill Brophy, Groupe Bull
+</li>
+
+<li><a href="slurm_ug_2011/SLURM_UserGroup2011_cgroups.pdf">SLURM Resources isolation through cgroups</a>,
+Yiannis Georgiou, Groupe Bull
+<br>Matthieu Hautreux, CEA
+</li>
+
+<li><a href="slurm_ug_2011/SLURM.Cray.pdf">SLURM Operation on Cray XT and XE</a>,
+Moe Jette, SchedMD LLC
+</li>
+
+<!-- day 2 -->
+
+<li><a href="slurm_ug_2011/SLURM-Keynode-v.pdf">Challenges and Opportunities for Exascale
+Resource Management and How Today's Petascale Systems are Guiding the Way</a>,
+William Kramer, NCSA
+</li>
+
+<li><a href="slurm_ug_2011/cea_site_report-1.0.pdf">CEA Site report</a>,
+Matthieu Hautreux, CEA
+</li>
+
+<li><a href="slurm_ug_2011/LLNL_Site_Report_2011.pdf">LLNL Site Report</a>,
+Don Lipari, LLNL
+</li>
+
+<li><a href="slurm_ug_2011/SLURM.v23.status.pdf">SLURM Version 2.3 and
+    Beyond</a>,
+Moe Jette, SchedMD LLC
+</li>
+
+<li><a href="slurm_ug_2011/slurm_simulator_phoenix.pdf">SLURM Simulator</a>,
+Alejandro Lucero, BSC
+</li>
+
+<li><a href="slurm_ug_2011/SLURM_Grid_Ideas.pdf">Proposed Design for Enhanced
+Enterprise-wide Scheduling</a>,
+Don Lipari, LLNL
+</li>
+
+<li><a href="slurm_ug_2011/Bright_Computing_SLURM_integration.pdf">Bright
+    Cluster Manager & SLURM</a>,
+Robert Stober, Bright Computing
+</li>
+
+<li><a href="slurm_ug_2011/User.steps.design.pdf">Job Step Management in User Space</a>,
+Moe Jette, SchedMD LLC
+</li>
+
+<li><a href="slurm_ug_2011/SLURM.BGQ.pdf">SLURM Operation IBM BlueGene/Q</a>,
+Danny Auble, SchedMD LLC
+</li>
+</ul>
+
+<li><a href="http://mescal.imag.fr/membres/yiannis.georgiou/publications/thesis_Georgiou-2010-UJF.pdf">
+Contributions For Resource and Job Management in High Performance Computing</a>,
+Yiannis Georgiou, Universite de Grenoble, France (Thesis, November 2010)</li>
+
 <!-- Use LLNL-PRES-461787 -->
-<li><a href="slurm_sc10_bof.pdf">SLURM Version 2.2: Features and Release Plans</a>,
+<li><a href="pdfs/slurm_sc10_bof.pdf">SLURM Version 2.2: Features and Release Plans</a>,
 Morris Jette, Danny Auble and Donald Lipari, Lawrence Livermore National Laboratory
 (Supercomputing 2010, November 2010)</li>
 
@@ -53,38 +129,38 @@ Morris Jette and Danny Auble, Lawrence Livermore National Laboratory
 </ul>
 
 <!-- Use LLNL-PRES-402832 -->
-<li><a href="slurm_sc09_bof.pdf">SLURM Community Meeting</a>,
+<li><a href="pdfs/slurm_sc09_bof.pdf">SLURM Community Meeting</a>,
 Morris Jette, Danny Auble and Don Lipari, Lawrence Livermore National Laboratory
 (Supercomputing 2009, November 2009)</li>
 
 <!-- Use LLNL-PRES-408498 -->
-<li><a href="slurm.sc08.bof.pdf">High Scalability Resource Management with
+<li><a href="pdfs/slurm.sc08.bof.pdf">High Scalability Resource Management with
 SLURM</a>,
 Morris Jette, Lawrence Livermore National Laboratory
 (Supercomputing 2008, November 2008)</li>
 
 <!-- Use LLNL-PRES-408510 -->
-<li><a href="slurm.sc08.status.pdf">SLURM Status Report</a>,
+<li><a href="pdfs/slurm.sc08.status.pdf">SLURM Status Report</a>,
 Morris Jette and Danny Auble, Lawrence Livermore National Laboratory
 (Supercomputing 2008, November 2008)</li>
 
 <!-- Use LLNL-PRES-402832 -->
-<li><a href="slurm_v1.3.pdf">SLURM Version 1.3</a>,
+<li><a href="pdfs/slurm_v1.3.pdf">SLURM Version 1.3</a>,
 Morris Jette and Danny Auble, Lawrence Livermore National Laboratory
 (May 2008)</li>
 
 <!-- Use LLNL-PRES-403148 -->
-<li><a href="slurm_moab.pdf">Managing Clusters with Moab and SLURM</a>,
+<li><a href="pdfs/slurm_moab.pdf">Managing Clusters with Moab and SLURM</a>,
 Morris Jette and Donald Lipari, Lawrence Livermore National Laboratory
 (May 2008)</li>
 
 <!-- Use UCRL-PRES-230170 -->
-<li><a href="slurm_v1.2.pdf">Resource Management at LLNL, SLURM Version 1.2</a>,
+<li><a href="pdfs/slurm_v1.2.pdf">Resource Management at LLNL, SLURM Version 1.2</a>,
 Morris Jette, Danny Auble and Chris Morrone, Lawrence Livermore National Laboratory
 (April 2007)</li>
 
 <!-- Use UCRL-PRES-219562 -->
-<li><a href="lci.7.tutorial.pdf">Resource Management Using SLURM</a>,
+<li><a href="pdfs/lci.7.tutorial.pdf">Resource Management Using SLURM</a>,
 Morris Jette, Lawrence Livermore National Laboratory
 (Tutorial, The 7th International Conference on Linux Clusters, May 2006)</li>
 </ul>
@@ -130,6 +206,6 @@ RCE 10: SLURM (podcast)</a>:
 Brock Palen and Jeff Squyres speak with Morris Jette and
 Danny Auble of LLNL about SLURM.</p>
 
-<p style="text-align:center;">Last modified 27 December 2010</p>
+<p style="text-align:center;">Last modified 12 April 2011</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/qos.shtml b/doc/html/qos.shtml
index 7d942aa80..eeaa8a0c5 100644
--- a/doc/html/qos.shtml
+++ b/doc/html/qos.shtml
@@ -78,10 +78,12 @@ QOS</P>
 <LI><b>MaxCPUMinsPerJob</b> Maximum number of CPU*minutes any job with this QOS can run.
 <LI><b>GrpCpus</b> Maximum number of CPU's all jobs with this QOS can be allocated.
 <LI><b>MaxCpusPerJob</b> Maximum number of CPU's any job with this QOS can be allocated.
+<LI><b>MaxCpusPerUser</b> Maximum number of CPU's any user with this QOS can be allocated.
 <LI><b>GrpJobs</b> Maximum number of jobs that can run with this QOS.
 <LI><b>MaxJobsPerUser</b> Maximum number of jobs a user can run with this QOS.
 <LI><b>GrpNodes</b> Maximum number of nodes that can be allocated to all jobs with this QOS.
 <LI><b>MaxNodesPerJob</b> Maximum number of nodes that can be allocated to any job with this QOS.
+<LI><b>MaxNodesPerUser</b> Maximum number of nodes that can be allocated to any user with this QOS.
 <LI><b>GrpSubmitJobs</b> Maximum number of jobs with this QOS that can be in the system (no matter what state).
 <LI><b>MaxSubmitJobsPerUser</b> Maximum number of jobs with this QOS that can be in the system.
 <LI><b>GrpWall</b> Wall clock limit for all jobs running with this QOS.
diff --git a/doc/html/quickstart.shtml b/doc/html/quickstart.shtml
index c14563e48..245490600 100644
--- a/doc/html/quickstart.shtml
+++ b/doc/html/quickstart.shtml
@@ -213,9 +213,9 @@ JobId=65672 UserId=phil(5136) GroupId=phil(5136)
    AllocNode:Sid=adev0:16726 TimeLimit=00:10:00 ExitCode=0:0
    StartTime=06/02-15:27:11 EndTime=06/02-15:37:11
    JobState=PENDING NodeList=(null) NodeListIndices=
-   ReqProcs=24 ReqNodes=1 ReqS:C:T=1-65535:1-65535:1-65535
+   NumCPUs=24 ReqNodes=1 ReqS:C:T=1-65535:1-65535:1-65535
    Shared=1 Contiguous=0 CPUs/task=0 Licenses=(null)
-   MinProcs=1 MinSockets=1 MinCores=1 MinThreads=1
+   MinCPUs=1 MinSockets=1 MinCores=1 MinThreads=1
    MinMemory=0 MinTmpDisk=0 Features=(null)
    Dependency=(null) Account=(null) Requeue=1
    Reason=None Network=(null)
diff --git a/doc/html/quickstart_admin.shtml b/doc/html/quickstart_admin.shtml
index c92429cf0..adc47e2dd 100644
--- a/doc/html/quickstart_admin.shtml
+++ b/doc/html/quickstart_admin.shtml
@@ -409,7 +409,7 @@ Each partition can thus be considered a separate queue.
 Partition and node specifications use node range expressions to identify
 nodes in a concise fashion. This configuration file defines a 1154-node cluster
 for SLURM, but it might be used for a much larger cluster by just changing a few
-node range expressions. Specify the minimum processor count (Procs), real memory
+node range expressions. Specify the minimum processor count (CPUs), real memory
 space (RealMemory, megabytes), and temporary disk space (TmpDisk, megabytes) that
 a node should have to be considered available for use. Any node lacking these
 minimum configuration values will be considered DOWN and not scheduled.
@@ -450,7 +450,7 @@ TreeWidth=50
 #
 # Node Configurations
 #
-NodeName=DEFAULT Procs=2 RealMemory=2000 TmpDisk=64000 State=UNKNOWN
+NodeName=DEFAULT CPUs=2 RealMemory=2000 TmpDisk=64000 State=UNKNOWN
 NodeName=mcr[0-1151] NodeAddr=emcr[0-1151]
 #
 # Partition Configurations
@@ -563,8 +563,8 @@ JobId=475 UserId=bob(6885) Name=sleep JobState=COMPLETED
    AllocNode:Sid=adevi:21432 TimeLimit=UNLIMITED
    StartTime=03/19-12:53:41 EndTime=03/19-12:53:59
    NodeList=adev8 NodeListIndecies=-1
-   ReqProcs=0 MinNodes=0 Shared=0 Contiguous=0
-   MinProcs=0 MinMemory=0 Features=(null) MinTmpDisk=0
+   NumCPUs=0 MinNodes=0 Shared=0 Contiguous=0
+   MinCPUs=0 MinMemory=0 Features=(null) MinTmpDisk=0
    ReqNodeList=(null) ReqNodeListIndecies=-1
 
 JobId=476 UserId=bob(6885) Name=sleep JobState=RUNNING
@@ -572,8 +572,8 @@ JobId=476 UserId=bob(6885) Name=sleep JobState=RUNNING
    AllocNode:Sid=adevi:21432 TimeLimit=UNLIMITED
    StartTime=03/19-12:54:01 EndTime=NONE
    NodeList=adev8 NodeListIndecies=8,8,-1
-   ReqProcs=0 MinNodes=0 Shared=0 Contiguous=0
-   MinProcs=0 MinMemory=0 Features=(null) MinTmpDisk=0
+   NumCPUs=0 MinNodes=0 Shared=0 Contiguous=0
+   MinCPUs=0 MinMemory=0 Features=(null) MinTmpDisk=0
    ReqNodeList=(null) ReqNodeListIndecies=-1
 </pre> <p>Print the detailed state of job 477 and change its priority to
 zero. A priority of zero prevents a job from being initiated (it is held in &quot;pending&quot;
diff --git a/doc/html/registration.shtml b/doc/html/registration.shtml
new file mode 100644
index 000000000..1b498aab2
--- /dev/null
+++ b/doc/html/registration.shtml
@@ -0,0 +1,78 @@
+<!--#include virtual="header.txt"-->
+
+<h1>Registration</h1>
+
+<p>Please use the following form to register for the
+<b>Slurm User Group Meeting 2011</b> to be held September 22-23, 2011 in
+Phoenix, Arizona, USA.
+Note that a registration fee of $100 per person will be collected at
+the meeting to defray expenses.</p>
+
+<p>Thursday September 22 will be devoted to tutorials.
+One room will be devoted to beginner level SLURM training.
+A second room will be devoted to advanced topics of SLURM administration and use.
+Please specify which tutorial track you would like to attend in the form below.</p>
+
+<p>Friday September 23 will be devoted to a variety of technical presentations.
+Please see the <a href="sug2011_agenda">agenda</a> for more details.</p>
+
+<p>Early registration is recommended as the number of attendees is limited to
+50 and speakers will receive priority.
+You will receive by email the status of your registration within a
+few days of its submission.</p>
+
+<p>The meeting will be held at LOCATION/ADDRESS in Phoenix, Arizona.
+A block of rooms is available for meeting attendees wishing to stay at
+THAT HOTEL at a preferred rate of $79. HOTEL CONTACT INFO HERE.</p>
+
+<FORM METHOD=POST ENCTYPE="text/plain" ACTION="mailto:jette1@llnl.gov?subject=Registration">
+	<PRE>
+
+	  <table width="100%" border=0 cellspacing=0 cellpadding=0>
+	    <tr>
+	      <td width="10%">First Name</td>
+	      <td width="80%"><INPUT NAME=Firstname size=30></td>
+	    </tr>
+	    <tr>
+	      <td width="10%">Last Name</td>
+	      <td width="80%"><INPUT NAME=Lastname size=30></td>
+
+	    </tr>
+	    <tr>
+	      <td width="10%">Email</td>
+	      <td width="80%"><INPUT NAME=Email size=30></td>
+	    </tr>
+	    <tr>
+	      <td width="10%">Country</td>
+	      <td width="80%"><INPUT NAME=Country size=30></td>
+
+	    </tr>
+	    <tr>
+	      <td width="10%">Company</td>
+	      <td width="80%"><INPUT NAME=Company size=30></td>
+	    </tr>
+	    <tr>
+	      <td width="10%">Title</td>
+	      <td width="80%"><INPUT NAME=Title size=30></td>
+
+	    </tr>
+	    <tr>
+	      <td width="10%">Tutorials</td>
+	      <td width="80%"><INPUT TYPE="radio" NAME=Tutorial VALUE=Beginner>Beginner<br>
+	                      <INPUT TYPE="radio" NAME=Tutorial VALUE=Advanced>Advanced<br>
+	                      <INPUT TYPE="radio" NAME=Tutorial VALUE=None>None</td>
+	    </tr>
+	    <tr>
+	      <td width="10%">Comments</td>
+	      <td width="80%"><TEXTAREA NAME=Comments rows=5 cols=50></TEXTAREA></td>
+	    </tr>
+	    <tr>
+	      <td width="30%"><INPUT TYPE=SUBMIT VALUE=Submit></td>
+	      <td width="30%"><INPUT TYPE=RESET VALUE=Clear></td>
+
+	    </tr>
+	  </table>
+	</PRE>
+</FORM>
+
+<!--#include virtual="footer.txt"-->
diff --git a/doc/html/reservations.shtml b/doc/html/reservations.shtml
index 67db6686e..f94179df5 100644
--- a/doc/html/reservations.shtml
+++ b/doc/html/reservations.shtml
@@ -11,6 +11,13 @@ Note that resource reservations are not compatible with SLURM's
 gang scheduler plugin since the termination time of running jobs
 is not possible to accurately predict.</p>
 
+<p>Note that reserved licenses are treated somewhat differently than reserved
+nodes. When nodes are reserved, then jobs using that reservation can use only
+those nodes and no other jobs can use those nodes. Reserved licenses can only
+be used by jobs associated with that reservation, but licenses not explicitly
+reserved are available to any job. This eliminates the need to explicitly
+put licenses into every advanced reservation created.</p>
+
 <p>Reservations can be created, updated, or destroyed only by user root
 or the configured <i>SlurmUser</i> using the <i>scontrol</i> command.
 The <i>scontrol</i>, <i>smap</i> and <i>sview</i> commands can be used
@@ -99,7 +106,7 @@ ReservationName=root_5 StartTime=2009-02-04T16:22:57
    Users=root Accounts=(null)
 </pre>
 
-<p>Our final example is to reserve ten nodes in the default
+<p>Our next example is to reserve ten nodes in the default
 SLURM partition starting at noon and with a duration of 60
 minutes occurring daily. The reservation will be available
 only to users alan and brenda.</p>
@@ -124,6 +131,28 @@ reservation. Note that the reservation creation request can also
 identify the partition from which to select the nodes or _one_
 feature that every selected node must contain.</p>
 
+<p>On a smaller system, one might want to reserve specific CPUs rather than
+whole nodes. While the resolution of SLURM's resource reservation is that of
+whole nodes, one might configure each CPU as a license to SLURM and reserve
+those instead (we understand this is a kludge, but it does provide a way to
+work around this shortcoming in SLURM's code). Proper enforcement then requires
+that each job request one "cpu" license for each CPU to be allocated, which
+can be accomplished by an appropriate job_submit plugin. In the example below,
+we configure the system with one license named "cpu" for each CPU in the
+system, 64 in this example, then create a reservation for 32 CPUs. The
+user developed job_submit plugin would then explicitly set the job's
+licenses field to require one "cpu" for each physical CPU required to satisfy
+the request.</p>
+<pre>
+$ scontrol show configuration | grep Licenses
+Licenses  = cpu*64
+
+$ scontrol create reservation starttime=2009-04-06T16:00:00 \
+   duration=120 user=bob flags=maint,ignore_jobs \
+   licenses=cpu*32
+Reservation created: bob_5
+</pre>
+
 <h2>Reservation Use</h2>
 
 <p>The reservation create response includes the reservation's name.
@@ -140,6 +169,12 @@ $ sbatch --reservation=alan_6 -N4 my.script
 sbatch: Submitted batch job 65540
 </pre>
 
+<p>Note that use of a reservation does not alter a job's priority, but it
+does act as an enhancement to the job's priority.
+Any job with a reservation is considered for scheduling to resources 
+before any other job in the same SLURM partition (queue) not associated
+with a reservation.</p>
+
 <h2>Reservation Modification</h2>
 
 <p>Reservations can be modified by user root as desired.
@@ -234,18 +269,16 @@ to have used half of the reserved resources).</p>
 
 <p>Several enhancements are anticipated at some point in the future.
 <ol>
-<li>The automatic selection of nodes for a reservation create request may be
-sub-optimal in terms of locality (for optimized application
-communication).</li>
 <li>Reservations made within a partition having gang scheduling assumes
 the highest level rather than the actual level of time-slicing when
 considering the initiation of jobs.
 This will prevent the initiation of some jobs which would complete execution
 before a reservation given fewer jobs to time-slice with.</li>
+<li>Add support to reserve specific CPU counts rather than require whole
+nodes be reserved (work around described above).</li>
 </ol>
 
-
-<p style="text-align: center;">Last modified 27 December 2010</p>
+<p style="text-align: center;">Last modified 15 September 2011</p>
 
 <!--#include virtual="footer.txt"-->
 
diff --git a/doc/html/resource_limits.shtml b/doc/html/resource_limits.shtml
index c2f1b21cf..49f75922e 100644
--- a/doc/html/resource_limits.shtml
+++ b/doc/html/resource_limits.shtml
@@ -63,7 +63,8 @@ each association in the database.  By setting this option, the
   set to true.
 </li>
 </ul>
-(NOTE: The association is a combination of cluster, account,
+
+<p>(NOTE: The association is a combination of cluster, account,
 user names and optional partition name.)
 <br>
 Without AccountingStorageEnforce being set (the default behavior)
@@ -72,7 +73,7 @@ cluster.
 <br>
 It is advisable to run without the option 'limits' set when running a
 scheduler on top of SLURM, like Moab, that does not update in real
-time their limits per association.</li>
+time their limits per association.
 </p>
 
 <h2>Tools</h2>
@@ -108,40 +109,23 @@ specified then no limit will apply.</p>
 
 <p>Currently available scheduling policy options:</p>
 <ul>
-<li><b>Fairshare=</b> Used for determining priority.  Essentially
-  this is the amount of claim this association and it's children have
-  to the above system.</li>
+<li><b>Fairshare=</b> Integer value used for determining priority.
+  Essentially this is the amount of claim this association and its
+  children have to the above system. Can also be the string "parent",
+  this means that the parent association is used for fairshare.
 </li>
 
-<!-- For future use
 <li><b>GrpCPUMins=</b> A hard limit of cpu minutes to be used by jobs
   running from this association and its children.  If this limit is
   reached all jobs running in this group will be killed, and no new
   jobs will be allowed to run.
 </li>
--->
 
-<!-- For future use
-<li><b>MaxCPUMinsPerJob=</b> A limit of cpu minutes to be used by jobs
-  running from this association.  If this limit is
-  reached the job will be killed will be allowed to run.
-</li>
--->
-
-<!-- For future use
 <li><b>GrpCPUs=</b> The total count of cpus able to be used at any given
   time from jobs running from this association and its children.  If
   this limit is reached new jobs will be queued but only allowed to
   run after resources have been relinquished from this group.
 </li>
--->
-
-<!-- For future use
-<li><b>MaxCPUsPerJob=</b> The maximum size in cpus any given job can
-  have from this association.  If this limit is reached the job will
-  be denied at submission.
-</li>
--->
 
 <li><b>GrpJobs=</b> The total number of jobs able to run at any given
   time from this association and its children.  If
@@ -149,48 +133,61 @@ specified then no limit will apply.</p>
   run after previous jobs complete from this group.
 </li>
 
-<li><b>MaxJobs=</b> The total number of jobs able to run at any given
-  time from this association.  If this limit is reached new jobs will
-  be queued but only allowed to run after previous jobs complete from
-  this association.
-</li>
-
 <li><b>GrpNodes=</b> The total count of nodes able to be used at any given
   time from jobs running from this association and its children.  If
   this limit is reached new jobs will be queued but only allowed to
   run after resources have been relinquished from this group.
 </li>
 
-<li><b>MaxNodesPerJob=</b> The maximum size in nodes any given job can
-  have from this association.  If this limit is reached the job will
-  be denied at submission.
-</li>
-
 <li><b>GrpSubmitJobs=</b> The total number of jobs able to be submitted
   to the system at any given time from this association and its children.  If
   this limit is reached new submission requests will be denied until
   previous jobs complete from this group.
 </li>
 
+<li><b>GrpWall=</b> The maximum wall clock time any job submitted to
+  this group can run for.  If this limit is reached submission requests
+  will be denied.
+</li>
+
+<li><b>MaxCPUsPerJob=</b> The maximum size in cpus any given job can
+  have from this association.  If this limit is reached the job will
+  be denied at submission.
+</li>
+
+<li><b>MaxJobs=</b> The total number of jobs able to run at any given
+  time from this association.  If this limit is reached new jobs will
+  be queued but only allowed to run after previous jobs complete from
+  this association.
+</li>
+
+<li><b>MaxNodesPerJob=</b> The maximum size in nodes any given job can
+  have from this association.  If this limit is reached the job will
+  be denied at submission.
+</li>
+
 <li><b>MaxSubmitJobs=</b> The maximum number of jobs able to be submitted
   to the system at any given time from this association.  If
   this limit is reached new submission requests will be denied until
   previous jobs complete from this association.
 </li>
 
-<li><b>GrpWall=</b> The maximum wall clock time any job submitted to
-  this group can run for.  Submitting jobs that specify a wall clock
-  time limit that exceeds this limit will be denied.</li>
-
 <li><b>MaxWallDurationPerJob=</b> The maximum wall clock time any job
-  submitted to this association can run for.  Submitting jobs that
-  specify a wall clock time limit that exceeds this limit will be
-  denied.
+  submitted to this association can run for.  If this limit is reached
+  the job will be denied at submission.
 </li>
 
 <li><b>QOS=</b> comma separated list of QOS's this association is
   able to run.
 </li>
+
+<!-- For future use
+<li><b>MaxCPUMinsPerJob=</b> A limit of cpu minutes to be used by jobs
+  running from this association.  If this limit is
+  reached the job will be killed and will not be allowed to run.
+</li>
+-->
+
 </ul>
 
 <p>The <b>MaxNodes</b> and <b>MaxWall</b> options already exist in
@@ -205,6 +202,6 @@ data maintained in the SLURM database.  More information can be found
 in the <a href="priority_multifactor.html">priority/multifactor</a>
 plugin description.</p>
 
-<p style="text-align: center;">Last modified 9 October 2009</p>
+<p style="text-align: center;">Last modified 10 June 2011</p>
 
 </ul></body></html>
diff --git a/doc/html/review_release.html b/doc/html/review_release.html
index 063870585..2bb49a043 100644
--- a/doc/html/review_release.html
+++ b/doc/html/review_release.html
@@ -2,13 +2,16 @@
 
 <head>
 <title>SLURM Web pages for Review and Release</title>
-<!-- Updated 6 March 2009 -->
+<!-- Updated 6 May 2011 -->
 </head>
 
 <body>
 <h1>SLURM Web pages for Review and Release</h1>
 <b>NOTE: Do not follow links.</b>
 <ul>
+<li><a href="https://computing-pre.llnl.gov/linux/slurm/slurm_ug_agenda.html">slurm_ug_agenda.html</a></li>
+<li><a href="https://computing-pre.llnl.gov/linux/slurm/slurm_ug_registration.html">slurm_ug_registration.html</a></li>
+<!-- 
 <li><a href="https://computing-pre.llnl.gov/linux/slurm/accounting.html">accounting.html</a></li>
 <li><a href="https://computing-pre.llnl.gov/linux/slurm/accounting_storageplugins.html">accounting_storageplugins.html</a></li>
 <li><a href="https://computing-pre.llnl.gov/linux/slurm/api.html">api.html</a></li>
@@ -28,6 +31,8 @@
 <li><a href="https://computing-pre.llnl.gov/linux/slurm/faq.html">faq.html</a></li>
 <li><a href="https://computing-pre.llnl.gov/linux/slurm/gang_scheduling.html">gang_scheduling.html</a></li>
 <li><a href="https://computing-pre.llnl.gov/linux/slurm/gres.html">gres.html</a></li>
+<li><a href="https://computing-pre.llnl.gov/linux/slurm/gres_design.html">gres_design.html</a></li>
+<li><a href="https://computing-pre.llnl.gov/linux/slurm/gres_plugins.html">gres_plugins.html</a></li>
 <li><a href="https://computing-pre.llnl.gov/linux/slurm/help.html">help.html</a></li>
 <li><a href="https://computing-pre.llnl.gov/linux/slurm/ibm.html">ibm.html</a></li>
 <li><a href="https://computing-pre.llnl.gov/linux/slurm/jobacct_gatherplugins.html">jobacct_gatherplugins.html</a></li>
@@ -64,7 +69,7 @@
 <li><a href="https://computing-pre.llnl.gov/linux/slurm/testimonials.html">testimonials.html</a></li>
 <li><a href="https://computing-pre.llnl.gov/linux/slurm/topology.html">topology.html</a></li>
 <li><a href="https://computing-pre.llnl.gov/linux/slurm/topology_plugin.html">topology_plugin.html</a></li>
-<li><a href="https://computing-pre.llnl.gov/linux/slurm/troubleshoot.html">troubleshoot.html</a></li>
+<li><a href="https://computing-pre.llnl.gov/linux/slurm/troubleshoot.html">troubleshoot.html</a></li> -->
 </ul>
 </body>
 </html>
diff --git a/doc/html/select_design.shtml b/doc/html/select_design.shtml
new file mode 100644
index 000000000..21356a68b
--- /dev/null
+++ b/doc/html/select_design.shtml
@@ -0,0 +1,104 @@
+<!--#include virtual="header.txt"-->
+
+<h1><a name="top">Select Plugin Design Guide</a></h1>
+
+<h2>Overview</h2>
+
+<p>The select plugin is responsible for selecting compute resources to be
+allocated to a job, plus allocating and deallocating those resources.
+The select plugin is aware of the system's topology, based upon data structures
+established by the topology plugin. It can also over-subscribe resources to
+support gang scheduling (time slicing of parallel jobs), if so configured.
+The select plugin is also capable of communicating with an external entity
+to perform these actions (the select/bluegene plugin used on an IBM BlueGene
+and the select/cray plugin used with Cray ALPS/BASIL software are two 
+examples). Other architectures would rely upon either the select/linear or
+select/cons_res plugin. The select/linear plugin allocates whole nodes to jobs
+and is the simplest implementation. The select/cons_res plugin (<i>cons_res</i>
+is an abbreviation for <i>consumable resources</i>) can allocate individual
+sockets, cores, threads, or CPUs within a node. The select/cons_res plugin
+is slightly slower than select/linear, but contains far more complex logic.</p>
+
+<h2>Mode of Operation</h2>
+
+<p>The select/linear and select/cons_res plugins have similar modes of
+operation. The obvious difference is that data structures in select/linear
+are node-centric, while those in select/cons_res contain information at a
+finer resolution (sockets, cores, threads, or CPUs depending upon the
+SelectTypeParameters configuration parameter). The description below is
+generic and applies to both plugin implementations. Note that both plugins
+are able to manage memory allocations. Both plugins are also able to manage
+generic resource (GRES) allocations, making use of the GRES plugins.</p>
+
+<p>Per node data structures include memory (configured and allocated),
+GRES (configured and allocated, in a List data structure), plus a flag
+indicating if the node has been allocated using an exclusive option (preventing
+other jobs from being allocated resources on that same node). The other key
+data structure is used to enforce the per-partition <i>Shared</i> configuration
+parameter and tracks how many jobs have been allocated each resource in each
+partition. This data structure is different between the plugins based upon
+the resolution of the resource allocation (e.g. nodes or CPUs).</p>
+
+<p>Most of the logic in the select plugin is dedicated to identifying resources
+to be allocated to a new job. Input to that function includes: a pointer to the
+new job, a bitmap identifying nodes which could be used, node counts (minimum,
+maximum, and desired), a count of how many jobs of that partition the job can
+share resources with, and a list of jobs which can be preempted to initiate the
+new job. The first phase is to determine, of all usable nodes, which nodes
+would best satisfy the resource requirement. This consists of a best-fit
+algorithm that groups nodes based upon network topology (if the topology/tree
+plugin is configured) or based upon consecutive nodes (by default). Once the
+best nodes are identified, resources are accumulated for the new job until its
+resource requirements are satisfied.</p>
+
+<p>If the job can not be started with currently available resources, the plugin
+will attempt to identify jobs which can be preempted in order to initiate the
+new job. A copy of the current system state will be created including details
+about all resources and active jobs. Preemptable jobs will then be removed
+from this simulated system state until the new job can be initiated. When
+sufficient resources are available for the new job, the jobs actually needing
+to be preempted for its initiation will be preempted (this may be a subset of
+the jobs whose preemption is simulated).</p>
+
+<p>Other functions exist to support suspending jobs, resuming jobs, terminating
+jobs, expanding/shrinking job allocations, un/packing job state information,
+un/packing node state information, etc. The operation of those functions is
+relatively straightforward and not detailed here.</p>
+
+<h2>Operation on IBM BlueGene Systems</h2>
+
+<p>On IBM BlueGene systems, SLURM's <i>slurmd</i> daemon executes on the
+front-end nodes rather than the compute nodes and IBM provides a Bridge API 
+to manage compute nodes and jobs. The IBM BlueGene systems also have very 
+specific topology rules for what resources can be allocated to a job. SLURM's
+interface to IBM's Bridge API and the topology rules are found within the
+select/bluegene plugin and very little BlueGene-specific logic in SLURM is
+found outside of that plugin. Note that the select/bluegene plugin is used for
+BlueGene/L, BlueGene/P and BlueGene/Q systems with select portions of the
+code conditionally compiled depending upon the system type.</p>
+
+<h2>Operation on Cray Systems</h2>
+
+<p>The operation of the select/cray plugin is unique in that it does not
+directly select resources for a job, but uses the select/linear plugin for
+that purpose. It also interfaces with Cray's ALPS software using the BASIL
+interface or directly using the database. On Cray systems, SLURM's <i>slurmd</i>
+daemon executes on the front-end nodes rather than the compute nodes and
+ALPS is the mechanism available for SLURM to manage compute nodes and their
+jobs.</p>
+
+<pre>
+           -------------------
+           |   select/cray   |
+           -------------------
+              |           |
+-----------------   --------------
+| select/linear |   | BASIL/ALPS |
+-----------------   --------------
+</pre>
+
+<p class="footer"><a href="#top">top</a></p>
+
+<p style="text-align:center;">Last modified 31 May 2011</p>
+
+<!--#include virtual="footer.txt"-->
diff --git a/doc/html/selectplugins.shtml b/doc/html/selectplugins.shtml
index 8daac3ef6..104672b4b 100644
--- a/doc/html/selectplugins.shtml
+++ b/doc/html/selectplugins.shtml
@@ -16,18 +16,24 @@ specifications:</p>
 The major type must be &quot;select.&quot; The minor type can be any recognizable
 abbreviation for the type of node selection algorithm. We recommend, for example:</p>
 <ul>
+<li><b>bluegene</b>&#151;<a href="http://www.research.ibm.com/bluegene/">IBM Blue Gene</a>
+node selector. Note that this plugin not only selects the nodes for a job, but performs
+some initialization and termination functions for the job. Use this plugin for
+BlueGene/L, BlueGene/P and BlueGene/Q systems.</li>
+<li><b>cons_res</b>&#151;A plugin that can allocate individual processors,
+memory, etc. within nodes. This plugin is recommended for systems with
+many non-parallel programs sharing nodes. For more information see
+<a href=cons_res.html>Consumable Resources in SLURM</a>.</li>
+<li><b>cray</b>&#151;Cray XE and XT system node selector. Note that this
+plugin not only selects the nodes for a job, but performs some initialization
+and termination functions for the job. This plugin also serves as a wrapper
+for the <i>select/linear</i> plugin which enforces various limits and
+provides support for resource selection optimized for the system topology.</li>
 <li><b>linear</b>&#151;A plugin that selects nodes assuming a one-dimensional
 array of nodes. The nodes are selected so as to minimize the number of consecutive
 sets of nodes utilizing a best-fit algorithm. While supporting shared nodes,
 this plugin does not allocate individual processors, but can allocate memory to jobs.
 This plugin is recommended for systems without shared nodes.</li>
-<li><b>cons_res</b>&#151;A plugin that can allocate individual processors,
-memory, etc. within nodes. This plugin is recommended for systems with
-many non-parallel programs sharing nodes. For more information see
-<a href=cons_res.html>Consumable Resources in SLURM</a>.</li>
-<li><b>bluegene</b>&#151;<a href="http://www.research.ibm.com/bluegene/">IBM Blue Gene</a>
-node selector. Note that this plugin not only selects the nodes for a job, but performs
-some initialization and termination functions for the job.</li>
 </ul>
 <p>The <span class="commandline">plugin_name</span> and
 <span class="commandline">plugin_version</span>
@@ -98,7 +104,7 @@ be stubbed.</p>
 <p style="margin-left:.2in"><b>Description</b>: Save any global node selection state
 information to a file within the specified directory. The actual file name used is plugin specific.
 It is recommended that the global switch state contain a magic number for validation purposes.
-This function is called by the slurmctld deamon on shutdown.</p>
+This function is called by the slurmctld daemon on shutdown.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:<span class="commandline"> dir_name</span>&nbsp;
 &nbsp;&nbsp;(input) fully-qualified pathname of a directory into which user SlurmUser (as defined
 in slurm.conf) can create a file and write state information into that file. Cannot be NULL.</p>
@@ -109,7 +115,7 @@ the plugin should return SLURM_ERROR.</p>
 <p style="margin-left:.2in"><b>Description</b>: Restore any global node selection state
 information from a file within the specified directory. The actual file name used is plugin specific.
 It is recommended that any magic number associated with the global switch state be verified.
-This function is called by the slurmctld deamon on startup.</p>
+This function is called by the slurmctld daemon on startup.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:<span class="commandline"> dir_name</span>&nbsp;
 &nbsp;&nbsp;(input) fully-qualified pathname of a directory containing a state information file
 from which user SlurmUser (as defined in slurm.conf) can read. Cannot be NULL.</p>
@@ -121,9 +127,10 @@ the plugin should return SLURM_ERROR, causing slurmctld to exit.</p>
 <h3>State Initialization Functions</h3>
 
 <p class="commandline">int select_p_node_init (struct node_record *node_ptr, int node_cnt);</p>
-<p style="margin-left:.2in"><b>Description</b>: Note the initialization of the node record data
-structure. This function is called when the node records are initially established and again
-when any nodes are added to or removed from the data structure. </p>
+<p style="margin-left:.2in"><b>Description</b>: Note the initialization of the
+node record data structure. This function is called by the slurmctld daemon
+when the node records are initially established and again when any nodes are
+added to or removed from the data structure. </p>
 <p style="margin-left:.2in"><b>Arguments</b>:<br>
 <span class="commandline"> node_ptr</span>&nbsp;&nbsp;&nbsp;(input) pointer
 to the node data records. Data in these records can read. Nodes deleted after initialization
@@ -134,9 +141,10 @@ of node data records.</p>
 <p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
 the plugin should return SLURM_ERROR, causing slurmctld to exit.</p>
 
-<p class="commandline">int select_p_block_init (List block_list);</p>
-<p style="margin-left:.2in"><b>Description</b>: Note the initialization of the partition record data
-structure. This function is called when the partition records are initially established and again
+<p class="commandline">int select_p_block_init (List part_list);</p>
+<p style="margin-left:.2in"><b>Description</b>: Note the initialization of the
+partition record data structure. This function is called by the slurmctld
+daemon when the partition records are initially established and again
 when any partition configurations change. </p>
 <p style="margin-left:.2in"><b>Arguments</b>:
 <span class="commandline"> part_list</span>&nbsp;&nbsp;&nbsp;(input) list of partition
@@ -146,32 +154,100 @@ consider that nodes can be removed from one partition and added to a different p
 the plugin should return SLURM_ERROR, causing slurmctld to exit.</p>
 
 <p class="commandline">int select_p_job_init(List job_list);<p>
-<p style="margin-left:.2in"><b>Description</b>: Used at slurm startup to
-synchronize plugin (and node) state with that of currently active jobs.</p>
+<p style="margin-left:.2in"><b>Description</b>: Used at slurmctld daemon
+startup to synchronize plugin (and node) state with that of currently active
+jobs.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:
 <span class="commandline"> job_list</span>&nbsp; &nbsp;&nbsp;(input)
 list of slurm jobs from slurmctld job records.</p>
 <p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
 the plugin should return SLURM_ERROR.</p>
+
+<p class="commandline">int select_p_reconfigure (void);</p>
+<p style="margin-left:.2in"><b>Description</b>: Used to notify plugin
+of change in partition configuration or general configuration change.
+The plugin will test global variables for changes as appropriate.</p>
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful, otherwise SLURM_ERROR</p>
+
 <p class="footer"><a href="#top">top</a></p>
 
-<h3>State Synchronization Functions</h3>
 
-<p class="commandline">int select_p_update_block (update_part_msg_t *part_desc_ptr);</p>
-<p style="margin-left:.2in"><b>Description</b>: This function is called when the admin needs
-to manually update the state of a block. </p>
-<p style="margin-left:.2in"><b>Arguments</b>:
-<span class="commandline"> part_desc_ptr</span>&nbsp;&nbsp;&nbsp;(input) partition
-description variable.  Containing the block name and the state to set the block.</p>
+<h3>Node-Specific Functions</h3>
+
+<p class="commandline">select_nodeinfo_t *select_p_select_nodeinfo_alloc(void);</p>
+<p style="margin-left:.2in"><b>Description</b>: Allocate a buffer for select
+plugin specific information about a node. Use select_p_select_nodeinfo_free()
+to free the returned data structure.</p>
+<p style="margin-left:.2in"><b>Returns</b>: A buffer for select plugin specific
+information about a node or NULL on failure. Use select_p_select_nodeinfo_free()
+to free this data structure.</p>
+
+<p class="commandline">int select_p_select_nodeinfo_pack(select_nodeinfo_t *nodeinfo,
+Buf buffer, uint16_t protocol_version);</p>
+<p style="margin-left:.2in"><b>Description</b>: Pack select plugin specific
+information about a node into a buffer for node queries.</p>
+<p style="margin-left:.2in"><b>Argument</b>:<br>
+<span class="commandline"> nodeinfo</span>&nbsp; &nbsp;&nbsp;(input) Node information to be packed.<br>
+<span class="commandline"> buffer</span>&nbsp; &nbsp;&nbsp;(input/output) pointer
+to buffer into which the node information is packed.<br>
+<span class="commandline"> protocol_version</span>&nbsp; &nbsp;&nbsp;(input)
+Version number of the data packing mechanism (needed for backward compatibility).</p>
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful, otherwise SLURM_ERROR</p>
+
+<p class="commandline">int select_p_select_nodeinfo_unpack(select_nodeinfo_t **nodeinfo,
+Buf buffer, uint16_t protocol_version);</p>
+<p style="margin-left:.2in"><b>Description</b>: Unpack select plugin specific
+information about a node from a buffer for node queries. Use
+select_p_select_nodeinfo_free() to free the returned data structure.</p>
+<p style="margin-left:.2in"><b>Argument</b>:<br>
+<span class="commandline"> nodeinfo</span>&nbsp; &nbsp;&nbsp;(output) Node
+information unpacked from the buffer. Use select_p_select_nodeinfo_free()
+to free the returned data structure.<br>
+<span class="commandline"> buffer</span>&nbsp; &nbsp;&nbsp;(input/output) pointer
+to buffer from which the node information is to be unpacked.<br>
+<span class="commandline"> protocol_version</span>&nbsp; &nbsp;&nbsp;(input)
+Version number of the data packing mechanism (needed for backward compatibility).</p>
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful, otherwise SLURM_ERROR</p>
+
+<p class="commandline">int select_p_select_nodeinfo_free(select_nodeinfo_t *nodeinfo);</p>
+<p style="margin-left:.2in"><b>Description</b>: Free a buffer which was
+previously allocated for select plugin specific information about a node.</p>
+<p style="margin-left:.2in"><b>Argument</b>:
+<span class="commandline"> nodeinfo</span>&nbsp; &nbsp;&nbsp;(input/output) The buffer to be freed.</p>
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful, otherwise SLURM_ERROR</p>
+
+<p class="commandline">int select_p_select_nodeinfo_set(struct job_record *job_ptr);</p>
+<p style="margin-left:.2in"><b>Description</b>: Reset select plugin specific
+information about a job. Called by slurmctld daemon after that job's state has
+been restored (at startup) or job has been scheduled.</p>
+<p style="margin-left:.2in"><b>Argument</b>:
+<span class="commandline"> job_ptr</span>&nbsp; &nbsp;&nbsp;(input) Pointer
+to the updated job.</p>
 <p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
 the plugin should return SLURM_ERROR.</p>
 
-<p class="commandline">int select_p_update_nodeinfo(struct node_record *node_ptr);</p>
-<p style="margin-left:.2in"><b>Description</b>: Update plugin-specific information
-related to the specified node. This is called after changes in a node's configuration.</p>
+<p class="commandline">int select_p_select_nodeinfo_set_all(time_t last_query_time);</p>
+<p style="margin-left:.2in"><b>Description</b>: Update select plugin specific
+information about every node as needed.</p>
 <p style="margin-left:.2in"><b>Argument</b>:
-<span class="commandline"> node_ptr</span>&nbsp; &nbsp;&nbsp;(input) pointer
-to the node for which information is requested.</p>
+<span class="commandline"> last_query_time</span>&nbsp; &nbsp;&nbsp;(input) Time
+of previous node state query. Only update the information if data has changed
+since this time.</p>
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
+the plugin should return SLURM_ERROR.</p>
+
+<p class="commandline">int select_p_select_nodeinfo_get(select_nodeinfo_t *nodeinfo,
+enum select_nodedata_type dinfo, enum node_states state, void *data);</p>
+<p style="margin-left:.2in"><b>Description</b>: Get information from a
+select plugin's node specific data structure.</p>
+<p style="margin-left:.2in"><b>Argument</b>:<br>
+<span class="commandline"> nodeinfo</span>&nbsp; &nbsp;&nbsp;(input) Node information
+data structure from which information is to get retrieved.<br>
+<span class="commandline"> dinfo</span>&nbsp; &nbsp;&nbsp;(input) Data type to
+be retrieved.<br>
+<span class="commandline"> state</span>&nbsp; &nbsp;&nbsp;(input) Node state filter
+to be applied (e.g. only get information about ALLOCATED nodes).<br>
+<span class="commandline"> data</span>&nbsp; &nbsp;&nbsp;(output) The retrieved data.</p>
 <p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
 the plugin should return SLURM_ERROR.</p>
 
@@ -181,58 +257,189 @@ registered with a different configuration than previously registered.
 For example, the node was configured with 1GB of memory in slurm.conf,
 but actually registered with 2GB of memory.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:<br>
-<span class="commandline"> index</span>&nbsp;&nbsp;&nbsp;(input) index
-of the node in reference to the entire system.<br><br>
+<span class="commandline"> index</span>&nbsp;&nbsp;&nbsp;(input) zero origin index
+of the node in reference to the entire system.<br>
 <p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful, otherwise SLURM_ERROR</p>
 
-<p class="commandline">int select_p_update_node_state (int index, uint16_t state);</p>
-<p style="margin-left:.2in"><b>Description</b>: push a change of state
-into the plugin the index should be the index from the slurmctld of
-the entire system.  The state should be the same state the node_record
-was set to in the slurmctld.</p>
+<p class="commandline">bool select_p_node_ranking(struct node_record *node_ptr, int node_cnt)</p>
+<p style="margin-left:.2in"><b>Description</b>: This function is called by the slurmctld
+daemon at start time to set node rank information for recording the nodes to
+optimize application performance. </p>
 <p style="margin-left:.2in"><b>Arguments</b>:<br>
-<span class="commandline"> index</span>&nbsp;&nbsp;&nbsp;(input) index
-of the node in reference to the entire system.<br><br>
-<span class="commandline"> state</span>&nbsp;&nbsp;&nbsp;(input) new
-state of the node.</p>
-<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful, otherwise SLURM_ERROR</p>
-
-<p class="commandline">int select_p_update_sub_node (update_part_msg_t *part_desc_ptr);</p>
-<p style="margin-left:.2in"><b>Description</b>: update the state of a portion of
-a SLURM node. Currently used on BlueGene systems to place node cards within a
-midplane into or out of an error state.</p>
-<p style="margin-left:.2in"><b>Arguments</b>:
-<span class="commandline"> part_desc_ptr</span>&nbsp;&nbsp;&nbsp;(input) partition
-description variable.  Containing the sub-block name and its new state.</p>
+<span class="commandline"> node_ptr</span>&nbsp;&nbsp;&nbsp;(input/output) pointer
+to the node data structure. Each node's node rank field may be set.<br>
+<span class="commandline"> node_cnt</span>&nbsp;&nbsp;&nbsp;(input) number
+of nodes configured on the system.</p>
+<p style="margin-left:.2in"><b>Returns</b>: true if node rank information has
+been set.</p>
+
+<p class="commandline">int select_p_update_node_state (struct node_record *node_ptr);</p>
+<p style="margin-left:.2in"><b>Description</b>: push a node state change
+into the plugin. The index should be the index from the slurmctld of
+the entire system.</p>
+<p style="margin-left:.2in"><b>Arguments</b>:<br>
+<span class="commandline"> node_ptr</span>&nbsp;&nbsp;&nbsp;(input/output) pointer
+to the node data structure. Each node's node rank field may be set.<br>
 <p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful, otherwise SLURM_ERROR</p>
 
 <p class="commandline">int select_p_alter_node_cnt (enum
 select_node_cnt type, void *data);</p>
-<p style="margin-left:.2in"><b>Description</b>: Used for systems like
-a Bluegene system where SLURM sees 1 node where many nodes really
-exists, in Bluegene's case 1 node reflects 512 nodes in real live, but
-since usually 512 is the smallest allocatable block slurm only handles
-it as 1 node.  This is a function so the user can issue a 'real'
-number and the function will alter it so slurm can understand what the
-user really means in slurm terms.</p>
+<p style="margin-left:.2in"><b>Description</b>: Used for systems like an IBM
+Bluegene system where one SLURM node is mapped to many compute nodes. In
+Bluegene's case one SLURM node/midplane represents 512 compute nodes, but
+since 512 is typically the smallest allocatable block SLURM treats
+it as one node.  This is a function so the user can issue a 'real'
+number and the function will alter it so SLURM can understand what the
+user really means in SLURM terms.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:<br>
 <span class="commandline"> type</span>&nbsp;&nbsp;&nbsp;(input) enum
-telling the plug in what the user is really wanting.<br><br>
+telling the plugin how to transform the data.<br>
 <span class="commandline"> data</span>&nbsp;&nbsp;&nbsp;(input/output)
-Is a void * so depending on the type sent in argument 1 this should
-adjust the variable returning what the user is asking for.</p>
+Is a void * and the actual data type depends upon the first argument to this
+function (type).</p>
 <p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful, otherwise SLURM_ERROR</p>
 
-<p class="commandline">int select_p_reconfigure (void);</p>
-<p style="margin-left:.2in"><b>Description</b>: Used to notify plugin
-of change in partition configuration or general configuration change.
-The plugin will test global variables for changes as appropriate.</p>
+<p class="footer"><a href="#top">top</a></p>
+
+
+<h3>Block-Specific Functions</h3>
+
+<p class="commandline">int select_p_update_sub_node (update_block_msg_t *block_desc_ptr);</p>
+<p style="margin-left:.2in"><b>Description</b>: Update the state of a portion of
+a SLURM node. Currently used on BlueGene systems to place node cards within a
+midplane into or out of an error state.</p>
+<p style="margin-left:.2in"><b>Arguments</b>:
+<span class="commandline"> block_desc_ptr</span>&nbsp;&nbsp;&nbsp;(input) pointer
+to the modified block containing the sub-block name and its new state.</p>
 <p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful, otherwise SLURM_ERROR</p>
 
+<p class="commandline">int select_p_update_block (update_block_msg_t *block_desc_ptr);</p>
+<p style="margin-left:.2in"><b>Description</b>: This function is called when the admin needs
+to manually update the state of a block. </p>
+<p style="margin-left:.2in"><b>Arguments</b>:
+<span class="commandline"> block_desc_ptr</span>&nbsp;&nbsp;&nbsp;(input) block
+description variable.  Containing the block name and the state to set the block.</p>
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
+the plugin should return SLURM_ERROR.</p>
+
 <p class="footer"><a href="#top">top</a></p>
 
+
 <h3>Job-Specific Functions</h3>
 
+<p class="commandline">select_jobinfo_t *select_p_select_jobinfo_alloc(void);</p>
+<p style="margin-left:.2in"><b>Description</b>: Allocate a buffer for select
+plugin specific information about a job. Use select_p_select_jobinfo_free()
+to free the allocated memory.</p>
+<p style="margin-left:.2in"><b>Arguments</b>:
+<span class="commandline"> job_ptr</span>&nbsp; &nbsp;&nbsp;(input) pointer
+to the job being initialized. Data in this job record may safely be read or written.
+The <i>nodes</i> and <i>node_bitmap</i> fields of this job record identify the
+nodes which have already been selected for this job to use.</p>
+<p style="margin-left:.2in"><b>Returns</b>: Pointer to a select plugin buffer
+for a job or NULL on failure. Use select_p_select_jobinfo_free() to free the
+allocated memory.</p>
+
+<p class="commandline">select_jobinfo_t *select_p_select_jobinfo_copy(select_jobinfo_t *jobinfo);</p>
+<p style="margin-left:.2in"><b>Description</b>: Copy the buffer containing select
+plugin specific information about a job. Use select_p_select_jobinfo_free()
+to free the allocated memory.</p>
+<p style="margin-left:.2in"><b>Arguments</b>:
+<span class="commandline"> jobinfo</span>&nbsp; &nbsp;&nbsp;(input) pointer
+to the select plugin specific information about a job.</p>
+<p style="margin-left:.2in"><b>Returns</b>: A copy of jobinfo or NULL on
+failure. Use select_p_select_jobinfo_free() to free the allocated memory.</p>
+
+<p class="commandline">int select_p_select_jobinfo_free(select_jobinfo_t *jobinfo);</p>
+<p style="margin-left:.2in"><b>Description</b>: Free the buffer containing select
+plugin specific information about a job.</p>
+<p style="margin-left:.2in"><b>Arguments</b>:
+<span class="commandline"> jobinfo</span>&nbsp; &nbsp;&nbsp;(input) pointer
+to the select plugin specific information about a job.</p>
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
+the plugin should return SLURM_ERROR.</p>
+
+<p class="commandline">int select_p_select_jobinfo_pack(select_jobinfo_t *jobinfo,
+Buf buffer, uint16_t protocol_version);</p>
+<p style="margin-left:.2in"><b>Description</b>: Pack into a buffer the contents
+of the select plugin specific information about a job.</p>
+<p style="margin-left:.2in"><b>Arguments</b>:<br>
+<span class="commandline"> jobinfo</span>&nbsp; &nbsp;&nbsp;(input) pointer
+to the select plugin specific information about a job.<br>
+<span class="commandline"> buffer</span>&nbsp; &nbsp;&nbsp;(input/output) pointer
+to buffer into which the job information is packed.<br>
+<span class="commandline"> protocol_version</span>&nbsp; &nbsp;&nbsp;(input)
+Version number of the data packing mechanism (needed for backward compatibility).</p>
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
+the plugin should return SLURM_ERROR.</p>
+
+<p class="commandline">int select_p_select_jobinfo_unpack(select_jobinfo_t **jobinfo_pptr,
+Buf buffer, uint16_t protocol_version);</p>
+<p style="margin-left:.2in"><b>Description</b>: Unpack from a buffer the contents
+of the select plugin specific information about a job.
+The returned value must be freed using select_p_select_jobinfo_free().</p>
+<p style="margin-left:.2in"><b>Arguments</b>:<br>
+<span class="commandline"> jobinfo</span>&nbsp; &nbsp;&nbsp;(output) pointer
+to the select plugin specific information about a job. The returned value must
+be freed using select_p_select_jobinfo_free().<br>
+<span class="commandline"> buffer</span>&nbsp; &nbsp;&nbsp;(input/output) pointer
+to buffer from which the job information is unpacked.<br>
+<span class="commandline"> protocol_version</span>&nbsp; &nbsp;&nbsp;(input)
+Version number of the data packing mechanism (needed for backward compatibility).</p>
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
+the plugin should return SLURM_ERROR.</p>
+
+<p class="commandline">int select_p_select_jobinfo_get(select_jobinfo_t *jobinfo,
+enum select_jobdata_type data_type, void *data);</p>
+<p style="margin-left:.2in"><b>Description</b>: Get the contents of a field
+from the select plugin specific information about a job.</p>
+<p style="margin-left:.2in"><b>Arguments</b>:<br>
+<span class="commandline"> jobinfo</span>&nbsp; &nbsp;&nbsp;(input) pointer
+to the select plugin specific information about a job to be read.<br>
+<span class="commandline"> data_type</span>&nbsp; &nbsp;&nbsp;(input) identification
+of the field to be retrieved.<br>
+<span class="commandline"> data</span>&nbsp; &nbsp;&nbsp;(output) data read
+from the job record.</p>
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
+the plugin should return SLURM_ERROR.</p>
+
+<p class="commandline">int select_p_select_jobinfo_set(select_jobinfo_t *jobinfo,
+enum select_jobdata_type data_type, void *data);</p>
+<p style="margin-left:.2in"><b>Description</b>: Set a field in the select
+plugin specific information about a job.</p>
+<p style="margin-left:.2in"><b>Arguments</b>:<br>
+<span class="commandline"> jobinfo</span>&nbsp; &nbsp;&nbsp;(input/output) pointer
+to the select plugin specific information about a job to be modified.<br>
+<span class="commandline"> data_type</span>&nbsp; &nbsp;&nbsp;(input) identification
+of the field to be set.<br>
+<span class="commandline"> data</span>&nbsp; &nbsp;&nbsp;(input) data to be written
+into the job record.</p>
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
+the plugin should return SLURM_ERROR.</p>
+
+<p class="commandline">char *select_p_select_jobinfo_sprint(select_jobinfo_t *jobinfo,
+char *buf, size_t size, int mode);</p>
+<p style="margin-left:.2in"><b>Description</b>: Print the contents of the select
+plugin specific information about a job.</p>
+<p style="margin-left:.2in"><b>Arguments</b>:<br>
+<span class="commandline"> jobinfo</span>&nbsp; &nbsp;&nbsp;(input) pointer
+to the select plugin specific information about a job.<br>
+<span class="commandline"> buf</span>&nbsp; &nbsp;&nbsp;(input/output) buffer
+into which the contents are written.<br>
+<span class="commandline"> size</span>&nbsp; &nbsp;&nbsp;(input) size of buf in bytes.<br>
+<span class="commandline"> mode</span>&nbsp; &nbsp;&nbsp;(input) print mode, see enum select_print_mode.</p>
+<p style="margin-left:.2in"><b>Returns</b>: Pointer to the buf on success or NULL on failure.</p>
+
+<p class="commandline">char *select_p_select_jobinfo_xstrdup(select_jobinfo_t *jobinfo, int mode);</p>
+<p style="margin-left:.2in"><b>Description</b>: Print the contents of the select
+plugin specific information about a job. The return value must be released using the xfree() function.</p>
+<p style="margin-left:.2in"><b>Arguments</b>:<br>
+<span class="commandline"> jobinfo</span>&nbsp; &nbsp;&nbsp;(input) pointer
+to the select plugin specific information about a job.<br>
+<span class="commandline"> mode</span>&nbsp; &nbsp;&nbsp;(input) print mode, see enum select_print_mode.</p>
+<p style="margin-left:.2in"><b>Returns</b>: Pointer to a string on success or NULL on failure.
+Call xfree() to release the memory allocated for the return value.</p>
+
 <p class="commandline">int select_p_job_test (struct job_record *job_ptr,
 bitstr_t *bitmap, int min_nodes, int max_nodes, int req_nodes, int mode,
 List preemption_candidates, List *preempted_jobs);</p>
@@ -250,31 +457,31 @@ the job with appropriate constraints.</p>
 to the job being considered for scheduling. Data in this job record may safely be read.
 Data of particular interest include <i>details->contiguous</i> (set if allocated nodes
 should be contiguous), <i>num_procs</i> (minimum processors in allocation) and
-<i>details->req_node_bitmap</i> (specific required nodes).<br><br>
+<i>details->req_node_bitmap</i> (specific required nodes).<br>
 <span class="commandline"> bitmap</span>&nbsp; &nbsp;&nbsp;(input/output)
 bits representing nodes which might be allocated to the job are set on input.
 This function should clear the bits representing nodes not required to satisfy
 job's scheduling request.
 Bits left set will represent nodes to be used for this job. Note that the job's
 required nodes (<i>details->req_node_bitmap</i>) will be a superset
-<i>bitmap</i> when the function is called.<br><br>
+<i>bitmap</i> when the function is called.<br>
 <span class="commandline"> min_nodes</span>&nbsp; &nbsp;&nbsp;(input)
 minimum number of nodes to allocate to this job. Note this reflects both job
-and partition specifications.<br><br>
+and partition specifications.<br>
 <span class="commandline"> max_nodes</span>&nbsp; &nbsp;&nbsp;(input)
 maximum number of nodes to allocate to this job. Note this reflects both job
-and partition specifications.<br><br>
+and partition specifications.<br>
 <span class="commandline"> req_nodes</span>&nbsp; &nbsp;&nbsp;(input)
+the requested (desired) number of nodes to allocate to this job. This reflects job's
-maximum node specification (if supplied).<br><br>
+maximum node specification (if supplied).<br>
 <span class="commandline"> mode</span>&nbsp; &nbsp;&nbsp;(input)
-controls the mode of operation. Valid options are
-SELECT_MODE_RUN_NOW: try to schedule job now<br>
-SELECT_MODE_TEST_ONLY: test if job can ever run<br>
-SELECT_MODE_WILL_RUN: determine when and where job can run<br><br>
+controls the mode of operation. Valid options are:<br>
+* SELECT_MODE_RUN_NOW: try to schedule job now<br>
+* SELECT_MODE_TEST_ONLY: test if job can ever run<br>
+* SELECT_MODE_WILL_RUN: determine when and where job can run<br>
 <span class="commandline"> preemption_candidates</span>&nbsp; &nbsp;&nbsp;(input)
 list of pointers to jobs which may be preempted in order to initiate this
-pending job. May be NULL if there are no preemption candidates.<br><br>
+pending job. May be NULL if there are no preemption candidates.<br>
 <span class="commandline"> preempted_jobs</span>&nbsp; &nbsp;&nbsp;(input/output)
 list of jobs which must be preempted in order to initiate the pending job.
 If the value is NULL, no job list is returned.
@@ -283,8 +490,7 @@ otherwise the existing list will be overwritten.
 Use the <i>list_destroy</i> function to destroy the list when no longer
 needed.</p>
 <p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
-the plugin should return SLURM_ERROR and future attempts may be made to schedule
-the job.</p>
+the plugin should return SLURM_ERROR.</p>
 
 <p class="commandline">int select_p_job_begin (struct job_record *job_ptr);</p>
 <p style="margin-left:.2in"><b>Description</b>: Note the initiation of the specified job
@@ -294,8 +500,7 @@ is about to begin. This function is called immediately after
 <span class="commandline"> job_ptr</span>&nbsp; &nbsp;&nbsp;(input) pointer
 to the job being initialized. Data in this job record may safely be read or written.
 The <i>nodes</i> and <i>node_bitmap</i> fields of this job record identify the
-nodes which have already been selected for this job to use. For an example of
-a job record field that the plugin may write into, see <i>select_id</i>.</p>
+nodes which have already been selected for this job to use.</p>
 <p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
 the plugin should return SLURM_ERROR, which causes the job to be requeued for
 later execution.</p>
@@ -303,7 +508,7 @@ later execution.</p>
 <p class="commandline">int select_p_job_ready (struct job_record *job_ptr);</p>
 <p style="margin-left:.2in"><b>Description</b>: Test if resources are configured
 and ready for job execution. This function is only used in the job prolog for
-BlueGene systems to determine if the bglblock has been booted and is ready for use.</p>
+BlueGene systems to determine if the bgblock has been booted and is ready for use.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:
 <span class="commandline"> job_ptr</span>&nbsp; &nbsp;&nbsp;(input) pointer
 to the job being initialized. Data in this job record may safely be read.
@@ -324,30 +529,139 @@ nodes which were selected for this job to use.</p>
 <p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
 the plugin should return SLURM_ERROR.</p>
 
-<p class="commandline">int select_p_job_suspend (struct job_record *job_ptr);</p>
+<p class="commandline">int select_p_job_signal (struct job_record *job_ptr,
+int signal);</p>
+<p style="margin-left:.2in"><b>Description</b>: Signal the specified job.
+This is needed for architectures where the job steps are launched by a
+mechanism outside of SLURM, for example when ALPS is used on Cray systems.</p>
+<p style="margin-left:.2in"><b>Arguments</b>:<br>
+<span class="commandline"> job_ptr</span>&nbsp; &nbsp;&nbsp;(input) pointer
+to the job to be signaled.<br>
+<span class="commandline"> signal</span>&nbsp; &nbsp;&nbsp;(input) signal to
+be sent to the job.</p>
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On
+failure, the plugin should return a SLURM error code.</p>
+
+<p class="commandline">int select_p_job_suspend (struct job_record *job_ptr,
+bool indf_susp);</p>
 <p style="margin-left:.2in"><b>Description</b>: Suspend the specified job.
 Release resources for use by other jobs.</p>
-<p style="margin-left:.2in"><b>Arguments</b>:
+<p style="margin-left:.2in"><b>Arguments</b>:<br>
 <span class="commandline"> job_ptr</span>&nbsp; &nbsp;&nbsp;(input) pointer
 to the job being suspended. Data in this job record may safely be read or
 written.  The <i>nodes</i> and/or <i>node_bitmap</i> fields of this job record
-identify the nodes which were selected for this job to use.</p>
+identify the nodes which were selected for this job to use.<br>
+<span class="commandline"> indf_susp</span>&nbsp; &nbsp;&nbsp;(input) flag
+which if set indicates the job is being suspended indefinitely by the user or
+administrator. If not set, the job is being suspended temporarily for gang
+scheduling.</p>
 <p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On
 failure, the plugin should return a SLURM error code.</p>
 
-<p class="commandline">int select_p_job_resume (struct job_record *job_ptr);</p>
+<p class="commandline">int select_p_job_resume (struct job_record *job_ptr,
+bool indf_susp);</p>
 <p style="margin-left:.2in"><b>Description</b>: Resume the specified job
 which was previously suspended.</p>
-<p style="margin-left:.2in"><b>Arguments</b>:
+<p style="margin-left:.2in"><b>Arguments</b>:<br>
 <span class="commandline"> job_ptr</span>&nbsp; &nbsp;&nbsp;(input) pointer
 to the job being resumed. Data in this job record may safely be read or
 written.  The <i>nodes</i> and/or <i>node_bitmap</i> fields of this job record
-identify the nodes which were selected for this job to use.</p>
+identify the nodes which were selected for this job to use.<br>
+<span class="commandline"> indf_susp</span>&nbsp; &nbsp;&nbsp;(input) flag
+which if set indicates the job is being resumed after being suspended
+indefinitely by the user or administrator. If not set, the job is being
+resumed after being temporarily suspended for gang scheduling.</p>
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On
+failure, the plugin should return a SLURM error code.</p>
+
+<p class="commandline">int select_p_job_expand_allow (void);</p>
+<p style="margin-left:.2in"><b>Description</b>: Report the ability of this
+select plugin to expand jobs.</p>
+<p style="margin-left:.2in"><b>Returns</b>: True if job expansion is
+supported, otherwise false.</p>
+
+<p class="commandline">int select_p_job_expand (struct job_record *from_job_ptr,
+struct job_record *to_job_ptr);</p>
+<p style="margin-left:.2in"><b>Description</b>: Transfer all resources
+currently allocated to one job to another job. One job is left with no
+allocated resources and the other job is left with the resources previously
+allocated to both jobs.</p>
+<p style="margin-left:.2in"><b>Arguments</b>:<br>
+<span class="commandline"> from_job_ptr</span>&nbsp; &nbsp;&nbsp;(input) pointer
+to the job to have all of its resources removed.<br>
+<span class="commandline"> to_job_ptr</span>&nbsp; &nbsp;&nbsp;(input) pointer
+to the job getting all of the resources previously allocated to either job.</p>
 <p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On
 failure, the plugin should return a SLURM error code.</p>
 
+<p class="commandline">int select_p_job_resized (struct job_record *job_ptr,
+struct node_record *node_ptr);</p>
+<p style="margin-left:.2in"><b>Description</b>: Remove the specified node
+from the job's allocation.</p>
+<p style="margin-left:.2in"><b>Arguments</b>:<br>
+<span class="commandline"> job_ptr</span>&nbsp; &nbsp;&nbsp;(input) pointer
+to the job being decreased in size.<br>
+<span class="commandline"> node_ptr</span>&nbsp; &nbsp;&nbsp;(input) pointer
+to the node being removed from a job's allocation.</p>
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On
+failure, the plugin should return a SLURM error code.</p>
+
+<p class="footer"><a href="#top">top</a></p>
+
+
+<h3>Step-Specific Functions</h3>
+
+<p class="commandline">bitstr_t *select_p_step_pick_nodes(struct job_record *job_ptr,
+select_jobinfo_t *step_jobinfo, uint32_t node_count)</p>
+<p style="margin-left:.2in"><b>Description</b>: If the select plugin needs to
+select nodes for a job step, then do so here.<br>
+<b>NOTE:</b> Only select/bluegene selects the job step resources. The logic
+within the slurmctld daemon directly selects resources for a job step for all
+other select plugins.</p>
+<p style="margin-left:.2in"><b>Arguments</b>:<br>
+<span class="commandline"> job_ptr</span>&nbsp; &nbsp;&nbsp;(input)
+Pointer to the job which is attempting to allocate a job step.<br>
+<span class="commandline"> step_jobinfo</span>&nbsp; &nbsp;&nbsp;(input/output)
+On input, this is a pointer to an empty buffer. On output for a successful
+job step allocation, this structure is filled in with detailed information
+about the job step allocation.<br>
+<span class="commandline"> node_count</span>&nbsp; &nbsp;&nbsp;(input)
+Number of nodes required by the new job step.</p>
+<p style="margin-left:.2in"><b>Returns</b>: If successful, then return a
+bitmap of the nodes allocated to the job step, otherwise return NULL and the
+logic within the slurmctld daemon will select the nodes to be allocated to
+the job step.</p>
+
+<p class="commandline">int select_p_step_finish(struct step_record *step_ptr)</p>
+<p style="margin-left:.2in"><b>Description</b>: Note that a job step has completed execution</p>
+<p style="margin-left:.2in"><b>Arguments</b>:<br>
+<span class="commandline"> step_ptr</span>&nbsp; &nbsp;&nbsp;(input)
+Pointer to the step which has completed execution.</p>
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
+the plugin should return SLURM_ERROR.</p>
+
 <p class="footer"><a href="#top">top</a></p>
 
+
+<h3>Advanced Reservation Functions</h3>
+
+<p class="commandline">bitstr_t * select_p_resv_test(bitstr_t *avail_bitmap,
+uint32_t node_cnt)</p>
+<p style="margin-left:.2in"><b>Description</b>: Identify the nodes which best
+satisfy a reservation request taking system topology into consideration if
+applicable.</p>
+<p style="margin-left:.2in"><b>Arguments</b>:<br>
+<span class="commandline"> avail_bitmap</span>&nbsp; &nbsp;&nbsp;(input)
+a bitmap of the nodes which are available for use in creating the reservation.<br>
+<span class="commandline"> node_cnt</span>&nbsp; &nbsp;&nbsp;(input)
+number of nodes required to satisfy the reservation request.</p>
+<p style="margin-left:.2in"><b>Returns</b>: A bitmap of the nodes which should
+be used for the advanced reservation or NULL if the selected nodes can not
+be used for an advanced reservation.</p>
+
+<p class="footer"><a href="#top">top</a></p>
+
+
 <h3>Get Information Functions</h3>
 
 <p class="commandline">int select_p_get_info_from_plugin(enum select_data_info info,
@@ -356,41 +670,66 @@ struct job_record *job_ptr, void *data);</p>
 about a job.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:<br>
 <span class="commandline"> info</span>&nbsp; &nbsp;&nbsp;(input) identifies
-the type of data to be updated.<br><br>
+the type of data to be updated.<br>
 <span class="commandline"> job_ptr</span>&nbsp; &nbsp;&nbsp;(input) pointer to
-the job related to the query (if applicable; may be NULL).<br><br>
+the job related to the query (if applicable; may be NULL).<br>
 <span class="commandline"> data</span>&nbsp; &nbsp;&nbsp;(output) the requested data.</p>
 <p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
 the plugin should return SLURM_ERROR.</p>
 
-<p class="commandline">int select_p_pack_node_info (time_t last_query_time, Buf *buffer_ptr);</p>
-<p style="margin-left:.2in"><b>Description</b>: Pack node specific information into a buffer.</p>
-<p style="margin-left:.2in"><b>Arguments</b>:
-<span class="commandline">
-last_query_time</span>&nbsp;&nbsp;&nbsp;(input) time that the data was
-last saved.<br>
-<span class="commandline"> buffer_ptr</span>&nbsp;&nbsp;&nbsp;(input/output) buffer into
-which the node data is appended.</p>
-<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful,
-SLURM_NO_CHANGE_IN_DATA if data has not changed since last packed, otherwise SLURM_ERROR</p>
-
-<p class="commandline">int select_p_get_select_nodeinfo(struct node_record *node_ptr,
-enum select_data_info info, void *data);</p>
-<p style="margin-left:.2in"><b>Description</b>: Get plugin-specific information
-related to the specified node.</p>
+<p class="commandline">int select_p_pack_select_info(time_t last_query_time,
+uint16_t show_flags, Buf *buffer_ptr, uint16_t protocol_version);</p>
+<p style="margin-left:.2in"><b>Description</b>: Pack plugin-specific information
+about its general state into a buffer. Currently only used by select/bluegene
+to pack block state information.<br>
+<b>NOTE:</b> Functions to work with this data may be needed on computers
+without the plugin which generated the data, so those functions are in
+src/common modules. The unpack function is performed by
+slurm_unpack_block_info_members() in src/common/slurm_protocol_pack.c
+using BlueGene specific data structures. Use destroy_select_ba_request()
+in src/common/node_select.c to free the data structure's memory.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:<br>
-<span class="commandline"> node_ptr</span>&nbsp; &nbsp;&nbsp;(input) pointer
-to the node for which information is requested.<br><br>
-<span class="commandline"> info</span>&nbsp; &nbsp;&nbsp;(input) identifies
-the type of data requested.<br><br>
-<span class="commandline"> data</span>&nbsp; &nbsp;&nbsp;(output) the requested data.</p>
+<span class="commandline"> last_query_time</span>&nbsp; &nbsp;&nbsp;(input)
+Time when the data was previously requested (used so only updated information
+needs to be sent).<br>
+<span class="commandline"> show_flags</span>&nbsp; &nbsp;&nbsp;(input) identifies
+the type of data requested.<br>
+<span class="commandline"> buffer_ptr</span>&nbsp; &nbsp;&nbsp;(input/output)
+Pointer to buffer filled in with select plugin state information.<br>
+<span class="commandline"> protocol_version</span>&nbsp; &nbsp;&nbsp;(input)
+Version number of the data packing mechanism (needed for backward compatibility).</p>
 <p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
 the plugin should return SLURM_ERROR.</p>
 
+<p class="commandline">int *select_p_ba_get_dims(void);</p>
+<p style="margin-left:.2in"><b>Description</b>: Return an array containing
+the number of elements in each dimension of the system size. For example, an IBM
+Bluegene/P system has a three-dimensional torus topology. If it has eight elements
+in the X dimension, and four in the Y and Z dimensions, the returned array will
+contain the values 8, 4, 4.</p>
+<p style="margin-left:.2in"><b>Returns</b>: An array containing the number of
+elements in each dimension of the system size.</p>
+
+<p class="commandline">void select_p_ba_init(node_info_msg_t *node_info_ptr, bool sanity_check);</p>
+<p style="margin-left:.2in"><b>Description</b>: Construct an internal block allocation
+table containing information about the nodes on a computer. This allocated memory
+should be released by calling select_p_ba_fini();</p>
+<p style="margin-left:.2in"><b>Arguments</b>:<br>
+<span class="commandline"> node_info_ptr</span>&nbsp; &nbsp;&nbsp;(input)
+Information about the nodes on a system.<br>
+<span class="commandline"> sanity_check</span>&nbsp; &nbsp;&nbsp;(input) if set
+then validate that the node name suffix values represent coordinates which are
+within the system's dimension size (see function select_p_ba_get_dims).</p>
+
+<p class="commandline">void select_p_ba_fini(void);</p>
+<p style="margin-left:.2in"><b>Description</b>: Free storage allocated by
+select_p_ba_init().</p>
+
 <p class="footer"><a href="#top">top</a></p>
 
+
 <h2>Versioning</h2>
-<p> This document describes version 1 of the SLURM node selection API. Future
+<p> This document describes version 100 of the SLURM node selection API. Future
 releases of SLURM may revise this API. A node selection plugin conveys its ability
 to implement a particular API version using the mechanism outlined for SLURM plugins.
 In addition, the credential is transmitted along with the version number of the
@@ -399,6 +738,6 @@ to maintain data format compatibility across different versions of the plugin.</
 
 <p class="footer"><a href="#top">top</a></p>
 
-<p style="text-align:center;">Last modified 5 October 2009</p>
+<p style="text-align:center;">Last modified 3 August 2011</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/slurm.shtml b/doc/html/slurm.shtml
index a9fc487df..1a0df209d 100644
--- a/doc/html/slurm.shtml
+++ b/doc/html/slurm.shtml
@@ -51,37 +51,41 @@ for the motivated end user to understand the source and add functionality.</li>
 the world including:
 <ul>
 <li><a href="http://www.nytimes.com/2010/10/28/technology/28compute.html?_r=1&partner=rss&emc=rss">
-Tianhe-1A</a> designed by The National University of Defence Technology (NUDT)
-in China with 14,336 Intel CPUs and 7,168 NVDIA Tesla M2050 GPUs.
-The world's fastest super computer with a peak performance of 2.507 Petaflops.</li>
+Tianhe-1A</a> designed by 
+<a href="http://english.nudt.edu.cn">The National University of Defence Technology (NUDT)</a>
+in China with 14,336 Intel CPUs and 7,168 NVIDIA Tesla M2050 GPUs, with a peak performance of 2.507 Petaflops.</li>
+
 <li><a href="http://www.wcm.bull.com/internet/pr/rend.jsp?DocId=567851&lang=en">
-Tera 100</a> at CEA with 140,000 Intel Xeon 7500 processing cores, 300TB of 
+Tera 100</a> at <a href="http://www.cea.fr">CEA</a>
+with 140,000 Intel Xeon 7500 processing cores, 300TB of 
 central memory and a theoretical computing power of 1.25 Petaflops. Europe's
 most powerful supercomputer.</li>
+
 <li><a href="https://asc.llnl.gov/computing_resources/sequoia/">Dawn</a>,
-a BlueGene/P system at LLNL with 147,456 PowerPC 450 cores with a peak
+a BlueGene/P system at <a href="https://www.llnl.gov">LLNL</a>
+with 147,456 PowerPC 450 cores with a peak
 performance of 0.5 Petaflops.</li>
+
+<li><a href="http://www.cscs.ch/compute_resources">Rosa</a>,
+a CRAY XT5 at the <a href="http://www.cscs.ch">Swiss National Supercomputer Centre</a>
+named after Monte Rosa in the Swiss-Italian Alps, elevation 4,634m.
+3,688 AMD hexa-core Opteron @ 2.4 GHz, 28.8 TB DDR2 RAM, 290 TB Disk,
+9.6 GB/s interconnect bandwidth (Seastar).</li>
+
 <li><a href="http://c-r-labs.com/">EKA</a> at Computational Research Laboratories,
 India with 14,240 Xeon processors and Infiniband interconnect</li>
+
 <li><a href="http://www.bsc.es/plantillaA.php?cat_id=5">MareNostrum</a>
-a Linux cluster at Barcelona Supercomputer Center
+a Linux cluster at the <a href="http://www.bsc.es">Barcelona Supercomputer Center</a>
 with 10,240 PowerPC processors and a Myrinet switch</li>
+
 <li><a href="http://en.wikipedia.org/wiki/Anton_(computer)">Anton</a>
 a massively parallel supercomputer designed and built by
 <a href="http://www.deshawresearch.com/">D. E. Shaw Research</a>
 for molecular dynamics simulation using 512 custom-designed ASICs
 and a three-dimensional torus interconnect.</li>
 </ul>
-<p>SLURM is actively being developed, distributed and supported by
-<a href="https://www.llnl.gov">Lawrence Livermore National Laboratory</a>,
-<a href="http://www.hp.com">Hewlett-Packard</a>,
-<a href="http://www.schedmd.com">SchedMD</a>.
-<a href="http://www.bull.com">Bull</a>.
-It is also distributed and supported by
-<a href="http://www.adaptivecomputing.com">Adaptive Computing</a>,
-<a href="http://www.infiscale.com">Infiscale</a> and
-<a href="http://www.ibm.com">IBM</a>.</p>
 
-<p style="text-align:center;">Last modified 1 December 2010</p>
+<p style="text-align:center;">Last modified 5 May 2011</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/slurm_banner.gif b/doc/html/slurm_banner.gif
index 3a6d6d517ff0340764b5a687ae5fde4fed4d598c..8564b4b837ff23a89823e61e64cc950cc4911215 100644
GIT binary patch
delta 17147
zcmW)mRaBf!vqlGIaE$>U9w4~8LxAA!!5xAJ2o5h{a1Rn(g1fuBTX1*R!F@RSPhZrk
zwYo35@2dB+p|uF9;Ru*gGLpP}#=VG|pnZfYOl&a9|5^|#Jnkcy7Wf(i0;4?Np4xOe
zt^Y(p#=|B5rs6l2%IkczGE`GAktqsACy}i!{F@^kN3TCzTQpsuoFkhqTUR_=!o%Q=
zYYYyKqWR_;;&Vxb67`lsSiN(&zHGU{aeFvjuAzLj#r<md6gN9yzVaS1HO9}OG^*y?
z{E_}>W7YOR45eI#JkV4<j;-e`xL76G-HyuoKw_orE=5SfZ5^O&+f;WvUuim$q0mx4
zj``7c2Uoz)5D|j*9)geUml?n%kddZu*V1&gKba$!DNH00x@vR#0XHM7<!*!H0scYL
z01;wybTD0FIMLqbh47!m#)9oeiw`24v{A3pNYsh=TN}-W9{|C#+W4uxo8N<tg=@6#
z^8}geL`1{PLbj$z3r-9|bS!*lL%LV18A5XFLK^z%vG-$G*dOVc5bmZV6Lr><q*)bm
z#2;J1bk#}m(cHy_rfN*ci*sQ-*cz0IeDGZ3I5EOq<6rXVO*`?@^h59BIb8YPBVT^k
zT!*ed)JzcCU;>V!2$=7Gi=z>TXz!{rS4;j>a8u2q$?($rRsiy%uisDi+lr_8<F}V=
zh4kUNs4(Lv-mv8#H+FQZ9Jb#Cv`995!{73Fz7H2>2XYr%=CJAv7Zzoq%hKk;DNKgH
zh35B%9F+vPdLES)TFBZ2XGftk<R&3L6_?j+eWlOqlzjva%L{_SO3Om9(GTnLEtV+?
z|8CJ+XM8b8Jt$ynPx+Rah^=GW6f$mQS69uw%-D2D%AZ{jO<l}VqhFs|+SrLs$XZ&i
zBg<T!a7)M9NHOKbQhuXJe^QWcJzUuY(h^{f*iiL8t;w~HVCv7{9{!Am!3`o#Ui(lP
z4FR9@;D7-%Vpt|Bfuq1t8(s0!dHrmP!1u!Wz2~aGWcZOT<I>{8F4OM`uzviN5g4n@
zP!ov!F{5eY!-=c_C2^hAa%gjzw{#x6nf>Mv`D4yLuFQ4OT1($;#*v1k9=>2BV#l*;
z3#8Z0*x}gotOd;r)y)|6V7bn#ca&XkTCy-dY?}cM3S65W;^jBnDR(b7>lRSoyH(T7
za`%G>4&l2^`v%|Z!zf+2=XjId9>|T6Mj0f6!w3Q)^fHilVe`#&B5Y<pt$M6XhDS9n
zjEX1-t!E|L3Ej>l+CSbb-HqK{FK^qmZ|y;q+7DI@JGi%Yba7sn)25?6p8TLJ9Z&1W
z=Rn6Tyk`A&rlI5YKUTlj@J#sBfY5zaq!&iO20{(K00Q3R&;6El5StqxvULn_d~A3p
z*4X)w5bd`uzKJ|wS@(vzGLR~l9Ns&?xBq7H=ezg1sE#o~T$814@YRnQqU||;K2!t9
zAEh_ZANE_PLLGcOoBiIik@sOJSB0tdZUG;vwELKxF2Z!Lvp*Ej_Or&SMCk8i6V4Cz
zb4UG+bO@j%?33(gss9^3qqa@FnIy?s{U!o{nBf<p)^kQx#klq6e4Lk-7MlANt%I3E
zR^u)$#Xc1kuV+R{g4QqN$sTUrYDV!fSw<rEGTdTxhfe8RzXCIRRFe8G$*PsCd_EAH
zpz$8TH6RDH6@w<iei7{U9_GiDiy)e<_lfcIw=aU|vEg`r)MQMe3XGC9Ve_^5gf(<u
z?8~Q87q9mS`($LD!s60~XQ;UsL&nwW)zVJ_3Ix`h#vEOp)289Aom_`>V_yBv>Glvy
z{?|oG-&?gzkivoJ@;BuG`{_T}yZeCfhv9MSwVFRuwS|H?e1Gj-<Fnz!v=VvR&{gRy
z0Yo-G1mYbM0J9f_V&uo#0tNBR8i7Az(Mn%@Q+_Y~3rr*W6Up1tKigmfg2#&>7EY+1
z#{46j_~lTAr}Aem=?5g~$b3~kKg9RSKXPdbt+h`>74pue5s5ZHD9q&QbjSc#is*+U
zottlSR{R=aio3Lmr#c$VNAJpng=|!P(8n6sxk@d{C=?TPzI12b6lzNxX~u;u*g6?y
zIt%^ODH+tZ4w^0dQg@8Rouyv$7VnQv9iyonh7L2bYmvzp__2WsNIgntqgq12mb1!J
zJug->CFSN=UrJZA;Vpixb{#FC%F?M(Ta8y6O;V=+-|_r_L0pl+DoEw9S&weYwXlKf
z*qqKr15i~fiBYIfVp-NM+q$l`m@U-_d|aNo<Ss;AE!X@ne7g9^E%ThD%yqg&qc7jJ
z<<E~}k7Zf?3U;1ioHYi;{iU%<+4_b%!BdkK&xzix4{fWZ#m+^Gnwk8-hmzHVa-{Ma
zC;~YMgy9Yn@uQJPYP?3kQjU|;ULtut$bxi~<=gSeZEde5HubiZNd9bI=(<d(RhDGb
z2<JDa)8Z>t53K%O6S2T-lRT_P&Gfs7ahF-wqt>YX5*}aEYR;|YF=iihWn49C&foJe
zVr)^I)MdNhQ=L5O1m^&<R&CAM_W8!0=bYmk(kxhr9!JdEuQEQQ9|#mIP9_F%=G=_#
z^UODm8QXKDqu5z~K3|+v!@5Zs5VV#Mc^tEdy2`vDHkXQ?osN#W$raJJVTfKD$rHXQ
z*V4DrJWiPnDF0FILvpCeLO9uh%~=FQF0843Fc;uGM5JQH@rD7E)bJ!3@EmKj`)CdV
zHi4QWVI_E`a$_>w;HH!)^3X$92W&j+*M8TA;CuW8c|~q)mkB@rc}KLaHwV)AP!0>E
zd|pRFY3%ur8Rmo2x`8Wo-^G-%>jQ2bd9#1l3+GvL@Fx>oN5QY@MuJ~to22c&y1MtV
zj2|ZD2<|cs@B+OWQRcshRyMHD?*_=qor83V*Wa?-59zPL{_M9Liu3S~@Qi=|qxRn+
zgJt8S5Vm>5JMVq3eD~>ZzBjRVddHu5nun5gT#DcM?BOYS&ZuQv<XjO|vC+7XbQs*k
z1*e^VvwRpRIQd?#_Hrnx<=G#H{XL&-^&~X9WvXc0t<?c|+0vWfAE3kbNEgUBK$!Pj
z{U!`WlIbMF3X$TjtU^Yy>dAXbLgB9d8)-WVMR>W#ZpkzP|Hlg7!{@mkF<!e+u@O9e
zu;}`mP=SEQYPdcT3~x`Ec;1&<Kby*I?<<0@KcBb(PxG(st4-%Gm%+f(f)wyrK=yK5
z(eW_L@(Nt8E4@C7uRm_j`@U?G0q1cdFE7EsegSY>;JY;stOolX?0etS_`>%6UK4@G
zd0(u1KM1PdW`Q4mqxbl}-_pMCdg9M{6#rv4U&0U{Fu_j}?VndDz9`cE)Y|^E3;v|R
zc5f>|HZfSZF<6V_2#CLM_y|B(tT-jXI6d1Wz%z`Djr=E4qu-sD9|LZnDsP~=cAzFf
zAfaQR${->`OrQ>JAdDyQDJW34G0?y&&~hQrR66jRbdc&`paX4?p>~iqYOq$JgMCQg
z*N|ZQ#^5oYK-a<`22il&LXe5H58SpeSe+omN;|~zz}smdFg7W~B`MfGB*Z8wG$<()
z@FxiVtsP|07#z<Vmaq`&G8p197!0)vEJO_o91P8C45{J`*B=ZVrU|u63No+?ucQqv
zBM2=a2%ZcMDk6v|)edSH40j6&uR92a(uV1{hu7SPe;W+UE(}Y*4{jMWdD{uXWyPto
z^m>@Y!NkQ8I{<-MaaM70&_N*S!hmYvK5*(jxZNru&OJJVHjth*_<123ejg3`F9wk>
z2J$rq*&{}?F&ecg26Hh6+anO&Bj(LR%=@pu@H~F0@%|z>jLA#1!@<SDG7`ka#X)67
zcqbYF27wq65O86jvjq}vXSh6T<*%1QL~f5b-sCv`rZ~aHIN^sl(f{Jb_~Ipiukn%|
z@zTlhvQ6>wi}8vN@yh=tsPZMKe@)Q*n!o`<OS|}m)rrMjgM1Q$eNu@WI|%}1fp%%p
zo?)>N+&Erxh?Yz|ol%@~XdGz_;*aD+`V9o0&_w5=M0(}KpE8NOhlv3XiT>Y`_(GGQ
zLrH#gNzkUmz{R8(x+J%6ao+y{$uWybE*{Bg$w|OaqQ|!+x8x+Bhr|?_#GKH?q{YO*
z(3F_3DbZ+2{Cuf(O-VvBsj2@ZRW2s~NKS5{OZjz}lEasjT$Iwfm^6@_63mx4GL%x$
zlvFE|6!$fy=f9+k(6rz1!^Es_i9-+R-7@Lr|7A2DriGxT|NbvyL?(UO14x-WOkNpE
z*$+*>^hn81PNA#JXz@sCLQ7viOc6cIOnXRA_zG4a$J^z_0g2-5uwrlHVjuWnpR<CP
zXF>c2Alv^Ct{mkZzh%x3C8dO>6o1S3{56HtG27iE+buMkF)Z8nFq^J9n;|8e(>i;l
zDEmPshmSsoA3cZVF^7*Z6W}V&5lG2o^2}i*%;nI@;W*0XCCpT`&Q|oy5eUnnr%#om
z&l9B2t^O}Z(mF?EDOa*M_cMRKmQKF$Qnt)djtYIA)o{LLaUP3jp3!5TjBKv+W8N3~
zT!XNDS)Bsq;sQVPT)62`K@feR`BA=2N`XFqt`vH{5`TdkVSeCIJ`gIK4_(TOPRZwP
z&eK`?HznuCKNffw7g~5Ga>Rg*zvI~WVZEKq+QcovA;jJm#XijPw28stNB|)l5%S?e
zG{XvX35!*Z3V(&=(ZLW|HV_0WvjscB4`|@ArPARegpnfzRv6-hb(sr)*<5qZd`jlX
zBjQ$A_Iz>n{8H&Ekb=nCSvpx<I;2y!JzTz2TsoVAC`ewR<eB*3nSYv63JR~_(<!5o
zt=!eAq)(~1A1+-%uR!oByK62RvCch2FMCZXJ@YIlodj=t=Dna-%srOQKUN@Nlo1n^
zyBsAh9aXt~gI8dWl#e!7!-}hv(5u`-t5=%K2vW;<<jQG)lnP;i9G1!QTfz!v+44!*
zGQmmk$C1+aHf1A^<)X*siY3{n!)2^S1(+>4M@Kb~kuoHKDzHH9GJW+<O6E~m)eK?H
zc5^jxc-28z{SbW>BY)Wye=X)yo%3?FcX+vNYQwwbN|L9tE&hfh&%`O~2A<>6_e2$5
z0_8JDb>I=8R<^lbIW>D2y=<$v$<?|-o~VXzsltk(`Ug=R*CQf_W0sAw#KlHd)noG(
zZcEi8_C^f$-A6n=R`5kio#rrjak$K#u32Qc`hKaxgCR#FwTii<l-a0a>=<!kq;;&N
zb@aG;8h+e5Pt-Q|gg9N&Hq+8N-_o{r+`8V<HbVro?=H8kl(f&<w68q1pX#<>>b6bF
zwV#f(-x77)33Qx?x1ZT`91wLPM6|6xwQi=iO{KO!3$$-wbX<gYAU$_t%Xcn>cV6js
ztQ~h?2zI>^bsaBv?jLtOdv%eobWuKctfsaRmUb|DxBq?WzFzLe*6RQXc5!01GshGg
ztAKzLKkORxmTCs<ig4`P4H8HUj>#Xed{!HeZ94;F&qH|6q(Gm#UZ19SpLSZGPHUgu
zN}s`VpAm7tiD18(UcZHRzg1elb!)%vO27Scza#O0({n$I5d@_fd^?#XLX2ItJWwrL
z0>6QgK*$lSVGwh2B<ImS(aOQkm4i_$gFqDVQ0xg}?C4-z>mV~|NHk_B^<*$UZ7BX^
zNE8>6A~>8LF_f=2R7^ZP*)o(VKNPbv_{V#==6NV-bU00L<d=M3;pp)1(vcSL;p~;6
zPGU$I@#rtip)$si`q9B=#*r4x(Qd(!ypv%yy|IDTVKv6#l+uxQ+u`<;v9ySxYM^vz
zSa57MZ8%bXc<6aBnsF@Fc629gY=CiWA!7J4ZKMrz{MdUW>v=4dapHHxaL@C2vEc9w
z<|K@GIInfGGHp1=cJz95vbS}jfN^r8bQp1UqI`64cxCiIpP`Jj$%M4Ai?rcJLHMxD
zB*ge{kJsoxHA4wb81}W}Z}InW3IHpZTxba9{dZk&_^@?4_8FY;43;<@l!}}YjhP7_
zossvMR4$v56PhWuozbwHRTr97S)I|*pHXa^{q;Pf%rs}jG;6#%qir{5sXwQyFsE-f
zXRkk__cG@sG_T<^siZLTJ$+WZY~H?Y-c4cN$!_Lb<cy)voL$+xoWg?M3oz?DHs?q(
z>&G-_b~@vsKL?D>{}7r>U|LKSnoqQwFG@we+5lZ{^oTO{)*dz29Yry8fmna{${9gi
zg=Qn#=2BJ{LKPNTSC`vgmODvSx`kFEULalMXoE~>)-lMvt1II#E0ZLvQ*fcx8U58c
zpVfu*)up!8mDSa?m(>jtU~S88RVJ%gU;~61wp1ZkB7BmyU5R}^i34H<+4$iw%M2<_
zLO5C1dhH-0Mi3C`27>(z7<=PYVF6?Sf%|O0?KUvlH(s$eC6ytV_8TaqoA5U9!Xy^)
z+9m=nXrU5JaJGR6-@sztKqcKokJ@-|zxf8fg<ybe_8AWgdmF%G-fY$1z&F^U#NLpr
z1cOMCu;E~_s15M=hKlHhl<$_T!RDv&9i^ykE`tquIGBlam!^DMS#d!|c<0q;SD}1I
z1HOSCwaaBcV}7<nfW0kgzpb{mEl9e{(!MRjyk}vsB?jO7p0NuT%Gk1J-sflDg9vZA
zw(rr~9|Q>R15#`Ix@UWiid$6Wd%xE9V+}U>gm-^oZ$hK?sIfOVvl6T~KvgI>Z;rBr
zSJuu+a40=MpiYntE)I_sjtK&qc?`tYcQes`pILENU2(&l6zPjHM1}RF3;U#B_@p=E
zWYFNGm-J*j>SS{Kq+9rO68of&`J~eT`4?<+W$knYU`8{qJeguX5r|n4AU|91Jvkyp
zQtCWa5<OiQKT(>*TCs=Bjh}RNo?46UTx6V@Re`OQ2bCO8V8SO0ou@G|XVdMlIr|eO
zT$uGFWX$(`7=F?}4kM_5{XIJwRlFExK7%O1h&xVZ;qVJ;!%O^i*!<eb3+XA<>m_66
z<sFc5#-enE@p{79aYp5E!7p;baehJ5amJ){`Z4<K%lgHX@CCQwDM`h(=sJuUxKNn5
zmh8Ab62AN-a;;_v`-*cZ_Ijnta;f46G513TIpAQi#^#=_oke0F<D%jEVc|M@f*h&A
zMxZPGK?OgEgwd5W&JA<s72Eptf+8dc0PaIF@54IoBi8Su*6+<7VQaz<2$NU~Ahd81
zhy*T@b>}8R7P1QLK5YGrOc)lW^hnlz|AiclYz-?5xJMyGTkJf&UOW8-gG8*K#^OF^
z1NX}Yk8Nbn-6GGuhR^-L^Wf`!O6K$E`tvya^?8!)Wvb$Ny5d=`bIN)HijoB!T^M1%
zd3@Q+I{H4bdEs}r0t2ysAU64lY~Fb{%kn6W`+6zzdTsc63%uTEzTO!kbxfQt;-Z26
zNGed-B48pP1VxVG24N8u^A+}QN`ztNh;rWNX7|vbwEX#_EuktAq(ng?kvwJ8ApJqj
zdPV!YS$`6j-CA$5+P-3jkPjSy5UL+2XMc$#{X$<G7At8rb3zLe72`(5-ReuVv|pPo
zUc^Nn48rNu2BE!OvB{!Vi2I}-yOsOaO<J$n`LMap%dLDh$nE&?2T_u-QJeqYTy^nJ
zVS4let)#YJd06^Ep(yys#99gs5$>;Ba1wtf6vGA$3r$}qon33_TNX7yhHi*sqg4XC
zLjF0~ac{8&ZGSAB&)IR!KT^O<{<-FItNZl~OSrMZX=^kb?4X!>an=_2CvI(x`0?_f
zxr}IHT<FE~q+{20er@f^`+hT83*~&-$7!-PG4`h!3K!wzfS>!@umXCdl5T3Eab64%
zqAy6fld*<CTRf76P-P21?<fSHwZ2e=kA!WNH+D<l9O_&!zmvNj3B%LPHTi;L$9d6*
zPlsU?Ob1Fe!L%jE+z5Sd7jc|BS6xFP)vO2&_(EvNIGAN*<gM}+wZx2xNQTM-GtX8w
z{!M+N9wb7R;=k-F-VZi2DpGhHS8!P0sDbTY#rHNl?-Urft!tEk=95-u#Wh4OP6d1|
z+iFEIv^IvmlYCo6Rf3`rwMin=To)yIjy_J6&j@YmQ`|fs5*paWNjUrE3~#1cBy_QE
zW+Zsb->b-d*iHDmM~<Y92AWnDNAv`B?thkP+$!#}&+x(f!v%s3h}GPCpuc73*Q;0-
zd1J?}_kG|~gP!vQ@a0Z#n1Zi6jIP%w5uLp;o~rMA5%1W5%h2xSv~y7iL`-H_gDFp%
z(>7NI_^Uic&iPy?mM0y*;KmWZ_QTCEl4q~cG`gqne%nVJugNS<Ns`YzL5n%r?6-c9
zn_kja?mJZW6nC*mE&}om5GpqOa*7_6@ELsxW&3*;qYYXNAj)J%m+!GonV-LeUc78K
zZ(Z6@-+WwDSN?ch*0=X)T@gL#&u%+xe;GnoePH!Tx*Z#T9h%XYVq4mkL6}$kFz1OL
z(fo_*xY&aeHI-!CN*trAPs3k|^H$iTuIZ}XGmN^Mv+%U)kXul1l8r~up<QJD$)RQf
zJKeTY31|5l=n%qx`QBzT|9D;QjI(O9qHme*w787je*IS;NB4TwgD1mjRSUn}V`89u
z)qP8Y%tvpkh^*axG0s8QV^3xx{l`K>w9w6AopyHwjo;U81kl-6(@$1JE|b1FnSB<Z
zyt2NS>A}gM(>ClM2MmViaN+_aWYV8M|KU|I32Ijnz!znW6Ojexvigi+uk4SL=D#7B
zjf~^U8lWdDj`Ck);IyDI`uhlS<W*N9Xkn#4y*ya9KC7!J4~y;VU^j-ypg5ubZlIu^
z0u|Mu_<;EzW0Yhgut!J}k|xaGBgY7hM!JuQoh?kYbrYX$u#0xhF_dT~8`GSipTS-w
zOv8H{Krwaa;%om4)sZj-Z(2!C4ONAEyX1UWl<p^RoebmN*~Sf~mFBX*ji4{3ATb`4
zWX@2D4oS-)UqzLYCRP3QlhKrx@;;Cw>$B*SC<t_`67%u}<6~KbCQts22*!V+qDqs$
zNl`+jm7HSYeo3U_Rl~{U$?<BEdLb0x$fi+afVw7m&^Dj#&*A7det+pzWiUrD-Kdeo
z4#wn+7)BMjf`u?{$ajtqj@i&rVp6J<zFc|kNk1g>IgaFRZ^V87KLKv#)I^r<L~MaW
zqAuA~DbsrSq6ML&Kdq<J&Z#UV>JBGO(bZEh%nK#vixh(?YIEKx97qW1j6<ESlRhHR
z0bESYlTJF<IW#`hT$I-12?p`mtaSx4pNmIhx87%Cy&OuLH&3T7%@nffAK()rgc@QN
zih;Dn84S_ntjefJk+bpwi7Szfvbr$M+-c-cj#U2qeve^EVu{z^q)DDpS7vW+qqn-G
zS;zdY+^VcZXFdf!XH22^HYSO|&<!`#2q^qgVbzC-9+^DXviAYwRGvnk$V(AQdRr>M
z8$!jXE8qX9EN{8MXj8rTwbp4U%iH`|S4DTet@}q+;g2%&3(tkIt-6}x-4fF`%Q}ns
zziS&KnM~iv=`IgzR(G^9eGgt<T0nHGOS?IBGPcoc)8uK$O<-_^3h1?*xi+Pl=L3#c
z#Y=074b8QA#}+m=t5?<aZ6zPhO$Ltj_vY@(O#L8L$S`%}(VovwBmF>8O614BZICRb
zUmb2|G|34F%MvC=v}Uchfb3c^X?~7h2}8nm)9xXHI|fe_8)G{V^inc^50UcT`Vg1Y
zOMMH&Ux4+(=hC`*w2j2^+b5B0fi+$X2~Ynp70j)-+l^T?cvYbj=!>96_cU9-QmV8F
z?f*o&YK@XDV>>ztZK)Q^LIeiGio8wF!;(f@e7;8~=<SfU@b<F7S!1`;EI!RQ4H}SM
zrs+nQd@OJu^R>T<c@Q+m8%P=q3i_TF%4or!>M{OJvnqA$*-}J~e<HS>4M=@+y7xIc
zc_JQ<Gj;Z4SE1u!{Feb|8baHFSZc~&8&Zxek&{5x?q~!Hm0iK0+jWp>&%7jMWyOa8
zEL7=EGPp7bq1e0(f!7YxR+%H>tGuBURHw(Fle>Pg^3>MLc-a=_THhpO?-abevNGq|
z*s*Ho0EecoOy++FRU%+I10^l1w-#X8DUiKg%hTFn5WDQ!>X`$|Gx^b)EB<71q;K>x
zMhq|7uga5i2e+Sc`Kx!$i$+1L$$0B`t~?#$;|^Z$TGv($c)G-~9b-ClceW&XMx~-)
z@zcb6)KVVJePvESj`s$Op~twCfqg90dmn_t-}0(|m4n~5FC^jt{C%_L=!*Xj%eFU<
z>cX9Kj-Pj>COq1>GJb@qwH;9@c@F)scgd1V-!~xWp`5CO`e>tqz>{1Mh`bTPdh`N%
z7)lip$Pa}WR5B+E5^lB*;tkP1RZ-$w`a5>ctiE=cIN-g<)!{Mjwsxhj4Bp4}kL2`*
zuU$TJzwAznQcVLDaPnkJ@i$zVLGCEzZzDFIzcr&J6C3*8AtN9ktBH7T;jiBhNcmnS
zba?OatUpWy`(EdXyiBAbV0Cs5lwrhx%&^9+P!GKr&`Psxd$$oV76yb#{qB7Vci)(1
z!S^>O10URc;dceV%VDPH3;sH0U1Gr0atH5%;0&ec$T83{k}5*o(h43GL>?7n77ZQo
zf^aof_dX!qkF<rjXMf2OjaP;;Cw0DvcMi*SzGM76E+_hKv=uJ?8A-3}1qw#g`;4;E
z`k3>1tfp&hx$C{4@N<pG+ul|Jv#vwRZlY^JUS2V>6Ms@pAtF67h>IAKx7gbVcsG88
z*hjPWM_r&x&=F$7h5*nC0=$0UzhuR+#r9d~ZMslz@>b*+GK{X6zIV_=s2FpnI4>@k
z*$*nXBQC7oEj-#ItS%<<UP2HeAwJV1MkVpd8!UF+BO=%<ECKar3t4vV6@3;Lz7FBE
zl3<&Z_#7a?Oq(r*+b22N!)(OP?B4r1035F!0C1uPeY_|zGJ>!z^hgMPS!JN$!tG_Y
zO0qQy&=L$$V3g1gfJ&K3C`d?1V@jCY_KBKFh>rGI5PuOam9&(XwBeMLaOvY=mHZqb
zDLUF~8X@^zuh-nRR~XV~hWW)wuvgF<>Uz?n8Q&|oBK{3B@Qt|dhkU;?93mx{JK$?3
z34Du?(kJc>P?xfHk@_Yf=}0Usy&|QqE~aPpC5*GzB3H^?P%=<nLS{x>xOU*jNxz8T
zfS!xAgG;Zgt)w|bCc;}v40BMew<4b(gtUO-yAkx5OwV~eWKTedN+1Yw^z-3ug^2pq
zN`s{cz`<scT7vyC*Zl#lgU~ctW=CiVFe585I$U-=Ty`>CAtAxK;m=vwYYPf0!jwZ3
z4B^C;E1Th|IzcoVWGugyOmO_fR`o@>b+~Q^@zY_D(T0Dxc(0TwT9=s|^kAf+SGMYU
zq{3UiKVH5rZFD$7z9m4uzg2!<W_YZ3wEwz)08?&abaXgZzLIkcK29~(CowhyaE{Ho
zD9m|}2x<o$r{ei8plCcMXYP#s#lQe<1cMiX!8I``3qQ+s0{&u-EkG0mA<%sn=zje8
zVea@*>G;9F6nj+hXh!k)TJi8i@c;tlY^=5iA+y00eVn0Zm=i}5p|#KBCz!}Tn-p0a
zJL3nDbzR2q<Hy%Af|{seIo*MABQprI5hQ+L{M=UQ!b}++F^;6Kd=Q}whbW=fO=2i0
zpG8cd^-W^iO=9dS<Mb(G_l@Iz`1|h5-}hsahny<!eI{|tRnA+XER1;9sWBeB;%nDK
zAl43_7|=?Dc#RdIp;3T<``-^YlW1*F0e3~}GDYe@)k8hFDjgD-rVdc0*H@*no1!n9
zVo3P=Z-*xd%7-cx%{p-~Gqu0-)+lM}&~}<BVLZ7rpKo%C-&~a$Er|1ns+>C7{f;VQ
zq$+j#gdFE2YiNJ~>nId8h~B)K=Ve+nQ0=HxjYmQ4I89xsZi?|nRnBLMB~O+0MV;q#
zN_JLVG(nxNOkMVa2B4BY!!)L<tgj&(IK?ogE_FIXdpdP*JFTyv!91&`{-Q4RVeb57
zRvBwn6>E;BZd%%0(}-(M{?nYA(42z))E9S1YVVjYEIDIFg;PEap;H085!_W1Q+ppM
z1%k-8X&CEkD%U~X+MsR;^B#Hgo^|tH>GSS=^PccoE%()VfX_W(+8xj1R7+s8&3{$P
z!;V0va^6>AX0CDGzbi-@SNlp{TVQgY^<F5b4oYJJL97F_-Xp~~F2wH6$Lg!bbA62%
zn~&R_cmJ~B6S<gR{xzj;G4)0(t!*)R_G{wlV!-TtlEPQd^sgCxUo*ItGO%<~%D|E^
zq;zAy&46MQV3Dj6RfOaVs`q5DuQUj80R>sRz6>$QKsz8oK_@$5o~;hbR|u^$U*>mT
zW((A9N(VRiEH?!%w?^u=&Mr5t>UIR`^7)k-!E~ERrrCV54SDtINz_(Lz@4+p%?Wz-
zZQy1>R4JpC0ewAz`!)~^sXua=k2g}<5d!Cnfp!2AXjhQcp0VXRpA~_H1wLH8cz23!
z5;f{@y?LgUfxhKFrj<Ud)pew`7JdDu7yV8yz5Y7z*r&Cv-PM0<=aa#vhVHug>K519
zzPUl;5B<3>27CGjJ*TT{H*32-^F8_oYYGN?8oH}U1`V@%gII<zjpgk=g9{RaZ6y7p
zNMQMTRd*xOpuNoigsp#lV*t<7Z3tZNNHDlvU29%7xS2H^^3k)gD@LXrMxyN@_W2Du
zo;#2G1khrzr{UI!`sI5dN@YP2^Osxe)(3wW)#pL~%iko-2lHha6W4DN8*KhZszy9#
zOj5temt_2L&zOvQ>sQ@d(N1HETevY%KcFT`um!C%rpz~{;9d>J0vT08X~vDE4tV(s
z!PC3Ot1ZTiz7$uj;KfyAQqmbvOaMju1n2;XIv@Or8<o`6ghqIasNRI7eT&}Lgz<Kh
zdT*QUbW;LuDvY;7%&kv;yG@3?L&&{DVZSY9Zz>GmCQ{rHE;kmlFcY%hmS5WhsIkoy
zU3VmJw?9yCX_A_M4BAop4VH=8A)PbR9yjB@HKoKhSJpIJOyB)tu&H2RE=g^sPHiej
zx}`L3YzjAHoZFGv+acGq_(;7a><BRg1(C4&fq7ZIVPOa@I;H6-$OOhkOj=y9K>ZjK
z^V=Pjxm|1QJ*oVC&-#5Yc>g}&J-6=-M?xJ6GFY(G=N;AeHRD+O7w{%}YepI=TYdRt
z#b=ddi4YWAZ^^%~AcZX)`e)x)I0Slo5RH5ogLn9g`Y@LJFi!F?Uh^=);_$cYVIurM
zSUJE33mHvR<e^K&zAf|nnL;jIcm~}jq*I2AS4+b0Q0fett9g`X@h^XI{g=%g71~?#
z9YDjU0}G=L3>~fcu#k&ftpzvUa9LUFqY`8_9Tn^y<?lf`k&%lspsSQ=x%o#JGuBO-
zL>2wVt#ik1d&lj!#~sKgop>i*)F<8CCq0rUz2i0#lYs@RKap7dd=`*>Lr}2yG9BR?
zR-wUf=a75U2$6XG;M>5-KVWB@w+Ee;Je|=zowYcfb3L86I2EqcgMowQe6wvEk7noE
zghfwf?gQ81fkwDzn~JA;8)-|zcKV<ohNr-J;mCReJ3%9xg*hQUUb{s_G}|oenKkW&
z+p}}za~R(F1@-wQ_xY9N`L*Wxjm7z`>-k-f!;EHt0qWTbBOnV{fi$z}Er(95euB1U
zkbN7uFg}CIj{<c=9A?HLf`b7_`(We&DCz(dCD9Sh0frU~L)(YSFTkcPU`Q1(j0)Hr
zoC~yl$IwqlSm%Me+yv-27t=8TMllfPqT={MN8tm#N>JRal?~>;BmO=(e)0$_8ir)(
zNEi;`YlH)i_?C{V&KL5mWlKRY`33E_EKZmMjyQlLMWz!<0}L|}HqCv>fPaAlz}Rq{
zF)W>MJ1&s-9sj+IA=r_b#tFymihTmcMdr++<jkGuO!f*xt+?XDzh<|*;$gX9Zg8Sz
zabYcR6ftySc)g^!gUQ@EvkzS20L~~pmrMiKfL!qPn+_M54kv2EE8KnOw<zCPq+Fy1
zE`>T^8UvR!EY8dh*UE{o{{D7#Prt5I^y14g_^rbhqaS!d*k+U7un~z9xWaO{5t+DF
z0$@VqH`WceHUqbB3S9M*Oqm5BG7S=;rjh6cxArK6M&uA1d?(d)Xb3g2KFIA0$;C7P
zA4GmzacMhn=RJStljvey;Oa|r0i3(}b-4Z<07Gfq{a)PyCT@j{?tK7vCna}VDR-#h
zeNg5-YP5Ug#9buKkIxH%`TqWeynYBqAji4GbUfE_Kfjgt;#(4~M#{JT^R5hl`)6E;
zTxCGofJeHUdoX?gbiyMGKR}4UGaXp>_|pIpr1eZ^dCYM0%w6|LTZg7MKyoG?@+Odk
z^|5SWNWsybIXIAV%Sb4}V}1jY^W9^~x<}=NXD0q-c7<oD;iFI%#BCtRK-AN^AwafM
zEuZC46f`Pr_0+t+P~7m?JMoya|4>limFe(U68+qk=vANiT$JgVjpNlD4Lrm1rJi%O
zJc|;&bEKXp*1hW8yfWuK1UsMmCZ7NBc+QHr_x*#W4c<-r54pfgR>f2Oz~lIxM^=G%
z+Qj2Tg?ByK<D#2S;l#^SuunhWTSNn2pn>O}drU{aWE#TrD&W1DPidAu*%L2+Ej>$V
zUN-jO3k{DM`yS&RaM%R!l5Ocz^6L9|_fk;d+duC;Bjs7V|6Gg`BGKl1x{(O#6!w0e
zbHSKA{n#74SZVG|a6!=@j6i+@25oeLL1>audf?GSW;uE!v}!6VoRK6Z!+Mvm&N%XE
z%*HzC5_GuAS@HBpOxD-5Q@J9MZ<&^A4<_<E>5&VqZ;EsEWx#+&EZpUWK~u)0k7DdY
z1ihwF!mTZn=Z%(Xb^=`DF=Y&YI!I#^Psj{Uw<2Ew`ZZzZ7B-g`@?qf1$$L7}ETv`c
zNIjX#?Y?9p2jNE-C(A;)&%tZ*p2vri2^3`JOv$mwQ^7O^w{*+jr^VaKPaPd`yV?V>
zID~VhpBp7nQJ#VScq{$7{Gsr-N!LFHuTFDQjb2aIR_m=Y<vPSa7?K{iE-VK|tv6#N
zX|??F)rU8&aocul)X0Rr$6R1(%WW8<2HxMJxJjy153zLd-j6URrqie^i?q$AsH8%2
zv_Nh@QD#`MkSVj=kM(Z{)I<*P{ipzC?LQ`|e)}Z0vULD{NCbSBO*2Oc;yYj($frs`
z>Gk=IwlR6u$^AQ87GG4VC>G{Vl%Es``7&bE*<IH??>-IfP%F@L@BQM~voeem;}2bP
z$3P!4ja=IdCXEu*k+DqUA)U>OGrbMTh~4V?Z#iw<{9!XiX-jG?4uk%n@R$EM>emQ$
z<`gRBEafI3)V`yz>mR|J+>QocVFs%N_kRqw*0><p07)amoI4S)abca<o!3qzQNOK2
zb5qc|ld^sS?`=w}Af)aa|B7yD^$6ZzX$`XCacT9Oi2zerf-SppNSl#KY3dNO?ulp{
zf!Ct-oY=B$&7!LuTGQWt-LjxA(U8)n3j=PAV4(ed>?yo{-XNU3`>#p(N&Qkr3#?`D
ztvqW7h89Cr%Lno2%C5H@%N4EI;>(T$D_a;&gSVQatnKJDCEt5!TZpQArY=X=`#HQy
zPe*9vY@8-0)}GJ$-WTY>y58L}Rt%9PdS8z3N0nZVXiUgoP24&iS2DGO-pL5;L_ih!
zLBIx7L8Dj2>~AsQ*V-V2|CdWKcq=DBPn=mn<@JJ3oc&5}5vA16)~bzFfn@N3Epf`I
z`^iu##rvHIU1q-OZ8#hAP7()p6Lpf~8sBCv4ZLwTHx>JFmsYmCalg!TE1asGc$|N?
zLdw2*v+WrEFjnk_{Rk^VArm}L?8yMUPI`Hw;1639ndRQQRZ7wPuqew3&&ziFiPrt-
z^>W^m1|>!B``JW?=c6JU9G}CMyK>K)Lphe$r#lu0;d>ZSrqCw3F^29A1wQ!a1aJ4B
zPRCp@f{iHAGCBLNuv!KG{ri5N4HX=aJPiUI<OfEc^h1!?qH5YmzjkzM9|1zK-vv%e
zd_@0=Mlh@#6zsf-VZ6{y7=|76M<5N$hPR#IXfl|K%H+WIp!*ZDN)T`BM_faKp8uv|
zLWFuhV&~oWT>Nzm(!tcnOTQPRzGMx~en)}T#QQ})#^il2tqJC~dkcf-_fX5Uk00z%
zCCNPBgvgAV;<PmOac=zzvrIDq@S#b)^d|7Xff}uUC?{y8sn=AZv^jG=Y%fTCvi=)u
zYHLj1mn1<U%N88U_=yUkX;7)ZD#0n<g!BPbQZ@g3e9a04o&&8EIlog#aPAgkGOsil
zd@|gMIFsr}k(>(QbVz3@1!J_fG|iGz*bFDdd#Q(B^1Z28IZSG{@K9L+fN(mnegTQa
zdovu@$BqO;&uDS{cYSzinLifafWYGC*k8Dv2ycuAc*z}O^<%_BWOJ!VMHb~4mfl2U
zdDBWz(*F&OjmySsD}2XN{5MjEGl%$vR$82XD%#T}ms*HUR;_p{p}IDY*@sTvv{_Yk
z1`+?b6`7UzeW-f`<<E1>U_cN=j(|G)4o*>Qt*Zem>ik}5O7uhp0^^ECIfhV@c5HlM
zGLuE;uKyKjzhX@!^G376Nu$)lZd+@DP_wq0t1O2gP-~S|t10ZJ%t7Bqd;d|RA}pc8
zFoA)3N@u>SIHBADtJL5sOtWR}LzRmLgYFpNLKi%t#I5YufF*ps1t7#L%Z47CEru;r
zy}hlp*FX8nm%7x{e^c$xRc0|u-#mytR~4McXt|I6wcYhYMdM4EC7YZUchp>Y&Z&*%
z>5}G1{g0~Vz>=@8N6Sl+4dvg<m@OJ)_4<!yBU{r;jh#o9=3_OQyv7)eG>CNP?QiPF
zXUh!9FjmXy@tc}<8Gx^4sk+z5JT>qf+&<>Hq9BjboX+EeL#4##F67>v!qq`Z?vrHo
z%qS@7(n0~sbXBLjUJib>->Jps@YjLzx~hJk5?a+XllLXs?_cfMOtqd(KGZb!G5r1>
zVJB!x*x^3F?aCJ8^Sn*|)Hom%bOETOnGv7653vWaMgNR2qXWbj`^56Aqijld85xrM
zc}Sgpwdt8tye<s6A~`1y5%00$JPer#UnSX`>~YsLj!R~IkAtpQ(5E(yYg$}o{$;da
zl=B!-=yy(dlfK7k$T#Atc$u8wy~D%fIT7G{{RhlsDW<j5SGN&ZfSo1^p>hAY(})5h
zH7Z73s^PuR-T-)~ej0-YdkQw;Ap<gwY?hSkN%MyA3mSi>6XSO+3;n{})p2Df<{T}H
z<G$RrIZUVD)LNG2+PUj%%1-T^Tb9@0+>JeDwwf?Rf~(3OMei7+p#-8CWpk+9<glZ>
z*hrF_LKK0epHXi8;)bn`eGLK^Xg_gU-y!2Se#9<^1pxn?RYb%4u6NiM!9;pn=|+t`
zAM9ZfQhM7+6ZZo+YmQ+Kty_dNd;>}b--8@lH<{-fKN6E({sLO}K8ZY3%Go<dlz8t@
zWHxmhlDZ_kd)}u4+$$M`9YbkWw<&0vtAsMXhreq*loELyV8V9EgFf#mCN>R)!d-H3
z^iMePJ%PVfYgefq-p5kWaF3}}?Asy-y<^^Jf!WVzR}EfmCz+F;!@A5Tzfpwt7;*Tz
zlA~PGGFS5VB;A_VL$STw%@E#p_P2ynI-uXZC&`dP*;#)RYIBc>U+&1i{73P=&2R;`
zd_Um$U529)LE3Ew3__!W<E%tLan~B$He{LUH4m%{-`02B4;R^gpKr*xYdvqfa>(=w
zTo!)BFKGLi7VTZ!knzylA$%*)^|aK}e)j?A^#mu%r+9kpZm~e9H@f5L$l>1rhW~on
z$MST_^Dpk6=vW9fJWC+Et~2cYvwr?%<?`eD!yChkS<>Fs_0z6(c<B1DzR@KU@s&bm
zuMj{6UNgMy%jlIMhQa#`Xztfw=^Hmg84pOANQA^Np>;UI#@*Q3Nmu4=n-Q7;F-@sq
z_l?ortN}(b+4G^n`OC%)-3CTI84YIE#kaJpkAINx{$LRrEqpaXO3{P&k;7HiufP03
z#QTVay!m>#j^_IZ9sLhx`})(SKNqOy=NG`BxehAr-U~8XYw06W>e5lVF)q3xI=nz+
zNHaj?&wBkqb~Q`-tYG+#JDXCNf=V)*N|EA|X7(oo3TlgNYI_PA*K8VJ3fiD-+9(RT
z-`R8-6!iJo^yL%`2HD%Ee~76G)FL6EgC0WOh2n#cMNAsJqNE*2JzUygmJKobN-=<e
zl#-n~hn<;{gF6S#Axz0BnZv0_$;C~<tPW=FG+~6#6n*mJ{-=DTg$c>{^PLct+HIF5
zOtPqh*?v>dXLZx%iv6pCQ>H@0rh;pfLQ6S9!`lLIN)h-r9gI@AJxB0yTey4s^SJ3}
zb}BK#Tru_?I?Nq0v7OI*IpViDfcR042(#Ij{%sK_GhtOT;WJ9<+Z@TQZ6VU!FG0Cd
z#XG{qX7p)0g1^na+){p-%aubklX*)en6e{DNcAP(OlWFbv^iIz-%RRgN1o73(t1a^
zoJvm7Owyl9Jljl)&|H3t>a#(fD!RD}UY-&)mDVGbJo6_VUvp*qTrI`CFMwyR%37}E
z_^!HvxzH`#Oj0;kJ7`x&*i522PmVNC>oUja=#xahx#-!hnv<E><2IM?7;|<GVglAb
zKS;cC>ATCIU@nwTb`P-#%8$LRRQySke^)|mS0X>(x}4gkKHsLD+O|L6cAVO7F5hmA
z+I}zJ{*2n;Hs1kG?TB392w>BE$1C_wO5@Z|&FHQ`l?1hHpXQzEK`2E)(ufi7?SafJ
zIm2i8A(YIh1?;)`bZP!{5i}l(W*+$k9<i1l8J3=51s?S@UhS41^gZtXphh;0XK{g7
zx25;mz9(X#`*?wOb%EDbf#=%3?=X$eUV%S*p&vydKxpNCRA3DU_C4MfLT@dD5Uo6y
z{*e$xst|hGK<dI!?1LaetKi=U{#ykAdtu<^K9u4B8nh2BJ_zNfjT|qCJlhX!ruFbI
z^hG}KbUlb_w)$m2`xBea8-5TOwIA|G`)iywEXB&b*eYVGAkd;P9?!}jn>LQ0E=tTQ
zT(a;t{bAzc!OtL|Fen3&(TGy%fr399tYT~-c`_i#D=M5TZe~xzs@=oMc$jEnmHfLX
zl$1W3x;UGeK8L$FN0|P99;_mLo@Q~L0e!wjalSo$fopMrFMVN9aiMB4yL=Ck^|otJ
z4}S>dcWord22^TCTwsunKdOiS`H<7rnl?a;vA4S%egu?17FWQJDle_89*gO&kKk2!
z3>9x}Xp{VF5N#?Ik85w~tLPajbxJBNj;k-}YyXX}=r$F;HZ{mLjqD}0>?QT8$2DOk
zHPj6CIyO!DB~^sS^{NcbCM9h_C2dYN&0!_Y*2m4*rPW8r?W845k2Y=D$F1BpT@;LM
z+_sFJHbAp4LkD$92OeXStW8}tL(klC9W`UyFhd8wZ6|4I*P2aTHbZ~&arc?c;M<Zu
z|C8aAlFk&{enrOaONLI@f0V~|$cb@qt+c!RxTCscC<T#4Tb#AmxR@T7zZV>A1qoJ1
z;vN+T+d`!mdP=8?iKD~?3%j@~a|WGkC#V^FfNsRfcErl!)ADXHwnnCbVK~#WY{^n{
z*$TQH?apaAUQhX4*>b!8x>M<T_vy-6*&5>U#vao)HS^jT(|WTV8!mJW`)p0Me3kmF
zVvT9b{%rH|v_hwRH^_d~`fOSD_#nle7F2%dZ@=tozy7;?JHwtfw|vvO{21Nt2u{BT
zcru?AGwq!*pN^NGyZ-CXmiNlHm@AIf%8%(QE+`zXNz1qT%hvYFcd#99i|r4^Dprc^
zjvkq~^u$?{p!4?IytO@oqPW`fy@Hs2QZW#rQS0Iq@eTxWE|@=u`WbAl{MCZ_-vdB6
zm7sUxpm(f@?_leAl_%)1BYGJ49Z-odUxtX|NUMAXA*)1^azuo?Iifhj5T#(?3T706
zN~F|ElnUnEb}?i!M+{0Bs#hiE1S{4=C2Ec%*0v+6mLtf}5r@VRP0kTF6}H~aidq0e
z+^<BffPp(Ik+)gjgD+5OSTS+fz~Bok6d1Z%71-fBnnTrlC@Wf=Bcc*e`JuoO`?V6x
zQ-ukzy{RdCH}8lW_Z_#P^20kQg5?E6k{DOa`9n4f-y{@L+9lWv{W#DgSc)Lb=_eJ^
zD}5j?<Wgp~Wc?8=&Z-LKOoNrDT&!n&e~$s9A9bYd=%$ZwLP=$(eOF0WaR~}`qKmtv
z&2d8fr`yI=Gf`GDCb9zz15OMj)r`xR4BJkO^Op?MPH_4Xc1Fq?6r3w21`cML8q8NG
zj)_YSH4e^cN2Y=*R%kURnKLsP2TzX^8wv+2B?sRfJMY~kOXd}G&Lt0rGj{_!i@+6k
zK@FE12k$_QVDJ^coHMr@2gAN28;dh<0Xts>$LIA+zDy_4rz?P=v-=)dJX3gop;G)=
z5+u#qiNeK>a1tk630;_>Us$W)Qt#p9g>n?u@E>1tMAz^hv&#?E%1>}A47hO2a4PQC
zGOyPv-qk8SaVqV*DEC||AG@f$a;m=jp<2<cLR6>5!lf=%r>?}Mp;f10$fcR+q`dB;
zX33?t%&BejLlbc0Qgq|uYBlD2{P-nWfq$n*k_XO?V!R;ylk>o*tc-s!Qxhs<S*^tZ
z)&g#{c%X*hTl)JtV~!sjsN5#6TqZcKCdbvL98h@-R};Bg6D@8n-CNUeE)(bv6Omg(
zz||C|-f+FvjKNhKpW8I^RuQQGhJR}u&TS6mR_dtN1aNNcGF{Ex>dkGqw5~Wg;$1E0
zUB3~zIy!LMaJbUh+&aB-+dXkx5ZxLka$A+$YTea4B;HtN-hLNoaC@zHq;WIH=dozv
zRypQ&4X<+&;C3>-HPPkvc5t;hueA_ybz|VR7I$+l;kHY4^+4hAdvY_~c5~RSH>+vz
z#ODSY{#_@o!Joy|h~eILg2%MQRr$Ey8NSVJ7R(*U@x#%O`@4vn1qwGu(h(P?HlMEn
z8T?`)Mx1{IsyD!k;KB)-ypgywQCf9iJ*)G0$L$*I>Ji--jB>}0`@l}j=f@%bi{T-b
z!6R10;}_3E+&jKFfrofCK0mpp1jD9yx26Oe;33i3Bi6<vPRiqVIA1*UAwlaQMf~Bn
z(nD;?Ly~1vT8&3)>O*R{NBaCjS_9wj3BJs`rqrj0<aNG3(R@kUd|5cnsg!(a82ss8
zO(`_ZIj?-V9FM;SJd*Gq^F<#2`~!clO=&j#1!Vk1`yR;^51eUSihR<7rC>qtjIgII
zU?EFffOAs#1d6r+rMTm-xN3%Qg5d9YMX8_$X0920yn|6n;+*b$zbzm0j+^2V5o;Ov
z<19V%)dV=={o`1`4VGT<re1l5UiE4%IrtuQxdO4NPxV@SwbOhJT1`#jUagc5zb2lV
zOrILmTIvC>B(JBIyMG5-nlJ>LSUiA#R(%(^HTj>6Z^PeE@YF5h9lzbuNc7Nh?A50x
z*jU5YA<)_w?A0ys+`-Y*p(QX7Cy)@_+6X-LaXhyY@%5j3HR1?12DgrK2)2>64y}6+
z541M4Jhj>g^cg<YZnypoe(J9fY_Vw>b!+*h>z&LX)LziieD39#`P^&C_ZN8A+6VCU
z#I;O^d-t=vu$OxN8x0kA;p(j9)qI^`zQN|Id4B%JM#0jb057V+E6Vz@4{ITU@ImT+
znR^*c%O@=`7pK=CPTRPWKtGC4519wMeETd7bk?waSE+s1(6`y)pJXh2;3mB9CA=5z
zdjP=q)Y=c@gb%fZk8*^Mffir7Lf^d#__3+)@r3X}3H&hC_wZO4<|TYQ?|axFd~xo3
ze${@a=6lrQdx8O+MGM35JFd3jCsME1N+P#f9oOsd!)f1Z4B+2q_8{){LZIUwc)fIM
zzd!-*D1lp>jyn|KY6O0|FZ^IAa%CxU&hvWZB_c}2z!kG0`V&CP`J5wJro&}3UycCA
zV@yi%fDQhcZiN5RYjN3`fyAL+EQ!a`P$GxVl>c7<tO8U0-A0$_;EB9e?HR372XPj>
zdGnx7f&!EEEEtq$(V$0>0_90`DbuD-kuGhzGbz-nOp{8@y7lQ*qB*Zhotl-Z)~G<a
zel-|%>e{bLy;2RkHE!Fte+Jje?aH?3Pq0|oo;@no=wPE`0S`uN*J)R+ZVwAp4EL#D
zrG4`TKD!ff-JWXQq6Ms1v|y}}=T_~^*|F-?dJBuSI(slJTF(|bG&zytypAk^<BJrm
zWW#C<7beeb?O{ch88voXBhng8T5l8YEe)A<GFr#3AM-9=JS}9%f4nCHf7G`3_Cbxc
zM`kRUG4bhb+eQZd{r>pt-}AdKsQ=gtZ$0nkiw`~X)T=K%{n`_6!DNv6@4o9O91px>
zD#Xyg_5MSzJfto>u)Y6iiO;rUIIM8M{803;!3I5KQN8at{H{LtFf5Nh^G4(^r~_^E
zj=rNRED}fe?0XTyfAe$%ki`?>3oyk959Cq58&@1~!Tc!naY`1~+t0lrwX_jK2=6nd
z!z;r(DoFaML~+3Tj*0Kb7xNr2sQKaq)2k@c%d*2Wd30~W^!6iA!wxS*55F7FWRSof
zd6W>$2X!p)y7&+k(nT=ce9=!x=aW>)qZH&3()TVsvC#~<f4ptE-6p!IBXXvLs3B^e
ziO-mt@(b41Y9hK)V~yw9foC4-XoAMV^v;~nL;>+zF~A$aQxV2*!3|ejH~|#)+aJk2
z7e;i)^)_7l#O3ctcgKa1UUBs-_ex__G`HS+>$UP*bNO9YMgH!km)(H91vp?h=Z#n3
zbsyF?zk}WVS5aezE4H`efFn*<TZae6*JE$dMK@n~P5wAviywCQUX{mP_~LNqwfW&Z
vIi~nsodYBmH-{pUh$5M)qlRBS7cO{cYm7dcxZ<86mSm3wz8XN=0s;U#P8cQH

delta 22412
zcmV(`K-0gQivfeQ0Sre;MmRZPAOmCo_W-dB4g&`u17rXI2>+8I11Nt<vZTqAC{wCj
z$+D%(moQ_>oJq5$&6_xL>fFh*r_Y~2gMt)yljy(%<I=?>7fzr)a^w)oQ<`pEMWark
zDnzQZqP?g>!-^eCwyfE+EV0s53&3B@r{bb+(`s-oIeh>UxSgoh?$)FhtD+rDxUk{F
zh<%E~J5b&@apKZ#8<&3oKs$79(aF0k4qXCy;X2c^j4pJ$Z2;1R9yb~QI&tA77hFtG
zGdbC}0nmFqPM$z?kJ+_jDvoowdZ|OB(?+`UXXUL2l6#FDDmT}VDT59tPaZnmuVV}U
z9QT^F?#FTQ(w0u{`oP!XC<7o58#iRI=w7Ge)-JR-<d3zJTxWmGafZ%qsNLpXVhlFu
z;DZnz#@2yzQI^zX>HxqFQRJ{z&o<%g<IjNrkfV-0@(`s@WCmO~9RevD#8y&*0U(Y&
z^R?qoJLCA{PjT#&7!E%vl9PZr?XfscR2|auVTdDw#U76(w#N=@^oS!5J@BAIPgMzY
zXh1qd{YZ{KOQwIOol$IAH_(;zWGT;N=G;Rj0HD=V4|nsy6X$&CByb=%9kR2Hl1zG7
z6?5VQ8R4UlMk?u~EUj43HtBRoSycvvgHERHX|+v?<CHoMp<odum_G?@gOsU}5&vZA
zQAs5b4m;_5mq2`_5=DSGsG%y5QBbYwn6RVrS}H-PPIiBpT<ky<nO}kd#?_Aki7L-@
zDo!XR0NX@MPGnr=l@wi2afO;T^b8l5n(+E_EL}EEWfi6P=Bw|%{3@i@t5Ef5W3HF=
zmJeN3rK9kibg3$EoJif4jyyOL^lz=`@F?KG3U_6cUp9hUtiV-)obbZTLbXjjokd0F
zfiAjip;CY466F_D<g~+PK~U|LTFbRsd#+qP0|tOSEt4o%Rc@6`?{pb6*YDI+S8a7+
z>kZ2eJ-{L0+Bog(Mr?b-Dn;RR!>VWYa?{ZRngO#hHl9-TJT_38RNBaxo3yF_9GN{}
zYvbm?u91Tpd)JL?INB*HWnEPWw0)=ERdNINT(o~>UFG2=e%JVXUfu~BJM8djbXD|R
z*&dh!1Ky6{WFHNgJ)B={`|Y^rE)!(gg?V3@I@2>*RQ?RET7085EKuq3IRGAH8(Yl#
z%m8S!q<dGw=3B_wl0Ei-UfSi}@#YaUcsNlpU+QT(w<8$W^$puSrjSv`y>z@srastx
z#<zd{xbG_0c<Q$tmU-rQ;TxQ5(6IpBIq-oHj9^FfLBRx6rgj5am(=``j|C+F0TvVp
z2J?ad1V~LAQacC*^<lvfR&N0da@7Y7!a=#Ug&WuTU;?zU4Gww`gq0zP5B~+?LLCOt
zDMYNE0%j;gZs=o#G;|^bi3mg@ZsUsmK!|?}^Abb_Y_Kvq<lztnf&l(W@Qi3oqZ-$!
zuPcq?ifoLd9Op>KI`$+T^+=%|_sGXS`jJxpAix3`U;qO+ppXnG00I8U$VNKyk#Zye
z102AQee9zFnB2z!9$<h4EHWno2&E`TNy<{1@|37dr7Bm+%2vAam9UJZEN4l}TH1f|
zmblENE_cbxUi$Kvzzn7^he^y@R^S67umKKq@Pst%;F(W&LJn|H0ug+m0>#XxHn+*m
zZhG^Z;0&iY$4Sm|n)96I>_7-guuNy-<DDju06w6p18Q0mo9N7^KKIH0&wl#zp8yT0
zKnDuY4kR-NPuRg4B(RSNaFQPfXuy9wcaR1h*q{a>z$ZaB%F&K`^rIjRsYpl4(F}yZ
z203`<0QjN9B`RQv0N{rN*06&oP&1<>ePss(Fajq)Ap!?DKswqnoSZ?$eVU8_6r3;u
z0qiuVP>rfor%KhTT2+-GC_zfsu#W;Pv4vn&z#{oEPn(ti1RA|+2S#9m2Jn9aSLA@l
zI_9d5cc`PTcfIRek<<=(9AK#<c<NOLOW491_OOTzW(GungM=CYtdJ#uKOUgT1uS3y
zHN}B72TOt*4B!CuNQYeWO4`zz)~@O>M_(g)gQh0-wXlt?Y-dZ=lmF^q2Mr(qWFxze
zcQ#-F2^fGHEGmMuiW8g0Ri%G%HHu9Vm|y_)h?Y4}Yuf5sR~_zH2Rzb2j|Mmatrfs$
za+52{@vf4&=M?~XL%Ck{j<UV(O{IBJDc)Qnm%Zuj<$dW}URk17x$iCIe&GvW0Q2|2
z2u?69>wC%q^K!oNZRLIgj9&xyx5D!EuPGUP;Rd_+zoZPWfM2;@5J!Lb#ID2uV-@<?
z-GbF0`&h#oK3f0=%rgfi;6R8eY)lKsmjd^t00jgfWFq4e$U+7(k=;vy=SEkq)V;2h
zPn+6yu%iJtDDRMojASlz8Jk-ka*{(i<^XF{%tJo1n8`d!G>fwT$Y>64k(Z3-B7^zO
zYfdwmE3oAtr&-B!7BYXIBMf9YFL}v)7POL)%jiU>`Nv_Nb9o27X!Xun&V%+clNF6z
zCCB%^d=9Ui$DHI$Cz{M)7WA7DO=n9V8p(=QHLAJX=S+J!$+(7fqDAcLRNtA_hW2!-
z;jC!(h8e$-hP9EmY;0x|y3TK=w5wg+<z>g&(vi;dt&iR3Hn)FS&6oD{pB)Wp5qEjm
zf9`gxJ*{O^W1GxkCby_d4d_i9`PH;OHk(V$<Yk9C*W}hWo$0;rKqHw0CAe(^faqci
z7GNI|7{EUG!GHuDAhROCz@rPj=tti=)U$p!yxBZzW@|jx6#vk;%5$yqle7E=5+EHp
z)*)rPzQg7>zj=RMZC>4*ucIB&9l#A(o^m0(9OXqPI?|Jj0hh0w=uKDp(Up$$sGof5
zxW0PQQ|@$>H~r{e&wACj{&cdJUF$_(dDofFbgZ*|=}$lV(v6;Vw~xK;R_D6j$KLg_
z8$Ie?|NGmwE_S#BUh0E?``Yb(_`}m(@NJ)a-vh6A$Jc*7@oc}k<*PpRmXjUooo77V
z!|r#&gZ=S^-#g)x9{0!FUGbxbI@MW@b+oViqzQGvR~w#y0`TJj4J3f@6(E5f<e&x|
z(0t}S|9Z@Ke(z!D`q!sVdg@F6>rPGw&8?gB?tB0HI@h`XcGJO*dbrIV2QY#3uitX&
z13vHWuYP~!M{j-Wr=9fdZ+-jUKmF@h9_{y+f9w~4{a1hYw|`cbfKJDMniqWqcy{r(
ze)Xqz@mGOySAl<5eHhq&`DcM1sDbX+fgz}X%m;t|XMqy<fe+|<8K{B(M}d$>g3%{<
zCCGpUIDqhHe-jvj>vw<%xPL9EfFc-r{WpYlhk$=42!uIkgD-f4AgF>NfCFi;1{KG9
z79@N`)Itid4>e^2A>e>NID*Vae+M{#LUsXYhyiGrhHKb{Zn%bDCj$5&5A^^K+y{Pm
zh==9?4?vd>dO=3F@c;uL0&S><Z&-+M_=bkahKG0oi~s0`hPa52=!j`JiIP}}lbDE&
zxQTz9*oc{^h@HrYh1hhJ2#K5ch|>p(rWlHp_=$%YiJl0HaHxi)$cVJ)ilwNDl<0}O
zh=`7ei?KM1r^tqx=!%x8i=s%2uegh~sEVqnjJ~*v!N`ie*o@RTin<7l*T{>+*o&g*
zjnN2>+&GQTh>Wbbh$666E(KFACRv`uDi(jF01vPRX`orlxQ*Y~jn-I;+USNBfQJ9*
zhBtr@^&k)CpbpzdT6yRW4B3zl=?*vN4(bp#2eBhGM-C1kbZZ!ptjLi78HyGVl5Oab
z9{G{}c#`;Nk{h{_omi6B=#m%Uk}#Q)EUA(xNs~7JxrsArlRK%CJ^6+rX_7GclQDl;
zltxL79J!Mu*^)PjlQr3hM0t}**_0`%lt_7q|2UOLsgo*6lp#5lQ3;eWDU%~<lt~$o
zB{`H%xs?}C11Ti|E>uXm)j~dF00+PT@c2PDRs$Q5lTi7TTuGECZ~+@&0fQNng;|t_
zi2^5Z0vm7w9Ds%>Fp%w%ka-1u51D_Nn)wdxkVOZP4m<aA1%PD%c>*DM0*rY9hlv7c
z$N{Kn0j$}WgK3(B`H!-R0)1JOrumUs*_x`k0fjl6j46_X$pI*Un|<k#j%k>Q$&<AS
zkQT5391xi(P@2_w0j^n{kr|V<`H#7YhPj!Q$^S{1iD{T)xdE(+mZ!;@{`h~Ivgw%5
z37)FihU^)g^SOq!iJgWCpB%{nXt|w`xt;fUnz<>P-bt9V8I$a(n5x;Ihnb+Nd6;XM
zp1K*F(V3hm;F?)!0sa}A&-st3*_a4=l90)rgxQAQX`b`>osBu1rTLG>8J!Rsq4r6b
z-YKExX`iLpqQDuO+_{^Q*`a?NTATj4pDJpW#Ce~qsh<;Sn|-;UwHcr>`k3>Hn!;I>
zK5CnX*_VZxrD*x2<msdHDWHjInm*c?FB+29DWSN@qyg!l$=R0z`lE(vo+65+YuJ~l
zS)cRCk!8A(g(;(I3Zf@^0gMTxHfjM{wF3kH;57^I0Q~TKtfByJ@c@4f@L8HPmllLs
zI#2>sDv}$DpK4mDA&Q+RnxYP>r;F*Nj`^7I`Ix}zs<H~4j%fq<Ko2GoWq6fy4fzh5
z`K#_w4&+b(2mzWBxm^mdC0G`#@Y$-d3ahW$s*OpjuS%`X3ai-4tg<?-(t54UO0Cce
zt=tNn2<olVnyu*ith0Z5t<T!6vZ=1;s;#s-uk-q@?@6!LYOdHSt?fFl@S3ghTCM&1
zp642^%<8P)x~||_t>${KuIjJ|tFQ4&um1Y53EQpxnywVvt_QoX>e{jP>X^SNuk{M9
z_^PcFJFf5gu_(K-+{&*P8nFDjvJR`T_Wz2nFRQUMyRza6u-1S2uoTO#(JHjzx~)a)
zvEM4N1zWES8?o>ip}?7NI{=ppAOZ7$5B$(cfdl{zum<xG5A)CfhoprKumcvw0xf&A
z0^74pi>?eSuP2}Z8=wJn+n97)w;JG>d7HNzFp&CC59DAD>#&f%`m5y-tcP0;iCYfl
zP%#G44!+8VZNYyI@L&%0fMs_Zw|85&bGx~6d$*mdxpqspkGZ#U8@kMTx}O`XoQt<{
z8@Ip-x~A*7bE~<g+p3;>x}CeX&C0u=8@r?{yQPb|pDVX^Te`e!x1x);d>gv7o44G`
zxq1t{rK`M~3%kk-ysEps&l<e1%eTz`8@!%-yn7qEqf39QyxY0dYP!w~zPTH_?+U%f
zySlUcs_bjKu=}u|o4=Y{yVJV5xVyS@JHVuiz1Azc*W13i3%kBsyb7GV#*4htn!L!n
zxvcBE*ekrZ`?}~Gz5(32;#<DV%fPN{yB0jZyL-Lmo5B?gpZklzty{stTe}0iymf2B
zdYicn{J?+vI|H{R0shbc@sJJJum%pG4Q{ay^MDQ6&<)uz4-C)`U{$tHU;{Eh!qcn1
z#;doYyS%K6u+2NaIjjLJK(}Sg0%Z)ro-2o6k`4w?knI3vz^b^2OUHJ+xa)u%G^Bmo
zbq@(3S~T|#=FnXtK*olAw`q*XX8)YXhpfnEe8_)^%*blY$dqizX&lL(i^jPt$%>50
zn_J0h+{lw0%7_fgj~vRH49TcW%B<YVi|ojs49lc^$*O$HmORU+jLDoF%dVWsuuRL3
z49vca#-2>cXB^ABY|FL$%C?-!$_&BI?8v*E%w(*|(=5us%*oM=&8&RQqCCpbjLpaV
z%(8!6&5az*p6tuutj^_p0#Yjh3$O;+00G&M4H6&_`5?~$Fc0%U#P|RJn#vEE6$6{x
z%LRSO)a<z{u+R+M&<w2s4!r^pO_1#{SMZQC>|hUW6UXbokcwLlc7?cg{Lv)+4)S0_
z17Rof;1ANZb9eO)@ZbO@P{s}a%>og9(>Q;f(-N)IHVx4_z0lR%(02>e3vJUYK+`zw
z(?C7c4z1J=UDHtg)IV+15>3@NO}9k-&=8H)S-sU{9MwyG)LFgLNqxpm9oAr-)Jo0O
zM{U(Uz10z|)NXy&NBz}Oz0*!j*K94;W6jV#P1Jdv*L}^`XkFBX9oJgz*Eem|O}&5C
zcTLz4ZPbi?*iCKIG!556o!Med)lLo3lTFu<-P3lh*H8V~hi%!O4cTW6*M=<7o{iZ!
zjnt|=+kO4lUM<vb{ntmms1m>c5HJr){KWat4-60w`<#*U&<*jR09=#+0&P<<;LxV+
z*SSs9maW-joz_2%(+njK+y94JdzF7P-LV<PQV-@p(&&xe?SRtw>^6bBb2^t-eN_)8
zpwlX_-T0l~`n})$-QWKG-vA!q0^Z;EJ>Uj@;0T`JI9=cZ&fp6E;1C|+65h}bKH&-O
z!&CbJ@z4Oo2LbVb0243){X76%+W-&%0T0mJ7gvukVBz?!0x%xqGCt!oZUcV<zzzp_
znT0En>!4cn(0jyM55rNpf*Z&<=MLnM4+l|)b>)!VmtCFN4}N6>G+yOae&tx6<yyYw
zT;Ang{^ejE=3+kPWM1ZGe&%SN=4!s?Y~JQ>{^oEV=V3krkf2Z+-T>Tiach7p?63xs
z&;S6S0L3i{{J=#7u;MuXAOwGM&gCfp<BhHYkM8Ix(C8`P0Wcr|U-8o2r(L%ZBm|H`
zIG$P;4HcwL>U_8WJ~I&XaNcyBnSv}ED|5LqK<SVk<BuNcFzx}9?&!Sk0k*#Dx6bRr
zj_Z>i?35nq#ZKeLUhB&q?6q#|xh~_hzU$DQ>yPg1&yMTN?&!MC>$HC^>5%U0x1Q{}
zj_kj_?b$x=)Q;`X4({J>?bJT*!mjMYuI=qU@ATg8_73dX-t8)Y?e;G2_m1xT&hC)j
z?BOo$^S<l`PwnR}?ap59&|dEMUg^aC?&a?A4*%~55AGaq?g}sP^xo**UhEO?0U~ek
z!;bI2PVxS3^8B9c7XN?$?#aIL%&zkG{_s^^^Bu47TE6k$9`Dj#15NNu#XS$Yp(&n%
z4(~7kZqWeq>^xT(0rOCgivH{G&g{<4?~+dPIZxxdp7AJO=_nueCT{}W!48{Ix!dO(
zTQm^)01w|V_ut?RbicUekjDpM53GL2AU%Hd;4|F74*YNfFff1iV^8+wUiM_)_>5od
zi9hLxZ}u}!`5CYHl`r`gpZJS^`I`Uvo)7t%ulZp=`6kc!mOt~GZ}yxY`H<iFlF#|4
zzxtCO`Kgcjt}pthU;2wb`>gNyr2qPgPx-!I`>cQYtq=Rizx=2l`(j`AvhVzeulu`?
z`>GG~jeq@>-}`^q|4;j)5A)Ce`LmDx%`f(`5B%XD`o|Ca+wb|%ul?)4{o<ec+)w?E
zPyD<e{L%mZ;13WUQV0|{FvUTG1r-)VcyM7tf&ypcNVDVK8f+5CjSJV2W4LkT(#6ZR
zCZE3qDGlsMlOu=+m<DA!9LO+cM289?#-y3k!;2RKfu?^L`XVBa8UyOV6Df}!J$C8Z
z0iasdYP)jZvX;yG&8t^$-=<onnh%~=u3X=lBiF88J%42LwcS_aU{Rwy2=d@D5Qbj8
zK!3u7@q+JQm@onVOlXv#(V&QlCWh%)FJ%vXVPx)$7c%3*Jb(&LA^I~<fubp=*i6)Q
zX@i0*I~ISM*m3{npo<{~eeIa^VWTxIYc7~qb>beU74H_x*KzC0eUsD1jL<M;y+lXz
zW{o>=(A%pgGafITvp}PT??%SWKKAC(+Ii+K{gX!z`<cxP?ri#e-^4Z>42GH#%WkjA
zgt~3P;9`5>FThN5=(5QIYwxG)k{d5U{S31ZLDhf2+w8l<h{`Uw@bcmh!^c?k%^~m(
ziwQ#9?%GQ@?Mxi(x}g|D4?meQLu@Do`)Up$%0|Pl#?*r2?6`qcGxD(+n%H3t1@?(&
zoR2yRM;tIadIy|zQZkc(9d^iJ2JHweF|)!3J4m&fPAu&>3kw90v+Z7Rp-(@zbHM}$
z$Wediop;nR|HmAr>><mld(8Rj8*eJ5^wMw0c@zM8&ick3McYb;9eLV;C!KAqN`Rbq
z)?w#{KLH&r)<9kOGgez=4RqE%a|P<wUUvmnS3zY3mR4Sg-8EQch4mBHWTPE+*=CJ>
zR@z^)EjC(hk+s&_ZMXGy+;L|Gw^n3n#r1z&YQH6R+GU}ASKC?v_4U_#*)_LbdaKQr
zU46SP*IsM=oi|)=4d!;;eRTyl;A@X{R$gYWO%~pJ1$AMfiV9!{oh~~9Q=BioM8}>p
z^GQ<&CWg)UVr2sc*jAcrw%OJh29ReRb$UkBopRJ+XH%-`X{Q}>l3vstpMf@~fVh9i
zG9c-8(y{*<X?WIoryO$Vv1)2W)gcFp8g4fGY_!u>du_JccKdC(<Cc4Fy6d+4ZoKo>
zdvCt`_WN(Z0~g%w7mToS2YWUOdE_ub0;wJ|6CgkWHJ&I#iow+eVsp-203rw=Jm)-g
z6CQvkQJ;S%M**VQ;dxPbm`b`Wa+rTIVAFJJUzBHXz~M?Oau7(Ms!r8`$DCFVxMB3o
zK_A_D=A###dFVG6-392e554oygP4B%AgTv_de5tme)Q`-w>9+go8R7g_~B3AdhNgO
zoM!psAOHOA?Z5tg&Uw#&)T7_$*atxGJr95fWFGkf*ua{ZZ+-l$8TYb>KK*~aPkrdq
zp7q8D|2^kz(1X!)9_YfSK=(Dzek8n}0Phz+6mk!I>f2xc45+;j+RB9h#NPBSD8m4*
zP=!H+p%6t^JtMXcgaRbr5pDRyBl>WGL^NOyn`cESuJC&k{G1krSjG6QF9=C!LPb6n
zvM)6bBpgA9*H{OD1RQ`RO_+ay2&MPMC}!}14Se4Z_!m7RDCKxOYuY=?L^N_tYEr!e
z$2Z;~58Sy!ky@c7*AM`yqp@Qth#H#|{s;s=GVceW+#D%O2})6p&Xk}0WGGj;%2L7-
zl%tHED_MC;S<X_Gx8z(dYspLg!P1qq+~qHU*~(xV6P3KQ<ua3b%4dHn6PLi0B`t|5
zO=kZ>^N)TU<u!Y0N?$rNnB1HtG0E9WRC+U;+B{!4#mP=|elwM~%;hV$$x3y?bDqVF
zr#-b<r8LaL0rCKumqz9hE`dZ-3D`#i7yy6;tZWHJU?(@#S<PR%vzr^`=tga@k95d`
z9`#@c*46=!dTh!c?C5{WBr8=Za^&Mt?0835CW#hu3=dNE$Q{#qwvGeTprb=2YEg}P
zRHP0S2u}!xB%CS<HKZY_Q>AKEt$J0gW>u?O<!V>G`c<%oRjgwrYgx^D*0UyoB0F#Z
zJ;+f|FM$MPTpGtZ@X-$d3}6lTcmMz>_k<?U0If(hLSYO01H*qBVU&h7K|RcIQc=l+
z|D^Sh>82zFmX~JM8|#1^S?Zw<&DvBe;K->}=wYgLxWgT&J5)Pva9G7AR<?_UZEU+)
z*dizvvAc!sVsEQk#1?_Khutk=54!_wbVIqz4L|^}u><28HoDR^?sS10TO;K5w!$TD
zb!%&cGCV=NPq2UOcYS+a+LkxH-!<$Kc2L+T^ftb~C2nk^uwLKRx4g2gt#!MrUg`Gt
z2i)Z^djV|QQQo(=!L4qCquXHJX1Blr?q+WHi(Tv%*Si?zt$!2iVF)kwzU!T^Y~y?2
z0CRY@=QZ$bDV*N?Quo6$&hCTp3*rCH*t@iS@Pwzk;}n16SjIgb?nZkXWZ<G#yVL)r
z0VH;y1_$h89`HcNTtPP1y2b+n@i+hg7_f$TNMHf>NE0N=kc8W&nWOssFpOOsVG9=_
z3Q>?Vo$Fi#Jm<Lq=}6R=R@*akup?@Z%H8oEs}4qb1sp>o3q3CTjoz(e9gJ#=JP0so
zc9?@Js||k}qRf#FDEwf~c%HMJ>3r%^qq@$cUUi=Hoa$1O`qXk>HLGbo>M}&4xx41|
zt|d{1Sj(E!J9z32g5By^r}_!Ie!{KqT<0frW7kHov#f)$Yc+`a&U6m%6P{eTC#ZVX
zbH>K5A%SdCr~B4VP(!<$!0Kv`+Sc->wX^G;>|=kwdfxI*b-L+oYgwZj*0mNm|E!aJ
zZ-2+T-~z98!~Om2faAO2)lN9R2d?mi!<*t5&-lc}z3qa(8sq^NHoQF^?19_c;TNBE
z#91D1kGERiEQk2TZ*6dy^PArKMmW5YPzE$S*&YqpGL`8{S9Gi+5)i-!1SAlbeXL;s
zw;O*z<(hy5A~+n~1Rpof)!pwo6G7}_CwtkG0EN@_>`?CjnmQo)C~nN|XjY^9(b{3@
zN4d3XRjWJF_(2xel|!|vMHHgop~(moq3nxieB&Mec*r}x4CU^1<pa<SY(RqXLm)yF
z)S%n|s38iGFZ;5a8}7%OzPXGqf+U*T4bgv(-Aa+j#;&E!>qtDI@w}GA=t=)Cc3q$T
z_roWC@r{3c<R@SGV(0wwo&S93M_>BWr~cTRU;V}pVF_hag9HF@04>ME991U2Ike1l
z<;LT7+;{*3P@;e|p`i>|$3EnTaDV*gU;j36UE%#-4tThSNAZVrC<mKjBz2IqMHzn!
zs~~`MSeo78oklW;l&Xrr6TGze843h|x2OUA`#=y3K@l855)8pA2)<p*hDd0)NT9se
z!v-=CK{dd(64XD>V>S|0y>38*4{SXe9D*l6LCfQ{ZioaB?6oJTLHu*Q(^Enxd_pLU
zLMfa=Dy%{)yh1F@LM_}vF6=@tgu;I+P=ZE412x!z0`RMBfPgKFhj}oE|9Oaq^Q%2+
z%LaT<l?1ql28gOASOO~eLK36`KpX-<>_b5uL@Ky~8;A<s;TfPQ2L`l?d@u*m3N3FS
z6GEdDO01N1@EQwThe}$Dq<IH;;6!^!2NEd6J~YHsG{pN;#Z)Xr{By-sWW|416hu@E
z#8*UvUQ+`qhyq;Pf!f<OBOJt51jHF^E?z7~Vw6Q=yuBwN#APJJA>g$gfW<zn0wxT-
zC&<Pjv^+J4ML_H|Nq|OIL`5Rd#n<D-Xq-h^RK{DhMqDgMW@N{Cw8dAHMr*u9TBOH$
zR7HKfM`eV@So}wQ{KtMoMT38=$9P0YcC<%uWXOK3#b^vjh~&p*Oi2HKT*ruvNQi{T
zRy0U^j7M60M}XW%T(rf4ghzT*NrwzbLkvSQa56P$0DUlkc)+~#Q$wA^hHQ|w+k=3=
zQ-H|%vdU=$CAfl&^hbom$W=^9Lqy7_qyj5AL_l<cqj8FnDwW#7JF<UBhen!|u*j@-
zxFoBP2XM$lO{9lG+nG;cly=Z0d0-R)qzZCa2PcrqdAx!vm`bPw#J(KFsRYEp1kAqt
zOQ=N3!<@>%49sI}t{s>HSDb<|NG@jE%fkdj6jaA~v;xH(#B79wLDWk_3``dc2EoKj
zDv*TRb3x0?2B!)}Udw+5!L$OTth{FgOjjh$+Y5mE`^(Lw%DtS;|I_qKSwzag%uV7P
z&O<~@svJ$+3{2l_PTc&><Sfp-WX9v9&Dk{0>}1B^?8m=6PFMU+)Lcxy3{B}2%<x=J
z;grh7L{IpHPv|_)s5H!t)K1h?&f)A%<Q&g_d{5sjO!Q<<sf>TlLln>HR7L32&;C@!
zD^P-%OoQ(88hr3cNr1yOWV)VQE}vWj06>Yz;fFGW1Tr|vjXY83q)!FqOcrfX7e%{P
znKZW?l|i!#a!54HdZ~5Lsj8@lbwJC_s)tl53#17tQfY_B62;28hq`<RC)i6DjZ!I{
zQYx)dD~-~|jD&w9u!1RQ(JrL|GH^jjfYKIC&0TxUD|Jy9bWi^(rGiMfJxP#KZtS%c
ze1b;cOg2RWAw&Z<wSvlv1SPoA&*QZ&<<c}gR76cwMMcy?UDPXWR7Z_eNu5+mtyD|B
z)JTO?Ox08`bx}^m)G0jzGH3#JTLT6lfCGSl6qJN5)6jp*yFBLVhInuQIEtLfi7FBe
zR8YOrBk0vFwSr#-R$sLOC?KS@LzHqb2Yq-H1CT6vxSB=enF_25f4EUb>K(O9z_+6c
zdU#e-F^5Lv3UVL-ssNiq8312RS7Gf{VC779eb+6GS7BXOcx_j8t=D@M)+}gDY=~ER
z1=cCZ0v&&Z1a@UnGd;a@g;#k^SY8`edKK0wIL2;}gkK%j|Hj<4=Q@Kn(1LeuQ6;!o
zG+0>6BiMvZ*C}WM*laF$^;d_jSQahVl7-oXrB{|6){<RWm|a(QomYjWS(}|%hh5p1
z?bx0bT7~Ucp50k{9a^4s*?hfOr=?k(t=ON{*<gSD*QTA?r3KoSwO5(_TAjsNvGrQ2
zrCJw_S(cSrr$yVEU0R?0*Q4dyo&8z1g<85TTP;`uGk5}EScAU0h}T;#p6toM<xu6q
zh5@M6p<Dx8C4)9VTdRHBn4Q;ltz0eOSdKk{%hlY>z1#!H*0obDco@JNRg~DF8mnQ4
zA+>*ssF=H|k%v-A2S>3A$I1$uN(-)0fVk+Cuo0Why<Gpy{oUXNUd}CE%}rO}9p2-`
z++NMx<t1L=oq~w9JvDe<=apU;Oate=+%3q)CzyiZwcg}Cf+;}9=Gxxo-QG0dwJkW_
zUftJS%z`WcU+8sT^9_J(0AKgDyhtEk_vL@sED*xx;@r)3-;!ls19sl#HD2W{VB}@s
z1x{WC7G4Q1VC5xX2WH*|hF<?Q-UTjR3x3}86<!QRUJCA95x!sz*5LnjU=t2s7xv%Z
z{a^@QUJO=X8>V3nUSR}oVG`zGU)|mwPG1lv;+VZ)5Jq3|4dNh%VGj=B@m*mb9^roo
zzT7W3gP>D`eYm83C^y3eJy`wX4yB67QHdve0ur5J{{~j#D(+k!PGI?sV>!+OEr{c|
z)SAH?yhkh=(89#bq7-xBhOX3_1k6@ON{2yntlPz1OWIv{__LmY2R5K%N}l65uH#F_
zWJ{J`Os->3mSaukWJ~^JId+8IguQ=FzT7OhHe~#QP}TzP#R5<sWmOhfUK3?p*5viw
zwMvfT#>@sZzyehsW=^i-CrH^~P=a1=%`*ttPNrimP`z7z=1tD!Q0`??o@3y}Wo_=}
zYEI=$=4NQtW=(!&Z>Hu{zUFGK<!=_}S#D=hre<}%W_2Frai(N?re${y=SzQ%XMgTz
zXeQ-szGrW?W^9J%V$S7v9%z9E<$CVthu-I2=4bzk{$zUof<}OZUzkau1b{X8;xHCt
zFqYNZDdUuIfHeRHG%y1)IOtjS<a&<iO2%Ya#sZtJ=`676Eck-h#hS-btvv>Sc2FeE
z5{pIyEpIr7N>S>U3LfJ5(VKtb8lB2CNCq_dy=hs_X|2v`oz7{U#%Zwb>aCvRufA!m
z_G+}|<ckdeNq~f|&gv}Kfk+63sPby8&R#j@>YB#soZjW;isQXbYg^`JySC{a@U_AQ
z?5l>~*gS*89zr!Z>%GPT*5qZl=4-oF?6CgoyuNJCE^M>TY^)aT#lC;)&@OGPF6+rQ
zYpz~wy&mn>25r&i?9_Jc*5+%^UhU2PY||d?|I#*X$%bvumTk1=ZOJa~%-(9&?rpIK
z?9_&B#U^XuhU?j`ZQxGp(|&F3R&MTgZNa|m;Fj*(7H-z|ZJQngNRZG0=x7F*hYjV>
zY<O=m&W7aTvRWm8$U%Pqeu%#_@Pam&Zs=xi0C#TW{sJuc0s}vA1yArVfCD6Dir4kl
zX{Cxt0n*X63T=P~3+GXK_|ca_3wJPwcF?J~<TTb=2LwNG1YdCmZ*c}!@dbBr1)p&P
zPjQ>BaTv#O7XJb^5a7@|0~W^uHt2#cXoH&$@+{cRCwTE2ukn8sw`n$z<{Y>21jhou
z4ge>YWj63@Nyu`n=J6+}Y+zP#1|LFfcyj-<Uh7@E@zmb&=O%M9uktUSaW-%9Hg|Ft
z5Ar65@j5?pKi_jW|8g8>^965mCQtD}e{wme@+uE<8|QO3-*GheaziI{Jy-NbPxL;Q
z^g@sGLcjDZmvnzb&-6(TbUg=jMXz*7m-9M*bT?mhM&I;N=X6p>bs48|TtD(upK&k1
z;=W~olmGxJ(@^D#Z)`Yr_&)ZgQ^TfvI)6}rODp55O9Mz412*_`9cOhx5A{kna5nG)
zFJOaj4|i}McQ)VxRylFEoZYJ6hHurH->DRE*j6|qhYf%4QBVAoP`Nuzikh7fyuwqv
zbr^SW4|j1dc!U3Tf<JhMZ+LQl_=7KZ|8f_1f-iT4zxZqSaclsFho5+Z_joqowKQ0G
zh5vYo-*R4Sc#Nm`E1!6A?*i4!28(BSFMtGH+fDpMgOaECE^tBC9C?ZlcQb%Z)&%*F
zzxb1nc#nTi_>M>Tpig+D2YHN#dWoNTglGDoU-^Y+dW+}!f;V@khx(FtdX<NGm%sX*
zr+B2Vc$%;JvtN3$w|ca9`=uZIrmy(7fBLU~`mtAfxVL(hpZc}``l@I9t2cYVKY7CM
zd%y4c!Z&-v@A|$sd9n}sF?fWE*atJot9ekv_ojbu`99obSHpN{x&{D%2RML|jvRbY
zX)}m}j^FvP-}{Isd9e3_F35e|-+lkyH<Z*-t)Q9kZOEOwa|%+C3X$MZPFaU?@EzxG
z{$zE4)is)ss)ugHnrTf3<|n+~O+eoNe((=}@gINkFMl;i*~`O*G$?=F7kT#I{WRE3
z_K$!6UHg7ExL@#310N57NB{^%T?P^)C@BEKU<eULVpFh-!iB)PEqrM4BF2mwH*)Og
z@gvBPB1e)eY4Rk>lqy%UG-(D6FnkOW6i5I79|UaOc2ew?jVDl^*|0$<nl0Xg1^f0r
zNZ{1zJ~e3`jcKVd4na6wv=-z-P94~HV%>kaTL<9m*?Z=^l`H2?>^gJs(jh<#ARjw<
zdiU-<Pyp?{bnwWTTldZ#xNqOc|JekP?wwn*V&8#Pw@%DOtz0u|-nw=3=8d62nu)aV
zw4^ixwPL+^T6OEy7`3K3T`=~;+7&Hwu}$!i8f>Q*V*6D!<3WUKOfOti@X={CU3`Cs
z<~sU%R*j=&Kdj!p`*zQ8ux`hlc_8_V=GD(G*;%`1i{ZIjo!?%&{p<wScf_4PD}M$3
zhhBXDVHY2KDp_Yy9*(4;h6(;WaDYDt#6wX4L20OAhS?akMm_*M(7*!#JjK8oz@T9U
zW>)#f1srg|7~>mU%*X{B1Hc21Ip%+;b67d+=wnws>L{j|H%TT*j$HtdlVn>_mh+7|
z^`NB>JW4A0jW^%4V^ac;wMGBdTVhqVPK-9fC<cr+%J^oCGP((;n{BFjW1BIGktUpQ
zyzyq8d8#QR8p#b82_rPdX(t>Db(031d-7SQoM@c(h@5<8+J$Ziaq;JzjW&OmMld=J
zhfp@bFd_z<H)aZlp}|<XkQ#D!!CU~@BzhX7ug>_VrfCkEr=ouLIjNp^x;kj9g3>rB
zpmsv*XRkJbdMvPVUaKt}lJ+^HjFM(ctgwT=;q9lHGRkJ0)FR6$xPES1qmAB1+is}k
z&RMOi-|A^BzM9r(FQ7GQIwOCp=`Jg%o9MQyFS6<;Yb>Rc!n<v&_})pQq~=CUr@wj9
z`|22w{G!GI3hcuVH)|lY{~^pU=LA6mXF0Hs0VXDZKm$@aGDg4<Yuv4}`=%>r(QXvY
zg*@$$(+<?+@Mw-bX!(=Qky&%O&OQF1B@Z}Siro%c2|TGKl8Ytak3WCyxHBf1lbspH
z(rPMQ_tJOcy|=P@^UVbqT%=*>LfHV*_noi(U9{qRcLB^olGvU2a66rv>Z(XjekXI?
z1jB`J2w|fpbPIjn+d@e%?swjc+sL}>vEK>1?0D0@x$Udh{(9?t>&-gszwgfb@Wl6C
z{Oh;pUbphdtM0t(&EJ2S$09JT0RTVy0I&}P8OHoQPZ?s94L;Z(u+ITHC-A@yDlQ^C
z?z{U92L1Kd?}q(gu+#qzW$O$Ebvn3Vmv($(N@DXyJNj{@bsSJh<fzM8&bFl_d8sk-
zct-&vQzpt(#vFIJU;aWU!V%&R3?(#S{!Tc;6{0YOU`Rvg90h+2F^upvZcw2LMR=OS
zvCs{{z>p+Dh`$(~3RSAI5ew}XkpKkaaTp3f8t8{BU2%aCr)ePxvq--jW|50n6rvZC
zXumLuv5X@0A{oDE#x<@mjb~&d9NXB#Eyl5qcD$n)(TK-6mN1NAyn}<(AOWTD0|4`Q
z8Tb-uIi&1E0@;5n(NiR#2467Z4*2Wi9sd}@57Lj52dD=*-Xa-wn1dZ?=?6Q~;WQo@
z<{Rsvhg~FiKy&2m|19lj;5WdLK(r8WOd6CMSm05IPI8iy`THO;>vsoZCKC+BRHieR
zX}@Gj)0rzQCNkYHhdHPr8;<G*Fz{zg`o$0!)l}v<$C-beZm3}n%tR*`Vi=5c(vNUO
z3;++U2~BAx^M?$nqBj|Ghteg)nVky2IrT}+PKxlI@MPgT(}~SxK68ZQWN1UD`A~go
zl%mJ<C_@v<QISrQo*ji|NUJH*i$)Wf&zvYrPl`>5&NP}CHK|Q8n$VqI6QY|$Cr>%L
zQI#IEp*Vk4XiPDRQl%1=n2UJC$qtacejK2Cid3ZN29P}i@S|o=>DezH0SJly6r{u)
z>Idzh*0r+r4r~9d!#>E74#SM49Pda6TIj(ec(fxOd-1DX5&#dwfTOU9@y0u>be4Lo
zBQEA3hg{Q<4t8kcEb@SdI;2FV4Tc3DZDlKHLo0vU(UR7!qz$cU)tXw-&fyV;+R0j1
z8>`ixcCEOQglJPs&;Y1mwD!bCBwh>7+S<XiVOSz=?SQKQFk*^~umc!=aT?-M7q!$~
z=5?!E+|PoRwyJfjcT20?+>W=r=8f%n&l}$KT9>`VHLZ8C+g<Xym%Ht)?|iqb-8wMB
z7ixc40|U?pfHm|aD7pH{Rm0Z}eCVSe2_V2e7~l`5I3Pv6ScG=hOJ6(GVVic?CWt#c
zhdOi^3jQ+(|79lQ4kn$29#d*dVS{NGf25Kf`;Z4a*inzX)Wd6w#j!lnu@A|TqaH&7
z0RJ)wH_4c(G7=E+hwJd+5qFrx+Z6GL=P-X|DvP+vS+;VQy^Lirk9iJq=u|MUd}R=C
zbGg4@vxsA5<tuwx%XS{|9U!+5M|@e$c35Hne34};Z_`kbaD*J>z=h?4bI>6kag^(D
z2jrUhpvz73HV<0oNW*!~YF0Cw3GL`mhZxMEp7W#E?C1`2ILuw{G^$0d>Q9@R)OUYY
z^_NLa;$820%~3A(qvy=wS+DukrRK0&LELIj16$dgX7iM5P32(=+ryx)^Qm!t>N<lO
z*sUh?wc-CfXjFH*%AnqIuWjvX4;S0q+9vm_zs=<_Lz&7?Jy`>8wg3wuGAP+VaDscX
z2CuD6syah}B%k02I>@@%4bOF$gUx?%IZT`m6sH5jElvj~_$VxsDF-|(*p{p%2_8On
zOk%)+n0KthGW2LnJ<LI-!g9$Q@KAs)6C))Ja<(!9r~}4>E_94vT;oFzI>qOJ@oiH4
z;zbv_(v2?kqHmK9(oF~;k{<M^2VI_k82TJ$JM^b--QsfCEimk`^r<@{paFlt1|8~<
zXr$92Ru)&f92jj*GasGq{tSR5h)%@Q^djq4KMmcZ&UK<Ao$PCe`q<Zw_oXMD>s41g
z<H6o{|EZgO?oF2i(;pxCs&l^aj?Z}JM<4m4Pk!-<-#qJ&uJy-1eeP3lJj0c)`^^`<
z>0k#v-_5>zuG_x!OAmgcpYDJ9)}tQui4H#Gp+EN7uifcH?|tmcPV~idAMeB$J>#u!
zbgI*#503B!Fm_;o1Edch-h|n|10KG4Fu+9q5b#$gxr?u-zW<w-zSB8h@(myXCfW^r
zMP*QiT)+b?ImXCMiDHz4JZ!^R<O2gWkU4}10Nld`9?;F@TmdQB+r)oOlUc?MbY20b
zpbD-a3$~yN{sM+%Loc+TE}TX%pkSieoeLIV3`xQY5>7Dupy}WsuIvOb;9vpL0dghZ
zM4bOhBn%)7F2oK_Ar)4k6<#40W}y~tAs2R`7k(iahM^da;TQISB0yCz3=&fqQojYo
z_{iTPIe>`}fWbM1%BX)qSRn!#CSM=)p&$O?AJPF1v_ob5$YiX8JOF?{$V1CP29mr3
zS`YvMHIO<~%~@m#23CzE9?8!A5-^cc2>_xfjv^_RqA8xDD9S+}#1J;<f+-f^FO1?k
z1p_LIA|9wgbcmuZM8XqggDaBaEQ%r@=-nyKK}2+eB=n&w4qkr%#G*3dqUhA3G)^Nm
zR--juBQ|EEHf|#~cB40bBRGbmIC>)@=z@RkzycV+K5PR&#6ueb#rSwnzy&}&6jB0c
z|HA>~j2#|8`}Be$jH4g&fj|x<K^Ej0$U`9-;@iN(JpceZJk2T<28#gzJ*WdNC8CMQ
zl9reQEVWHJfP;S{YSw0z;5(2585AT+rld-)BulnrO7?-CbqFx1<Z}69AE0Ec+@wHG
z!Y}mXF9ZWn1|>n}g4^NKK<WZ84BjuWq@e(VP7)*@=v_<3iW~?eOb!P&sNz5(mrHh~
zSAHc}hNW1JC0UlGS)L_Yrlnf0C0n+oTY9A;^ukYS&&_}MLjwp$%)B8S1^@)4&pq<P
zOgsewSlBJR<v`*A9%MpdE+%3krebCSB0xdM8RA&HgFIYKIgk=x^;P|~*kH9B%`N{1
z1j$1=bctdZRw<!{D#-&mfP-d5#$<31SPXz1EM_JsCLb^+V&-OJHs)>OL2fE0aPmPW
z5T`&Urf`4yW^(o>b2=t)7Uyr_A#wIXQXUF#_GTs+iX;SsB*ako3`cMB0iflIBow7Y
z^Z{@t=5g-kX~ZINI;UbTR{%WWZ(e6&#>#bO0ujxmZ6a55Iwx=D=5Y$Ba6Tq(`lfE~
z=6e<>aw=zVCTM=z=W_1mZ5F3`S}0@Urf|OJaaMomZz5=VDyMCR=x!#cZa(OTa_C|P
z=!fbihJvVp{-=FX=!rU~h|VZ;CTDR<sEuOhiYljtYN&dK=wq7ba!%-k{wIRk|0j>$
zsFE^haOP)Y3MhOcXOaFWiB{;8F6V;!Xk(gVOw`4Q0DuGloPl+NUdkU|`egz5#Zx#%
ziuixREnKOE66cHdD2Z0-oi^tl^d=!V0skamT`?qSaw0fTO^fx_0k8uD<XDhF5CGHz
z0QFT~$b%)`Ly$SmIw%>>_1rs{gA)j6pDHP-ma3_qDyjCVE->RgiOwM?sXb*#t1d)u
z_9?3lhv5){skUdT79ld~0<4-2a~euEG^c-mqAIc`tFkUDs`lxqGAo~!>a#+twAL!M
zUaPg@!L%mp9&9VLdMk5+E4D^!wj$=JiYvK}E4pf{vaYL=3c@a&3>5su0Sx~D1Gt|&
zYDk*SBLrxVg!tu~#_27b>b8Qb!CHbK?13dLY_}%t!Y(W*<NzZUqKdggJJ7@1M2UYl
zAS#l@SYyQnW1NKoI1NOu64?NNJ}_coLEvkWplp_883+Qx$}GapEW>u|9teWA%51|X
zEYCiy&gQHo;DN&;Y|heb%>u0;I4sXD?ZV<gF2w3RVS_Jp>%ul|AV}@3B1F?FtS#KC
z==g#qC~U%#?Zb|3dzS6kI<2-+g0FvKLoYmRpISmC$P>{fY#?+_BpB_`?g4TsY|_%K
z;Erw0E^W>pt=tyv;hyc<S}ouTt<xGV;?}Iy@-5#2F2b5C(kAZW5-#H^|19X5t>@x_
z%?_^N@~qGjuC@kl<36tEmaWXjF6o}G&i1X*9&ORmEY2b=?&>bkR<7APE9-wcZQ**Z
z&^9dLIxX@lt<TP^>?-fs9`EwT?$9pn;1aITl5WpZZ{rf}(RQxYdavnHZ|*9u_LA@1
zQtsNSFYNX%`}QoudhX0tuEQpRT<!or*u*~I13wU0z5>Niyq}u(>scHCBn2!m_<}Ad
zt<hSq^)~R;>h9r^t<VO71z&$K24}D!Fv0=A!^_qcBBI!fEyj@ORhNj3$z>#$q)nHk
z0|5X4LYig-?wrfYraCl09SDL2Q$hw)LM3D{5C^dk2Qd$4@DF3~5o7-_4<9iwnCGcT
z!Y&MPC1~w%NP-n#aS-Q1FgUR!07E70!4P9`dkXOnSHd62XEykP5kG&?FJ$l`XffO#
z@ew0e7}xO-7qK1VF&W!25)&~HCvg}bu^tCPCCD)!=kX!$aU%cm1q1RTD{&D!@*^j*
zBunum|M4ILaVA@EB~Zd9UveP#u@HYUD2wtT^ujglfCJbB0<4Vx=B1e?*qZvo`Yiwh
zEPzc+SezaLBbPD|Z-RdXZvrI%b0B!~1q*W`m_RxtWW}D?V7Y^r)CJhc@YXaS)=<z1
zdx-+j7z2f1CvH~H{Rlibfe!PqFdH)uqjNF$a5@)pI`?of>oFdm|MM`rvpWy*E&xNU
zW<w<WLLr;;J$v#JWrHL{LNEZsFZ9AWZ^9=0K|}w6ClB&4|0I7<{=zO4vJz+X4hO;*
z!!rhN!r$eJ8tZXS0;NL7voI5~87DChBiA2n^aUGpFt77ENAxGlbP*GCOw;rZyE8+-
zvrNadPS^A?_w+IAG!KJ<JSR0w^K?)L0x{n+R2MTSe6&v&^;4%bR0H${D|H}%!X{KT
z9*;Fc!}Kt3bXb2+wMHMaJ(smn$8${s^-Q0$1y?mzw{ufxa7st@PH*&CL$z1i^jrI}
zT<bJ2OSDH5wK_MoCl57Be{?**GfzYEQ7bl6fAwK+!YQ8&KiL06J<x+cIKVs<n1Kbb
znj%Cz7=Qy9Fjujk0}ufF07EbEGE^V-R0p*=L-JBbbRd5?^dI~-a5pqV3-@nB!UIfZ
zY#zo4`j6AdLuC=rk(lPm76~<X$pb}dH_vd(GFb_#Lpq#+Lld`e19x~o^h1v~Lyxy_
z54U-XcX<DHdJlJbm$z^ecOh?rT=rxyV8U+?b9=+LZ_8yb{B|LC@_Q4vd84;{|G_`^
zH+w61dOLq~MehQ6A2@t(LLoP?NEd>9$G3;qH$;axd&hThgZP85w|Kkvh|{-vTX=+D
zIEa&XhXXi@&o_(9czvJugXg%5lX#5(_le(laF=*{pZIyR|M-i0_<_GTegAibzqgH(
zIFd*Bk3;x+1GsxXIfCoBk0<$uo4AsrIF;A<kavIhn8SFAWBG~?IgKwllNb4pL-;1x
z0{TUQ(~JW;;1xcE4$Q=(EPKxy7(h}000At(%?!Xk^o(rVf_n2fm3O(LGx=|8!jo6H
zCOCpJGxJ!GiCeq_JeU%Vv64CzmN``5TFg@Y(ZgC4P>Rv;NRs3@-@+eI`lOS$CeZq<
z&pLml-+HCfI<EJ6r33e`TY9e7I&ky%AMp3E<2tWTIwmOluupol>w2y4I<a4SuH(8H
zXM3^#I<8}aFGzwf+&Z({y08~JwgdaQSG%;QJGHO-yR-khx(oZXcRRY@ySLMOvl~0F
z?|ZxZda%Pgz1w@dn|rgDd%c@GzAHSu`#OKQ@4B^<d$2P*z#IF-b9=rMyR~C`#TUH5
zZ@j=?e6=UM#VfqObGxo*yvdim$7@0%*n%zOLN3UqK955<+<Z9D#TxLrzxnx9Wy3ir
z20U2Fgslw#IKTijzyL%-0_VcC`+K<u{Kh}~tz$wVTs<aeLe>{T)>C~B7{FvwrgDFi
z#mKb-jmZ@~cy`k$Rsl#&kMu}fG+;gak6+Eb2)=rEr$ZS~{j)!NA-Fu(1HRQ~z1Cm-
zt#iHK5B}C`{o!xD)jz!92mar4{nh_{;dedd55C}A|GniCe&h!};2S>QV}0ejJmWvS
z;G2Hub3WET{@|1T<7@rvAHL<2d+UFb!YGh}%Wr+<*FN7nKH_V>=Mz5F|NY{dzUJ?~
z=LbIF13u$Zz3f*$?`yvCPd@1*zV)xZ@jpJ+Uq1G;e&JJo)r-FLt3KwdKKaXj-w(g*
zm%jDeKIOl^`Zs>zBR}i+zWeX~;Uj<a14JQB0t*S$gsI>`ga<VpEQqNg!i0Yf3oUF&
z5tBp#U>Be0h!GKX6u6OMM1~IyUQ}4pqQ->~6A}bDu%OF|4^L)vNN}djfjb%AlsJ&3
zMwlNv*5t`>VZx3!N!r@Q%a^ZTzko$r7Y=LIbO7*S3lO0HTUfE!vVj$Q7Hzix=dy-l
zmjEtY0uCau8wUZHuU({O0<BpQ;>wJF29+*Vm=SSe#Tjjxkf#nZ<ad+bxwGu9T)B1%
z(tRuUZXG;$^Z)?3Cy$=CdaGBL24IgKY<cq1wNsb*@@2~JBEN&@<j`W_!-*GP4A^*b
z<H(gacP^aybLrEmS1%sjdUoyGxp(*e9ejB4<H;u{RSTEYYruj9ixh9%xVBh-<pGcY
z;VW9QY5A*Nn{^y^wuvhN0vJGmoN&a6=NDdTA#apXNRg0~3MZ5>LJCL0M2839iA=J|
za&zY!au8_en-o(lr#^ki;cXpr)`^jw($bkxGH}4jjQ_{nb^~#p0~!&<LQF_8azZ01
zRI)+|l|<4)3nPqD$|f<yP|6K|FPv~o4Kw7jNiM%U5=<sFOp?MX!Hg2i2r-%R%QiFo
z@=Z9Yv@*&!!_*MTDe*kg&o%9oGoual+_FwC`?T^<F$3+A%`250GtCR@tTfL_*#vaX
zMdQ5kP9~FFbI&vVbkj;KjZ8GdL@U%&PEfUU)KNGwgwoVIom5lHK+~LmG9yku1-8sK
zFT7IHN_)MuOj<V_RM|5dt<_dCWo@!gW?%i3$tSU`R@Pp(9YvH~q*7y^8E1@dKCH&6
z2Y?3R$xp0ovgt3aug;M#oN-bkFadwk$ts>}dXdHdO*<KdlUXl?#YB_{BNl~Xa>=c@
zVoW-bpdNET+^sUp%2|hh9(Z^x=NohE0YDybyiqYlcvgN4<`sJ!@yE#8xxto+BbJ!r
ziZ2d&XrX}yn&_f~_IXaEiC$W0jG<oIVxOm``e&#wKALKatKK?8tcP}b?5exA`su8v
zRyygj({9^rrmsfZYNf|kTWq7RX8Y^7F}8c|sM8J`Yr&HaT<xQO!M^)xzqM{$?}-V2
zd-B4irW<U?55HV)&fm@(@vOi8S>n>SF5KzRKmQwV(M^Bc^1W-}<rg*9VVB)@vm)mn
z03Mh}EqsOV2Ag;QOb1|=`3)zYyl^o*bIGBf`~Q>bt-l_7>>qI=9S}nl@v?MY%*?ZP
zOoPWW9={=3A9~h*N&n=Vb)(FCc%XPB`~3Capa1^-{~v$>6yN{}SU>|F5P=C)-~t)g
zKnFe$f)SM91Su#%R`3EC^H`ql+9i(!0KfoUG0X7I5&&#Kz<BOLM>^QC8g{e;9c_t6
zFdF0v33lQWozP(pcUV0g8p4M=><~FVwwv6TgB(fx&^q9M)ee+-CPnY4&ppr=Kl#m%
zevvT;Kbjy49e#0#IOO3MKVb+ke$k9M9OD=}Va6Yx(T!ttV;jYIMliAwjc4Q`8}~>?
z9RAUdbkt)Y?Fh#-_OOmYMB^I6s762r5t4=fyj~*_nMOQPl8k-iV;%2UNKPiQlR*Tc
zAQef-OZE_ddwOK!9L1<eJZ=()bJXM_VR=eNhEb57#3dp-sY^%t(3O28WG^*&%Uc5S
zht}gHBUiaYS=v#VW$Yy)8JS095>u9^Y$YW52*g7QvwCs-<0UVdo(yh~gKa^_$^ftc
z31EYSX*nT$+!Y^m+!Gz>d`Bd%;R{^2VvxAh<{p24`9?t+0-<g6geN+|P#toD6Q0n4
z0X|ej%J>K#?O-7{0AL$+hzK0y06;#-k(nJ?W-=eenLAPnBzfRq7-w|o4-skzPSli+
z4rS;<InhQ|D%7S8wW%A;Xhu);6rl^P>Hkg-now){)Tf>>gj8)R)rB5arVX_!L#GN<
zuI7?|rU^}|Lcc0i9Uc{-Jyj?jjrv8dvXY@a-6|X#T30wORI6;QYFDo+RJQ6>sc*!q
zE*sj@o2C`7YF%qlrK(t`{`H4cy{a2$%GR9@wy0;N>1M+UMx3gZvSN)aP;n~AoBGs@
zaz$z<kZRVn=CQ30H7ZVx>d>*yHm^d}schMQ8d21;_N}2+>{(|j*3-5Yjj@d@L|MC3
znNBsenk}wwS^Lzlva+_#t?OXVs#dGcaje9BV`<xJR_4}KvEN<lR&iksVB9X995mkt
z0Wbh~#&awvTwZ+mi%zV}u%EnO1wf(yo7BLD^|6ChD?}OGU<W@KqO!mR$Kb)6-WXAT
zWaV(jI_gsoc4%W8^^iw9+CcySB%mGeAP0#xDi3zp!yFn$Dg7)AnRMJB7ZBF*j(OZ;
zAO9H0K^F3miCknO9~sF>R`QaW++-&|8Ol+X@|3AuWe1y+p7Cv0fZzfEeXN0A@tkMw
z+|{pi0@xb7z(p!pVPzoq!UXg%M~%OKcTvogV=M3oO-9>M9{~7+9VY&>KV0}ib<8+t
zI&;D<+}vnKKN`}Jmh_}4U1>{S8q=A6FfOVAj4W$;KD<kSHtHKZ{B}bU=7=GHK+U{J
z0OJ?cSaXw6kqTSa8Wp)tMXq&yivL>Mde^8B2At*mv5#IfWKyKZZQw_a0(4Y=9qPhS
zwAWD?kkJ`3?QnuFgt2RHqv8~|2KTLlEpA(fyWF@YH?VolYjNk=*X#E6ugwi_V0$|i
z@uqjXca82^pCaDuR`<Ety>5f+`q%WncfW&;YjvYL-u`B|t_=?Ca8tbA4PUs$zfJIb
zr(5Czw|K(mop4_JJK_Dt^~FVhuJ4VzyX5=M_rUQD?tfF<<o9+t#%~SqnzNkdDZhEg
z&z<sb54_hJ=l9VyUUZ=o+~^0dIlhTbZj9TU<^@OhyYH>=eFHt%D4)90-R*9w@B8Eh
z7x=H|eRPsv9pMiDc+QtjcXiAEeb=0>3YlAtm8}4P0c$Wd2?LOgd8A{1)qT&3I$qr%
zTws0bGN(Ae6%KIAUmo+BzdS48T#t%c^rGr`VaPC}j{3;s&Q_E$$Rrb)b);k4!qEKe
zX<vKW-yZk5*ZuB!-+SNx9{9l*{_u%keB&P<`N>!Q@|oX!=W8AozNm^h6yKJ29bgS*
z!7o_iF#&n_SMlq(hBa`1VSH1Z|NH<)Kl)Fh3{zY_{qIi&FC@SYB6{!GA`(40a#IeG
z=EEFT3fWi>M9e`RZeSNqA^jBa{o=3vEUy9IPXX0${Une98}I@lPy!dQ0vXW#7%&6X
zj{?~b14oboK~Mx2Z~{B<1pgz@1xauOH!%L#Zv|0s20>5-Nw5TeHxTn=Z~`xI{ZjA;
zd9VUMkOqx#2w$)QX>bKO@C1ht2W1cjV~_`v&;eaA0~=5oYA^z+5DT+#22EiLeNY2;
z@B@YL2}KYLyRZZ^?*-8i1x*kG-wz3Kune0J0)Y_ywlEH@a0l~H14B>`0kH<*?*>DV
z1A{OO+b{)#@Cn&}FcH}g7CH?+zz@{W0Y64ac)r5$93UUkf$yp>9Du<ZrU3{Ckqie>
z50{V?TQL=Ip+nkX9q=#z)Ip>4?9kTXE%<LlSPF}<4Km>2rEsAYi_sX55gC(F8JCe6
zo6#Ac5gMaW8mEyOtI-<&uMr!wQ5(0B8@tgPzmXZ0VH(_j>(fLr9M)m*`r|7C00Qu#
zH1eSxED_YkVI4?u8m=)E@-Y_lkstR_AN_G3V?iMQ(G<`C3gqGQyk|1ZA*0wqeA*!a
zsOZ@8ucc@aGS;CTpa2p=!4&e5BmWT~L((HhG9XFPBL|WnO+g_2F%(deA5-!qM=~Z`
zGA3PeC0Vk6B})<~Z89J`k|a?wAO+GVY4RV7QYL4zC;`$VRWc`IQYBZiDTz`hgEA*e
z@+EPyDnU{yq4FfLav-@9B(st#gYqU>vMguPC8P2yk8&rg(k9vRCH3(sr_v{tk}12=
zEr;?bX;LZU@+bc?D*sIqD6cXwo3bpy(klBAE18miDh<;u@$w|=vM?L-CLJ>^zcLo?
zk}vx)D>pMUC9^O05-1xJGS_k{Lz6EvlNYQZ7~snsu>u|9!9UCbEY|TWuwfn=U>i=%
z9^z3R(`g*yK@F}!){GJ^5i>DIvKaQ#BVl1AopUc?p%-xA0c4RDcLRw$=rvksHfV#z
z`fN6T?yvRWVIL51IioWlmlHbkaXmxf6`r#>i=jQ?GauhmCY=*H=kqA_^E~YnI$z;D
zWl}u@v^hgTKacV}+fzZA(?HqNK<5)Wi-8!JGeBWsLhqA73lu-;6Fv_#6ath$)6+gP
z)IpV#L}k)HNz_CCOSD5p^hEPBJ?}F__wz)5V{|Vsv_LZy7M$}!XOu;mb3uO;NNe;#
zPn1A?bUzamJsEUGhm=VLR6d=QMn9BABeX_Gv`N>~IYV?gso@%e!5n^5%f_J{=D{m$
za~-n*HwU0J;({LH(HzZThJc|Oc)=Mc^hcL-MxFB&`qWSV^iS&(Q2W$93H23VK^f40
zzzOz&9IEX)-=Q2-#vWEI9nxVo>cIi%AyZjH7E=lyV9yMk;T0w{Pyw}24RugQ^*vKH
zRRvX5M-o;2)K&eI7zp)LSCvpFv{qx)R$0|o2lZA3^;8ShJ#RHuiPcx#lT>#VRhbo7
zdG%MLRa&EUSpT7wT4fbkWz|qap;n83^;ub!R9n?PRkc*LwN+>JT65J~$+cX~)m_E)
zS)FxU!S!6tRapy_S-CY^Y1LS@HCT&vUH^1f`}J1|R$PY_Uy~J32X<Atby`jJSp$|<
z+jU{P)m=@Y8opFa$CMoCK?2rb9^ye{<6$1w03RB_HR3`a>cKbrjvhl67{-r(8l=Hr
z9d<-<RbgLY75sD+b{0^3_D_NKSBU`^n!p4YbrylqH##cWX2cwx7HXf?Q9soL&Hxug
zRcLjVXMZ+TeU?>yHfsmfYx{I;z1D2E7HEZbY{gb*ch+sSmTQAHYu^@b>$Yv#Hf+fj
zY~MC*@78YrwKi<mHfZ(sZwnWHa0Pd8+4gJWHg5BFaS?Y~)fRH$c5T5{Zre6(=hklB
zR&dXjYdg1W(Kd7kcW(jrXDv5z#ny8d*KoI%YztR%VHa`f7IIfNTKl$Z3HNn1mvI|c
zcYT$2XZKLOG#Em$`o;krYUIQs-~b+g0sO%=0Du4pL?7&LWBVmI=hPa1s$m+OK~#5F
zbn}*T@3v=Gp&7~-ebX0x&sTlZw;7rN7arjT8Xz6?tRmGxq$=#fu8pPEfgBp(1{^^d
zh=CZ|*MQCUfD>4O7Z`yV*n!>mfg@Og)t7x0xPsR=f-iW2Cs>0SxPuq?gCTf>N0@|5
z*o03Qh5u7ng-5u8&o_jBS6G8%xO`uDgK5}+Lzo)eOV#*G6zwS;@&S6I_s;-^K<qD{
z;wwJZp*={k8n~BcSGZ@Hff+QI8KQU<s<;`dxQf+Rim}**iJ=*s!4@E4ep4)IZLwp3
z(e;pM0B*nxY{41c*M%FnXXW^UyI6|nn0;M%is^WY|JZ!r*Nf+WIFS4JeaqL7w|IT0
zIE(ApjuY9DXE>5E*p3m{g%4Sf`<Rih*nAaPlB1ZAv3Q0nS&IQ#jyw634;g_KIg49Z
ziUYZk580D3nSJy4k{h^{Te*u>S(FLciW}IFQ+bc&_>Wt8l`Hvt4VjbImxdp?lQ$We
zqj{76dpVN@*_tbVd4|2%lk3=$5&4p9*_vT_i^o}&M>(4vIhh$bmS>rllUbPEIhXNR
zlP}qn6}gW&IheUQnXkB<PuZ3=8J)Wsi#Iuv3E7S-c#y@JiuKr>`FN03IhO<3l6#pM
z#_<}^Mb+@Bzs3|D$RQo>frvwz0HzlJjM#XwH#mbKX18~L8l(Z65t@<}8k4uU8J1xg
znBk`78K?2srf)i@dD^CVnx~z?6l|dpY`_F003Rek7Nspl%mE+p!31!i5o}?AyZEQ2
zIH-YIr{y`PdpfJX`lqwnt9QDqwHl{=`mD_wt%drm!&<1nTBqfDuH71|?|Q7enyk}$
zt^d7xuiu(~t?N3l^%|@HTB{8kuiJX9)f%w-TCo?qt`WPe1>3O)d$KE=ugiL`8T+my
z`>)~Jui^T$=X$FjyR!-Vu;rSr6??TW+p#6Puk#wUK^wDIo2+G9r>7x@*P}h+AvEww
zqx}nc(LtjSU?29u1n|LRms`1&TLKyY3LJoC^NH_&?8zM3WBj-`8faUrX&bWDdKpS#
zr^Oqrd)lncyQfRRrp3FkcX}CWdKsKy8hAk(N+BBVTN<K)7FI#NZ5qCPI;ht>ya7C|
zzgoSSp%f1Mrqw&Z1G~J_d%@kCyal|#!5XN+I>F6*yfb{kgL=XX9H`mb!^<1N4?C^@
z8C=7E%bUE_`@qTjym1=ErMSfr9K9KQ#w$FhMSQ(+e89oG$1^*^6P(8bJit->!Bcyy
zG2EvaoW~VB$9vq!(L216T*1*Az+GI*0i4K_e8@4J$6;K+lf1iEoWPAd%EcVaEqubI
z9L0^i!fCw7`MSpkT+X+AypJ5jwH(Z?oXHP=+{E4cu+O}`Aw0bkoW)^W#sU4o$J)t@
zJj&1e$Ccd5=iJ2Ud>M4R8mwVG*drK7mLbUjxT$-%JK9p$AQIXG)!9SUQN2CX03TrJ
zp89LJuRCU|ff}~^!P}hDBfP?~yugzo8A<`z4c*X#-Pngc*oj@)hn?7&-O$6^#{ZLl
zz1fpt8K}M4uU*-*ec2Nn*ri?Cuf5x~{oA#j+>3qNvpw0zz1)SJ+r6FJvHjfPz1Y9q
z-Gx2f*&W~2ecYYh+|ga##U0(>J>UDi-SOSr_ub$FzTEl!*a!aI>D}P@9pLpn;u9X(
z|J~vVp4^RH;p2Vd)BWG;-QF>N+P9s5;+>u3*`4F<-Q)#c<If%8U0&lsUgk0W-j`k9
z!+qroUf_3L<vZTyl|34uff}fxi8X!GTYVl>EYv$%qsT!GR61s_Vf?7x>aU(XB7wx3
z{+{4LrHS(yXu;tTUgAxj<hvc@!+qXIp6BnK<7wXQSsvxl{^HfX?&-en<-Xy6|M4E}
zRo?FVzTE+T<d2>3-yZMho$mub@9BQ<4WI8B-|Y|o@9#eDb-wQlAKB$z@zdVhE#L1Y
zzws?!^bddV3!d5GUhf}&?N^`hH(&8fU+q&L^)LVQFJJZpf7)q(<vri$dH?h0e)s9U
z@LfOnDWCBpe&=<6_8njOAD;Ptm4EcvzVo4<^V#0-O~3V7{`EIM*lB?lo}uV79qBne
z80KNczOx<h;ncIciH*MK-#`4VLDPdl4M+^s?`g%>K&7`g>;WQKvQh#CmLh1dpu&L)
z4;FNI5FxE(6DbNjM$wqXiL{Q<`bhDZNMs#p8RO{jqe_n=IVQunlHx~ym?{4+%1n8Z
zqsxjaLr$dmk|Hu>7CjaO*;8Q7WXLoc!?;l;vXm8l`kblIr9hA^w>CXVlonBpCtrRf
z`ID(dk|DPW4BPS;+M_IYj<r}2YRRoTO|F#7R%XwnHiK3bX?A4CWEzvwlnT>qMWZkE
zYAi?+aY&dZ^-2|+*=I|CtvO3B?aEiG<i$b-Pn^1$WmniB4gbY^mbEdoe$(3g*?6bS
ztx%7hq{`R0Tc%|>S6$fFC1s{ac~%eWcB<Bna<R7FXm+?*@RJ2&<XzNzYQ{P*f@Irs
zH{a|tn-Z<OoPF5bC3!~WsGVA?JwpvO)mUTAHP-+GOfd7nLryz?<g5{l;DXguL(MZM
z8S{)vv}pLDfeWg!MmrL=1J69c0Ao!x&p?9`Zm*?BTvq1AR99&p_UDm~{q5)@kW}qR
z<d4P`i6oJu{TO7DP9j-akG-wrVUs^znWU8;J%waSRA~w2lUtS~<%T(J_#~J~4mqZo
z9v<0WkUp}Rrb=pPhRI}?RPIQpo_5ZpBUOUdDQ1<0iuvW3d`{`4l{%t1)0>9&*r%e0
z9=E5Mjw)%WpG7wLWu;HTndPFNiaJw=As)COgTcHC%rz}iV@$2w5os%d2fF&|s|vRG
PjH-7siqohn1q1*)gl~Q3

diff --git a/doc/html/slurm_ug_2010/agenda.htm b/doc/html/slurm_ug_2010/agenda.htm
index bcf45092c..c899e4b3c 100644
--- a/doc/html/slurm_ug_2010/agenda.htm
+++ b/doc/html/slurm_ug_2010/agenda.htm
@@ -129,7 +129,7 @@
       <div id="right"><span class="ucrlnum">13 September 2010&nbsp;&nbsp;</span></div>
     </div>
     <div id="footer2">
-      <div id="left2"><img src="https://computing.llnl.gov/linux/slurm/sponsors.gif" width="129" height="30" border="0" usemap="#Map2"></div>
+      <div id="left2"><img src="http://www.schedmd.com/slurmdocs/sponsors.gif" width="129" height="30" border="0" usemap="#Map2"></div>
       <div id="center2"><a href="https://www.llnl.gov/" target="_blank" class="footer">Lawrence Livermore National Laboratory</a><br />
         <span class="smalltextblue">7000 East Avenue &#8226; Livermore, CA 94550</span></div>
       <div id="right2"><span class="smalltextblue">Operated by Lawrence Livermore National Security, LLC, for the</span>
diff --git a/doc/html/slurm_ug_2010/contact.htm b/doc/html/slurm_ug_2010/contact.htm
index 9a3e0781f..5620bb6ae 100644
--- a/doc/html/slurm_ug_2010/contact.htm
+++ b/doc/html/slurm_ug_2010/contact.htm
@@ -52,7 +52,7 @@
       <div id="right"><span class="ucrlnum">21 June 2010&nbsp;&nbsp;</span></div>
     </div>
     <div id="footer2">
-      <div id="left2"><img src="https://computing.llnl.gov/linux/slurm/sponsors.gif" width="129" height="30" border="0" usemap="#Map2"></div>
+      <div id="left2"><img src="http://www.schedmd.com/slurmdocs/sponsors.gif" width="129" height="30" border="0" usemap="#Map2"></div>
       <div id="center2"><a href="https://www.llnl.gov/" target="_blank" class="footer">Lawrence Livermore National Laboratory</a><br />
         <span class="smalltextblue">7000 East Avenue &#8226; Livermore, CA 94550</span></div>
       <div id="right2"><span class="smalltextblue">Operated by Lawrence Livermore National Security, LLC, for the</span>
diff --git a/doc/html/slurm_ug_2010/directions.htm b/doc/html/slurm_ug_2010/directions.htm
index 88108716b..9129405d8 100644
--- a/doc/html/slurm_ug_2010/directions.htm
+++ b/doc/html/slurm_ug_2010/directions.htm
@@ -74,7 +74,7 @@
       <div id="right"><span class="ucrlnum">21 June 2010&nbsp;&nbsp;</span></div>
     </div>
     <div id="footer2">
-      <div id="left2"><img src="https://computing.llnl.gov/linux/slurm/sponsors.gif" width="129" height="30" border="0" usemap="#Map2"></div>
+      <div id="left2"><img src="http://www.schedmd.com/slurmdocs/sponsors.gif" width="129" height="30" border="0" usemap="#Map2"></div>
       <div id="center2"><a href="https://www.llnl.gov/" target="_blank" class="footer">Lawrence Livermore National Laboratory</a><br />
         <span class="smalltextblue">7000 East Avenue &#8226; Livermore, CA 94550</span></div>
       <div id="right2"><span class="smalltextblue">Operated by Lawrence Livermore National Security, LLC, for the</span>
diff --git a/doc/html/slurm_ug_2010/index.htm b/doc/html/slurm_ug_2010/index.htm
index 01b0b9588..7725a5698 100644
--- a/doc/html/slurm_ug_2010/index.htm
+++ b/doc/html/slurm_ug_2010/index.htm
@@ -38,7 +38,7 @@
       </p>
       <p>This international event is opened to everyone who wants to :
 	<ul>
-	  <li>Learn more about <a href="https://computing.llnl.gov/linux/slurm/" target="_blank">SLURM</a>, 
+	  <li>Learn more about <a href="http://www.schedmd.com/slurmdocs/" target="_blank">SLURM</a>, 
 	    a highly scalable Resource Manager</li>
 	  <li>Share their knowledge and experience with other users and administrators</li>
 	  <li>Get detailed informations about the latest features and developments</li>
@@ -63,7 +63,7 @@
       <div id="right"><span class="ucrlnum">21 June 2010&nbsp;&nbsp;</span></div>
     </div>
     <div id="footer2">
-      <div id="left2"><img src="https://computing.llnl.gov/linux/slurm/sponsors.gif" width="129" height="30" border="0" usemap="#Map2"></div>
+      <div id="left2"><img src="http://www.schedmd.com/slurmdocs/sponsors.gif" width="129" height="30" border="0" usemap="#Map2"></div>
       <div id="center2"><a href="https://www.llnl.gov/" target="_blank" class="footer">Lawrence Livermore National Laboratory</a><br />
         <span class="smalltextblue">7000 East Avenue &#8226; Livermore, CA 94550</span></div>
       <div id="right2"><span class="smalltextblue">Operated by Lawrence Livermore National Security, LLC, for the</span>
diff --git a/doc/html/slurm_ug_2010/registration.htm b/doc/html/slurm_ug_2010/registration.htm
index 123146b09..01cf03a23 100644
--- a/doc/html/slurm_ug_2010/registration.htm
+++ b/doc/html/slurm_ug_2010/registration.htm
@@ -91,7 +91,7 @@
       <div id="right"><span class="ucrlnum">21 June 2010&nbsp;&nbsp;</span></div>
     </div>
     <div id="footer2">
-      <div id="left2"><img src="https://computing.llnl.gov/linux/slurm/sponsors.gif" width="129" height="30" border="0" usemap="#Map2"></div>
+      <div id="left2"><img src="http://www.schedmd.com/slurmdocs/sponsors.gif" width="129" height="30" border="0" usemap="#Map2"></div>
       <div id="center2"><a href="https://www.llnl.gov/" target="_blank" class="footer">Lawrence Livermore National Laboratory</a><br />
         <span class="smalltextblue">7000 East Avenue &#8226; Livermore, CA 94550</span></div>
       <div id="right2"><span class="smalltextblue">Operated by Lawrence Livermore National Security, LLC, for the</span>
diff --git a/doc/html/slurm_ug_2010/submission.htm b/doc/html/slurm_ug_2010/submission.htm
index bda1cd0b5..30ac9315c 100644
--- a/doc/html/slurm_ug_2010/submission.htm
+++ b/doc/html/slurm_ug_2010/submission.htm
@@ -112,7 +112,7 @@
       <div id="right"><span class="ucrlnum">21 June 2010&nbsp;&nbsp;</span></div>
     </div>
     <div id="footer2">
-      <div id="left2"><img src="https://computing.llnl.gov/linux/slurm/sponsors.gif" width="129" height="30" border="0" usemap="#Map2"></div>
+      <div id="left2"><img src="http://www.schedmd.com/slurmdocs/sponsors.gif" width="129" height="30" border="0" usemap="#Map2"></div>
       <div id="center2"><a href="https://www.llnl.gov/" target="_blank" class="footer">Lawrence Livermore National Laboratory</a><br />
         <span class="smalltextblue">7000 East Avenue &#8226; Livermore, CA 94550</span></div>
       <div id="right2"><span class="smalltextblue">Operated by Lawrence Livermore National Security, LLC, for the</span>
diff --git a/doc/html/slurm_ug_agenda.shtml b/doc/html/slurm_ug_agenda.shtml
new file mode 100644
index 000000000..8546a0f29
--- /dev/null
+++ b/doc/html/slurm_ug_agenda.shtml
@@ -0,0 +1,462 @@
+<!--#include virtual="header.txt"-->
+
+      <a href="http://www.bull.com" target="_blank"><img src="bull.jpg" style="float: right;" border="0"></a></p>
+
+      <h1>Slurm User Group Meeting 2011</h1>
+
+      <p>Hosted by <a href="http://www.bull.com">Bull</a>
+
+      <h1>Agenda</h1>
+
+      <p>
+	The 2011 SLURM User Group Meeting will be held on September 22 and 23
+	in Phoenix, Arizona and will be hosted by Bull.
+	On September 22 there will be two parallel tracks of tutorials meeting in separate rooms.
+	One set of tutorials will be for users and the other will be for system administrators.
+	There will be a series of technical presentations on September 23.
+	The <a href="#schedule">Schedule</a> and <a href="#abstracts">Abstracts</a>
+	are shown below.
+      </p>
+
+      <h2>Hotel Information</h2>
+	<p>The meeting will be held at
+	<a href="http://embassysuites1.hilton.com/en_US/es/hotel/PHXNOES-Embassy-Suites-Phoenix-North-Arizona/index.do">Embassy Suites Phoenix - North</a>
+	2577 West Greenway Road, Phoenix, Arizona, USA (Phone: 1-602-375-1777 Fax: 1-602-375-4012).
+	You may book your reservations on line at
+	<a href="http://embassysuites1.hilton.com/en_US/es/hotel/PHXNOES-Embassy-Suites-Phoenix-North-Arizona/index.do">Embassy Suites Phoenix - North</a><p></p>
+
+	<p>Please reference Bull when making your reservations to receive a $79/room rate.</p>
+
+      <h2>Directions and Transportation</h2>
+	<p>From Phoenix Sky Harbor Airport, take I-10 west to I-17 North.
+	Follow I-17 to the Greenway Road, exit 211 approximately 15 miles.
+	Exit and turn right, 1/8th of a mile on the right is the hotel entrance.</p>
+        <p><a href="http://embassysuites1.hilton.com/en_US/es/hotel/PHXNOES-Embassy-Suites-Phoenix-North-Arizona/directions.do;jsessionid=DDD31DD6EFFAF2D32299955C321976F3.etc83">
+	View all directions, map, and airport information</a></p>
+
+      <h2>Contact</h2>
+	<p>If you need further information about the event, or the
+	registration protocols, contact the
+        <a href="mailto:Nancy.Kritkausky@bull.com?subject=Informations">
+	<b>Slurm User Group 2011</b></a> organizers.<br>
+
+
+      <h2>Registration</h2>
+	<p>Please <a href="slurm_ug_registration.html">register</a> online no later
+	than August 22.</p>
+
+      <a name="schedule"><h1>Schedule</h1></a>
+
+      <h2>September 22: User Tutorials.</h2>
+
+      <table width="100%" border=1 cellspacing=0 cellpadding=0>
+	
+	<tr>
+	  <th width="15%">Time</th>
+	  <th width="15%">Theme</th>
+	  <th width="25%">Speaker</th>
+	  <th width="45%">Title</th>
+	</tr>
+
+	<tr>
+	  <td width="15%" bgcolor="#F0F1C9">08:30 - 09:00</td>
+	  <td width="85%" colspan="3" bgcolor="#F0F1C9">&nbsp;Registration</td>
+	</tr>
+
+	<tr>
+	  <td width="15%">09:00 - 10:30</td>
+	  <td width="15%">&nbsp;User Tutorial #1</td>
+	  <td width="25%">&nbsp;Don Albert and Rod Schultz (Bull)</td>
+	  <td width="45%">&nbsp;SLURM: Beginners Usage</td>
+	</tr>
+
+	<tr>
+	  <td width="15%" bgcolor="#F0F1C9">10:30 - 11:00</td>
+	  <td width="85%" colspan="3" bgcolor="#F0F1C9">&nbsp;Coffee break</td>
+	</tr>
+
+	<tr>
+	  <td width="15%">11:00 - 12:30</td>
+	  <td width="15%">&nbsp;User Tutorial #2</td>
+	  <td width="25%">&nbsp;Bill Brophy, Rod Schultz, Yiannis Georgiou (Bull)</td>
+	  <td width="45%">&nbsp;SLURM: Advanced Usage</td>
+	</tr>
+
+	<tr>
+	  <td width="15%" bgcolor="#F0F1C9">12:30 - 14:00</td>
+	  <td width="85%" colspan="3" bgcolor="#F0F1C9">&nbsp;Lunch at conference center</td>
+	</tr>
+
+	<tr>
+	  <td width="15%">14:00 - 15:30</td>
+	  <td width="15%">&nbsp;User Tutorial #3</td>
+	  <td width="25%">&nbsp;Martin Perry and Yiannis Georgiou (Bull)</td>
+	  <td width="45%">&nbsp;Resource Management for multicore/multi-threaded usage</td>
+	</tr>
+
+	<tr>
+	  <td width="15%" bgcolor="#F0F1C9">15:30 - 16:00</td>
+	  <td width="85%" colspan="3" bgcolor="#F0F1C9">&nbsp;Coffee break</td>
+	</tr>
+	
+	<tr>
+	  <td width="15%">16:00 - 17:00</td>
+	  <td width="15%">&nbsp;Question and Answer</td>
+	  <td width="25%">&nbsp;Danny Auble and Morris Jette (SchedMD)</td>
+	  <td width="45%">&nbsp;Get your questions answered by the developers</td>
+	</tr>
+
+      </table>
+
+      <h2>September 22: System Administrator Tutorials.</h2>
+
+      <table width="100%" border=1 cellspacing=0 cellpadding=0>
+	
+	<tr>
+	  <th width="15%">Time</th>
+	  <th width="15%">Theme</th>
+	  <th width="25%">Speaker</th>
+	  <th width="45%">Title</th>
+	</tr>
+
+	<tr>
+	  <td width="15%" bgcolor="#F0F1C9">08:30 - 09:00</td>
+	  <td width="85%" colspan="3" bgcolor="#F0F1C9">&nbsp;Registration</td>
+	</tr>
+
+	<tr>
+	  <td width="15%">09:00 - 10:30</td>
+	  <td width="15%">&nbsp;Admin Tutorial #1</td>
+	  <td width="25%">&nbsp;David Egolf and Bill Brophy (Bull)</td>
+	  <td width="45%">&nbsp;SLURM High Availability</td>
+	</tr>
+
+	<tr>
+	  <td width="15%" bgcolor="#F0F1C9">10:30 - 11:00</td>
+	  <td width="85%" colspan="3" bgcolor="#F0F1C9">&nbsp;Coffee break</td>
+	</tr>
+
+	<tr>
+	  <td width="15%">11:00 - 12:30</td>
+	  <td width="15%">&nbsp;Admin Tutorial #2</td>
+	  <td width="25%">&nbsp;Dan Rusak (Bull)</td>
+	  <td width="45%">&nbsp;Power Management / sview</td>
+	</tr>
+
+	<tr>
+	  <td width="15%" bgcolor="#F0F1C9">12:30 - 14:00</td>
+	  <td width="85%" colspan="3" bgcolor="#F0F1C9">&nbsp;Lunch at conference center</td>
+	</tr>
+
+	<tr>
+	  <td width="15%">14:00 - 15:30</td>
+	  <td width="15%">&nbsp;Admin Tutorial #3</td>
+	  <td width="25%">&nbsp;Don Albert and Rod Schultz (Bull)</td>
+	  <td width="45%">&nbsp;Accounting, limits and Priorities configurations</td>
+	</tr>
+
+	<tr>
+	  <td width="15%" bgcolor="#F0F1C9">15:30 - 16:00</td>
+	  <td width="85%" colspan="3" bgcolor="#F0F1C9">&nbsp;Coffee break</td>
+	</tr>
+	
+	<tr>
+	  <td width="15%">16:00 - 17:30</td>
+	  <td width="15%">&nbsp;Admin Tutorial #4</td>
+	  <td width="25%">&nbsp;Matthieu Hautreux (CEA), Yiannis Georgiou and Martin Perry (Bull)</td>
+	  <td width="45%">&nbsp;Scalability, Scheduling and Task placement</td>
+	</tr>
+
+      </table>
+
+      <h2>September 23: Technical Session</h2>
+
+      <table width="100%" border=1 cellspacing=0 cellpadding=0>
+	
+	<tr>
+	  <th width="15%">Time</th>
+	  <th width="15%">Theme</th>
+	  <th width="25%">Speaker</th>
+	  <th width="45%">Title</th>
+	</tr>
+	
+	<tr>
+	  <td width="15%" bgcolor="#F0F1C9">08:30 - 09:00</td>
+	  <td width="85%" colspan="3" bgcolor="#F0F1C9">&nbsp;Registration</td>
+	</tr>
+	
+	<tr>
+	  <td width="15%" rowspan="4">09:00 - 10:40</td>
+	  <td width="85%" colspan="3">&nbsp;Welcome</td>
+	</tr>
+	
+	<tr>
+	  <td width="15%">&nbsp;Keynote</td>
+	  <td width="25%">&nbsp;William Kramer (NCSA)</td>
+	  <td width="45%">&nbsp;Challenges and Opportunities for Exascale Resource Management and how Today's Petascale Systems are Guiding the Way</td>
+	</tr>
+	<tr>
+	  <td width="15%">&nbsp;Session #1</td>
+	  <td width="25%">&nbsp;Matthieu Hautreux (CEA)</td>
+	  <td width="45%">&nbsp;SLURM at CEA</td>
+	</tr>
+	<tr>
+	  <td width="15%">&nbsp;Session #2</td>
+	  <td width="25%">&nbsp;Don Lipari (LLNL)</td>
+	  <td width="45%">&nbsp;LLNL site report</td>
+	</tr>
+
+	<tr>
+	  <td width="15%" bgcolor="#F0F1C9">10:40 - 11:00</td>
+	  <td width="85%" colspan="3" bgcolor="#F0F1C9">&nbsp;Coffee break</td>
+	</tr>
+	
+	<tr>
+	  <td width="15%" rowspan="3">11:00 - 12:30</td>
+	  <td width="15%">&nbsp;Session #3</td>
+	  <td width="25%">&nbsp;Alejandro Lucero Palau (BSC)</td>
+	  <td width="45%">&nbsp;SLURM Simulator</td>
+	</tr>
+	<tr>
+	  <td width="15%">&nbsp;Session #4</td>
+	  <td width="25%">&nbsp;Danny Auble (SchedMD)</td>
+	  <td width="45%">&nbsp;SLURM operation on IBM BlueGene/Q</td>
+	</tr>
+	<tr>
+	  <td width="15%">&nbsp;Session #5</td>
+	  <td width="25%">&nbsp;Morris Jette (SchedMD)</td>
+	  <td width="45%">&nbsp;SLURM operation on Cray XT and XE</td>
+	</tr>
+
+	<tr>
+	  <td width="15%" bgcolor="#F0F1C9">12:30 - 14:00</td>
+	  <td width="85%" colspan="3" bgcolor="#F0F1C9">&nbsp;Lunch at conference center</td>
+	</tr>
+
+	<tr>
+	  <td width="15%" rowspan="3">14:00 - 15:30</td>
+	  <td width="15%">&nbsp;Session #6</td>
+	  <td width="25%">&nbsp;Mariusz Mamo&#324;ski (Pozna&#324; University)</td>
+	  <td width="45%">&nbsp;Introduction to SLURM DRMAA</td>
+	</tr>
+	<tr>
+	  <td width="15%">&nbsp;Session #7</td>
+	  <td width="25%">&nbsp;Robert Stober, Sr. (Bright Computing)</td>
+	  <td width="45%">&nbsp;Bright Cluster Manager & SLURM: Benefits of Seamless Integration</td>
+	</tr>
+	<tr>
+	  <td width="15%">&nbsp;Session #8</td>
+	  <td width="25%">&nbsp;Morris Jette (SchedMD)</td>
+	  <td width="45%">&nbsp;Proposed Design for Job Step Management in User Space</td>
+	</tr>
+
+	<tr>
+	  <td width="15%" bgcolor="#F0F1C9">15:30 - 16:00</td>
+	  <td width="85%" colspan="3" bgcolor="#F0F1C9">&nbsp;Coffee break</td>
+	</tr>
+
+	<tr>
+	  <td width="15%" rowspan="3">16:00 - 17:30</td>
+	  <td width="15%">&nbsp;Session #9</td>
+	  <td width="25%">&nbsp;Don Lipari (LLNL)</td>
+	  <td width="45%">&nbsp;Proposed Design for Enhanced Enterprise-wide Scheduling</td>
+	</tr>
+
+	<tr>
+	  <td width="15%">&nbsp;Session #10</td>
+	  <td width="25%">&nbsp;Danny Auble and Morris Jette (SchedMD)</td>
+	  <td width="45%">&nbsp;SLURM Version 2.3 and plans for future releases</td>
+	</tr>
+
+	<tr>
+	  <td width="85%" colspan="3">&nbsp;Open discussion, feature requests, etc.</td>
+	</tr>
+
+      </table>
+
+      <br><br>
+      <a name="abstracts"><h1>Abstracts</h1></a>
+
+	<h2>User Tutorial #1</h2>
+	SLURM Beginners Usage<br>
+	Don Albert and Rod Schultz (Bull)
+	<ul>
+	<li>Simple use of commands (submission/monitoring/result collection)</li>
+	<li>Reservations</li>
+	<li>Use of accounting and reporting</li>
+	<li>Scheduling techniques for smaller response time (setting of walltime for backfill , etc)</li>
+	</ul>
+
+	<h2>User Tutorial #2</h2>
+	SLURM Advanced Usage<br>
+	Bill Brophy, Rod Schultz, Yiannis Georgiou (Bull)
+	<ul>
+	<li>MPI jobs</li>
+	<li>Checkpoint/Restart (BLCR or application level)</li>
+	<li>Preemption  / Gang Scheduling Usage</li>
+	<li>Dynamic allocations (growing/shrinking)</li>
+	<li>Grace Time Delay with Preemption</li>
+	</ul>
+
+	<h2>User Tutorial #3</h2>
+	Resource Management for multicore/multi-threaded usage<br>
+	Martin Perry and Yiannis Georgiou (Bull)
+	<ul>
+	<li>CPU allocation</li>
+	<li>CPU/tasks distribution</li>
+	<li>Task binding</li>
+	<li>Internals of the allocation procedures</li>
+	</ul>
+
+
+	<h2>Administrator Tutorial #1</h2>
+	SLURM High Availability<br>
+	David Egolf and Bill Brophy (Bull)
+	<ul>
+	<li>How to set up the High Availability SLURM</li>
+	<li>Event logging with striggers</li>
+	</ul>
+
+	<h2>Administrator Tutorial #2</h2>
+	Power Management / Sview<br>
+	Dan Rusak (Bull)
+	<ul>
+	<li>Power Management configuration</li>
+	<li>sview presentation</li>
+	</ul>
+
+	<h2>Administrator Tutorial #3</h2>
+	Accounting, limits and Priorities configurations<br>
+	Don Albert and Rod Schultz (Bull)
+	<ul>
+	<li>Accounting with slurmdbd configuration</li>
+	<li>Multifactor job priorities with examples considering all different factors</li>
+	<li>QOS configuration</li>
+	<li>Fairsharing setting</li>
+	</ul>
+
+	<h2>Administrator Tutorial #4</h2>
+	Scalability, Scheduling and Task placement<br>
+	Matthieu Hautreux (CEA), Yiannis Georgiou and Martin Perry (Bull)
+	<ul>
+	<li>High Throughput Computing</li>
+	<li>Topology constraints config</li>
+	<li>Generic Resources and GPUs config</li>
+	<li>Task Placement with Cgroups</li>
+	</ul>
+
+	<h2> Keynote Speaker</h2>
+	Challenges and Opportunities for Exascale Resource Management and how
+	Today's Petascale Systems are Guiding the Way<br>
+	William Kramer (NCSA)<br><br>
+	Resource management challenges currently experienced on the Blue Waters
+	computer will be described. These experiences will be extended to describe
+	the additional challenges faced in exascale and trans-petascale systems.
+
+	<h2>Session #1</h2>
+	CEA Site report<br>
+	Matthieu Hautreux (CEA)<br><br>
+	Evolutions and feedback from Tera100. SLURM on Curie, the PRACE second Tier-0 
+	system that is planned to be installed by the end of the year in a new facility 
+	hosted at CEA. Curie will be a 1.6 Petaflop system from Bull.
+
+	<h2>Session #2</h2>
+	LLNL site report<br>
+	Don Lipari (LLNL)<br><br>
+	Don Lipari will provide an overview of the batch scheduling systems in use 
+	at LLNL and an overview on how they are managed.
+
+	<h2>Session #3</h2>
+	SLURM Simulator<br>
+	Alejandro Lucero Palau (BSC)<br><br>
+	Batch scheduling for high performance cluster installations has two main goals:
+	1) to keep the whole machine working at full capacity at all times, and 
+	2) to respect priorities avoiding lower priority jobs jeopardizing higher 
+	priority ones. Usually, batch schedulers allow different policies with 
+	several variables to be tuned by policy. Other features like special job
+	requests, reservations or job preemption increase the complexity for achiev-
+	ing a fine-tuned algorithm. A local decision for a specific job can change
+	the full scheduling for a high number of jobs and what can be thought
+	as logical within a short term could make no sense for a long trace mea-
+	sured in weeks or months. Although it is possible to extract algorithms
+	from batch scheduling software to make simulations of large job traces,
+	this is not the ideal approach since scheduling is not an isolated part of
+	this type of tools and replicating same environment requires an important
+	effort plus a high maintenance cost. We present a method for obtaining a
+	special mode of operation for a real production-ready scheduling software,
+	SLURM, where we can simulate execution of real job traces to evaluate
+	impact of scheduling policies and policy tuning.
+
+	<h2>Session #4</h2>
+	SLURM Operation on IBM BlueGene/Q<br>
+	Danny Auble (SchedMD)<br><br>
+	SLURM version 2.3 supports IBM BlueGene/Q. This presentation will report the 
+	design and operation of SLURM with respect to BlueGene/Q systems.
+
+	<h2>Session #5</h2>
+	SLURM Operation on Cray XT and XE systems<br>
+	Morris Jette (SchedMD)<br><br>
+	SLURM version 2.3 supports Cray XT and XE systems running over Cray's ALPS
+	(Application Level Placement Scheduler) resource manager. This presentation 
+	will discuss the design and operation of SLURM with respect to Cray systems.
+
+	<h2>Session #6</h2>
+	Introduction to SLURM DRMAA<br>
+	Mariusz Mamo&#324;ski (Pozna&#324; University)<br><br>
+	DRMAA or Distributed Resource Management Application API is a high-level
+	Open Grid Forum API specification for the submission and control of jobs
+	in a Grid architecture.
+
+	<h2>Session #7</h2>
+	Bright Cluster Manager & SLURM: Benefits of Seamless Integration<br>
+	Robert Stober, Sr. (Bright Computing)<br><br>
+	Bright Cluster Manager, tightly integrated with SLURM, simplifies HPC
+	cluster installation and management while boosting system throughput. Bright
+	automatically installs, configures and deploys SLURM so that clusters are
+	ready to use in minutes rather than days. Bright provides extensive and
+	extensible monitoring and management through its intuitive Bright Cluster
+	Manager GUI, powerful cluster management shell, and customizable web-based
+	user portal.
+	Additional integration benefits include sampling, analysis and visualization
+	of all key SLURM metrics from within the Bright GUI, automatic head node
+	failover, and extensive pre-job health checking capability. Regarding the
+	latter, say good-bye to the black hole node syndrome: Bright plus SLURM
+	effectively prevent this productivity-killing problem by identifying and
+	sidelining problematic nodes before the job is run.
+
+	<h2>Session #8</h2>
+	Proposed Design for Job Step Management in User Space<br>
+	Morris Jette (SchedMD)<br><br>
+	SLURM currently creates and manages job steps using SLURM's control daemon,
+	slurmctld. Since some user jobs create thousands of job steps, the management 
+	of those job steps accounts for most of slurmctld's work. It is possible to 
+	move job step management from slurmctld into user space to improve SLURM 
+	scalability and performance. A possible implementation of this will be 
+	presented.
+
+	<h2>Session #9</h2>
+	Proposed Design for Enhanced Enterprise-wide Scheduling<br>
+	Don Lipari (LLNL)<br><br>
+	SLURM currently supports the ability to submit and status jobs between
+	computers at site, however the current design has some limitations. When a job
+	is submitted with several possible computers usable for its execution, the
+	job is routed to the computer on which it is expected to start earliest.
+	Changes in the workload or system failures could make moving the job to another
+	computer result in faster initiation, but that is currently impossible. SLURM
+	is also unable to support dependencies between jobs executing on different
+	computers. The design of a SLURM meta-scheduler with enhanced enterprise-wide
+	scheduling capabilities will be presented.
+
+	<h2>Session #10</h2>
+	Contents of SLURM Version 2.3 and plans for future releases<br>
+	Danny Auble and Morris Jette (SchedMD)<br><br>
+	An overview of the changes SLURM Version 2.3 will be presented along with 
+	current plans for future releases.
+
+	<h2>Open Discussion</h2>
+	All meeting attendees will be invited to provide input with respect to 
+	SLURM's design and development work.
+	We also invite proposals for hosting the SLURM User Group Meeting in 2012.
+
+<!--#include virtual="footer.txt"-->
+
diff --git a/doc/html/slurm_ug_cfp.shtml b/doc/html/slurm_ug_cfp.shtml
new file mode 100644
index 000000000..3fa75ef12
--- /dev/null
+++ b/doc/html/slurm_ug_cfp.shtml
@@ -0,0 +1,41 @@
+<!--#include virtual="header.txt"-->
+
+<h1>CALL  FOR  SUBMISSIONS</h1>
+<p>SLURM User Group Meeting 2011<br>
+September 22-23, 2011<br>
+Phoenix, Arizona, USA</p>
+
+<p>You are invited to submit an abstract of a presentation or tutorial
+to be given at the SLURM User Group Meeting 2011. This event will be
+sponsored and organized by <a href="http://www.bull.com">Bull</a> and
+will be held in Phoenix, Arizona, USA on September 22 and 23, 2011.</p>
+
+<p>This international event is open to everyone who wants to:
+<ul>
+<li>Learn more about <a href="http://www.schedmd.com/slurmdocs/">
+SLURM</a>, a highly scalable Resource Manager and Job Scheduler</li>
+<li>Share their knowledge and experience with other users and administrators</li>
+<li>Get detailed information about the latest features and developments</li>
+<li>Share requirements and discuss future developments</li>
+</ul></p>
+
+<p>Everyone who wants to present their own usage, developments, site report,
+or tutorial about SLURM is invited to send a short abstract including
+the presentation's expected duration to
+<a href="mailto:slurm_user_group@lists.llnl.gov">slurm_user_group@lists.llnl.gov</a>.</p>
+
+<p>IMPORTANT DATES:<br>
+April 30, 2011:      Submission of abstracts<br>
+May 31, 2011:        Notification of acceptance<br>
+September 22, 2011:  SLURM User Group Meeting 2011 (tutorials)<br>
+September 23, 2011:  SLURM User Group Meeting 2011<br>
+</p>
+
+<p>Program Committee:<br>
+Danny Auble (Lawrence Livermore National Laboratory)<br>
+Yiannis Georgiou (Bull)<br>
+Matthieu Hautreux (CEA, French Atomic and Alternative Energies Commission)<br>
+Morris Jette (Lawrence Livermore National Laboratory)<br>
+Nancy Kritkausky (Bull)</p>
+
+<!--#include virtual="footer.txt"-->
diff --git a/doc/html/slurm_ug_registration.shtml b/doc/html/slurm_ug_registration.shtml
new file mode 100644
index 000000000..339cfacc9
--- /dev/null
+++ b/doc/html/slurm_ug_registration.shtml
@@ -0,0 +1,111 @@
+<!--#include virtual="header.txt"-->
+
+      <a href="http://www.bull.com" target="_blank"><img src="bull.jpg" style="float: right;" border="0"></a></p>
+
+      <h1>Slurm User Group Meeting 2011</h1>
+
+      <p>Hosted by <a href="http://www.bull.com">Bull</a>
+
+      <h1>Registration</h1>
+      <p>The conference cost is $250 per person that can be paid on the morning
+	of the conference. This includes presentations, tutorials and lunch on
+	both days.</p>
+
+      <h2>Hotel Information</h2>
+	<p>The meeting will be held at
+	<a href="http://embassysuites1.hilton.com/en_US/es/hotel/PHXNOES-Embassy-Suites-Phoenix-North-Arizona/index.do">Embassy Suites Phoenix - North</a>
+	2577 West Greenway Road, Phoenix, Arizona, USA (Phone: 1-602-375-1777 Fax: 1-602-375-4012).
+	You may book your reservations on line at
+	<a href="http://embassysuites1.hilton.com/en_US/es/hotel/PHXNOES-Embassy-Suites-Phoenix-North-Arizona/index.do">Embassy Suites Phoenix - North</a><p></p>
+
+	<p>Please reference Bull when making your reservations to receive a $79/room rate.</p>
+
+      <h2>Directions and Transportation</h2>
+	<p>From Phoenix Sky Harbor Airport, take I-10 west to I-17 North.
+	Follow I-17 to the Greenway Road, exit 211 approximately 15 miles.
+	Exit and turn right, 1/8th of a mile on the right is the hotel entrance.</p>
+        <p><a href="http://embassysuites1.hilton.com/en_US/es/hotel/PHXNOES-Embassy-Suites-Phoenix-North-Arizona/directions.do;jsessionid=DDD31DD6EFFAF2D32299955C321976F3.etc83">
+	View all directions, map, and airport information</a></p>
+
+      <h2>Contact</h2>
+	<p>If you need further information about the event, or the
+	registration protocols, contact the
+        <a href="mailto:Nancy.Kritkausky@bull.com?subject=Informations">
+	<b>Slurm User Group 2011</b></a> organizers.<br>
+
+      <h2>Agenda</h2>
+	<p>A preliminary <a href="slurm_ug_agenda.html">agenda</a> is available online.
+
+      <h2>Attendee Information</h2>
+	<p>Please fill in the following form to register for the <b>Slurm User Group 2011</b>.
+	You should receive a confirmation of this initial stage within a few days and
+	final confirmation by <b>August 26, 2011</b>.</p>
+
+      <FORM METHOD=POST ENCTYPE="text/plain" ACTION="mailto:slurm_user_group@lists.llnl.gov?subject=SLURM User Group Registration">
+	<PRE>
+	  <table width="100%" border=0 cellspacing=0 cellpadding=0>
+	    <tr>
+	      <td width="10%">First Name</td>
+	      <td width="80%"><INPUT NAME=FirstName size=30></td>
+	    </tr>
+	    <tr>
+	      <td width="10%">Last Name</td>
+	      <td width="80%"><INPUT NAME=LastName size=30></td>
+	    </tr>
+	    <tr>
+	      <td width="10%">Title</td>
+	      <td width="80%"><INPUT NAME=Title size=30></td>
+	    </tr>
+	    <tr>
+	      <td width="10%">Email</td>
+	      <td width="80%"><INPUT NAME=Email size=30></td>
+	    </tr>
+	    <tr>
+	      <td width="10%">Company/Organization</td>
+	      <td width="80%"><INPUT NAME=Company size=30></td>
+	    </tr>
+	    <tr>
+	      <td width="10%">Street</td>
+	      <td width="80%"><INPUT NAME=Street size=30></td>
+	    </tr>
+	    <tr>
+	      <td width="10%">City</td>
+	      <td width="80%"><INPUT NAME=City size=30></td>
+	    </tr>
+	    <tr>
+	      <td width="10%">State/Province</td>
+	      <td width="80%"><INPUT NAME=State size=30></td>
+	    </tr>
+	    <tr>
+	      <td width="10%">Postal Code</td>
+	      <td width="80%"><INPUT NAME=ZipCode size=30></td>
+	    </tr>
+	    <tr>
+	      <td width="10%">Country</td>
+	      <td width="80%"><INPUT NAME=Country size=30></td>
+	    </tr>
+	    <tr>
+	      <td width="10%">Tutorial Attendance</td>
+	      <td width="10%"><INPUT TYPE="radio" NAME="Tutorial" VALUE="Administrator">Administrator</td>
+	    </tr>
+	    <tr>
+	      <td width="10%"> </td>
+	      <td width="10%"><INPUT TYPE="radio" NAME="Tutorial" VALUE="User">User</td>
+	    </tr>
+	    <tr>
+	      <td width="10%"> </td>
+	      <td width="10%"><INPUT TYPE="radio" NAME="Tutorial" VALUE="None">None</td>
+	    </tr>
+	    <tr>
+	      <td width="10%">Comments</td>
+	      <td width="80%"><TEXTAREA NAME=Comments rows=5 cols=40></TEXTAREA></td>
+	    </tr>
+	    <tr>
+	      <td width="30%"><INPUT TYPE=SUBMIT VALUE=Submit></td>
+	      <td width="30%"><INPUT TYPE=RESET VALUE=Clear></td>
+	    </tr>
+	  </table>
+	</PRE>
+      </FORM>
+
+<!--#include virtual="footer.txt"-->
diff --git a/doc/html/sun_const.shtml b/doc/html/sun_const.shtml
index 25830b50b..9edcf7a9f 100644
--- a/doc/html/sun_const.shtml
+++ b/doc/html/sun_const.shtml
@@ -113,7 +113,7 @@ Two examples of SLURM configuration files are shown below:</p>
 # Configuration parameters removed here
 
 # Automatic orders nodes following a Hilbert curve
-NodeName=DEFAULT Procs=8 RealMemory=2048 State=Unknown
+NodeName=DEFAULT CPUs=8 RealMemory=2048 State=Unknown
 NodeName=tux[0000x3337]
 PartitionName=debug Nodes=tux[0000x3337] Default=Yes State=UP
 </pre>
@@ -125,7 +125,7 @@ PartitionName=debug Nodes=tux[0000x3337] Default=Yes State=UP
 # Configuration parameters removed here
 
 # Manual ordering of nodes following a space-filling curve
-NodeName=DEFAULT Procs=8 RealMemory=2048 State=Unknown
+NodeName=DEFAULT CPUs=8 RealMemory=2048 State=Unknown
 NodeName=tux[0000-0007]  #  8 nodes at 0,0,0
 NodeName=tux[0010-001B]  # 12 nodes at 0,0,1
 NodeName=tux[0100-0107]  #  8 nodes at 0,1,0
diff --git a/doc/html/taskplugins.shtml b/doc/html/taskplugins.shtml
index de4735ac4..e727c02db 100644
--- a/doc/html/taskplugins.shtml
+++ b/doc/html/taskplugins.shtml
@@ -21,6 +21,7 @@ The actual mechanism used to task binding is dependent upon the available
 infrastructure as determined by the "configure" program when SLURM is built
 and the value of the <b>TaskPluginParam</b> as defined in the <b>slurm.conf</b>
 (SLURM configuration file).</li>
+<li><b>cgroup</b>&#151;Use Linux cgroups for binding tasks to resources.</li>
 <li><b>none</b>&#151;A plugin that implements the API without providing any
 services. This is the default behavior and provides no task binding.</li>
 </ul>
@@ -175,11 +176,24 @@ data structure definition.</p>
 On failure, the plugin should return SLURM_ERROR and set the errno to an
 appropriate value to indicate the reason for failure.</p>
 
+<p class="commandline">int task_post_step (slurmd_job_t *job);</p>
+<p style="margin-left:.2in"><b>Description</b>: task_post_step() is called
+after termination of all the tasks of the job step.
+Executed by the <b>slurmstepd</b> program as user root.</p>
+<p style="margin-left:.2in"><b>Arguments</b>:
+<span class="commandline">job</span>&nbsp;&nbsp;&nbsp;(input)
+pointer to the job which has terminated.
+See <b>src/slurmd/slurmstepd/slurmstepd_job.h</b> for the
+data structure definition.</p>
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful.
+On failure, the plugin should return SLURM_ERROR and set the errno to an
+appropriate value to indicate the reason for failure.</p>
+
 <h2>Versioning</h2>
-<p> This document describes version 1 of the SLURM Task Plugin API.
+<p> This document describes version 2 of the SLURM Task Plugin API.
 Future releases of SLURM may revise this API.</p>
 <p class="footer"><a href="#top">top</a></p>
 
-<p style="text-align:center;">Last modified 19 February 2009</p>
+<p style="text-align:center;">Last modified 29 April 2011</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/team.shtml b/doc/html/team.shtml
index 699732e0f..4a1b1d07e 100644
--- a/doc/html/team.shtml
+++ b/doc/html/team.shtml
@@ -1,27 +1,22 @@
 <!--#include virtual="header.txt"-->
 
 <h1>SLURM Team</h1>
-<p>SLURM development has been a joint effort of
-<a href="https://www.llnl.gov/">Lawrence Livermore National Laboratory</a> (LLNL),
-<a href="http://www.schedmd.com/">SchedMD</a>,
-<a href="http://www.hp.com/">HP</a>,
-<a href="http://www.bull.com/">Bull</a>,
-Linux NetworX and many other contributors.
-
-<p>The current SLURM development staff includes: </p>
+<p>SLURM development has been a joint effort of many companies and
+organizations. The current SLURM development staff includes: </p>
 <ul>
-<li>Morris Jette (LLNL, Project leader)</li>
-<li>Danny Auble (LLNL)</li>
-<li>Don Lipari (LLNL)</li>
+<li>Danny Auble (SchedMD)</li>
+<li>Morris Jette (SchedMD)</li>
 </ul>
 
 <p> SLURM contributers include: </p>
 <ul>
+<li>Ramiro Alba (Centre Tecnol&ograve;gic de Transfer&egrave;ncia de Calor, Spain)</li>
 <li>Amjad Majid Ali (Colorado State University)</li>
 <li>P&auml;r Andersson (National Supercomputer Centre, Sweden)</li>
 <li>Don Albert (Bull)</li>
 <li>Ernest Artiaga (Barcelona Supercomputer Center, Spain)</li>
 <li>Susanne Balle (HP)</li>
+<li>Ralph Bean (Rochester Institute of Technology)</li>
 <li>Anton Blanchard (Samba)</li>
 <li>Janne Blomqvist (Aalto University, Finland)</li>
 <li>David Bremer (LLNL)</li>
@@ -31,6 +26,8 @@ Linux NetworX and many other contributors.
 <li>Daniel Christians (HP)</li>
 <li>Gilles Civario (Bull)</li>
 <li>Chuck Clouston (Bull)</li>
+<li>Yuri D'Elia (Center for Biomedicine, EURAC Research, Italy)</li>
+<li>Carles Fenoy (Barcelona Supercomputer Center, Spain)</li>
 <li>Joseph Donaghy (LLNL)</li>
 <li>Chris Dunlap (LLNL)</li>
 <li>Joey Ekstrom (LLNL/Bringham Young University)</li>
@@ -40,6 +37,7 @@ Linux NetworX and many other contributors.
 <li>Didier Gazen (Laboratoire d'Aerologie, France)</li>
 <li>Raphael Geissert (Debian)</li>
 <li>Yiannis Georgiou (Bull)</li>
+<li>Andriy Grytsenko (Massive Solutions Limited, Ukraine)</li>
 <li>Mark Grondona (LLNL)</li>
 <li>Takao Hatazaki (HP, Japan)</li>
 <li>Matthieu Hautreux (CEA, France)</li>
@@ -50,12 +48,13 @@ Linux NetworX and many other contributors.
 <li>Klaus Joas (University Karlsruhe, Germany)</li>
 <li>Greg Johnson (LANL)</li>
 <li>Jason King (LLNL)</li>
-<li>Aaron Knister (Environmental Protection Agency)</li>
+<li>Aaron Knister (Environmental Protection Agency, UMBC)</li>
 <li>Nancy Kritkausky (Bull)</li>
 <li>Roman Kurakin (Institute of Natural Science and Ecology, Russia)</li>
-<li>Eric Lin (Bull)</li>
 <li>Puenlap Lee (Bull)</li>
+<li>Dennis Leepow</li>
 <li>Bernard Li (Genome Sciences Centre, Canada)</li>
+<li>Eric Lin (Bull)</li>
 <li>Donald Lipari (LLNL)</li>
 <li>Steven McDougall (SiCortex)</li>
 <li>Donna Mecozzi (LLNL)</li>
@@ -66,33 +65,37 @@ Linux NetworX and many other contributors.
 <li>Bryan O'Sullivan (Pathscale)</li>
 <li>Gennaro Oliva (Institute of High Performance Computing and
     Networking, Italy)</li>
+<li>Alejandro Lucero Palau (Barcelona Supercomputer Center, Spain)</li>
 <li>Daniel Palermo (HP)</li>
 <li>Dan Phung (LLNL/Columbia University)</li>
 <li>Ashley Pittman (Quadrics, UK)</li>
 <li>Vijay Ramasubramanian (University of Maryland)</li>
 <li>Krishnakumar Ravi[KK] (HP)</li>
 <li>Petter Reinholdtsen (University of Oslo, Norway)</li>
-<li>Gerrit Renker (Swiss National Computer Centre)</li>
+<li>Gerrit Renker (Swiss National Supercomputing Centre)</li>
 <li>Andy Riebs (HP)</li>
 <li>Asier Roa (Barcelona Supercomputer Center, Spain)</li>
+<li>Andy Roosen (University of Delaware)</li>
 <li>Miguel Ros (Barcelona Supercomputer Center, Spain)</li>
 <li>Beat Rubischon (DALCO AG, Switzerland)</li>
 <li>Dan Rusak (Bull)</li>
 <li>Eygene Ryabinkin (Kurchatov Institute, Russia)</li>
 <li>Federico Sacerdoti (D.E. Shaw)</li>
 <li>Rod Schultz (Bull)</li>
-<li>Tyler Strickland</li>
+<li>Tyler Strickland (University of Florida)</li>
 <li>Jeff Squyres (LAM MPI)</li>
 <li>Prashanth Tamraparni (HP, India)</li>
 <li>Jimmy Tang (Trinity College, Ireland)</li>
 <li>Kevin Tew (LLNL/Bringham Young University)</li>
 <li>Adam Todorski (Rensselaer Polytechnic Institute)</li>
+<li>Stephen Trofinoff (Swiss National Supercomputing Centre)</li>
 <li>Nathan Weeks (Iowa State University)</li>
 <li>Tim Wickberg (Rensselaer Polytechnic Institute)</li>
+<li>Ramiro Brito Willmersdorf (Universidade Federal de Pernambuco, Brazil)</li>
 <li>Jay Windley (Linux NetworX)</li>
 <li>Anne-Marie Wunderlin (Bull)</li>
 </ul>
 
-<p style="text-align:center;">Last modified 15 October 2010</p>
+<p style="text-align:center;">Last modified 24 October 2011</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/testimonials.shtml b/doc/html/testimonials.shtml
index c839de978..e5941156b 100644
--- a/doc/html/testimonials.shtml
+++ b/doc/html/testimonials.shtml
@@ -2,24 +2,6 @@
 
 <h1>Customer Testimonials</h1>
 
-<i>
-"Today our largest IBM computers, BlueGene/L and Purple, ranked #1 and #3
-respectively on the November 2005 Top500 list, use SLURM.
-This decision reduces large job launch times from tens of minutes to seconds.
-This effectively provides
-us with millions of dollars with of additional compute resources without
-additional cost.  It also allows our computational scientists to use their
-time more effectively.  SLURM is scalable to very large numbers of processors,
-another essential ingredient for use at LLNL. This means larger computer
-systems can be used than otherwise possible with a commensurate increase in
-the scale of problems that can be solved. SLURM's scalability has eliminated
-resource management from being a concern for computers of any foreseeable
-size. It is one of the best things to happen to massively parallel computing."
-<br><br>
-Dona Crawford, Associate Directory Lawrence Livermore National Laboratory
-</i>
-<HR SIZE=4>
-
 <i>
 "Thank you for SLURM! It is one of the nicest pieces of free software
 for managing HPC clusters we have come across in a long time.
@@ -40,6 +22,24 @@ Aaron Knister, Environmental Protection Agency
 </i>
 <HR SIZE=4>
 
+<i>
+"Today our largest IBM computers, BlueGene/L and Purple, ranked #1 and #3
+respectively on the November 2005 Top500 list, use SLURM.
+This decision reduces large job launch times from tens of minutes to seconds.
+This effectively provides
+us with millions of dollars worth of additional compute resources without
+additional cost.  It also allows our computational scientists to use their
+time more effectively.  SLURM is scalable to very large numbers of processors,
+another essential ingredient for use at LLNL. This means larger computer
+systems can be used than otherwise possible with a commensurate increase in
+the scale of problems that can be solved. SLURM's scalability has eliminated
+resource management from being a concern for computers of any foreseeable
+size. It is one of the best things to happen to massively parallel computing."
+<br><br>
+Dona Crawford, Associate Director, Lawrence Livermore National Laboratory
+</i>
+<HR SIZE=4>
+
 <i>
 "We are extremely pleased with SLURM and strongly recommend it to others
 because it is mature, the developers are highly responsive and
@@ -133,6 +133,6 @@ Bill Celmaster, XC Program Manager, Hewlett-Packard Company
 </i>
 <HR SIZE=4>
 
-<p style="text-align:center;">Last modified 8 September 2009</p>
+<p style="text-align:center;">Last modified 8 April 2011</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/topology.shtml b/doc/html/topology.shtml
index 8c06e2d75..1fd746dd9 100644
--- a/doc/html/topology.shtml
+++ b/doc/html/topology.shtml
@@ -1,6 +1,6 @@
 <!--#include virtual="header.txt"-->
 
-<h1>Topology</h1>
+<h1>Topology Guide</h1>
 
 <p>SLURM version 2.0 can be configured to support topology-aware resource
 allocation to optimize job performance.
@@ -14,7 +14,7 @@ Jobs are allocated resources on a best-fit basis.
 For larger jobs, this minimizes the number of sets of consecutive nodes
 allocated to the job.</p>
 
-<h2>Three-dimension Topology</h2>
+<a name="topo_3d"></a><h2>Three-dimension Topology</h2>
 
 <p>Some larger computers rely upon a three-dimensional torus interconnect.
 The IBM BlueGene computers is one example of this which has highly
@@ -48,6 +48,14 @@ on its underlying leaf switches using a best-fit algorithm.
 Use of this logic requires a configuration setting of
 <i>TopologyPlugin=topology/tree</i>.</p>
 
+<p>Note that SLURM uses a best-fit algorithm on the currently
+available resources. This may result in an allocation with
+more than the optimum number of switches. The user can request
+a maximum number of switches for the job as well as a
+maximum time willing to wait for that number using the <i>--switch</i>
+option with the salloc, sbatch and srun commands. The parameters can
+also be changed for pending jobs using the scontrol and squeue commands.</p>
+
 <p>At some point in the future SLURM code may be provided to
 gather network topology information directly.
 Now the network topology information must be included
@@ -124,6 +132,6 @@ SwitchName=s3 Nodes=tux[12-15]
 SwitchName=s4 Switches=s[0-3]
 </pre>
 
-<p style="text-align:center;">Last modified 20 August 2009</p>
+<p style="text-align:center;">Last modified 8 July 2011</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/topology_plugin.shtml b/doc/html/topology_plugin.shtml
index 0cf8ce1a7..3cd51fd58 100644
--- a/doc/html/topology_plugin.shtml
+++ b/doc/html/topology_plugin.shtml
@@ -7,7 +7,7 @@
 defines them.
 It is intended as a resource to programmers wishing to write their own
 SLURM topology plugin.
-This is version 100 of the API.</p>
+This is version 101 of the API.</p>
 
 <p>SLURM topology plugins are SLURM plugins that implement
 convey system topology information so that SLURM is able to
@@ -64,6 +64,12 @@ Functions which are not implemented should be stubbed.</p>
 <p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS or
 SLURM_ERROR on failure.</p>
 
+<p class="commandline">bool topo_generate_node_ranking(void)</p>
+<p style="margin-left:.2in"><b>Description</b>: Determine if this plugin will
+reorder the node records based upon each job's node rank field.</p>
+<p style="margin-left:.2in"><b>Returns</b>: true if node reordering is supported,
+false otherwise.</p>
+
 <p class="commandline">int topo_get_node_addr(char* node_name, char** paddr, char** ppatt);</p>
 <p style="margin-left:.2in"><b>Description</b>: Get Topology address of a given node.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:<br>
@@ -80,10 +86,10 @@ in the hierarchy is separated by a period. The final element will always be
 SLURM_ERROR on failure.</p>
 
 <h2>Versioning</h2>
-<p> This document describes version 100 of the SLURM topology API.
+<p> This document describes version 101 of the SLURM topology API.
 Future releases of SLURM may revise this API.</p>
 <p class="footer"><a href="#top">top</a></p>
 
-<p style="text-align:center;">Last modified 27 August 2009</p>
+<p style="text-align:center;">Last modified 13 January 2011</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/man/Makefile.am b/doc/man/Makefile.am
index b060ae610..b1e23bcce 100644
--- a/doc/man/Makefile.am
+++ b/doc/man/Makefile.am
@@ -53,6 +53,7 @@ man3_MANS = man3/slurm_hostlist_create.3 \
 	man3/slurm_delete_partition.3 \
 	man3/slurm_delete_reservation.3 \
 	man3/slurm_free_ctl_conf.3 \
+	man3/slurm_free_front_end_info_msg.3 \
 	man3/slurm_free_job_info_msg.3 \
 	man3/slurm_free_job_alloc_info_response_msg.3 \
 	man3/slurm_free_job_step_create_response_msg.3 \
@@ -72,6 +73,7 @@ man3_MANS = man3/slurm_hostlist_create.3 \
 	man3/slurm_get_rem_time.3 \
 	man3/slurm_get_select_jobinfo.3 \
 	man3/slurm_get_triggers.3 \
+	man3/slurm_init_update_front_end_msg.3 \
 	man3/slurm_init_job_desc_msg.3 \
 	man3/slurm_init_part_desc_msg.3 \
 	man3/slurm_init_resv_desc_msg.3 \
@@ -88,6 +90,7 @@ man3_MANS = man3/slurm_hostlist_create.3 \
 	man3/slurm_kill_job.3 \
 	man3/slurm_kill_job_step.3 \
 	man3/slurm_load_ctl_conf.3 \
+	man3/slurm_load_front_end.3 \
 	man3/slurm_load_job.3 \
 	man3/slurm_load_jobs.3 \
 	man3/slurm_load_node.3 \
@@ -99,6 +102,8 @@ man3_MANS = man3/slurm_hostlist_create.3 \
 	man3/slurm_pid2jobid.3 \
 	man3/slurm_ping.3 \
 	man3/slurm_print_ctl_conf.3 \
+	man3/slurm_print_front_end_info_msg.3 \
+	man3/slurm_print_front_end_table.3 \
 	man3/slurm_print_job_info.3 \
 	man3/slurm_print_job_info_msg.3 \
 	man3/slurm_print_job_step_info.3 \
@@ -120,6 +125,7 @@ man3_MANS = man3/slurm_hostlist_create.3 \
 	man3/slurm_signal_job.3 \
 	man3/slurm_signal_job_step.3 \
 	man3/slurm_slurmd_status.3 \
+	man3/slurm_sprint_front_end_table.3 \
 	man3/slurm_sprint_job_info.3 \
 	man3/slurm_sprint_job_step_info.3 \
 	man3/slurm_sprint_node_table.3 \
@@ -142,6 +148,7 @@ man3_MANS = man3/slurm_hostlist_create.3 \
 	man3/slurm_takeover.3 \
 	man3/slurm_terminate_job.3 \
 	man3/slurm_terminate_job_step.3 \
+	man3/slurm_update_front_end.3 \
 	man3/slurm_update_job.3 \
 	man3/slurm_update_node.3 \
 	man3/slurm_update_partition.3 \
@@ -150,6 +157,7 @@ man3_MANS = man3/slurm_hostlist_create.3 \
 
 man5_MANS = man5/bluegene.conf.5 \
 	man5/cgroup.conf.5 \
+	man5/cray.conf.5 \
 	man5/gres.conf.5 \
 	man5/slurm.conf.5 \
 	man5/slurmdbd.conf.5 \
diff --git a/doc/man/Makefile.in b/doc/man/Makefile.in
index 5aa61b159..52e710845 100644
--- a/doc/man/Makefile.in
+++ b/doc/man/Makefile.in
@@ -60,6 +60,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -70,6 +71,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -120,7 +122,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -157,6 +162,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -214,6 +220,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -249,6 +256,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
@@ -354,6 +362,7 @@ man3_MANS = man3/slurm_hostlist_create.3 \
 	man3/slurm_delete_partition.3 \
 	man3/slurm_delete_reservation.3 \
 	man3/slurm_free_ctl_conf.3 \
+	man3/slurm_free_front_end_info_msg.3 \
 	man3/slurm_free_job_info_msg.3 \
 	man3/slurm_free_job_alloc_info_response_msg.3 \
 	man3/slurm_free_job_step_create_response_msg.3 \
@@ -373,6 +382,7 @@ man3_MANS = man3/slurm_hostlist_create.3 \
 	man3/slurm_get_rem_time.3 \
 	man3/slurm_get_select_jobinfo.3 \
 	man3/slurm_get_triggers.3 \
+	man3/slurm_init_update_front_end_msg.3 \
 	man3/slurm_init_job_desc_msg.3 \
 	man3/slurm_init_part_desc_msg.3 \
 	man3/slurm_init_resv_desc_msg.3 \
@@ -389,6 +399,7 @@ man3_MANS = man3/slurm_hostlist_create.3 \
 	man3/slurm_kill_job.3 \
 	man3/slurm_kill_job_step.3 \
 	man3/slurm_load_ctl_conf.3 \
+	man3/slurm_load_front_end.3 \
 	man3/slurm_load_job.3 \
 	man3/slurm_load_jobs.3 \
 	man3/slurm_load_node.3 \
@@ -400,6 +411,8 @@ man3_MANS = man3/slurm_hostlist_create.3 \
 	man3/slurm_pid2jobid.3 \
 	man3/slurm_ping.3 \
 	man3/slurm_print_ctl_conf.3 \
+	man3/slurm_print_front_end_info_msg.3 \
+	man3/slurm_print_front_end_table.3 \
 	man3/slurm_print_job_info.3 \
 	man3/slurm_print_job_info_msg.3 \
 	man3/slurm_print_job_step_info.3 \
@@ -421,6 +434,7 @@ man3_MANS = man3/slurm_hostlist_create.3 \
 	man3/slurm_signal_job.3 \
 	man3/slurm_signal_job_step.3 \
 	man3/slurm_slurmd_status.3 \
+	man3/slurm_sprint_front_end_table.3 \
 	man3/slurm_sprint_job_info.3 \
 	man3/slurm_sprint_job_step_info.3 \
 	man3/slurm_sprint_node_table.3 \
@@ -443,6 +457,7 @@ man3_MANS = man3/slurm_hostlist_create.3 \
 	man3/slurm_takeover.3 \
 	man3/slurm_terminate_job.3 \
 	man3/slurm_terminate_job_step.3 \
+	man3/slurm_update_front_end.3 \
 	man3/slurm_update_job.3 \
 	man3/slurm_update_node.3 \
 	man3/slurm_update_partition.3 \
@@ -451,6 +466,7 @@ man3_MANS = man3/slurm_hostlist_create.3 \
 
 man5_MANS = man5/bluegene.conf.5 \
 	man5/cgroup.conf.5 \
+	man5/cray.conf.5 \
 	man5/gres.conf.5 \
 	man5/slurm.conf.5 \
 	man5/slurmdbd.conf.5 \
diff --git a/doc/man/man1/sacct.1 b/doc/man/man1/sacct.1
index 890d06498..ae9ebf300 100644
--- a/doc/man/man1/sacct.1
+++ b/doc/man/man1/sacct.1
@@ -44,7 +44,7 @@ Much of the data reported by \f3sacct\fP has been generated by
 the \f2wait3()\fP and \f2getrusage()\fP system calls. Some systems
 gather and report incomplete information for these calls;
 \f3sacct\fP reports values of 0 for this missing data. See your systems
-\f2getrusage(3)\fP man page for information about which data are
+\f2getrusage (3)\fP man page for information about which data are
 actually available on your system.
 .IP
 If \-\-dump is specified, the field selection options (\-\-brief,
@@ -126,21 +126,21 @@ Print a list of fields that can be specified with the \f3\-\-format\fP option.
 .ft 3
 Fields available:
 
-AllocCPUS      Account       AssocID       AveCPU
-AvePages       AveRSS        AveVMSize     BlockID
-Cluster        CPUTime       CPUTimeRAW    DerivedExitCode
-DerivedExitStr Elapsed       Eligible      End
-ExitCode       GID           Group         JobID
-JobName        Layout        MaxPages      MaxPagesNode
-MaxPagesTask   MaxRSS        MaxRSSNode    MaxRSSTask
-MaxVMSize      MaxVMSizeNode MaxVMSizeTask MinCPU
-MinCPUNode     MinCPUTask    NCPUS         NNodes
-NodeList       NTasks        Priority      Partition
-QOS            QOSRAW        ReqCPUS       Reserved
-ResvCPU        ResvCPURAW    Start         State
-Submit         Suspended     SystemCPU     Timelimit
-TotalCPU       UID           User          UserCPU
-WCKey          WCKeyID
+AllocCPUS       Account       AssocID       AveCPU
+AvePages        AveRSS        AveVMSize     BlockID
+Cluster         Comment       CPUTime       CPUTimeRAW
+DerivedExitCode Elapsed       Eligible      End
+ExitCode        GID           Group         JobID
+JobName         Layout        MaxPages      MaxPagesNode
+MaxPagesTask    MaxRSS        MaxRSSNode    MaxRSSTask
+MaxVMSize       MaxVMSizeNode MaxVMSizeTask MinCPU
+MinCPUNode      MinCPUTask    NCPUS         NNodes
+NodeList        NTasks        Priority      Partition
+QOS             QOSRAW        ReqCPUS       Reserved
+ResvCPU         ResvCPURAW    Start         State
+Submit          Suspended     SystemCPU     Timelimit
+TotalCPU        UID           User          UserCPU
+WCKey           WCKeyID
 
 .ft 1
 .fi
@@ -317,6 +317,9 @@ Job terminated due to failure of one or more allocated nodes.
 \f3PD  PENDING\fP
 Job is awaiting resource allocation.
 .TP
+\fBPR  PREEMPTED\fR
+Job terminated due to preemption.
+.TP
 \f3R   RUNNING\fP
 Job currently has an allocation.
 .TP
@@ -434,6 +437,13 @@ Block ID, applicable to BlueGene computers only.
 \f3cluster\fP
 Cluster name.
 
+.TP
+\f3Comment\fP
+The job's comment string when the AccountingStoreJobComment parameter
+in the slurm.conf file is set (or defaults) to YES.  The Comment
+string can be modified by invoking \f3sacctmgr modify job\fP or the
+specialized \f3sjobexitmod\fP command.
+
 .TP
 \f3cputime\fP
 Formatted number of cpu seconds a process was allocated.
@@ -451,12 +461,6 @@ process to terminate if it was terminated by a signal.  The
 DerivedExitCode can be modified by invoking \f3sacctmgr modify job\fP
 or the specialized \f3sjobexitmod\fP command.
 
-.TP
-\f3DerivedExitStr\fP
-The reason the job failed.  This string starts off as null.  The
-DerivedExitStr can be modified by invoking \f3sacctmgr modify job\fP
-or the specialized \f3sjobexitmod\fP command.
-
 .TP
 \f3elapsed\fP
 The jobs elapsed time.
@@ -660,15 +664,20 @@ Initiation time of the job in the same format as \f3end\fP.
 Displays the job status, or state.
 
 Output can be RUNNING, RESIZING, SUSPENDED, COMPLETED, CANCELLED, FAILED,
-TIMEOUT, or NODE_FAIL. If multiple job states are found for a single
-job (e.g. the job was requeued after a NODE_FAIL and then COMPLETED)
-then the last job state will be displayed followed by a "+".
+TIMEOUT, PREEMPTED or NODE_FAIL. If more information is available on the job state
+than will fit into the current field width (for example, the uid that CANCELLED
+a job) the state will be followed by a "+".  You can increase the size of
+the displayed state using the "%NUMBER" format modifier described earlier.
 
 .TP
 \f3submit\fP
 The  time and date stamp (in Universal Time Coordinated, UTC) the job
 was submitted.  The format of the output is identical to that of the end field.
 
+NOTE: If a job is requeued, the submit time is reset.  To obtain the
+original submit time it is necessary to use the \-D or \-\-duplicate option
+to display all duplicate entries for a job.
+
 .TP
 \f3suspended\fP
 How long the job was suspended for.
@@ -1196,7 +1205,7 @@ Copyright (C) 2008\-2009 Lawrence Livermore National Security. Produced at Lawre
 DISCLAIMER). CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
-For details, see <https://computing.llnl.gov/linux/slurm/>.
+For details, see <http://www.schedmd.com/slurmdocs/>.
 .LP
 SLURM is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
@@ -1217,4 +1226,5 @@ designate the job accounting log file that collects system job accounting.
 The default job accounting log file.
 By default, this file is set to read and write permission for root only.
 .SH "SEE ALSO"
-sstat(1), ps(1), srun(1), squeue(1), getrusage(2), time(2)
+\fBsstat\fR(1), \fBps\fR (1), \fBsrun\fR(1), \fBsqueue\fR(1),
+\fBgetrusage\fR (2), \fBtime\fR (2)
diff --git a/doc/man/man1/sacctmgr.1 b/doc/man/man1/sacctmgr.1
index c7fc68542..17cd414e2 100644
--- a/doc/man/man1/sacctmgr.1
+++ b/doc/man/man1/sacctmgr.1
@@ -34,7 +34,7 @@ commands.
 
 .TP
 \fB\-h\fR, \fB\-\-help\fR
-Print a help message describing the usage of \fBssacctmgr\fR.
+Print a help message describing the usage of \fBsacctmgr\fR.
 This is equivalent to the \fBhelp\fR command.
 
 .TP
@@ -203,7 +203,7 @@ Events like downed or draining nodes on clusters.
 .TP
 \fIjob\fR
 Job - but only two specific fields of the job: Derived Exit Code and
-Derived Exit String
+the Comment String
 
 .TP
 \fIqos\fR
@@ -242,6 +242,15 @@ priority. Can also be the string \fIparent\fR, this means that the
 parent association is used for fairshare. To clear a previously set
 value use the modify command with a new value of \-1.
 
+.TP
+\fIGraceTime\fP=<preemption grace time in seconds>
+Specifies, in units of seconds, the preemption grace time
+to be extended to a job which has been selected for preemption.
+The default value is zero, no preemption grace time is allowed on
+this QOS.
+.P
+NOTE: This value is only meaningful for QOS PreemptMode=CANCEL.
+
 .TP
 \fIGrpCPUMins\fP=<max cpu minutes>
 Maximum number of CPU minutes running jobs are able to be allocated in
@@ -875,11 +884,15 @@ the user's judgement of whether the job succeeded or failed.  The user
 can only modify the derived exit code of their own job.
 
 .TP
-\fIDerivedExitString\fP
-Initially NULL, the derived exit string can be populated after a job
-completes with a textual description of why the job succeeded or
-failed.  The user can only modify the derived exit string of their own
-job.
+\f3Comment\fP
+The job's comment string when the AccountingStoreJobComment parameter
+in the slurm.conf file is set (or defaults) to YES.  The user can only
+modify the comment string of their own job.
+
+.TP
+The \fIDerivedExitCode\fP and \f3Comment\fP fields are the only fields
+of a job record in the database that can be modified after job
+completion.
 
 .SH "LIST/SHOW JOB FORMAT OPTIONS"
 
@@ -901,12 +914,12 @@ any jobs submitted with this QOS that fall below the UsageThreshold
 will be held until their Fairshare Usage goes above the Threshold.
 .TP
 \fINoReserve\fP
-If set, and using backfill, jobs using this QOS will all be considered
-at the same level within this QOS meaning if a larger, higher priority
-job is unable to run a smaller job will run if possible even if the
-larger higher priority job will be delayed starting.
-NOTE: This could cause starvation on these larger jobs, but if that is
-ok, this flag most likely will increase utilization.
+If this flag is set and backfill scheduling is used, jobs using this QOS will
+not reserve resources in the backfill schedule's  map of resources allocated
+through time. This flag is intended for use with a QOS that may be preempted
+by jobs associated with all other QOS (e.g. use with a "standby" QOS). If the
+flag is used with a QOS which can not be preempted by all other QOS, it
+could result in starvation of larger jobs.
 .TP
 \fIPartitionMaxNodes\fP
 If set jobs using this QOS will be able to
@@ -921,6 +934,11 @@ If set jobs using this QOS will be able to
 override the requested partition's TimeLimit.
 .RE
 
+.TP
+\fIGraceTime\fP
+Preemption grace time to be extended to a job which has been 
+selected for preemption.
+
 .TP
 \fIGrpCPUMins\fP
 Maximum number of CPU minutes running jobs are able to be allocated in
@@ -962,6 +980,10 @@ Maximum number of CPU minutes each job is able to use.
 \fIMaxCPUs\fP
 Maximum number of CPUs each job is able to use.
 
+.TP
+\fIMaxCpusPerUser\fP
+Maximum number of CPUs each user is able to use.
+
 .TP
 \fIMaxJobs\fP
 Maximum number of jobs each user is allowed to run at one time.
@@ -970,6 +992,10 @@ Maximum number of jobs each user is allowed to run at one time.
 \fIMaxNodes\fP
 Maximum number of nodes each job is able to use.
 
+.TP
+\fIMaxNodesPerUser\fP
+Maximum number of nodes each user is able to use.
+
 .TP
 \fIMaxSubmitJobs\fP
 Maximum number of jobs pending or running state at any time per user.
@@ -992,7 +1018,9 @@ Mechanism used to preempt jobs of this QOS if the clusters \fIPreemptType\fP
 is configured to \fIpreempt/qos\fP.  The default preemption mechanism
 is specified by the cluster\-wide \fIPreemptMode\fP configuration parameter.
 Possible values are "Cluster" (meaning use cluster default), "Cancel",
-"Checkpoint", "Requeue" and "Suspend".
+"Checkpoint" and "Requeue".  This option is not compatible with
+PreemptMode=OFF or PreemptMode=SUSPEND (i.e. preempted jobs must be removed
+from the resources).
 
 .TP
 \fIPriority\fP
@@ -1021,6 +1049,13 @@ Display information with previously deleted data.
 \fIDescription\fP
 An arbitrary string describing a QOS.
 
+.TP
+\fIGraceTime\fP
+Preemption grace time to be extended to a job which has been
+selected for preemption in the format of hh:mm:ss.  The default
+value is zero, no preemption grace time is allowed on this partition.
+NOTE: This value is only meaningful for QOS PreemptMode=CANCEL.
+			  
 .TP
 \fIGrpCPUMins\fP
 Maximum number of CPU minutes running jobs are able to be allocated in
@@ -1086,6 +1121,11 @@ value of \-1.  (NOTE: This limit is not currently enforced in SLURM.
 You can still set this, but have to wait for future versions of SLURM
 before it is enforced.)
 
+.TP
+\fIMaxCpusPerUser\fP
+Maximum number of CPUs each user is able to use.
+To clear a previously set value use the modify command with a new value of \-1.
+
 .TP
 \fIMaxJobs\fP
 Maximum number of jobs each user is allowed to run at one time.
@@ -1096,6 +1136,11 @@ To clear a previously set value use the modify command with a new value of \-1.
 Maximum number of nodes each job is able to use.
 To clear a previously set value use the modify command with a new value of \-1.
 
+.TP
+\fIMaxNodesPerUser\fP
+Maximum number of nodes each user is able to use.
+To clear a previously set value use the modify command with a new value of \-1.
+
 .TP
 \fIMaxSubmitJobs\fP
 Maximum number of jobs pending or running state at any time per user.
@@ -1127,7 +1172,9 @@ Mechanism used to preempt jobs of this QOS if the clusters \fIPreemptType\fP
 is configured to \fIpreempt/qos\fP.  The default preemption mechanism
 is specified by the cluster\-wide \fIPreemptMode\fP configuration parameter.
 Possible values are "Cluster" (meaning use cluster default), "Cancel",
-"Checkpoint", "Requeue" and "Suspend".
+"Checkpoint" and "Requeue".  This option is not compatible with
+PreemptMode=OFF or PreemptMode=SUSPEND (i.e. preempted jobs must be removed
+from the resources).
 
 .TP
 \fIPriority\fP
@@ -1700,7 +1747,7 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
-For details, see <https://computing.llnl.gov/linux/slurm/>.
+For details, see <http://www.schedmd.com/slurmdocs/>.
 .LP
 SLURM is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
@@ -1713,5 +1760,5 @@ FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
 
 .SH "SEE ALSO"
-\fBslurm.conf\fR(5)
+\fBslurm.conf\fR(5),
 \fBslurmdbd\fR(8)
diff --git a/doc/man/man1/salloc.1 b/doc/man/man1/salloc.1
index d10e5d96f..9990122f1 100644
--- a/doc/man/man1/salloc.1
+++ b/doc/man/man1/salloc.1
@@ -1,4 +1,4 @@
-.TH "salloc" "1" "SLURM 2.2" "October 2010" "SLURM Commands"
+.TH "salloc" "1" "SLURM 2.3" "August 2011" "SLURM Commands"
 
 .SH "NAME"
 salloc \- Obtain a SLURM job allocation (a set of nodes), execute a command,
@@ -21,6 +21,11 @@ section). If no command is specified, then the value of
 \fBSallocDefaultCommand\fR is not set, then \fBsalloc\fR runs the
 user's default shell.
 
+The following document describes the influence of various options on the
+allocation of cpus to jobs and tasks. 
+.br
+http://www.schedmd.com/slurmdocs/cpu_management.html
+
 NOTE: The salloc logic includes support to save and restore the terminal line
 settings and is designed to be executed in the foreground. If you need to
 execute salloc in the background, set its standard input to some file, for
@@ -315,6 +320,11 @@ in some failed state (non-zero exit code, node failure, timed out, etc).
 This job can begin execution after the specified jobs have successfully
 executed (ran to completion with an exit code of zero).
 .TP
+\fBexpand:job_id\fR
+Resources allocated to this job should be used to expand the specified job.
+The job to expand must share the same QOS (Quality of Service) and partition.
+Gang scheduling of resources in the partition is also not supported.
+.TP
 \fBsingleton\fR
 This job can begin execution after any previously launched jobs
 sharing the same job name and user have terminated.
@@ -326,10 +336,11 @@ change directory to \fIpath\fR before beginning execution.
 
 .TP
 \fB\-\-exclusive\fR
-The job allocation cannot share nodes with other running jobs.  This is
-the oposite of \-\-share, whichever option is seen last on the command line
-will win.  (The default shared/exclusive behaviour depends on system
-configuration.)
+The job allocation can not share nodes with other running jobs.
+This is the opposite of \-\-share, whichever option is seen last
+on the command line will be used. The default shared/exclusive
+behavior depends on system configuration and the partition's \fBShared\fR
+option takes precedence over the job's option.
 
 .TP
 \fB\-F\fR, \fB\-\-nodefile\fR=<\fInode file\fR>
@@ -386,7 +397,7 @@ Examples of use include "\-\-gres=gpus:2*cpu,disk=40G" and "\-\-gres=help".
 \fB\-H, \-\-hold\fR
 Specify the job is to be submitted in a held state (priority of zero).
 A held job can now be released using scontrol to reset its priority
-(e.g. "\fIscontrol update jobid=<id> priority=1\fR".
+(e.g. "\fIscontrol release <job_id>\fR").
 
 .TP
 \fB\-h\fR, \fB\-\-help\fR
@@ -442,7 +453,9 @@ your command any time that the SLURM controller tells salloc that its job
 allocation has been revoked. The job allocation can be revoked for a
 couple of reasons: someone used \fBscancel\fR to revoke the allocation,
 or the allocation reached its time limit.  If you do not specify a signal
-name or number, the default signal is SIGTERM.
+name or number and SLURM is configured to signal the spawned command at job
+termination, the default signal is SIGHUP for interactive and SIGTERM for 
+non\-interactive sessions.
 
 .TP
 \fB\-k\fR, \fB\-\-no\-kill\fR
@@ -479,7 +492,8 @@ method (before the ":") controls the distribution of resources across
 nodes. The optional second distribution method (after the ":")
 controls the distribution of resources across sockets within a node.
 Note that with select/cons_res, the number of cpus allocated on each
-socket and node may be different. Refer to the mc_support.html document
+socket and node may be different. Refer to
+http://www.schedmd.com/slurmdocs/mc_support.html
 for more information on resource allocation, assignment of tasks to
 nodes, and binding of tasks to CPUs.
 .RS
@@ -516,11 +530,11 @@ followed by an optional specification of the task distribution scheme
 within a block of tasks and between the blocks of tasks.  For more
 details (including examples and diagrams), please see
 .br
-https://computing.llnl.gov/linux/slurm/mc_support.html
+http://www.schedmd.com/slurmdocs/mc_support.html
 .br
 and
 .br
-https://computing.llnl.gov/linux/slurm/dist_plane.html.
+http://www.schedmd.com/slurmdocs/dist_plane.html
 .TP
 .B arbitrary
 The arbitrary method of distribution will allocate processes in\-order
@@ -659,11 +673,9 @@ Specify a minimum number of logical cpus/processors per node.
 .TP
 \fB\-N\fR, \fB\-\-nodes\fR=<\fIminnodes\fR[\-\fImaxnodes\fR]>
 Request that a minimum of \fIminnodes\fR nodes be allocated to this job.
-The scheduler may decide to launch the job on more than \fIminnodes\fR nodes.
-A limit on the maximum node count may be specified with \fImaxnodes\fR
-(e.g. "\-\-nodes=2\-4").  The minimum and maximum node count may be the
-same to specify a specific number of nodes (e.g. "\-\-nodes=2\-2" will ask
-for two and ONLY two nodes).
+A maximum node count may also be specified with \fImaxnodes\fR.
+If only one number is specified, this is used as both the minimum and
+maximum node count.
 The partition's node limits supersede those of the job.
 If a job's node limits are outside of the range permitted for its
 associated partition, the job will be left in a PENDING state.
@@ -679,6 +691,9 @@ behavior is to allocate enough nodes to satisfy the requirements of
 the \fB\-n\fR and \fB\-c\fR options.
 The job will be allocated as many nodes as possible within the range specified
 and without delaying the initiation of the job.
+The node count specification may include a numeric value followed by a suffix
+of "k" (multiplies numeric value by 1,024) or "m" (multiplies numeric value by
+1,048,576).
 
 .TP
 \fB\-n\fR, \fB\-\-ntasks\fR=<\fInumber\fR>
@@ -783,7 +798,7 @@ allowing more than one task per processor.  However no more than
 .TP
 \fB\-p\fR, \fB\-\-partition\fR=<\fIpartition_names\fR>
 Request a specific partition for the resource allocation.  If not specified,
-the default behaviour is to allow the slurm controller to select the default
+the default behavior is to allow the slurm controller to select the default
 partition as designated by the system administrator. If the job can use more
 than one partition, specify their names in a comma separate list and the one
 offering earliest initiation will be used.
@@ -806,9 +821,12 @@ Allocate resources for the job from the named reservation.
 
 .TP
 \fB\-s\fR, \fB\-\-share\fR
-The job allocation can share nodes with other running jobs.  (The default
-shared/exclusive behaviour depends on system configuration.)
-This may result the allocation being granted sooner than if the \-\-share
+The job allocation can share nodes with other running jobs.
+This is the opposite of \-\-exclusive, whichever option is seen last
+on the command line will be used. The default shared/exclusive
+behavior depends on system configuration and the partition's \fBShared\fR
+option takes precedence over the job's option.
+This option may result in the allocation being granted sooner than if the \-\-share
 option was not set and allow higher system utilization, but application
 performance will likely suffer due to competition for resources within a node.
 
@@ -830,6 +848,18 @@ Restrict node selection to nodes with at least the specified number of
 sockets.  See additional information under \fB\-B\fR option above when
 task/affinity plugin is enabled.
 
+.TP
+\fB\-\-switch\fR=<\fIcount\fR>[@<\fImax\-time\fR>]
+When a tree topology is used, this defines the maximum count of switches
+desired for the job allocation and optionally the maximum time to wait
+for that number of switches. If SLURM finds an allocation containing more
+switches than the count specified, the job remains pending until it either finds
+an allocation with desired switch count or the time limit expires. By default
+there is no switch count limit and there is no delay in starting the job.
+The job's maximum time delay may be limited by the system administrator using
+the \fBSchedulerParameters\fR configuration parameter with the
+\fBmax_switch_wait\fR parameter option.
+
 .TP
 \fB\-t\fR, \fB\-\-time\fR=<\fItime\fR>
 Set a limit on the total run time of the job allocation.  If the
@@ -949,6 +979,7 @@ SLURM will normally allocate a TORUS if possible for a given geometry.
 If running on a BGP system and wanting to run in HTC mode (only for 1
 midplane and below).  You can use HTC_S for SMP, HTC_D for Dual, HTC_V
 for virtual node mode, and HTC_L for Linux mode.
+A comma separated list of connection types may be specified, one for each dimension.
 
 .TP
 \fB\-g\fR, \fB\-\-geometry\fR=<\fIXxYxZ\fR>
@@ -1039,6 +1070,9 @@ Same as \fB\-I, \-\-immediate\fR
 \fBSALLOC_JOBID\fR
 Same as \fB\-\-jobid\fR
 .TP
+\fBSALLOC_KILL_CMD\fR
+Same as \fB\-K\fR, \fB\-\-kill\-command\fR
+.TP
 \fBSALLOC_MEM_BIND\fR
 Same as \fB\-\-mem_bind\fR
 .TP
@@ -1164,7 +1198,7 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
-For details, see <https://computing.llnl.gov/linux/slurm/>.
+For details, see <http://www.schedmd.com/slurmdocs/>.
 .LP
 SLURM is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
@@ -1178,4 +1212,5 @@ details.
 
 .SH "SEE ALSO"
 .LP
-sinfo(1), sattach(1), sbatch(1), squeue(1), scancel(1), scontrol(1), slurm.conf(5), sched_setaffinity(2), numa(3)
+\fBsinfo\fR(1), \fBsattach\fR(1), \fBsbatch\fR(1), \fBsqueue\fR(1), \fBscancel\fR(1), \fBscontrol\fR(1),
+\fBslurm.conf\fR(5), \fBsched_setaffinity\fR (2), \fBnuma\fR (3)
diff --git a/doc/man/man1/sattach.1 b/doc/man/man1/sattach.1
index 0054a8882..17508a344 100644
--- a/doc/man/man1/sattach.1
+++ b/doc/man/man1/sattach.1
@@ -66,7 +66,7 @@ Display SLURM version number and exit.
 
 .TP
 \fB\-v\fR, \fB\-\-verbose\fR
-Increase the verbosity of sattach's informational messages.  Multiple \-v's
+Increase the verbosity of sattach's informational messages.  Multiple \fB\-v\fR's
 will further increase sattach's verbosity.
 
 .SH "INPUT ENVIRONMENT VARIABLES"
@@ -96,7 +96,7 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
-For details, see <https://computing.llnl.gov/linux/slurm/>.
+For details, see <http://www.schedmd.com/slurmdocs/>.
 .LP
 SLURM is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
@@ -110,4 +110,6 @@ details.
 
 .SH "SEE ALSO"
 .LP
-sinfo(1), salloc(1), sbatch(1), squeue(1), scancel(1), scontrol(1), slurm.conf(5), sched_setaffinity(2), numa(3)
+\fBsinfo\fR(1), \fBsalloc\fR(1), \fBsbatch\fR(1), \fBsqueue\fR(1),
+\fBscancel\fR(1), \fBscontrol\fR(1),
+\fBslurm.conf\fR(5), \fBsched_setaffinity\fR (2), \fBnuma\fR (3)
diff --git a/doc/man/man1/sbatch.1 b/doc/man/man1/sbatch.1
index 0cdfa03e1..da6709d2b 100644
--- a/doc/man/man1/sbatch.1
+++ b/doc/man/man1/sbatch.1
@@ -1,4 +1,4 @@
-.TH "sbatch" "1" "SLURM 2.2" "October 2010" "SLURM Commands"
+.TH "sbatch" "1" "SLURM 2.3" "August 2011" "SLURM Commands"
 
 .SH "NAME"
 sbatch \- Submit a batch script to SLURM.
@@ -25,6 +25,11 @@ When the job allocation is finally granted for the batch script, SLURM
 runs a single copy of the batch script on the first node in the set of
 allocated nodes.
 
+The following document describes the influence of various options on the
+allocation of cpus to jobs and tasks. 
+.br
+http://www.schedmd.com/slurmdocs/cpu_management.html
+
 .SH "OPTIONS"
 .LP
 
@@ -324,6 +329,11 @@ in some failed state (non-zero exit code, node failure, timed out, etc).
 This job can begin execution after the specified jobs have successfully
 executed (ran to completion with an exit code of zero).
 .TP
+\fBexpand:job_id\fR
+Resources allocated to this job should be used to expand the specified job.
+The job to expand must share the same QOS (Quality of Service) and partition.
+Gang scheduling of resources in the partition is also not supported.
+.TP
 \fBsingleton\fR
 This job can begin execution after any previously launched jobs
 sharing the same job name and user have terminated.
@@ -332,7 +342,7 @@ sharing the same job name and user have terminated.
 .TP
 \fB\-D\fR, \fB\-\-workdir\fR=<\fIdirectory\fR>
 Set the working directory of the batch script to \fIdirectory\fR before
-it it executed.
+it is executed.
 
 .TP
 \fB\-e\fR, \fB\-\-error\fR=<\fIfilename pattern\fR>
@@ -345,10 +355,11 @@ See the \fB\-\-input\fR option for filename specification options.
 
 .TP
 \fB\-\-exclusive\fR
-The job allocation cannot share nodes with other running jobs.  This is
-the oposite of \-\-share, whichever option is seen last on the command line
-will win.  (The default shared/exclusive behaviour depends on system
-configuration.)
+The job allocation can not share nodes with other running jobs.
+This is the opposite of \-\-share, whichever option is seen last
+on the command line will be used. The default shared/exclusive
+behavior depends on system configuration and the partition's \fBShared\fR
+option takes precedence over the job's option.
 
 .TP
 \fB\-\-export\fR=<\fIenvironment variables | ALL | NONE\fR>
@@ -418,7 +429,7 @@ Examples of use include "\-\-gres=gpus:2*cpu,disk=40G" and "\-\-gres=help".
 \fB\-H, \-\-hold\fR
 Specify the job is to be submitted in a held state (priority of zero).
 A held job can now be released using scontrol to reset its priority
-(e.g. "\fIscontrol update jobid=<id> priority=1\fR".
+(e.g. "\fIscontrol release <job_id>\fR").
 
 .TP
 \fB\-h\fR, \fB\-\-help\fR
@@ -534,7 +545,8 @@ method (before the ":") controls the distribution of resources across
 nodes. The optional second distribution method (after the ":")
 controls the distribution of resources across sockets within a node.
 Note that with select/cons_res, the number of cpus allocated on each
-socket and node may be different. Refer to the mc_support.html document
+socket and node may be different. Refer to
+http://www.schedmd.com/slurmdocs/mc_support.html
 for more information on resource allocation, assignment of tasks to
 nodes, and binding of tasks to CPUs.
 .RS
@@ -571,16 +583,16 @@ followed by an optional specification of the task distribution scheme
 within a block of tasks and between the blocks of tasks.  For more
 details (including examples and diagrams), please see
 .br
-https://computing.llnl.gov/linux/slurm/mc_support.html
+http://www.schedmd.com/slurmdocs/mc_support.html
 .br
 and
 .br
-https://computing.llnl.gov/linux/slurm/dist_plane.html.
+http://www.schedmd.com/slurmdocs/dist_plane.html
 .TP
 .B arbitrary
 The arbitrary method of distribution will allocate processes in\-order
 as listed in file designated by the environment variable
-SLURM_HOSTFILE.  If this variable is listed it will over ride any
+SLURM_HOSTFILE.  If this variable is listed it will override any
 other method specified.  If not set the method will default to block.
 Inside the hostfile must contain at minimum the number of hosts
 requested and be one per line or comma separated.  If specifying a
@@ -714,11 +726,9 @@ Specify a minimum number of logical cpus/processors per node.
 .TP
 \fB\-N\fR, \fB\-\-nodes\fR=<\fIminnodes\fR[\-\fImaxnodes\fR]>
 Request that a minimum of \fIminnodes\fR nodes be allocated to this job.
-The scheduler may decide to launch the job on more than \fIminnodes\fR nodes.
-A limit on the maximum node count may be specified with \fImaxnodes\fR
-(e.g. "\-\-nodes=2\-4").  The minimum and maximum node count may be the
-same to specify a specific number of nodes (e.g. "\-\-nodes=2\-2" will ask
-for two and ONLY two nodes).
+A maximum node count may also be specified with \fImaxnodes\fR.
+If only one number is specified, this is used as both the minimum and
+maximum node count.
 The partition's node limits supersede those of the job.
 If a job's node limits are outside of the range permitted for its
 associated partition, the job will be left in a PENDING state.
@@ -734,6 +744,9 @@ behavior is to allocate enough nodes to satisfy the requirements of
 the \fB\-n\fR and \fB\-c\fR options.
 The job will be allocated as many nodes as possible within the range specified
 and without delaying the initiation of the job.
+The node count specification may include a numeric value followed by a suffix
+of "k" (multiplies numeric value by 1,024) or "m" (multiplies numeric value by
+1,048,576).
 
 .TP
 \fB\-n\fR, \fB\-\-ntasks\fR=<\fInumber\fR>
@@ -844,7 +857,7 @@ The default value is specified by the system configuration parameter
 .TP
 \fB\-p\fR, \fB\-\-partition\fR=<\fIpartition_names\fR>
 Request a specific partition for the resource allocation.  If not specified,
-the default behaviour is to allow the slurm controller to select the default
+the default behavior is to allow the slurm controller to select the default
 partition as designated by the system administrator. If the job can use more
 than one partition, specify their names in a comma separate list and the one
 offering earliest initiation will be used.
@@ -863,7 +876,7 @@ options may not be supported on some systems):
 All limits listed below
 .TP
 \fBAS\fR
-The maximum address space for a processes
+The maximum address space for a process
 .TP
 \fBCORE\fR
 The maximum size of core file
@@ -875,7 +888,9 @@ The maximum amount of CPU time
 The maximum size of a process's data segment
 .TP
 \fBFSIZE\fR
-The maximum size of files created
+The maximum size of files created. Note that if the user sets FSIZE to less
+than the current size of the slurmd.log, job launches will fail with 
+a 'File size limit exceeded' error.
 .TP
 \fBMEMLOCK\fR
 The maximum size that may be locked into memory
@@ -919,9 +934,12 @@ Allocate resources for the job from the named reservation.
 
 .TP
 \fB\-s\fR, \fB\-\-share\fR
-The job allocation can share nodes with other running jobs.  (The default
-shared/exclusive behaviour depends on system configuration.)
-This may result in the allocation being granted sooner than if the \-\-share
+The job allocation can share nodes with other running jobs.
+This is the opposite of \-\-exclusive, whichever option is seen last
+on the command line will be used. The default shared/exclusive
+behavior depends on system configuration and the partition's \fBShared\fR
+option takes precedence over the job's option.
+This option may result in the allocation being granted sooner than if the \-\-share
 option was not set and allow higher system utilization, but application
 performance will likely suffer due to competition for resources within a node.
 
@@ -943,6 +961,18 @@ Restrict node selection to nodes with at least the specified number of
 sockets.  See additional information under \fB\-B\fR option above when
 task/affinity plugin is enabled.
 
+.TP
+\fB\-\-switch\fR=<\fIcount\fR>[@<\fImax\-time\fR>]
+When a tree topology is used, this defines the maximum count of switches
+desired for the job allocation and optionally the maximum time to wait
+for that number of switches. If SLURM finds an allocation containing more
+switches than the count specified, the job remains pending until it either finds
+an allocation with desired switch count or the time limit expires. By default
+there is no switch count limit and there is no delay in starting the job.
+The job's maximum time delay may be limited by the system administrator using
+the \fBSchedulerParameters\fR configuration parameter with the
+\fBmax_switch_wait\fR parameter option.
+
 .TP
 \fB\-t\fR, \fB\-\-time\fR=<\fItime\fR>
 Set a limit on the total run time of the job allocation.  If the
@@ -1070,6 +1100,7 @@ SLURM will normally allocate a TORUS if possible for a given geometry.
 If running on a BGP system and wanting to run in HTC mode (only for 1
 midplane and below).  You can use HTC_S for SMP, HTC_D for Dual, HTC_V
 for virtual node mode, and HTC_L for Linux mode.
+A comma separated list of connection types may be specified, one for each dimension.
 
 .TP
 \fB\-g\fR, \fB\-\-geometry\fR=<\fIXxYxZ\fR>
@@ -1339,7 +1370,7 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
-For details, see <https://computing.llnl.gov/linux/slurm/>.
+For details, see <http://www.schedmd.com/slurmdocs/>.
 .LP
 SLURM is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
@@ -1353,4 +1384,5 @@ details.
 
 .SH "SEE ALSO"
 .LP
-sinfo(1), sattach(1), salloc(1), squeue(1), scancel(1), scontrol(1), slurm.conf(5), sched_setaffinity(2), numa(3)
+\fBsinfo\fR(1), \fBsattach\fR(1), \fBsalloc\fR(1), \fBsqueue\fR(1), \fBscancel\fR(1), \fBscontrol\fR(1),
+\fBslurm.conf\fR(5), \fBsched_setaffinity\fR (2), \fBnuma\fR (3)
diff --git a/doc/man/man1/sbcast.1 b/doc/man/man1/sbcast.1
index ea54d0da6..ad52b326b 100644
--- a/doc/man/man1/sbcast.1
+++ b/doc/man/man1/sbcast.1
@@ -109,7 +109,7 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
-For details, see <https://computing.llnl.gov/linux/slurm/>.
+For details, see <http://www.schedmd.com/slurmdocs/>.
 .LP
 SLURM is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
diff --git a/doc/man/man1/scancel.1 b/doc/man/man1/scancel.1
index d3b1d0847..d99194564 100644
--- a/doc/man/man1/scancel.1
+++ b/doc/man/man1/scancel.1
@@ -1,4 +1,4 @@
-.TH SCANCEL "1" "January 2011" "scancel 2.2" "Slurm components"
+.TH SCANCEL "1" "January 2011" "scancel 2.3" "Slurm components"
 
 .SH "NAME"
 scancel \- Used to signal jobs or job steps that are under the control of Slurm.
@@ -33,6 +33,8 @@ for details.
 \fB-\-ctld\fR
 Send the job signal request to the slurmctld daemon rather than directly to the
 slurmd daemons. This increases overhead, but offers better fault tolerance.
+This is the default behavior on architectures using front end nodes (e.g.
+BlueGene and Cray computers) or when the \fB\-\-clusters\fR option is used.
 
 .TP
 \fB\-\-help\fR
@@ -223,7 +225,7 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
-For details, see <https://computing.llnl.gov/linux/slurm/>.
+For details, see <http://www.schedmd.com/slurmdocs/>.
 .LP
 SLURM is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
@@ -236,4 +238,4 @@ FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
 
 .SH "SEE ALSO"
-\fBslurm_kill_job\fR(3), \fBslurm_kill_job_step\fR(3)
+\fBslurm_kill_job\fR (3), \fBslurm_kill_job_step\fR (3)
diff --git a/doc/man/man1/scontrol.1 b/doc/man/man1/scontrol.1
index 5999834c2..6ea932a9b 100644
--- a/doc/man/man1/scontrol.1
+++ b/doc/man/man1/scontrol.1
@@ -1,4 +1,4 @@
-.TH SCONTROL "1" "November 2010" "scontrol 2.2" "Slurm components"
+.TH SCONTROL "1" "July 2011" "scontrol 2.3" "Slurm components"
 
 .SH "NAME"
 scontrol \- Used view and modify Slurm configuration and state.
@@ -41,7 +41,8 @@ By default, neither partitions that are configured as hidden nor those partition
 unavailable to user's group will be displayed (i.e. this is the default behavior).
 .TP
 \fB\-M\fR, \fB\-\-clusters\fR=<\fIstring\fR>
-Cluster to issue commands to.
+The cluster to issue commands to. Only one cluster name may be specified.
+
 .TP
 \fB\-o\fR, \fB\-\-oneliner\fR
 Print information one line per record.
@@ -123,6 +124,10 @@ If set, resume job on the same nodes are previously used.
 Valid with the \fIrestart\fP option only.
 .RE
 
+.TP
+\fBcluster\fR \fICLUSTER_NAME\fP
+The cluster to issue commands to. Only one cluster name may be specified.
+
 .TP
 \fBcreate\fP \fISPECIFICATION\fP
 Create a new partition or reservation.  See the full list of parameters
@@ -141,11 +146,14 @@ The two \fISPECIFICATION\fP choices are \fIPartitionName=<name>\fP and
 \fIReservation=<name>\fP.  On Dynamically laid out Bluegene systems
 \fIBlockName=<name>\fP also works. Reservations and partitions should have
 no associated jobs at the time of their deletion (modify the job's first).
+If the specified partition is in use, the request is denied.
 
 .TP
 \fBdetails\fP
-Causes the \fIshow\fP command to provide additional details where available,
-namely the specific CPUs and NUMA memory allocated on each node.
+Causes the \fIshow\fP command to provide additional details where available.
+Batch job information will include the batch script for jobs the user is
+authorized to view.
+Job information will include CPUs and NUMA memory allocated on each node.
 Note that on computers with hyperthreading enabled and SLURM configured to
 allocate cores, each listed CPU represents one physical core.
 Each hyperthread on that core can be allocated a separate task, so a job's
@@ -273,12 +281,20 @@ This value is temporary and will be overwritten whenever the slurmctld
 daemon reads the slurm.conf configuration file (e.g. when the daemon
 is restarted or \fBscontrol reconfigure\fR is executed).
 
+.TP
+\fBsetdebugflags\fP [+|\-]\fIFLAG\fP
+Add or remove DebugFlags of the slurmctld daemon.
+See "man slurm.conf" for a list of supported DebugFlags.
+NOTE: Changing the value of some DebugFlags will have no effect without
+restarting the slurmctld daemon, which would set DebugFlags based upon the
+contents of the slurm.conf configuration file.
+
 .TP
 \fBshow\fP \fIENTITY\fP \fIID\fP
 Display the state of the specified entity with the specified identification.
-\fIENTITY\fP may be \fIaliases\fP, \fIconfig\fP, \fIdaemons\fP, \fIjob\fP,
-\fInode\fP, \fIpartition\fP, \fIreservation\fP, \fIslurmd\fP, \fIstep\fP,
-\fItopology\fP, \fIhostlist\fP or \fIhostnames\fP
+\fIENTITY\fP may be \fIaliases\fP, \fIconfig\fP, \fIdaemons\fP, \fIfrontend\fP,
+\fIjob\fP, \fInode\fP, \fIpartition\fP, \fIreservation\fP, \fIslurmd\fP,
+\fIstep\fP, \fItopology\fP, \fIhostlist\fP or \fIhostnames\fP
 (also \fIblock\fP or \fIsubbp\fP on BlueGene systems).
 \fIID\fP can be used to identify a specific element of the identified
 entity: the configuration parameter name, job ID, node name, partition name,
@@ -293,6 +309,9 @@ named nodes will be shown.
 \fINodeHostname\fP (useful to get the list of virtual nodes associated with a
 real node in a configuration where multiple slurmd daemons execute on a single
 compute node).
+\fIconfig\fP displays parameter names from the configuration files in mixed
+case (e.g. SlurmdPort=7003) while derived parameters names are in upper case
+only (e.g. SLURM_VERSION).
 \fIhostnames\fP takes an optional hostlist expression as input and
 writes a list of individual host names to standard output (one per
 line). If no hostlist expression is supplied, the contents of the
@@ -613,6 +632,19 @@ SLURM scheduler (e.g., 60 seconds with the default sched/builtin).
 year is assumed, unless the combination of MM/DD and HH:MM:SS has
 already passed for that year, in which case the next year is used.
 .RE
+.TP
+\fISwitches\fP=<count>[@<max\-time\-to\-wait>]
+When a tree topology is used, this defines the maximum count of switches
+desired for the job allocation. If SLURM finds an allocation containing more
+switches than the count specified, the job remains pending until it either finds
+an allocation with desired switch count or the time limit expires. By default
+there is no switch count limit and no time limit delay. Set the count
+to zero in order to clean any previously set count (disabling the limit).
+The job's maximum time delay may be limited by the system administrator using
+the \fBSchedulerParameters\fR configuration parameter with the
+\fBmax_switch_wait\fR parameter option.
+Also see \fIwait\-for\-switch\fP.
+
 .TP
 \fITimeLimit\fP=<time>
 The job's time limit.
@@ -622,6 +654,24 @@ hours:minutes:seconds, days\-hours, days\-hours:minutes or
 days\-hours:minutes:seconds.
 Time resolution is one minute and second values are rounded up to
 the next minute.
+If changing the time limit of a job, either specify a new time limit value or
+precede the time with a "+" or "\-" to increment or decrement the current
+time limit (e.g. "TimeLimit=+30"). In order to increment or decrement the
+current time limit, the \fIJobId\fP specification must precede the
+\fITimeLimit\fP specification.
+
+.TP
+\fIwait\-for\-switch\fP=<max\-time\-to\-wait>
+When a tree topology is used, this defines the maximum time to wait for the
+desired count of switches. If SLURM finds an allocation containing more
+switches than the count specified, the job remains pending until it either finds
+an allocation with desired switch count or the time limit expires. By default
+there is no switch count limit and there is no time delay. Set the time
+to zero in order to clean any previously set time limit (disabling the limit).
+The job's maximum time delay may be limited by the system administrator using
+the \fBSchedulerParameters\fR configuration parameter with the
+\fBmax_switch_wait parameter\fR option.
+Also see \fISwitches\fP.
 .TP
 \fIWCKey\fP=<key>
 Set the job's workload characterization key to the specified value.
@@ -658,12 +708,28 @@ The list of nodes allocated to the job.
 The NodeIndices expose the internal indices into the node table
 associated with the node(s) allocated to the job.
 .TP
+\fIPreemptTime\fP
+Time at which job was signaled that it was selected for preemption.
+(Meaningful only for PreemptMode=CANCEL and the partition or QOS
+with which the job is associated has a GraceTime value designated.)
+.TP
 \fIPreSusTime\fP
 Time the job ran prior to last suspend.
 .TP
 \fIReason\fP
 The reason job is not running: e.g., waiting "Resources".
 .TP
+\fISubmitTime\fP
+The time and date stamp (in Universal Time Coordinated, UTC)
+the job was submitted.  The format of the output is identical
+to that of the EndTime field.
+
+NOTE: If a job is requeued, the submit time is reset.
+To obtain the original submit time it is necessary
+to use the "sacct \-j <job_id>[.<step_id>]" command also
+designating the \-D or \-\-duplicate option to display all
+duplicate entries for a job.
+.TP
 \fISuspendTime\fP
 Time the job was last suspended or resumed.
 .TP
@@ -698,6 +764,11 @@ hours:minutes:seconds, days\-hours, days\-hours:minutes or
 days\-hours:minutes:seconds.
 Time resolution is one minute and second values are rounded up to
 the next minute.
+If changing the time limit of a step, either specify a new time limit value or
+precede the time with a "+" or "\-" to increment or decrement the current
+time limit (e.g. "TimeLimit=+30"). In order to increment or decrement the
+current time limit, the \fIStepId\fP specification must precede the
+\fITimeLimit\fP specification.
 
 .TP
 \fBSPECIFICATIONS FOR UPDATE COMMAND, NODES\fR
@@ -731,7 +802,7 @@ restarts of slurmctld or the execution of \fBscontrol reconfig\fR.
 
 .TP
 \fIReason\fP=<reason>
-Identify the reason the node is in a "DOWN" or "DRAINED", "DRAINING",
+Identify the reason the node is in a "DOWN", "DRAINED", "DRAINING",
 "FAILING" or "FAIL" state.
 Use quotes to enclose a reason having more than one word.
 
@@ -759,6 +830,8 @@ changing its underlying state.
 While all of the above states are valid, some of them are not valid new
 node states given their prior state.
 Generally only "DRAIN", "FAIL" and "RESUME" should be used.
+NOTE: The scontrol command should not be used to change node state on Cray
+systems. Use Cray tools such as \fIxtprocadmin\fR instead.
 
 .TP
 \fIWeight\fP=<weight>
@@ -771,6 +844,30 @@ preserved or slurmctld's receipt of a SIGHUP.
 Update slurm.conf with any changes meant to be persistent across normal 
 restarts of slurmctld or the execution of \fBscontrol reconfig\fR.
 
+.TP
+\fBSPECIFICATIONS FOR UPDATE COMMAND, FRONTEND\fR
+
+.TP
+\fIFrontendName\fP=<name>
+Identify the front end node to be updated. This specification is required.
+
+.TP
+\fIReason\fP=<reason>
+Identify the reason the node is in a "DOWN" or "DRAIN" state.
+Use quotes to enclose a reason having more than one word.
+
+.TP
+\fIState\fP=<state>
+Identify the state to be assigned to the front end node. Possible values are
+"DOWN", "DRAIN" or "RESUME".
+If you want to remove a front end node from service, you typically want to set
+its state to "DRAIN".
+"RESUME" is not an actual node state, but will return a "DRAINED", "DRAINING",
+or "DOWN" front end node to service, either "IDLE" or "ALLOCATED" state as
+appropriate.
+Setting a front end node "DOWN" will cause all running and suspended jobs on
+that node to be terminated.
+
 .TP
 \fBSPECIFICATIONS FOR CREATE, UPDATE, AND DELETE COMMANDS, PARTITIONS\fR
 .TP
@@ -807,11 +904,28 @@ Run time limit used for jobs that don't specify a value. If not set
 then MaxTime will be used.
 Format is the same as for MaxTime.
 
+.TP
+\fIDefMemPerCPU\fP=<MB>
+Set the default memory to be allocated per CPU for jobs in this partition.
+The memory size is specified in megabytes.
+.TP
+\fIDefMemPerNode\fP=<MB>
+Set the default memory to be allocated per node for jobs in this partition.
+The memory size is specified in megabytes.
+
 .TP
 \fIDisableRootJobs\fP=<yes|no>
 Specify if jobs can be executed as user root.
 Possible values are "YES" and "NO".
 
+.TP
+\fIGraceTime\fP=<seconds>
+Specifies, in units of seconds, the preemption grace time
+to be extended to a job which has been selected for preemption.
+The default value is zero, no preemption grace time is allowed on
+this partition or qos.
+(Meaningful only for PreemptMode=CANCEL)
+
 .TP
 \fIHidden\fP=<yes|no>
 Specify if the partition and its jobs should be hidden from view.
@@ -819,11 +933,22 @@ Hidden partitions will by default not be reported by SLURM APIs
 or commands.
 Possible values are "YES" and "NO".
 
+.TP
+\fIMaxMemPerCPU\fP=<MB>
+Set the maximum memory to be allocated per CPU for jobs in this partition.
+The memory size is specified in megabytes.
+.TP
+\fIMaxMemPerNode\fP=<MB>
+Set the maximum memory to be allocated per node for jobs in this partition.
+The memory size is specified in megabytes.
+
 .TP
 \fIMaxNodes\fP=<count>
 Set the maximum number of nodes which will be allocated to any single job
 in the partition. Specify a number, "INFINITE" or "UNLIMITED".  (On a
 Bluegene type system this represents a c\-node count.)
+Changing the \fIMaxNodes\fP of a partition has no effect upon jobs that
+have already begun execution.
 
 .TP
 \fIMaxTime\fP=<time>
@@ -834,11 +959,15 @@ hours:minutes:seconds, days\-hours, days\-hours:minutes or
 days\-hours:minutes:seconds.
 Time resolution is one minute and second values are rounded up to
 the next minute.
+Changing the \fIMaxTime\fP of a partition has no effect upon jobs that
+have already begun execution.
 
 .TP
 \fIMinNodes\fP=<count>
 Set the minimum number of nodes which will be allocated to any single job
 in the partition.   (On a Bluegene type system this represents a c\-node count.)
+Changing the \fIMinNodes\fP of a partition has no effect upon jobs that
+have already begun execution.
 
 .TP
 \fINodes\fP=<name>
@@ -846,6 +975,8 @@ Identify the node(s) to be associated with this partition. Multiple node names
 may be specified using simple node range expressions (e.g. "lx[10\-20]").
 Note that jobs may only be associated with one partition at any time.
 Specify a blank data value to remove all nodes from a partition: "Nodes=".
+Changing the \fINodes\fP in a partition has no effect upon jobs that
+have already begun execution.
 
 .TP
 \fIPartitionName\fP=<name>
@@ -997,6 +1128,13 @@ for all flags).
 Currently supported flags include:
 .RS
 .TP 12
+\fILICENSE_ONLY\fR
+This is a reservation for licenses only and not compute nodes.
+If this flag is set, a job using this reservation may use the associated
+licenses and any compute nodes.
+If this flag is not set, a job using this reservation may use only the nodes
+and licenses associated with the reservation.
+.TP
 \fIMAINT\fR
 Maintenance mode, receives special accounting treatment.
 This partition is permitted to use resources that are already in another
@@ -1065,8 +1203,23 @@ Commandline options will always override these settings.)
 \fBSCONTROL_ALL\fR
 \fB\-a, \-\-all\fR
 .TP
+\fBSLURM_CLUSTERS\fR
+Same as \fB\-\-clusters\fR
+.TP
 \fBSLURM_CONF\fR
 The location of the SLURM configuration file.
+.TP
+\fBSLURM_TIME_FORMAT\fR
+Specify the format used to report time stamps. A value of \fIstandard\fR, the
+default value, generates output in the form "year-month-dateThour:minute:second".
+A value of \fIrelative\fR returns only "hour:minute:second" if the current day.
+For other dates in the current year it prints the "hour:minute" preceded by
+"Tomorr" (tomorrow), "Ystday" (yesterday), the name of the day for the coming
+week (e.g. "Mon", "Tue", etc.), otherwise the date (e.g. "25 Apr").
+For other years it returns a date month and year without a time (e.g.
+"6 Jun 2012").
+Another suggested value is "%a %T" for a day of week and time stamp (e.g.
+"Mon 12:34:56"). All of the time stamps use a 24 hour format.
 
 .SH "AUTHORIZATION"
 
@@ -1207,7 +1360,7 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
-For details, see <https://computing.llnl.gov/linux/slurm/>.
+For details, see <http://www.schedmd.com/slurmdocs/>.
 .LP
 SLURM is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
@@ -1223,15 +1376,16 @@ details.
 /etc/slurm.conf
 .SH "SEE ALSO"
 \fBscancel\fR(1), \fBsinfo\fR(1), \fBsqueue\fR(1),
-\fBslurm_checkpoint\fR(3),
-\fBslurm_create_partition\fR(3),
-\fBslurm_delete_partition\fR(3),
-\fBslurm_load_ctl_conf\fR(3),
-\fBslurm_load_jobs\fR(3), \fBslurm_load_node\fR(3),
-\fBslurm_load_partitions\fR(3),
-\fBslurm_reconfigure\fR(3),  \fBslurm_requeue\fR(3), \fBslurm_resume\fR(3),
-\fBslurm_shutdown\fR(3), \fBslurm_suspend\fR(3),
-\fBslurm_takeover\fR(3),
-\fBslurm_update_job\fR(3), \fBslurm_update_node\fR(3),
-\fBslurm_update_partition\fR(3),
+\fBslurm_checkpoint\fR (3),
+\fBslurm_create_partition\fR (3),
+\fBslurm_delete_partition\fR (3),
+\fBslurm_load_ctl_conf\fR (3),
+\fBslurm_load_jobs\fR (3), \fBslurm_load_node\fR (3),
+\fBslurm_load_partitions\fR (3),
+\fBslurm_reconfigure\fR (3),  \fBslurm_requeue\fR (3),
+\fBslurm_resume\fR (3),
+\fBslurm_shutdown\fR (3), \fBslurm_suspend\fR (3),
+\fBslurm_takeover\fR (3),
+\fBslurm_update_job\fR (3), \fBslurm_update_node\fR (3),
+\fBslurm_update_partition\fR (3),
 \fBslurm.conf\fR(5), \fBslurmctld\fR(8)
diff --git a/doc/man/man1/sinfo.1 b/doc/man/man1/sinfo.1
index 3b4abe715..d5208999d 100644
--- a/doc/man/man1/sinfo.1
+++ b/doc/man/man1/sinfo.1
@@ -1,4 +1,4 @@
-.TH SINFO "1" "September 2010" "sinfo 2.2" "Slurm components"
+.TH SINFO "1" "September 2011" "sinfo 2.3" "Slurm components"
 
 .SH "NAME"
 sinfo \- view information about SLURM nodes and partitions.
@@ -172,9 +172,15 @@ Size of memory per node in megabytes
 \fB%M\fR
 PreemptionMode
 .TP
+\fB%n\fR
+List of node hostnames
+.TP
 \fB%N\fR
 List of node names
 .TP
+\fB%o\fR
+List of node communication addresses
+.TP
 \fB%P\fR
 Partition name
 .TP
@@ -480,6 +486,18 @@ Same as \fB\-\-clusters\fR
 .TP
 \fBSLURM_CONF\fR
 The location of the SLURM configuration file.
+.TP
+\fBSLURM_TIME_FORMAT\fR
+Specify the format used to report time stamps. A value of \fIstandard\fR, the
+default value, generates output in the form "year-month-dateThour:minute:second".
+A value of \fIrelative\fR returns only "hour:minute:second" if it is the current day.
+For other dates in the current year it prints the "hour:minute" preceded by
+"Tomorr" (tomorrow), "Ystday" (yesterday), the name of the day for the coming
+week (e.g. "Mon", "Tue", etc.), otherwise the date (e.g. "25 Apr").
+For other years it returns a date month and year without a time (e.g.
+"6 Jun 2012").
+Another suggested value is "%a %T" for a day of week and time stamp (e.g.
+"Mon 12:34:56"). All of the time stamps use a 24 hour format.
 
 .SH "EXAMPLES"
 .eo
@@ -554,7 +572,7 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
-For details, see <https://computing.llnl.gov/linux/slurm/>.
+For details, see <http://www.schedmd.com/slurmdocs/>.
 .LP
 SLURM is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
@@ -568,9 +586,10 @@ details.
 
 .SH "SEE ALSO"
 \fBscontrol\fR(1), \fBsmap\fR(1), \fBsqueue\fR(1),
-\fBslurm_load_ctl_conf\fR(3), \fBslurm_load_jobs\fR(3), \fBslurm_load_node\fR(3),
-\fBslurm_load_partitions\fR(3),
-\fBslurm_reconfigure\fR(3), \fBslurm_shutdown\fR(3),
-\fBslurm_update_job\fR(3), \fBslurm_update_node\fR(3),
-\fBslurm_update_partition\fR(3),
+\fBslurm_load_ctl_conf\fR (3), \fBslurm_load_jobs\fR (3),
+\fBslurm_load_node\fR (3),
+\fBslurm_load_partitions\fR (3),
+\fBslurm_reconfigure\fR (3), \fBslurm_shutdown\fR (3),
+\fBslurm_update_job\fR (3), \fBslurm_update_node\fR (3),
+\fBslurm_update_partition\fR (3),
 \fBslurm.conf\fR(5)
diff --git a/doc/man/man1/slurm.1 b/doc/man/man1/slurm.1
index c23c3d987..d80934bf6 100644
--- a/doc/man/man1/slurm.1
+++ b/doc/man/man1/slurm.1
@@ -37,7 +37,7 @@ SLURM configuration is maintained in the \fBslurm.conf\fR file.
 Man pages are available for all SLURM commands, daemons, APIs, plus the
 \fBslurm.conf\fR file.
 Extensive documenation is also available on the internet at
-\fB<https://computing.llnl.gov/linux/slurm/>\fR.
+\fB<http://www.schedmd.com/slurmdocs/>\fR.
 
 .SH "COPYING"
 Copyright (C) 2005\-2007 The Regents of the University of California.
@@ -46,7 +46,7 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
-For details, see <https://computing.llnl.gov/linux/slurm/>.
+For details, see <http://www.schedmd.com/slurmdocs/>.
 .LP
 SLURM is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
@@ -62,7 +62,7 @@ details.
 \fBsacct\fR(1), \fBsacctmgr\fR(1), \fBsalloc\fR(1), \fBsattach\fR(1),
 \fBsbatch\fR(1), \fBsbcast\fR(1), \fBscancel\fR(1), \fBscontrol\fR(1),
 \fBsinfo\fR(1), \fBsmap\fR(1), \fBsqueue\fR(1), \fBsreport\fR(1),
-\fBsrun\fR(1),\fBsshare\fR(1), \fBsstate\fR(1), \fBstrigger\fR(1),
+\fBsrun\fR(1), \fBsshare\fR(1), \fBsstate\fR(1), \fBstrigger\fR(1),
 \fBsview\fR(1),
 \fBbluegene.conf\fR(5), \fBslurm.conf\fR(5), \fBslurmdbd.conf\fR(5),
 \fBwiki.conf\fR(5),
diff --git a/doc/man/man1/smap.1 b/doc/man/man1/smap.1
index 34ee3c346..1d77b4452 100644
--- a/doc/man/man1/smap.1
+++ b/doc/man/man1/smap.1
@@ -473,6 +473,9 @@ Job terminated due to failure of one or more allocated nodes.
 \fBPD  PENDING\fR
 Job is awaiting resource allocation.
 .TP
+\fBPR  PREEMPTED\fR
+Job terminated due to preemption.
+.TP
 \fBR   RUNNING\fR
 Job currently has an allocation.
 .TP
@@ -496,7 +499,7 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
-For details, see <https://computing.llnl.gov/linux/slurm/>.
+For details, see <http://www.schedmd.com/slurmdocs/>.
 .LP
 SLURM is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
@@ -510,9 +513,10 @@ details.
 
 .SH "SEE ALSO"
 \fBscontrol\fR(1), \fBsinfo\fR(1), \fBsqueue\fR(1),
-\fBslurm_load_ctl_conf\fR(3), \fBslurm_load_jobs\fR(3), \fBslurm_load_node\fR(3),
-\fBslurm_load_partitions\fR(3),
-\fBslurm_reconfigure\fR(3), \fBslurm_shutdown\fR(3),
-\fBslurm_update_job\fR(3), \fBslurm_update_node\fR(3),
-\fBslurm_update_partition\fR(3),
+\fBslurm_load_ctl_conf\fR (3), \fBslurm_load_jobs\fR (3),
+\fBslurm_load_node\fR (3),
+\fBslurm_load_partitions\fR (3),
+\fBslurm_reconfigure\fR (3), \fBslurm_shutdown\fR (3),
+\fBslurm_update_job\fR (3), \fBslurm_update_node\fR (3),
+\fBslurm_update_partition\fR (3),
 \fBslurm.conf\fR(5)
diff --git a/doc/man/man1/sprio.1 b/doc/man/man1/sprio.1
index f776e1f54..a0d8532fa 100644
--- a/doc/man/man1/sprio.1
+++ b/doc/man/man1/sprio.1
@@ -1,4 +1,4 @@
-.TH SPRIO "1" "March 2009" "sprio 2.0" "SLURM commands"
+.TH SPRIO "1" "May 2011" "sprio 2.3" "SLURM commands"
 
 .SH "NAME"
 sprio \- view the factors that comprise a job's scheduling priority
@@ -34,7 +34,7 @@ Report more of the available information for the selected jobs.
 
 .TP
 \fB\-M\fR, \fB\-\-clusters\fR=<\fIstring\fR>
-Clusters to issue commands to.
+The cluster to issue commands to. Only one cluster name may be specified.
 
 .TP
 \fB\-n\fR, \fB\-\-norm\fR
@@ -141,6 +141,14 @@ Print version information and exit.
 factor.  This is for information purposes only.  Actual job data is
 suppressed.
 
+.SH "ENVIRONMENT VARIABLES"
+.PP
+If no corresponding commandline option is specified, \fBsprio\fR will use the value of
+the following environment variables.
+.TP 20
+\fBSLURM_CLUSTERS\fR
+Same as \fB\-\-clusters\fR
+
 .SH "EXAMPLES"
 .eo
 Print the list of all pending jobs with their weighted priorities
@@ -210,7 +218,7 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
-For details, see <https://computing.llnl.gov/linux/slurm/>.
+For details, see <http://www.schedmd.com/slurmdocs/>.
 .LP
 SLURM is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
diff --git a/doc/man/man1/squeue.1 b/doc/man/man1/squeue.1
index 19d48e518..b2382f87e 100644
--- a/doc/man/man1/squeue.1
+++ b/doc/man/man1/squeue.1
@@ -1,4 +1,4 @@
-.TH SQUEUE "1" "September 2010" "squeue 2.2" "Slurm components"
+.TH SQUEUE "1" "June 2011" "squeue 2.3" "Slurm components"
 
 .SH "NAME"
 squeue \- view information about jobs located in the SLURM scheduling queue.
@@ -119,6 +119,15 @@ This reports the value of the \fBsrun \-\-ntasks\fR option.
 Generic resources (gres) required by the job or step.
 (Valid for jobs and job steps)
 .TP
+\fB%B\fR
+Executing (batch) host. For an allocated session, this is the host on which
+the session is executing (i.e. the node from which the \fBsrun\fR or the
+\fBsalloc\fR command was executed). For a batch job, this is the node executing
+the batch script. In the case of a typical Linux cluster, this would be the
+compute node zero of the allocation. In the case of a BlueGene or a Cray
+system, this would be the front\-end host whose slurmd daemon executes the job
+script.
+.TP
 \fB%c\fR
 Minimum number of CPUs (processors) per node requested by the job.
 This reports the value of the \fBsrun \-\-mincpus\fR option with a
@@ -295,7 +304,7 @@ See the \fBJOB STATE CODES\fR section below for more information.
 \fB%T\fR
 Job state, extended form:
 PENDING, RUNNING, SUSPENDED, CANCELLED, COMPLETING, COMPLETED, CONFIGURING,
-FAILED, TIMEOUT, and NODE_FAIL.
+FAILED, TIMEOUT, PREEMPTED, and NODE_FAIL.
 See the \fBJOB STATE CODES\fR section below for more information.
 (Valid for jobs only)
 .TP
@@ -311,6 +320,14 @@ User ID for a job or job step.
 Reservation for the job.
 (Valid for jobs only)
 .TP
+\fB%w\fR
+Workload Characterization Key (wckey).
+(Valid for jobs only)
+.TP
+\fB%W\fR
+Licenses reserved for the job.
+(Valid for jobs only)
+.TP
 \fB%x\fR
 List of node names explicitly excluded by the job.
 (Valid for jobs only)
@@ -331,6 +348,10 @@ list of partition names.
 Specify the qos(s) of the jobs or steps to view. Accepts a comma
 separated list of qos's.
 
+.TP
+\fB\-R\fR, \fB\-\-reservation\fR=\fIreservation_name\fR
+Specify the reservation of the jobs to view. 
+
 .TP
 \fB\-s\fR, \fB\-\-steps\fR
 Specify the job steps to view.  This flag indicates that a comma separated list
@@ -373,8 +394,8 @@ reported. If no state is specified then pending, running, and completing
 jobs are reported. Valid states (in both extended and compact form) include:
 PENDING (PD), RUNNING (R), SUSPENDED (S),
 COMPLETING (CG), COMPLETED (CD), CONFIGURING (CF), CANCELLED (CA),
-FAILED (F), TIMEOUT (TO), and NODE_FAIL (NF). Note the \fB<state_list>\fR
-supplied is case insensitve ("pd" and "PD" work the same).
+FAILED (F), TIMEOUT (TO), PREEMPTED (PR) and NODE_FAIL (NF). Note the
+\fB<state_list>\fR supplied is case insensitive ("pd" and "PD" work the same).
 See the \fBJOB STATE CODES\fR section below for more information.
 
 .TP
@@ -473,6 +494,9 @@ Job terminated due to failure of one or more allocated nodes.
 \fBPD  PENDING\fR
 Job is awaiting resource allocation.
 .TP
+\fBPR  PREEMPTED\fR
+Job terminated due to preemption.
+.TP
 \fBR   RUNNING\fR
 Job currently has an allocation.
 .TP
@@ -494,6 +518,18 @@ Same as \fB\-\-clusters\fR
 \fBSLURM_CONF\fR
 The location of the SLURM configuration file.
 .TP
+\fBSLURM_TIME_FORMAT\fR
+Specify the format used to report time stamps. A value of \fIstandard\fR, the
+default value, generates output in the form "year-month-dateThour:minute:second".
+A value of \fIrelative\fR returns only "hour:minute:second" if it is the current day.
+For other dates in the current year it prints the "hour:minute" preceded by
+"Tomorr" (tomorrow), "Ystday" (yesterday), the name of the day for the coming
+week (e.g. "Mon", "Tue", etc.), otherwise the date (e.g. "25 Apr").
+For other years it returns a date month and year without a time (e.g.
+"6 Jun 2012").
+Another suggested value is "%a %T" for a day of week and time stamp (e.g.
+"Mon 12:34:56"). All of the time stamps use a 24 hour format.
+.TP
 \fBSQUEUE_ACCOUNT\fR
 \fB\-A <account_list>, \-\-account=<account_list>\fR
 .TP
@@ -580,7 +616,7 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
-For details, see <https://computing.llnl.gov/linux/slurm/>.
+For details, see <http://www.schedmd.com/slurmdocs/>.
 .LP
 SLURM is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
@@ -594,6 +630,6 @@ details.
 .SH "SEE ALSO"
 \fBscancel\fR(1), \fBscontrol\fR(1), \fBsinfo\fR(1),
 \fBsmap\fR(1), \fBsrun\fR(1),
-\fBslurm_load_ctl_conf\fR(3), \fBslurm_load_jobs\fR(3),
-\fBslurm_load_node\fR(3),
-\fBslurm_load_partitions\fR(3)
+\fBslurm_load_ctl_conf\fR (3), \fBslurm_load_jobs\fR (3),
+\fBslurm_load_node\fR (3),
+\fBslurm_load_partitions\fR (3)
diff --git a/doc/man/man1/sreport.1 b/doc/man/man1/sreport.1
index aa0ba34b6..87ec15709 100644
--- a/doc/man/man1/sreport.1
+++ b/doc/man/man1/sreport.1
@@ -432,7 +432,7 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
-For details, see <https://computing.llnl.gov/linux/slurm/>.
+For details, see <http://www.schedmd.com/slurmdocs/>.
 .LP
 SLURM is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
diff --git a/doc/man/man1/srun.1 b/doc/man/man1/srun.1
index 8187ac65d..abf7b5311 100644
--- a/doc/man/man1/srun.1
+++ b/doc/man/man1/srun.1
@@ -1,4 +1,4 @@
-.TH "srun" "1" "SLURM 2.2" "November 2010" "SLURM Commands"
+.TH "srun" "1" "SLURM 2.3" "August 2011" "SLURM Commands"
 
 .SH "NAME"
 srun \- Run parallel jobs
@@ -10,6 +10,11 @@ srun \- Run parallel jobs
 Run a parallel job on cluster managed by SLURM.  If necessary, srun will
 first create a resource allocation in which to run the parallel job.
 
+The following document describes the influence of various options on the
+allocation of cpus to jobs and tasks. 
+.br
+http://www.schedmd.com/slurmdocs/cpu_management.html
+
 .SH "OPTIONS"
 .LP
 
@@ -180,7 +185,7 @@ is in use:
         SLURM_CPU_BIND_LIST
 .fi
 
-See the \fBENVIRONMENT VARIABLE\fR section for a more detailed description
+See the \fBENVIRONMENT VARIABLES\fR section for a more detailed description
 of the individual SLURM_CPU_BIND* variables.
 
 When using \fB\-\-cpus\-per\-task\fR to run multithreaded tasks, be aware that
@@ -338,6 +343,11 @@ in some failed state (non-zero exit code, node failure, timed out, etc).
 This job can begin execution after the specified jobs have successfully
 executed (ran to completion with an exit code of zero).
 .TP
+\fBexpand:job_id\fR
+Resources allocated to this job should be used to expand the specified job.
+The job to expand must share the same QOS (Quality of Service) and partition.
+Gang scheduling of resources in the partition is also not supported.
+.TP
 \fBsingleton\fR
 This job can begin execution after any previously launched jobs
 sharing the same job name and user have terminated.
@@ -376,12 +386,13 @@ parameter in slurm.conf.
 .TP
 \fB\-\-exclusive\fR
 When used to initiate a job, the job allocation cannot share nodes with
-other running jobs.  This is the oposite of \-\-share, whichever option
-is seen last on the command line will win.  (The default shared/exclusive
-behavior depends on system configuration.)
+other running jobs.  This is the opposite of \-\-share, whichever option
+is seen last on the command line will win. The default shared/exclusive
+behavior depends on system configuration and the partition's \fBShared\fR
+option takes precedence over the job's option.
 
-This option can also be used when initiating more than job step within
-an existing resource allocation and you want separate processors to
+This option can also be used when initiating more than one job step within
+an existing resource allocation, where you want separate processors to
 be dedicated to each job step. If sufficient processors are not
 available to initiate the job step, it will be deferred. This can
 be thought of as providing resource management for the job within
@@ -412,13 +423,18 @@ The available generic consumable resources is configurable by the system
 administrator.
 A list of available generic consumable resources will be printed and the
 command will exit if the option argument is "help".
-Examples of use include "\-\-gres=gpus:2*cpu,disk=40G" and "\-\-gres=help".
+Examples of use include "\-\-gres=gpu:2*cpu,disk=40G" and "\-\-gres=help".
+NOTE: By default, a job step is allocated all of the generic resources that
+have been allocated to the job. To change the behavior so that each job step is
+allocated no generic resources, explicitly set the value of \-\-gres to specify
+zero counts for each generic resource OR set "\-\-gres=none" OR set the
+SLURM_STEP_GRES environment variable to "none".
 
 .TP
 \fB\-H, \-\-hold\fR
 Specify the job is to be submitted in a held state (priority of zero).
 A held job can now be released using scontrol to reset its priority
-(e.g. "\fIscontrol update jobid=<id> priority=1\fR".
+(e.g. "\fIscontrol release <job_id>\fR").
 
 .TP
 \fB\-h\fR, \fB\-\-help\fR
@@ -480,11 +496,15 @@ Using this option will cause \fBsrun\fR to behave exactly as if the
 SLURM_JOB_ID environment variable was set.
 
 .TP
-\fB\-K\fR, \fB\-\-kill\-on\-bad\-exit\fR
-Immediately terminate a job if any task exits with a non\-zero exit code.
-Note: The \fB\-K\fR, \fB\-\-kill\-on\-bad\-exit\fR option takes precedence
-over \fB\-W\fR, \fB\-\-wait\fR to terminate the job immediately if a task
-exits with a non\-zero exit code.
+\fB\-K\fR, \fB\-\-kill\-on\-bad\-exit\fR[=0|1]
+Controls whether or not to terminate a job if any task exits with a non\-zero
+exit code. If this option is not specified, the default action will be based
+upon the SLURM configuration parameter of \fBKillOnBadExit\fR. If this option
+is specified, it will take precedence over \fBKillOnBadExit\fR. An option
+argument of zero will not terminate the job. A non\-zero argument or no
+argument will terminate the job.
+Note: This option takes precedence over the \fB\-W\fR, \fB\-\-wait\fR option
+to terminate the job immediately if a task exits with a non\-zero exit code.
 
 .TP
 \fB\-k\fR, \fB\-\-no\-kill\fR
@@ -528,7 +548,8 @@ method (before the ":") controls the distribution of resources across
 nodes. The optional second distribution method (after the ":")
 controls the distribution of resources across sockets within a node.
 Note that with select/cons_res, the number of cpus allocated on each
-socket and node may be different. Refer to the mc_support.html document
+socket and node may be different. Refer to
+http://www.schedmd.com/slurmdocs/mc_support.html
 for more information on resource allocation, assignment of tasks to
 nodes, and binding of tasks to CPUs.
 .RS
@@ -565,11 +586,11 @@ followed by an optional specification of the task distribution scheme
 within a block of tasks and between the blocks of tasks.  For more
 details (including examples and diagrams), please see
 .br
-https://computing.llnl.gov/linux/slurm/mc_support.html
+http://www.schedmd.com/slurmdocs/mc_support.html
 .br
 and
 .br
-https://computing.llnl.gov/linux/slurm/dist_plane.html.
+http://www.schedmd.com/slurmdocs/dist_plane.html
 .TP
 .B arbitrary
 The arbitrary method of distribution will allocate processes in\-order
@@ -754,11 +775,9 @@ below for details on the configuration file contents.
 .TP
 \fB\-N\fR, \fB\-\-nodes\fR=<\fIminnodes\fR[\-\fImaxnodes\fR]>
 Request that a minimum of \fIminnodes\fR nodes be allocated to this job.
-The scheduler may decide to launch the job on more than \fIminnodes\fR nodes.
-A limit on the maximum node count may be specified with \fImaxnodes\fR
-(e.g. "\-\-nodes=2\-4").  The minimum and maximum node count may be the
-same to specify a specific number of nodes (e.g. "\-\-nodes=2\-2" will ask
-for two and ONLY two nodes).
+A maximum node count may also be specified with \fImaxnodes\fR.
+If only one number is specified, this is used as both the minimum and
+maximum node count.
 The partition's node limits supersede those of the job.
 If a job's node limits are outside of the range permitted for its
 associated partition, the job will be left in a PENDING state.
@@ -768,12 +787,15 @@ If a job node limit exceeds the number of nodes configured in the
 partition, the job will be rejected.
 Note that the environment
 variable \fBSLURM_NNODES\fR will be set to the count of nodes actually
-allocated to the job. See the \fBENVIRONMENT VARIABLES \fR section
+allocated to the job. See the \fBENVIRONMENT VARIABLES\fR section
 for more information.  If \fB\-N\fR is not specified, the default
 behavior is to allocate enough nodes to satisfy the requirements of
 the \fB\-n\fR and \fB\-c\fR options.
 The job will be allocated as many nodes as possible within the range specified
 and without delaying the initiation of the job.
+The node count specification may include a numeric value followed by a suffix
+of "k" (multiplies numeric value by 1,024) or "m" (multiplies numeric value by
+1,048,576).
 
 .TP
 \fB\-n\fR, \fB\-\-ntasks\fR=<\fInumber\fR>
@@ -820,18 +842,6 @@ NOTE: This option is not supported unless
 \fISelectTypeParameters=CR_Core\fR or
 \fISelectTypeParameters=CR_Core_Memory\fR is configured.
 
-.TP
-\fB\-\-ntasks\-per\-socket\fR=<\fIntasks\fR>
-Request the maximum \fIntasks\fR be invoked on each socket.
-Meant to be used with the \fB\-\-ntasks\fR option.
-Related to \fB\-\-ntasks\-per\-node\fR except at the socket level
-instead of the node level.  Masks will automatically be generated
-to bind the tasks to specific sockets unless \fB\-\-cpu_bind=none\fR
-is specified.
-NOTE: This option is not supported unless
-\fISelectTypeParameters=CR_Socket\fR or
-\fISelectTypeParameters=CR_Socket_Memory\fR is configured.
-
 .TP
 \fB\-\-ntasks\-per\-node\fR=<\fIntasks\fR>
 Request the maximum \fIntasks\fR be invoked on each node.
@@ -847,6 +857,18 @@ all of the parallelism present in the node, or submitting a single
 setup/cleanup/monitoring job to each node of a pre\-existing
 allocation as one step in a larger job script.
 
+.TP
+\fB\-\-ntasks\-per\-socket\fR=<\fIntasks\fR>
+Request the maximum \fIntasks\fR be invoked on each socket.
+Meant to be used with the \fB\-\-ntasks\fR option.
+Related to \fB\-\-ntasks\-per\-node\fR except at the socket level
+instead of the node level.  Masks will automatically be generated
+to bind the tasks to specific sockets unless \fB\-\-cpu_bind=none\fR
+is specified.
+NOTE: This option is not supported unless
+\fISelectTypeParameters=CR_Socket\fR or
+\fISelectTypeParameters=CR_Socket_Memory\fR is configured.
+
 .TP
 \fB\-O\fR, \fB\-\-overcommit\fR
 Overcommit resources. Normally, \fBsrun\fR
@@ -880,7 +902,7 @@ The default value is specified by the system configuration parameter
 .TP
 \fB\-p\fR, \fB\-\-partition\fR=<\fIpartition_names\fR>
 Request a specific partition for the resource allocation.  If not specified,
-the default behaviour is to allow the slurm controller to select the default
+the default behavior is to allow the slurm controller to select the default
 partition as designated by the system administrator. If the job can use more
 than one partition, specify their names in a comma separate list and the one
 offering earliest initiation will be used.
@@ -907,7 +929,7 @@ options may not be supported on some systems):
 All limits listed below
 .TP
 \fBAS\fR
-The maximum address space for a processes
+The maximum address space for a process
 .TP
 \fBCORE\fR
 The maximum size of core file
@@ -919,7 +941,9 @@ The maximum amount of CPU time
 The maximum size of a process's data segment
 .TP
 \fBFSIZE\fR
-The maximum size of files created
+The maximum size of files created. Note that if the user sets FSIZE to less
+than the current size of the slurmd.log, job launches will fail with 
+a 'File size limit exceeded' error.
 .TP
 \fBMEMLOCK\fR
 The maximum size that may be locked into memory
@@ -939,7 +963,7 @@ The maximum stack size
 
 .TP
 \fB\-\-pty\fR
-Execute task zero in pseudo terminal.
+Execute task zero in pseudo terminal mode.
 Implicitly sets \fB\-\-unbuffered\fR.
 Implicitly sets \fB\-\-error\fR and \fB\-\-output\fR to /dev/null
 for all tasks except task zero, which may cause those tasks to 
@@ -996,8 +1020,14 @@ be read (used by the checkpoint/blcrm and checkpoint/xlch plugins only).
 
 .TP
 \fB\-s\fR, \fB\-\-share\fR
-The job can share nodes with other running jobs. This may result in faster job
-initiation and higher system utilization, but lower application performance.
+The job allocation can share nodes with other running jobs.
+This is the opposite of \-\-exclusive, whichever option is seen last
+on the command line will be used. The default shared/exclusive
+behavior depends on system configuration and the partition's \fBShared\fR
+option takes precedence over the job's option.
+This option may result in the allocation being granted sooner than if the \-\-share
+option was not set and allow higher system utilization, but application
+performance will likely suffer due to competition for resources within a node.
 
 .TP
 \fB\-\-signal\fR=<\fIsig_num\fR>[@<\fIsig_time\fR>]
@@ -1024,6 +1054,18 @@ Restrict node selection to nodes with at least the specified number of
 sockets.  See additional information under \fB\-B\fR option above when
 task/affinity plugin is enabled.
 
+.TP
+\fB\-\-switch\fR=<\fIcount\fR>[@<\fImax\-time\fR>]
+When a tree topology is used, this defines the maximum count of switches
+desired for the job allocation and optionally the maximum time to wait
+for that number of switches. If SLURM finds an allocation containing more
+switches than the count specified, the job remains pending until it either finds
+an allocation with desired switch count or the time limit expires. By default
+there is no switch count limit and there is no delay in starting the job.
+The job's maximum time delay may be limited by the system administrator using
+the \fBSchedulerParameters\fR configuration parameter with the
+\fBmax_switch_wait\fR parameter option.
+
 .TP
 \fB\-T\fR, \fB\-\-threads\fR=<\fInthreads\fR>
 Allows limiting the number of concurrent threads used to
@@ -1078,6 +1120,9 @@ Returns an estimate of when a job would be scheduled to run given the
 current job queue and all the other \fBsrun\fR arguments specifying
 the job.  This limits \fBsrun's\fR behavior to just return
 information; no job is actually submitted.
+EXCEPTION: On Bluegene/Q systems when running within an existing job
+allocation, this disables the use of "runjob" to launch tasks. The program
+will be executed directly by the slurmd daemon.
 
 .TP
 \fB\-\-threads\-per\-core\fR=<\fIthreads\fR>
@@ -1204,6 +1249,7 @@ SLURM will normally allocate a TORUS if possible for a given geometry.
 If running on a BGP system and wanting to run in HTC mode (only for 1
 midplane and below).  You can use HTC_S for SMP, HTC_D for Dual, HTC_V
 for virtual node mode, and HTC_L for Linux mode.
+A comma separated lists of connection types may be specified, one for each dimension.
 
 .TP
 \fB\-g\fR, \fB\-\-geometry\fR=<\fIXxYxZ\fR>
@@ -1486,6 +1532,9 @@ Also see \fBSLURM_EXIT_ERROR\fR.
 \fBSLURM_GEOMETRY\fR
 Same as \fB\-g, \-\-geometry\fR
 .TP
+\fBSLURM_GRES\fR
+Same as \fB\-\-gres\fR. Also see \fBSLURM_STEP_GRES\fR
+.TP
 \fBSLURM_JOB_NAME\fR
 Same as \fB\-J, \-\-job\-name\fR except within an existing
 allocation, in which case it is ignored to avoid using the batch job's name
@@ -1497,6 +1546,12 @@ Same as \fB\-l, \-\-label\fR
 \fBSLURM_MEM_BIND\fR
 Same as \fB\-\-mem_bind\fR
 .TP
+\fBSLURM_MEM_PER_CPU\fR
+Same as \fB\-\-mem\-per\-cpu\fR
+.TP
+\fBSLURM_MEM_PER_NODE\fR
+Same as \fB\-\-mem\fR
+.TP
 \fBSLURM_MPI_TYPE\fR
 Same as \fB\-\-mpi\fR
 .TP
@@ -1559,6 +1614,10 @@ Same as \fB\-e, \-\-error\fR
 \fBSLURM_STDINMODE\fR
 Same as \fB\-i, \-\-input\fR
 .TP
+\fBSLURM_STEP_GRES\fR
+Same as \fB\-\-gres\fR (only applies to job steps, not to job allocations).
+Also see \fBSLURM_GRES\fR
+.TP
 \fBSLURM_STDOUTMODE\fR
 Same as \fB\-o, \-\-output\fR
 .TP
@@ -1593,10 +1652,6 @@ of the executing tasks on the remote compute nodes.
 These environment variables are:
 
 .TP 22
-\fBBASIL_RESERVATION_ID\fR
-The reservation ID on Cray systems running ALPS/BASIL only.
-
-.TP
 \fBSLURM_CHECKPOINT_IMAGE_DIR\fR
 Directory into which checkpoint images should be written
 if specified on the execute line.
@@ -1630,6 +1685,12 @@ Set to value of the \-\-dependency option.
 \fBSLURM_JOB_ID\fR (and \fBSLURM_JOBID\fR for backwards compatibility)
 Job id of the executing job
 
+.TP
+\fBSLURM_JOB_NAME\fR
+Set to the value of the \-\-job\-name option or the command name when srun
+is used to create a new job allocation. Not set when srun is used only to
+create a job step (i.e. within an existing job allocation).
+
 .TP
 \fBSLURM_LAUNCH_NODE_IPADDR\fR
 IP address of the node from which the task launch was
@@ -1746,7 +1807,7 @@ These tasks initiated outside of SLURM's monitoring
 or control. SLURM's epilog should be configured to purge
 these tasks when the job's allocation is relinquished.
 
-See \fIhttps://computing.llnl.gov/linux/slurm/mpi_guide.html\fR
+See \fIhttp://www.schedmd.com/slurmdocs/mpi_guide.html\fR
 for more information on use of these various MPI implementation
 with SLURM.
 
@@ -1952,7 +2013,7 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
-For details, see <https://computing.llnl.gov/linux/slurm/>.
+For details, see <http://www.schedmd.com/slurmdocs/>.
 .LP
 SLURM is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
@@ -1967,5 +2028,5 @@ details.
 .SH "SEE ALSO"
 \fBsalloc\fR(1), \fBsattach\fR(1), \fBsbatch\fR(1), \fBsbcast\fR(1),
 \fBscancel\fR(1), \fBscontrol\fR(1), \fBsqueue\fR(1), \fBslurm.conf\fR(5),
-\fBsched_setaffinity\fR(2), \fBnuma\fR(3)
-\fBgetrlimit\fR(2),
+\fBsched_setaffinity\fR (2), \fBnuma\fR (3)
+\fBgetrlimit\fR (2)
diff --git a/doc/man/man1/srun_cr.1 b/doc/man/man1/srun_cr.1
index 11378aa75..77322298b 100644
--- a/doc/man/man1/srun_cr.1
+++ b/doc/man/man1/srun_cr.1
@@ -57,7 +57,7 @@ Produced at National University of Defense Technology, China (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
-For details, see <https://computing.llnl.gov/linux/slurm/>.
+For details, see <http://www.schedmd.com/slurmdocs/>.
 .LP
 SLURM is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
diff --git a/doc/man/man1/sshare.1 b/doc/man/man1/sshare.1
index a3a127dcd..bc90ff7c0 100644
--- a/doc/man/man1/sshare.1
+++ b/doc/man/man1/sshare.1
@@ -122,7 +122,7 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
-For details, see <https://computing.llnl.gov/linux/slurm/>.
+For details, see <http://www.schedmd.com/slurmdocs/>.
 .LP
 SLURM is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
@@ -135,5 +135,5 @@ FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
 
 .SH "SEE ALSO"
-\fBslurm.conf\fR(5)
+\fBslurm.conf\fR(5),
 \fBslurmdbd\fR(8)
diff --git a/doc/man/man1/sstat.1 b/doc/man/man1/sstat.1
index 4231bb9ff..7978968d9 100644
--- a/doc/man/man1/sstat.1
+++ b/doc/man/man1/sstat.1
@@ -1,4 +1,4 @@
-.TH SSTAT "1" "January 2009" "sstat 2.0" "Slurm components"
+.TH SSTAT "1" "August 2011" "sstat 2.3" "Slurm components"
 
 .SH "NAME"
 sstat \- Display various status information
@@ -183,11 +183,11 @@ Total number of tasks in a job or step.
 .SH "EXAMPLES"
 
 .TP
-\f3sstat \-\-format=AveCPU,AvePages,AveRSS,AveVSize,JobID \-j 11\fP
+\f3sstat \-\-format=AveCPU,AvePages,AveRSS,AveVMSize,JobID \-j 11\fP
 25:02.000  0K         1.37M      5.93M      9.0
 
 .TP
-\f3sstat \-p \-\-format=AveCPU,AvePages,AveRSS,AveVSize,JobID \-j 11\fP
+\f3sstat \-p \-\-format=AveCPU,AvePages,AveRSS,AveVMSize,JobID \-j 11\fP
 25:02.000|0K|1.37M|5.93M|9.0|
 
 .SH "COPYING"
@@ -196,7 +196,7 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
-For details, see <https://computing.llnl.gov/linux/slurm/>.
+For details, see <http://www.schedmd.com/slurmdocs/>.
 .LP
 SLURM is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
diff --git a/doc/man/man1/strigger.1 b/doc/man/man1/strigger.1
index 905764458..1949d3e16 100644
--- a/doc/man/man1/strigger.1
+++ b/doc/man/man1/strigger.1
@@ -1,4 +1,4 @@
-.TH STRIGGER "1" "September 2010" "strigger 2.2" "Slurm components"
+.TH STRIGGER "1" "December 2010" "strigger 2.3" "Slurm components"
 
 .SH "NAME"
 strigger \- Used set, get or clear Slurm trigger information.
@@ -56,14 +56,14 @@ Trigger an event when the primary slurmctld resuming operation after failure.
 \fB\-b\fR, \fB\-\-primary_slurmctld_resumed_control\fR
 Trigger an event when primary slurmctld resumes control.
 
-.TP
-\fB\-B\fR, \fB\-\-backup_slurmctld_failure\fR
-Trigger an event when the backup slurmctld fails.
-
 .TP
 \fB\-\-block_err\fP
 Trigger an event when a BlueGene block enters an ERROR state.
 
+.TP
+\fB\-B\fR, \fB\-\-backup_slurmctld_failure\fR
+Trigger an event when the backup slurmctld fails.
+
 .TP
 \fB\-c\fR, \fB\-\-backup_slurmctld_resumed_operation\fR
 Trigger an event when the backup slurmctld resumes operation after failure.
@@ -83,7 +83,6 @@ be cleared.
 \fB\-d\fR, \fB\-\-down\fR
 Trigger an event if the specified node goes into a DOWN state.
 
-
 .TP
 \fB\-D\fR, \fB\-\-drained\fR
 Trigger an event if the specified node goes into a DRAINED state.
@@ -100,6 +99,13 @@ Trigger an event if the specified node goes into a FAILING state.
 \fB\-f\fR, \fB\-\-fini\fR
 Trigger an event when the specified job completes execution.
 
+.TP
+\fB\-\-front_end\fR
+Trigger events based upon changes in state of front end nodes rather than
+compute nodes. Applies to BlueGene and Cray architectures only, where the
+slurmd daemon executes on front end nodes rather than the compute nodes.
+Use this option with either the \fB\-\-up\fR or \fB\-\-down\fR option.
+
 .TP
 \fB\-g\fR, \fB\-\-primary_slurmdbd_failure\fR
 Trigger an event when the primary slurmdbd fails.
@@ -341,7 +347,7 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
-For details, see <https://computing.llnl.gov/linux/slurm/>.
+For details, see <http://www.schedmd.com/slurmdocs/>.
 .LP
 SLURM is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
diff --git a/doc/man/man1/sview.1 b/doc/man/man1/sview.1
index 905dc4318..cf6d408fb 100644
--- a/doc/man/man1/sview.1
+++ b/doc/man/man1/sview.1
@@ -1,4 +1,4 @@
-.TH "sview" "1" "SLURM 2.0" "July 2009" "SLURM Commands"
+.TH "sview" "1" "SLURM 2.3" "February 2011" "SLURM Commands"
 .SH "NAME"
 .LP
 sview \- graphical user interface to view and modify SLURM state.
@@ -39,23 +39,18 @@ The sview command can only be build if \fIgtk+\-2.0\fR is installed.
 Systems lacking these libraries will have SLURM installed without
 the sview command.
 
-On larger systems (2000+ nodes) some gtk themes can considerably slow down
-the grid display.  If you think this is happening you may
-try defining SVIEW_GRID_SPEEDUP=1 in your environment.  This will use
-a code path to try to avoid functions that typically take a
-relatively large amount of time.  THIS OPTION DOESN'T WORK FOR EVERY
-GTK THEME, but if it does work for your theme this provides an
-outrageous amount of speedup.  We have found it to work very well with
-QT based themes.
+At least some gtk themes are unable to display large numbers of lines (jobs,
+nodes, etc). The information is still in gtk's internal data structures, but
+not visible by scrolling down the window.
 
 .SH "COPYING"
 Copyright (C) 2006\-2007 The Regents of the University of California.
-Copyright (C) 2008\-2009 Lawrence Livermore National Security.
+Copyright (C) 2008\-2011 Lawrence Livermore National Security.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
-For details, see <https://computing.llnl.gov/linux/slurm/>.
+For details, see <http://www.schedmd.com/slurmdocs/>.
 .LP
 SLURM is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
@@ -69,5 +64,5 @@ details.
 
 .SH "SEE ALSO"
 .LP
-sinfo(1), squeue(1), scontrol(1), slurm.conf(5),
-sched_setaffinity(2), numa(3)
+\fBsinfo\fR(1), \fBsqueue\fR(1), \fBscontrol\fR(1), \fBslurm.conf\fR(5),
+\fBsched_setaffinity\fR (2), \fBnuma\fR (3)
diff --git a/doc/man/man2html.py b/doc/man/man2html.py
new file mode 100755
index 000000000..a7dda8dc4
--- /dev/null
+++ b/doc/man/man2html.py
@@ -0,0 +1,200 @@
+#!/usr/bin/env python
+
+import re
+import sys
+import os
+include_pat = r'(<!--\s*#include\s*virtual\s*=\s*"([^"]+)"\s*-->)'
+include_regex = re.compile(include_pat)
+
+url_pat = r'(\s+href\s*=\s*")([^"#]+)(#[^"]+)?(")'
+url_regex = re.compile(url_pat)
+
+dirname = ''
+
+# Insert tags for options
+#   Two styles are processed.
+#       <DT><B>pppppp</B><DD>
+#           has tag <a id="OPT_pppppp"></a>
+#       <DT><B>--pppppp</B> or <DT><B>-P</B>, <B>--pppppp</B>
+#           has tag <a id="OPT_pppppp"></a>
+#   <H2>hhhh</h2> also has tag <a id="SECTION_hhhh"></a> inserted
+def insert_tag(html, lineIn):
+    if lineIn[0:4] == "<H2>":
+        posEnd = lineIn.find("</H2>")
+        if posEnd != -1:
+            html.write('<a id="SECTION_' + lineIn[4:posEnd] + '"></a>\n')
+            return
+
+    if lineIn[0:7] != "<DT><B>":
+        return
+    posBgn = lineIn.find("--")
+    if posBgn == -1:
+        # 1st form
+        posBgn = 5
+    posBgn = posBgn + 2
+    posEnd = lineIn.find("</B>",posBgn)
+    if posEnd == -1:
+        # poorly constructed
+        return
+    html.write('<a id="OPT_' + lineIn[posBgn:posEnd] + '"></a>\n')
+    return
+
+
+def llnl_references(line):
+        manStr = "Refer to mc_support.html"
+        htmlStr = 'Refer to <a href="mc_support.html">mc_support</a>'
+        lineFix = line.replace(manStr,htmlStr)
+        if lineFix != line:
+            return lineFix
+        manStr = '<A HREF="http://www.schedmd.com/slurmdocs/mc_support.html">http://www.schedmd.com/slurmdocs/mc_support.html</A>'
+        htmlStr = 'the <a href="mc_support.html">mc_support</a> document'
+        lineFix = line.replace(manStr,htmlStr)
+        if lineFix != line:
+            return lineFix
+        manStr = '<A HREF="http://www.schedmd.com/slurmdocs/dist_plane.html.">http://www.schedmd.com/slurmdocs/dist_plane.html.</A>'
+        htmlStr = 'the <a href="dist_plane.html">dist_plane</a> document'
+        lineFix = line.replace(manStr,htmlStr)
+        if lineFix != line:
+            return lineFix
+        manStr = '&lt;<A HREF="http://www.schedmd.com/slurmdocs/mpi_guide.html">http://www.schedmd.com/slurmdocs/mpi_guide.html</A>&gt;'
+        htmlStr = '<a href="mpi_guide.html">mpi_guide</a>'
+        lineFix = line.replace(manStr,htmlStr)
+        if lineFix != line:
+            return lineFix
+        manStr = '(<A HREF="http://www.schedmd.com/slurmdocs/power_save.html).">http://www.schedmd.com/slurmdocs/power_save.html).</A>'
+        htmlStr = '<a href="power_save.html">power_save</a>'
+        lineFix = line.replace(manStr,htmlStr)
+        if lineFix != line:
+            return lineFix
+        manStr = '<A HREF="http://www.schedmd.com/slurmdocs/cons_res.html">http://www.schedmd.com/slurmdocs/cons_res.html</A>'
+        htmlStr = '<a href="cons_res.html">cons_res</a>'
+        lineFix = line.replace(manStr,htmlStr)
+        if lineFix != line:
+            return lineFix
+        manStr = '<A HREF="http://www.schedmd.com/slurmdocs/cons_res_share.html">http://www.schedmd.com/slurmdocs/cons_res_share.html</A>'
+        htmlStr = '<a href="cons_res_share.html">cons_res_share</a>'
+        lineFix = line.replace(manStr,htmlStr)
+        if lineFix != line:
+            return lineFix
+        manStr = '<A HREF="http://www.schedmd.com/slurmdocs/gang_scheduling.html">http://www.schedmd.com/slurmdocs/gang_scheduling.html</A>'
+        htmlStr = '<a href="gang_scheduling.html">gang_scheduling</a>'
+        lineFix = line.replace(manStr,htmlStr)
+        if lineFix != line:
+            return lineFix
+        manStr = '<A HREF="http://www.schedmd.com/slurmdocs/preempt.html">http://www.schedmd.com/slurmdocs/preempt.html</A>'
+        htmlStr = '<a href="preempt.html">preempt</a>'
+        lineFix = line.replace(manStr,htmlStr)
+        if lineFix != line:
+            return lineFix
+        return line
+
+def relative_reference(lineIn):
+    fullRef = "/cgi-bin/man/man2html"
+    lenRef = len(fullRef)
+    refAnchor="<A HREF=";
+    lenRefAnchor = len(refAnchor)
+    lineOt = ""
+    cursor = 0
+
+    posHREF = lineIn.find(fullRef,cursor)
+    if posHREF == -1:
+        return lineIn
+    if lineIn[posHREF+lenRef] != "?":
+        pos = lineIn.find("Return to Main Contents",cursor)
+        if pos != -1:
+            return ""
+        return "<i>man2html</i> "
+    while posHREF != -1:
+        posRefAnchor = lineIn.find(refAnchor,cursor)
+        lineOt = lineOt + lineIn[cursor:posRefAnchor+lenRefAnchor]
+        cursor = posHREF + lenRef + 3
+        lineOt = lineOt + '"'
+        posQuote = lineIn.find('"',cursor)
+        lineOt = lineOt + lineIn[cursor:posQuote] + ".html"
+        cursor = posQuote
+        posHREF = lineIn.find(fullRef,cursor)
+    lineOt = lineOt + lineIn[cursor:]
+    return lineOt
+
+
+def include_virtual(matchobj):
+    global dirname
+    if dirname:
+        filename = dirname + '/' + matchobj.group(2)
+    else:
+        filename = matchobj.group(2)
+
+    if os.access(filename, os.F_OK):
+        #print 'Including file', filename
+        lines = file(filename, 'r').read()
+        return lines
+    else:
+        return matchobj.group(0)
+
+def url_rewrite(matchobj):
+    global dirname
+    if dirname:
+        localpath = dirname + '/' + matchobj.group(2)
+    else:
+        localpath = matchobj.group(2)
+
+    if matchobj.group(2)[-6:] == '.shtml' and os.access(localpath, os.F_OK):
+        location = matchobj.group(2)
+        if matchobj.group(3) is None:
+            newname = location[:-6] + '.html'
+        else:
+            newname = location[:-6] + '.html' + matchobj.group(3)
+        #print 'Rewriting', location, 'to', newname
+        return matchobj.group(1) + newname + matchobj.group(4)
+    else:
+        return matchobj.group(0)
+
+files = []
+for f in sys.argv[3:]:
+    posLastDot = f.rfind(".")
+    mhtmlname = f[:posLastDot] + ".mhtml"
+    cmd = "man2html " + f + "> " + mhtmlname
+    os.system(cmd)
+    print ">>>>>>> " + mhtmlname
+    files.append(mhtmlname)
+
+for filename in files:
+    dirname, basefilename = os.path.split(filename)
+#    newfilename = basefilename[:-6] + '.html'
+    newfilename = filename[:-6] + '.html'
+    print 'Converting', filename, '->', newfilename
+    shtml = file(filename, 'r')
+    html = file(newfilename, 'w')
+
+    lines = file(sys.argv[1], 'r').read()
+    lines = lines.replace(".shtml",".html")
+    html.write(lines)
+#    html.write(<!--#include virtual="header.txt"-->)
+    for line in shtml.readlines():
+        # Remove html header/footer created by man2html
+        if line == "Content-type: text/html\n":
+            continue
+        if line[:6] == "<HTML>":
+            continue
+        if line[:7] == "</HEAD>":
+            continue
+        if line[:7] == "</HTML>":
+            continue
+        if line[:7] == "</BODY>":
+            continue
+        line = include_regex.sub(include_virtual, line)
+        # Special case some html references
+        line = llnl_references(line)
+        #insert tags for some options
+        insert_tag(html, line)
+        # Make man2html links relative ones
+        line = relative_reference(line)
+
+        line = url_regex.sub(url_rewrite, line)
+        html.write(line)
+    lines = file(sys.argv[2], 'r').read()
+    html.write(lines)
+#    html.write(<!--#include virtual="footer.txt"-->)
+    html.close()
+    shtml.close()
+    os.remove(filename)
diff --git a/doc/man/man3/slurm_allocate_resources.3 b/doc/man/man3/slurm_allocate_resources.3
index f7ec6db6f..886b8beaf 100644
--- a/doc/man/man3/slurm_allocate_resources.3
+++ b/doc/man/man3/slurm_allocate_resources.3
@@ -421,7 +421,7 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
-For details, see <https://computing.llnl.gov/linux/slurm/>.
+For details, see <http://www.schedmd.com/slurmdocs/>.
 .LP
 SLURM is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
diff --git a/doc/man/man3/slurm_checkpoint_error.3 b/doc/man/man3/slurm_checkpoint_error.3
index 6c8c01e1a..2fe18996c 100644
--- a/doc/man/man3/slurm_checkpoint_error.3
+++ b/doc/man/man3/slurm_checkpoint_error.3
@@ -263,7 +263,7 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
-For details, see <https://computing.llnl.gov/linux/slurm/>.
+For details, see <http://www.schedmd.com/slurmdocs/>.
 .LP
 SLURM is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
diff --git a/doc/man/man3/slurm_clear_trigger.3 b/doc/man/man3/slurm_clear_trigger.3
index 92435e359..9731064c1 100644
--- a/doc/man/man3/slurm_clear_trigger.3
+++ b/doc/man/man3/slurm_clear_trigger.3
@@ -94,7 +94,7 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
-For details, see <https://computing.llnl.gov/linux/slurm/>.
+For details, see <http://www.schedmd.com/slurmdocs/>.
 .LP
 SLURM is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
diff --git a/doc/man/man3/slurm_complete_job.3 b/doc/man/man3/slurm_complete_job.3
index 611cd1281..4f96ddb95 100644
--- a/doc/man/man3/slurm_complete_job.3
+++ b/doc/man/man3/slurm_complete_job.3
@@ -59,7 +59,7 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
-For details, see <https://computing.llnl.gov/linux/slurm/>.
+For details, see <http://www.schedmd.com/slurmdocs/>.
 .LP
 SLURM is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
diff --git a/doc/man/man3/slurm_free_ctl_conf.3 b/doc/man/man3/slurm_free_ctl_conf.3
index 6d1ec7acb..d1c198a59 100644
--- a/doc/man/man3/slurm_free_ctl_conf.3
+++ b/doc/man/man3/slurm_free_ctl_conf.3
@@ -82,6 +82,8 @@ SLURM controller.
 .LP
 #include <stdio.h>
 .br
+#include <stdlib.h>
+.br
 #include <slurm/slurm.h>
 .br
 #include <slurm/slurm_errno.h>
@@ -92,7 +94,7 @@ int main (int argc, char *argv[])
 .br
 	slurm_ctl_conf_t * conf_info_msg_ptr = NULL;
 .br
-	long version = slurm_api_version;
+	long version = slurm_api_version();
 .LP
 	/* We can use the SLURM version number to determine how
 .br
@@ -128,11 +130,11 @@ int main (int argc, char *argv[])
 .br
 	printf ("control_machine = %s\\n",
 .br
-	        slurm_ctl_conf_ptr\->control_machine);
+	        conf_info_msg_ptr\->control_machine);
 .br
-	printf ("server_timeout = %u\\n",
+	printf ("first_job_id = %u\\n",
 .br
-	        slurm_ctl_conf_ptr\->server_timeout);
+	        conf_info_msg_ptr\->first_job_id);
 .LP
 	slurm_free_ctl_conf (conf_info_msg_ptr);
 .br
@@ -151,7 +153,7 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
-For details, see <https://computing.llnl.gov/linux/slurm/>.
+For details, see <http://www.schedmd.com/slurmdocs/>.
 .LP
 SLURM is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
diff --git a/doc/man/man3/slurm_free_front_end_info_msg.3 b/doc/man/man3/slurm_free_front_end_info_msg.3
new file mode 100644
index 000000000..6c1132942
--- /dev/null
+++ b/doc/man/man3/slurm_free_front_end_info_msg.3
@@ -0,0 +1,214 @@
+.TH "Slurm API" "3" "December 2010" "Morris Jette" "Slurm front end node informational calls"
+
+.SH "NAME"
+slurm_free_front_end_info_msg, slurm_load_front_end,
+slurm_print_front_end_info_msg,
+slurm_print_front_end_table, slurm_sprint_front_end_table
+\- Slurm front end node information reporting functions
+
+.SH "SYNTAX"
+.LP
+#include <stdio.h>
+.br
+#include <slurm/slurm.h>
+.LP
+void \fBslurm_free_front_end_info_msg\fR (
+.br
+	front_end_info_msg_t *\fIfront_end_info_msg_ptr\fP
+.br
+);
+.LP
+int \fBslurm_load_front_end\fR (
+.br
+	time_t \fIupdate_time\fP,
+.br
+	front_end_info_msg_t **\fIfront_end_info_msg_pptr\fP
+.br
+);
+.LP
+void \fBslurm_print_front_end_info_msg\fR (
+.br
+	FILE *\fIout_file\fP,
+.br
+	front_end_info_msg_t *\fIfront_end_info_msg_ptr\fP,
+.br
+	int \fIone_liner\fP
+.br
+);
+.LP
+void \fBslurm_print_front_end_table\fR (
+.br
+	FILE *\fIout_file\fP,
+.br
+	front_end_info_t *\fIfront_end_ptr\fP,
+.br
+	int \fIone_liner\fP
+.br
+);
+.LP
+char *\fBslurm_sprint_front_end_table\fR (
+.br
+	front_end_info_t *\fIfront_end_ptr\fP,
+.br
+	int \fIone_liner\fP
+.br
+);
+
+.SH "ARGUMENTS"
+.LP
+.TP
+\fIfront_end_info_msg_ptr\fP
+Specifies the pointer to the structure created by \fBslurm_load_front_end\fR.
+.TP
+\fIfront_end_info_msg_pptr\fP
+Specifies the double pointer to the structure to be created and filled with
+the time of the last front end node update, a record count, and detailed
+information about each front_end node. Detailed front_end node information
+is written to fixed sized records and includes: name, state, etc.
+See slurm.h for full details on the data structure's contents.
+.TP
+\fIfront_end_ptr\fP
+Specifies a pointer to a single front end node record from the
+\fIfront_end_info_msg_ptr\fP data structure.
+.TP
+\fIone_liner\fP
+Print one record per line if non\-zero.
+.TP
+\fIout_file\fP
+Specifies the file to print data to.
+.TP
+\fIupdate_time\fP
+For all of the following informational calls, if update_time is equal to
+or greater than the last time changes where made to that information, new
+information is not returned.  Otherwise all the configuration. job, node,
+or partition records are returned.
+
+.SH "DESCRIPTION"
+.LP
+\fBslurm_free_front_end_info_msg\fR Release the storage generated by the
+\fBslurm_load_front_end\fR function.
+.LP
+\fBslurm_load_front_end\fR Returns a \fIfront_end_info_msg_t\fP that contains an
+update time, record count, and array of records for all front end nodes.
+.LP
+\fBslurm_print_front_end_info_msg\fR Prints the contents of the data structure
+describing all front end node records from the data loaded by the
+\fBslurm_load_front_end\fR function.
+.LP
+\fBslurm_print_front_end_table\fR Prints to a file the contents of the data
+structure describing a single front end node record loaded by the
+\fBslurm_load_front_end\fR function.
+.LP
+\fBslurm_sprint_front_end_table\fR Prints to memory the contents of the data
+structure describing a single front end node record loaded by the
+\fBslurm_load_front_end\fR function.
+
+.SH "RETURN VALUE"
+.LP
+On success, zero is returned. On error, \-1 is returned, and Slurm error code
+is set appropriately.
+
+.SH "ERRORS"
+.LP
+\fBSLURM_NO_CHANGE_IN_DATA\fR Data has not changed since \fBupdate_time\fR.
+.LP
+\fBSLURM_PROTOCOL_VERSION_ERROR\fR Protocol version has changed, re\-link
+your code.
+.LP
+\fBSLURM_PROTOCOL_SOCKET_IMPL_TIMEOUT\fR Timeout in communicating with
+SLURM controller.
+
+.SH "EXAMPLE"
+.LP
+#include <stdio.h>
+.br
+#include <slurm/slurm.h>
+.br
+#include <slurm/slurm_errno.h>
+.LP
+int main (int argc, char *argv[])
+.br
+{
+.br
+	int i;
+.br
+	front_end_info_msg_t *front_end_buffer_ptr = NULL;
+.br
+	front_end_info_t *front_end_ptr;
+.LP
+	/* get and dump some node information */
+.br
+	if ( slurm_load_front_end ((time_t) NULL,
+.br
+	                      &front_end_buffer_ptr) ) {
+.br
+		slurm_perror ("slurm_load_front_end error");
+.br
+		exit (1);
+.br
+	}
+.LP
+	/* The easy way to print... */
+.br
+	slurm_print_front_end_info_msg (stdout, front_end_buffer_ptr, 0);
+.LP
+	/* A harder way.. */
+.br
+	for (i = 0; i < front_end_buffer_ptr\->record_count; i++) {
+.br
+		front_end_ptr = &front_end_buffer_ptr\->front_end_array[i];
+.br
+		slurm_print_front_end_table(stdout, front_end_ptr, 0);
+.br
+	}
+.LP
+	/* The hardest way. */
+.br
+	for (i = 0; i < front_end_buffer_ptr\->record_count; i++) {
+.br
+		printf ("FrontEndName=%s StateCode=%u\\n",
+.br
+			front_end_buffer_ptr\->front_end_array[i].name,
+.br
+			front_end_buffer_ptr\->front_end_array[i].node_state);
+.br
+	}
+.br
+	slurm_free_front_end_info_msg (front_end_buffer_ptr);
+.br
+	exit (0);
+.br
+}
+
+.SH "NOTES"
+These functions are included in the libslurm library,
+which must be linked to your process for use
+(e.g. "cc \-lslurm myprog.c").
+.LP
+Some data structures contain index values to cross\-reference each other.
+If the \fIshow_flags\fP argument is not set to SHOW_ALL when getting this
+data, these index values will be invalid.
+
+.SH "COPYING"
+Copyright (C) 2010 Lawrence Livermore National Security.
+Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+CODE\-OCEC\-09\-009. All rights reserved.
+.LP
+This file is part of SLURM, a resource management program.
+For details, see <http://www.schedmd.com/slurmdocs/>.
+.LP
+SLURM is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2 of the License, or (at your option)
+any later version.
+.LP
+SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+details.
+.SH "SEE ALSO"
+.LP
+\fBscontrol\fR(1),
+\fBslurm_get_errno\fR(3), \fBslurm_load_node\fR(3),
+\fBslurm_perror\fR(3), \fBslurm_strerror\fR(3)
+
diff --git a/doc/man/man3/slurm_free_job_info_msg.3 b/doc/man/man3/slurm_free_job_info_msg.3
index b437569e8..d2c95e7d1 100644
--- a/doc/man/man3/slurm_free_job_info_msg.3
+++ b/doc/man/man3/slurm_free_job_info_msg.3
@@ -289,6 +289,8 @@ SLURM controller.
 .LP
 #include <stdio.h>
 .br
+#include <stdlib.h>
+.br
 #include <slurm/slurm.h>
 .br
 #include <slurm/slurm_errno.h>
@@ -301,7 +303,7 @@ int main (int argc, char *argv[])
 .br
 	int i;
 .br
-	job_info_msg_t	* job_info_msg = NULL;
+	job_info_msg_t	* job_buffer_ptr = NULL;
 .br
 	job_info_t * job_ptr;
 .br
@@ -321,7 +323,7 @@ int main (int argc, char *argv[])
 .LP
 	/* The easy way to print... */
 .br
-	slurm_print_job_info_msg (stdout, job_buffer_ptr);
+	slurm_print_job_info_msg (stdout, job_buffer_ptr, 0);
 .LP
 	/* A harder way.. */
 .br
@@ -329,7 +331,7 @@ int main (int argc, char *argv[])
 .br
 		job_ptr = &job_buffer_ptr\->job_array[i];
 .br
-		slurm_print_job_info(stdout, job_ptr);
+		slurm_print_job_info(stdout, job_ptr, 1);
 .br
 	}
 .LP
@@ -353,21 +355,21 @@ int main (int argc, char *argv[])
 .LP
 	if (job_buffer_ptr\->record_count >= 1) {
 .br
-		uint16_t rotate;
+		uint16_t nodes;
 .br
 		if (slurm_get_select_jobinfo(
 .br
 			job_buffer_ptr\->job_array[0].select_jobinfo,
 .br
-			SELECT_DATA_ROTATE,
+			SELECT_JOBDATA_NODE_CNT,
 .br
-			&rotate) == SLURM_SUCCESS)
+			&nodes) == SLURM_SUCCESS)
 .br
-			printf("JobId=%u Rotate=%u\\n",
+			printf("JobId=%u Nodes=%u\\n",
 .br
 				job_buffer_ptr\->job_array[0].job_id,
 .br
-				rotate);
+				nodes);
 .br
 	}
 .LP
@@ -380,7 +382,7 @@ int main (int argc, char *argv[])
 	else
 .br
 		printf ("Slurm job id = %u\\n", job_id);
-.br
+.LP
 	exit (0);
 .br
 }
@@ -404,7 +406,7 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
-For details, see <https://computing.llnl.gov/linux/slurm/>.
+For details, see <http://www.schedmd.com/slurmdocs/>.
 .LP
 SLURM is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
diff --git a/doc/man/man3/slurm_free_job_step_info_response_msg.3 b/doc/man/man3/slurm_free_job_step_info_response_msg.3
index 448d0b49b..a891eed86 100644
--- a/doc/man/man3/slurm_free_job_step_info_response_msg.3
+++ b/doc/man/man3/slurm_free_job_step_info_response_msg.3
@@ -123,6 +123,8 @@ SLURM controller.
 .LP
 #include <stdio.h>
 .br
+#include <stdlib.h>
+.br
 #include <slurm/slurm.h>
 .br
 #include <slurm/slurm_errno.h>
@@ -153,15 +155,15 @@ int main (int argc, char *argv[])
 .br
 	slurm_print_job_step_info_msg (stdout,
 .br
-	                               step_info_ptr);
+	                               step_info_ptr, 0);
 .LP
 	/* A harder way.. */
 .br
-	for (i = 0; i < step_info_ptr\->record_count; i++) {
+	for (i = 0; i < step_info_ptr\->job_step_count; i++) {
 .br
 		step_ptr = &step_info_ptr\->job_steps[i];
 .br
-		slurm_print_job_step_info(stdout, step_ptr);
+		slurm_print_job_step_info(stdout, step_ptr, 0);
 .br
 	}
 .LP
@@ -207,7 +209,7 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
-For details, see <https://computing.llnl.gov/linux/slurm/>.
+For details, see <http://www.schedmd.com/slurmdocs/>.
 .LP
 SLURM is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
diff --git a/doc/man/man3/slurm_free_node_info.3 b/doc/man/man3/slurm_free_node_info.3
index d1bd4bcb5..06057f24e 100644
--- a/doc/man/man3/slurm_free_node_info.3
+++ b/doc/man/man3/slurm_free_node_info.3
@@ -256,7 +256,7 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
-For details, see <https://computing.llnl.gov/linux/slurm/>.
+For details, see <http://www.schedmd.com/slurmdocs/>.
 .LP
 SLURM is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
diff --git a/doc/man/man3/slurm_free_partition_info.3 b/doc/man/man3/slurm_free_partition_info.3
index bdb53e401..cee2f32c2 100644
--- a/doc/man/man3/slurm_free_partition_info.3
+++ b/doc/man/man3/slurm_free_partition_info.3
@@ -194,7 +194,7 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
-For details, see <https://computing.llnl.gov/linux/slurm/>.
+For details, see <http://www.schedmd.com/slurmdocs/>.
 .LP
 SLURM is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
diff --git a/doc/man/man3/slurm_get_errno.3 b/doc/man/man3/slurm_get_errno.3
index ade9ebb15..8aab64865 100644
--- a/doc/man/man3/slurm_get_errno.3
+++ b/doc/man/man3/slurm_get_errno.3
@@ -77,7 +77,7 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
-For details, see <https://computing.llnl.gov/linux/slurm/>.
+For details, see <http://www.schedmd.com/slurmdocs/>.
 .LP
 SLURM is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
diff --git a/doc/man/man3/slurm_hostlist_create.3 b/doc/man/man3/slurm_hostlist_create.3
index 9d75f2cef..9625409c6 100644
--- a/doc/man/man3/slurm_hostlist_create.3
+++ b/doc/man/man3/slurm_hostlist_create.3
@@ -111,7 +111,7 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
-For details, see <https://computing.llnl.gov/linux/slurm/>.
+For details, see <http://www.schedmd.com/slurmdocs/>.
 .LP
 SLURM is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
diff --git a/doc/man/man3/slurm_init_update_front_end_msg.3 b/doc/man/man3/slurm_init_update_front_end_msg.3
new file mode 100644
index 000000000..8c2ed9814
--- /dev/null
+++ b/doc/man/man3/slurm_init_update_front_end_msg.3
@@ -0,0 +1 @@
+.so man3/slurm_reconfigure.3
diff --git a/doc/man/man3/slurm_job_step_create.3 b/doc/man/man3/slurm_job_step_create.3
index 90a2b4ce5..c67cfd33b 100644
--- a/doc/man/man3/slurm_job_step_create.3
+++ b/doc/man/man3/slurm_job_step_create.3
@@ -78,7 +78,7 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
-For details, see <https://computing.llnl.gov/linux/slurm/>.
+For details, see <http://www.schedmd.com/slurmdocs/>.
 .LP
 SLURM is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
diff --git a/doc/man/man3/slurm_kill_job.3 b/doc/man/man3/slurm_kill_job.3
index e599318f4..952e42172 100644
--- a/doc/man/man3/slurm_kill_job.3
+++ b/doc/man/man3/slurm_kill_job.3
@@ -128,7 +128,7 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
-For details, see <https://computing.llnl.gov/linux/slurm/>.
+For details, see <http://www.schedmd.com/slurmdocs/>.
 .LP
 SLURM is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
diff --git a/doc/man/man3/slurm_load_front_end.3 b/doc/man/man3/slurm_load_front_end.3
new file mode 100644
index 000000000..c93b1b844
--- /dev/null
+++ b/doc/man/man3/slurm_load_front_end.3
@@ -0,0 +1 @@
+.so man3/slurm_free_front_end_info_msg.3
diff --git a/doc/man/man3/slurm_load_reservations.3 b/doc/man/man3/slurm_load_reservations.3
index 0c103c049..da6835af3 100644
--- a/doc/man/man3/slurm_load_reservations.3
+++ b/doc/man/man3/slurm_load_reservations.3
@@ -194,7 +194,7 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
-For details, see <https://computing.llnl.gov/linux/slurm/>.
+For details, see <http://www.schedmd.com/slurmdocs/>.
 .LP
 SLURM is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
diff --git a/doc/man/man3/slurm_print_front_end_info_msg.3 b/doc/man/man3/slurm_print_front_end_info_msg.3
new file mode 100644
index 000000000..c93b1b844
--- /dev/null
+++ b/doc/man/man3/slurm_print_front_end_info_msg.3
@@ -0,0 +1 @@
+.so man3/slurm_free_front_end_info_msg.3
diff --git a/doc/man/man3/slurm_print_front_end_table.3 b/doc/man/man3/slurm_print_front_end_table.3
new file mode 100644
index 000000000..c93b1b844
--- /dev/null
+++ b/doc/man/man3/slurm_print_front_end_table.3
@@ -0,0 +1 @@
+.so man3/slurm_free_front_end_info_msg.3
diff --git a/doc/man/man3/slurm_reconfigure.3 b/doc/man/man3/slurm_reconfigure.3
index 7779f35d0..0b1767401 100644
--- a/doc/man/man3/slurm_reconfigure.3
+++ b/doc/man/man3/slurm_reconfigure.3
@@ -19,7 +19,7 @@ int \fBslurm_create_partition\fR (
 .LP
 int \fBslurm_create_reservation\fR (
 .br
-	reserve_request_msg_t *\fIupdate_resv_msg_ptr\fP
+	resv_desc_msg_t *\fIupdate_resv_msg_ptr\fP
 .br
 );
 .LP
@@ -35,6 +35,12 @@ int \fBslurm_delete_reservation\fR (
 .br
 );
 .LP
+void \fBslurm_init_update_front_end_msg\fR (
+.br
+	update_front_end_msg_t *\fIupdate_front_end_msg_ptr\fP
+.br
+);
+.LP
 void \fBslurm_init_part_desc_msg\fR (
 .br
 	update_part_msg_t *\fIupdate_part_msg_ptr\fP
@@ -43,7 +49,13 @@ void \fBslurm_init_part_desc_msg\fR (
 .LP
 void \fBslurm_init_resv_desc_msg\fR (
 .br
-	reserve_request_msg_t *\fIupdate_resv_msg_ptr\fP
+	resv_desc_msg_t *\fIupdate_resv_msg_ptr\fP
+.br
+);
+.LP
+void \fBslurm_init_update_node_msg\fR(
+.br
+	update_node_msg_t *\fIupdate_node_msg_ptr\fP
 .br
 );
 .LP
@@ -57,9 +69,9 @@ int \fBslurm_shutdown\fR (
 .LP
 int \fBslurm_takeover\fR ( );
 .LP
-void \fBslurm_init_update_node_msg\fR(
+int \fBslurm_update_front_end\fR (
 .br
-	update_node_msg_t *\fIupdate_node_msg_ptr\fP
+	update_front_end_msg_t *\fIupdate_front_end_msg_ptr\fP
 .br
 );
 .LP
@@ -77,7 +89,7 @@ int \fBslurm_update_partition\fR (
 .LP
 int \fBslurm_update_reservation\fR (
 .br
-	reserve_request_msg_t *\fIupdate_resv_msg_ptr\fP
+	resv_desc_msg_t *\fIupdate_resv_msg_ptr\fP
 .br
 );
 .SH "ARGUMENTS"
@@ -98,6 +110,10 @@ See slurm.h for full details on the data structure's contents.
 Specifies the pointer to a reservation delete request specification.
 See slurm.h for full details on the data structure's contents.
 .TP
+\fIupdate_front_end_msg_ptr\fP
+Specifies the pointer to a front end node update request specification.
+See slurm.h for full details on the data structure's contents.
+.TP
 \fIupdate_node_msg_ptr\fP
 Specifies the pointer to a node update request specification. See slurm.h
 for full details on the data structure's contents.
@@ -109,6 +125,7 @@ See slurm.h for full details on the data structure's contents.
 \fIupdate_resv_msg_ptr\fP
 Specifies the pointer to a reservation create or update request specification.
 See slurm.h for full details on the data structure's contents.
+
 .SH "DESCRIPTION"
 .LP
 \fBslurm_create_partition\fR Request that a new partition be created.
@@ -135,6 +152,12 @@ purged.  This function may only be successfully executed by user root.
 \fBslurm_delete_reservation\fR Request that the specified reservation be
 deleted. This function may only be successfully executed by user root.
 .LP
+\fBslurm_init_update_front_end_msg\fR Initialize the contents of an update
+front end node descriptor with default values. Note:
+\fBslurm_init_update_front_end_msg\fR is not equivalent to setting the data
+structure values to zero. Execute this function before executing
+\fBslurm_update_front_end\fR.
+.LP
 \fBslurm_init_part_desc_msg\fR Initialize the contents of a partition
 descriptor with default values. Note: \fBslurm_init_part_desc_msg\fR is
 not equivalent to setting the data structure values to zero. Execute
@@ -147,6 +170,11 @@ not equivalent to setting the data structure values to zero. Execute this
 function before executing \fBslurm_create_reservation\fR or
 \fBslurm_update_reservation\fR.
 .LP
+\fBslurm_init_update_node_msg\fR Initialize the contents of an update node
+descriptor with default values. Note: \fBslurm_init_update_node_msg\fR is
+not equivalent to setting the data structure values to zero. Execute
+this function before executing \fBslurm_update_node\fR.
+.LP
 \fBslurm_reconfigure\fR Request that the Slurm controller re\-read its
 configuration file. The new configuration parameters take effect
 immediately. This function may only be successfully executed by user root.
@@ -158,10 +186,11 @@ function may only be successfully executed by user root.
 immediately and the backup controller take over.
 This function may only be successfully executed by user root.
 .LP
-\fBslurm_init_update_node_msg\fR Initialize the contents of an update mpde
-descriptor with default values. Note: \fBslurm_init_update_node_msg\fR is
-not equivalent to setting the data structure values to zero. Execute
-this function before executing \fBslurm_update_node\fR.
+\fBslurm_update_front_end\fR Request that the state of one or more front end
+nodes be updated.
+This function may only be successfully executed by user root.
+If used by some autonomous program, the state value most likely to be used is
+\fBNODE_STATE_DRAIN\fR.
 .LP
 \fBslurm_update_node\fR Request that the state of one or more nodes be updated.
 Note that the state of a node (e.g. DRAINING, IDLE, etc.) may be changed, but
@@ -194,6 +223,7 @@ reservation be updated.  Initialize the data structure using the
 the parameters to be changed. Note:  \fBslurm_init_resv_desc_msg\fR
 is not equivalent to setting the data structure values to zero. This
 function may only be successfully executed by user root.
+
 .SH "RETURN VALUE"
 .LP
 On success, zero is returned. On error, \-1 is returned, and the Slurm error
@@ -252,6 +282,8 @@ use an expired reservation.
 .LP
 #include <stdio.h>
 .br
+#include <stdlib.h>
+.br
 #include <slurm/slurm.h>
 .br
 #include <slurm/slurm_errno.h>
@@ -262,11 +294,11 @@ int main (int argc, char *argv[])
 .br
 	update_node_msg_t       update_node_msg;
 .br
-	partition_desc_msg_t    update_part_msg;
+	update_part_msg_t       update_part_msg;
 .br
 	delete_part_msg_t       delete_part_msg;
 .br
-	reserve_request_msg_t   resv_msg;
+	resv_desc_msg_t         resv_msg;
 .br
 	char                   *resv_name = NULL;
 .LP
@@ -316,7 +348,7 @@ int main (int argc, char *argv[])
 .br
 	update_node_msg.node_names = "lx[10\-12]";
 .br
-	update_node_msg.node_state = NODE_STATE_DRAINING ;
+	update_node_msg.node_state = NODE_STATE_DRAIN ;
 .br
 	if (slurm_update_node (&update_node_msg)) {
 .br
@@ -359,11 +391,12 @@ which must be linked to your process for use
 
 .SH "COPYING"
 Copyright (C) 2002\-2007 The Regents of the University of California.
+Copyright (C) 2008\-2010 Lawrence Livermore National Security.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
-For details, see <https://computing.llnl.gov/linux/slurm/>.
+For details, see <http://www.schedmd.com/slurmdocs/>.
 .LP
 SLURM is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
diff --git a/doc/man/man3/slurm_resume.3 b/doc/man/man3/slurm_resume.3
index 676eddc7f..35544b63b 100644
--- a/doc/man/man3/slurm_resume.3
+++ b/doc/man/man3/slurm_resume.3
@@ -78,7 +78,7 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
-For details, see <https://computing.llnl.gov/linux/slurm/>.
+For details, see <http://www.schedmd.com/slurmdocs/>.
 .LP
 SLURM is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
diff --git a/doc/man/man3/slurm_slurmd_status.3 b/doc/man/man3/slurm_slurmd_status.3
index 009161099..285a243b2 100644
--- a/doc/man/man3/slurm_slurmd_status.3
+++ b/doc/man/man3/slurm_slurmd_status.3
@@ -55,7 +55,7 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
-For details, see <https://computing.llnl.gov/linux/slurm/>.
+For details, see <http://www.schedmd.com/slurmdocs/>.
 .LP
 SLURM is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
diff --git a/doc/man/man3/slurm_sprint_front_end_table.3 b/doc/man/man3/slurm_sprint_front_end_table.3
new file mode 100644
index 000000000..c93b1b844
--- /dev/null
+++ b/doc/man/man3/slurm_sprint_front_end_table.3
@@ -0,0 +1 @@
+.so man3/slurm_free_front_end_info_msg.3
diff --git a/doc/man/man3/slurm_step_ctx_create.3 b/doc/man/man3/slurm_step_ctx_create.3
index dd5063963..0b6f88e29 100644
--- a/doc/man/man3/slurm_step_ctx_create.3
+++ b/doc/man/man3/slurm_step_ctx_create.3
@@ -239,7 +239,7 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
-For details, see <https://computing.llnl.gov/linux/slurm/>.
+For details, see <http://www.schedmd.com/slurmdocs/>.
 .LP
 SLURM is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
diff --git a/doc/man/man3/slurm_step_launch.3 b/doc/man/man3/slurm_step_launch.3
index 1e7bbff70..54bb61f04 100644
--- a/doc/man/man3/slurm_step_launch.3
+++ b/doc/man/man3/slurm_step_launch.3
@@ -233,7 +233,7 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
-For details, see <https://computing.llnl.gov/linux/slurm/>.
+For details, see <http://www.schedmd.com/slurmdocs/>.
 .LP
 SLURM is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
diff --git a/doc/man/man3/slurm_update_front_end.3 b/doc/man/man3/slurm_update_front_end.3
new file mode 100644
index 000000000..8c2ed9814
--- /dev/null
+++ b/doc/man/man3/slurm_update_front_end.3
@@ -0,0 +1 @@
+.so man3/slurm_reconfigure.3
diff --git a/doc/man/man3/slurm_update_job.3 b/doc/man/man3/slurm_update_job.3
index c49f6fea5..7c5ae7c6e 100644
--- a/doc/man/man3/slurm_update_job.3
+++ b/doc/man/man3/slurm_update_job.3
@@ -137,7 +137,7 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
-For details, see <https://computing.llnl.gov/linux/slurm/>.
+For details, see <http://www.schedmd.com/slurmdocs/>.
 .LP
 SLURM is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
diff --git a/doc/man/man5/bluegene.conf.5 b/doc/man/man5/bluegene.conf.5
index 9853319ee..5b36c871f 100644
--- a/doc/man/man5/bluegene.conf.5
+++ b/doc/man/man5/bluegene.conf.5
@@ -1,12 +1,15 @@
-.TH "bluegene.conf" "5" "April 2008" "bluegene.conf 2.0" "Slurm configuration file"
+.TH "bluegene.conf" "5" "August 2011" "bluegene.conf 2.3" "SLURM configuration file"
+
 .SH "NAME"
-bluegene.conf \- Slurm configuration file for BlueGene systems
+bluegene.conf \- SLURM configuration file for BlueGene systems
+
 .SH "DESCRIPTION"
-\fB/etc/bluegene.conf\fP is an ASCII file which describes BlueGene specific
+\fBbluegene.conf\fP is an ASCII file which describes IBM BlueGene specific
 SLURM configuration information. This includes specifications for bgblock
 layout, configuration, logging, etc.
 The file location can be modified at system build time using the
-DEFAULT_SLURM_CONF parameter. The file will always be located in the
+DEFAULT_SLURM_CONF parameter or at execution time by setting the SLURM_CONF
+environment variable. The file will always be located in the
 same directory as the \fBslurm.conf\fP file.
 .LP
 Parameter names are case insensitive.
@@ -17,33 +20,36 @@ Changes to the configuration file take only effect upon restart of
 the slurmctld daemon.  "scontrol reconfigure" does nothing with this file.
 Changes will only take place after a restart of the controller.
 .LP
+There are some differences between BlueGene/L, BlueGene/P and BlueGene/Q
+systems with respect to the contents of the bluegene.conf file.
 
-There are some differences between Bluegene/L and Bluegene/P in respects to the contents of the bluegene.conf file.
-
-.SH "The Bluegene/L specific options are:"
+.SH "The BlueGene/L specific options are:"
 .TP
 \fBAltBlrtsImage\fR
-Alternative BlrtsImage.  This is an optional field only used for
-mulitple images on a system and should be followed by a Groups= with
-the user groups allowed to use this image (i.e. Groups=da,jette) if
-Groups= is not stated then this image will be able to be used by all
-groups. You can put as many alternative images as you want in the conf file.
+Alternative BlrtsImage.  This is an optional field only used for multiple
+images on a system and should be followed by a Groups option indicating
+the user groups allowed to use this image (i.e. Groups=da,jette). If
+Groups is not specified then this image will be usable by all
+groups. You can put as many alternative images as you want in the
+bluegene.conf file.
 
 .TP
 \fBAltLinuxImage\fR
-Alternative LinuxImage.  This is an optional field only used for
-mulitple images on a system and should be followed by a Groups= with
-the user groups allowed to use this image (i.e. Groups=da,jette) if
-Groups= is not stated then this image will be able to be used by all
-groups. You can put as many alternative images as you want in the conf file.
+Alternative LinuxImage.  This is an optional field only used for multiple
+images on a system and should be followed by a Groups option indicating
+the user groups allowed to use this image (i.e. Groups=da,jette). If
+Groups is not specified then this image will be usable by all
+groups. You can put as many alternative images as you want in the
+bluegene.conf file.
 
 .TP
 \fBAltRamDiskImage\fR
-Alternative RamDiskImage.  This is an optional field only used for
-mulitple images on a system and should be followed by a Groups= with
-the user groups allowed to use this image (i.e. Groups=da,jette) if
-Groups= is not stated then this image will be able to be used by all
-groups. You can put as many alternative images as you want in the conf file.
+Alternative RamDiskImage.  This is an optional field only used for multiple
+images on a system and should be followed by a Groups option indicating
+the user groups allowed to use this image (i.e. Groups=da,jette). If
+Groups is not specified then this image will be usable by all
+groups. You can put as many alternative images as you want in the
+bluegene.conf file.
 
 .TP
 \fBBlrtsImage\fR
@@ -60,21 +66,21 @@ There is no default value and this must be specified.
 RamDiskImage used for creation of all bgblocks.
 There is no default value and this must be specified.
 
-.SH "The Bluegene/P specific options are:"
+.SH "The BlueGene/P specific options are:"
 .TP
 \fBAltCnloadImage\fR
-Alternative CnloadImage.  This is an optional field only used for
-mulitple images on a system and should be followed by a Groups= with
-the user groups allowed to use this image (i.e. Groups=da,jette) if
-Groups= is not stated then this image will be able to be used by all
+Alternative CnloadImage.  This is an optional field only used for multiple
+images on a system and should be followed by a Groups option indicating
+the user groups allowed to use this image (i.e. Groups=da,jette). If
+Groups is not specified then this image will be usable by all
 groups. You can put as many alternative images as you want in the conf file.
 
 .TP
 \fBAltIoloadImage\fR
-Alternative IoloadImage.  This is an optional field only used for
-mulitple images on a system and should be followed by a Groups= with
-the user groups allowed to use this image (i.e. Groups=da,jette) if
-Groups= is not stated then this image will be able to be used by all
+Alternative IoloadImage.  This is an optional field only used for multiple
+images on a system and should be followed by a Groups option indicating
+the user groups allowed to use this image (i.e. Groups=da,jette). If
+Groups is not specified then this image will be usable by all
 groups. You can put as many alternative images as you want in the conf file.
 
 .TP
@@ -87,20 +93,19 @@ There is no default value and this must be specified.
 IoloadImage used for creation of all bgblocks.
 There is no default value and this must be specified.
 
-.SH "All options below are common on all Bluegene systems:"
+.SH "All options below are common on all BlueGene systems:"
 .TP
 \fBAltMloaderImage\fR
-Alternative MloaderImage.  This is an optional field only used for
-mulitple images on a system and should be followed by a Groups= with
-the user groups allowed to use this image (i.e. Groups=da,jette) if
-Groups= is not stated then this image will be able to be used by all
+Alternative MloaderImage.  This is an optional field only used for multiple
+images on a system and should be followed by a Groups option indicating
+the user groups allowed to use this image (i.e. Groups=da,jette). If
+Groups is not specified then this image will be usable by all
 groups. You can put as many alternative images as you want in the conf file.
 
 .TP
 \fBBasePartitionNodeCount\fR
-The number of c\-nodes per base partition.
-There is no default value and this must be specified. (For bgl systems this
-is usually 512)
+The number of c\-nodes (compute nodes) per base partition.
+There is no default value and this must be specified (usually 512).
 
 .TP
 \fBBridgeAPILogFile\fR
@@ -116,33 +121,39 @@ The default value is 0.
 .TP
 \fB0\fR: Log only error and warning messages
 .TP
-\fB1\fR: Log level 0 and information messasges
+\fB1\fR: Log level 0 plus information messages
 .TP
-\fB2\fR: Log level 1 and basic debug messages
+\fB2\fR: Log level 1 plus basic debug messages
 .TP
-\fB3\fR: Log level 2 and more debug message
+\fB3\fR: Log level 2 plus more debug messages
 .TP
 \fB4\fR: Log all messages
 .RE
 
 .TP
 \fBDenyPassthrough\fR
-Specify which dimsions you do not want to allow pass throughs.  Valid options are X, Y, Z or all.
-example: If you don't want to allow passthroughs in the X and Y diminsions you would specify DenyPassthrough=X,Y
+Specify which dimensions you do not want to allow pass\-throughs.
+Valid options are A, X, Y, Z or all ("A" applies only to BlueGene/Q systems).
+For example, to prevent pass\-throughs in the X and Y dimensions you would
+specify "DenyPassthrough=X,Y".
+By default, pass\-throughs are enabled in every dimension.
 
 .TP
 \fBLayoutMode\fR
 Describes how SLURM should create bgblocks.
 .RS
+.TP 10
+\fBSTATIC\fR:
+Create and use the defined non\-overlapping bgblocks.
 .TP
-\fBSTATIC\fR: Create and use the defined non\-overlapping bgblocks.
-.TP
-\fBOVERLAP\fR: Create and use the defined bgblocks, which may overlap.
+\fBOVERLAP\fR:
+Create and use the defined bgblocks, which may overlap.
 It is highly recommended that none of the bgblocks have any passthroughs
 in the X\-dimension.
 \fBUse this mode with extreme caution.\fR
 .TP
-\fBDYNAMIC\fR: Create and use bglblocks as needed for each job.
+\fBDYNAMIC\fR:
+Create and use bgblocks as needed for each job.
 Bgblocks will not be defined in the bluegene.conf file.
 Dynamic partitioning may introduce fragmentation of resources
 and starvation of larger jobs.
@@ -157,25 +168,26 @@ There is no default value and this must be specified.
 .TP
 \fBNodeCardNodeCount\fR
 Number of c\-nodes per node card.
-There is no default value and this must be specified. (For bgl systems this
-is usually 32)
+There is no default value and this must be specified. For BlueGene/L systems
+this is usually 32.
 
 .TP
 \fBNumPsets\fR
 The Numpsets used for creation of all bgblocks.  This value really means the
-number of IOnodes on a base partition.  This number must be the smallest
+number of IO nodes on a base partition.  This number must be the smallest
 number if you have a heterogeneous system.
 There is no default value and this must be specified.  The typical settings
-for bgl systems goes as follows... For IO rich systems 64 is the value that
+for BlueGene/L systems are as follows: For IO rich systems, 64 is the value that
 should be used to create small blocks.  For systems that are not IO rich, or
-you do not wish to create small blocks, 8 is usually the number to use.
-For bgp IO rich systems 32 is the value that should be used to create small
-blocks since you can only have 2 ionodes per nodecard instead of 4 like on bgl.
+for which small blocks are not desirable, 8 is usually the number to use.
+For BlueGene/P IO rich systems, 32 is the value that should be used to create
+small blocks since there are only 2 IO nodes per nodecard instead of 4 as on
+BlueGene/L.
 
 .LP
 Each bgblock is defined by the base partitions used to construct it.
-Ordering is very important for laying out switch wires.  Please create
-blocks with smap, and once done don't change the order of blocks created.
+Ordering is very important for laying out switch wires.  Please use the smap
+tool to define blocks and do not change the order of blocks created.
 A bgblock is implicitly created containing all resources on the system.
 Bgblocks must not overlap in static mode (except for implicitly
 created bgblock). This will be the case when smap is used to create
@@ -186,23 +198,27 @@ will be based upon the NodeName defined in slurm.conf
 
 .TP
 \fBBPs\fR
-Define the XYZ coordinates of the bgblock end points.
+Define the coordinates of the bgblock end points.
+For BlueGene/L and BlueGene/P systems there will be three coordinates (X, Y, and Z).
+For BlueGene/Q systems there will be four coordinates (A, X, Y, and Z).
 
 .TP
 \fBType\fR
 Define the network connection type for the bgblock.
 The default value is TORUS.
 .RS
+.TP 8
+\fBMESH\fR:
+Communication occurs over a mesh.
 .TP
-\fBMESH\fR: Communication occur over a mesh.
-.TP
-\fBSMALL\fR: The base partition is divided into more than one bgblock.
+\fBSMALL\fR:
+The base partition is divided into more than one bgblock.
 The administrator should define the number of single node cards and
 quarter base partition blocks using the options \fB32CNBlocks\fR and
-\fB128CNBlocks\fR respectively for a Bluegene L system.  \fB16CNBlocks\fR,
+\fB128CNBlocks\fR respectively for a BlueGene/L system.  \fB16CNBlocks\fR,
 \fB64CNBlocks\fR, and \fB256CNBlocks\fR are also available for
-Bluegene P systems.  Keep in mind you
-must have enough ionodes to make all these configurations possible.
+BlueGene/P systems.  Keep in mind you
+must have enough IO nodes to make all these configurations possible.
 
 The total number of c\-nodes in defined blocks must not exceed
 \fBBasePartitionNodeCnt\fR.
@@ -210,8 +226,8 @@ If not specified, the base partition will be divided into four
 blocks.
 See example below.
 .TP
-\fBTORUS\fR: Communications occur over a torus (end\-points of network
-directly connect.
+\fBTORUS\fR:
+Communications occur over a torus (end\-points of network directly connect).
 .RE
 
 .SH "EXAMPLE"
@@ -247,7 +263,7 @@ LayoutMode=STATIC
 .br
 ##################################################################
 .br
-# LEAVE AS COMMENT, Full\-system bglblock, implicitly created
+# LEAVE AS COMMENT, Full\-system bgblock, implicitly created
 .br
 # BPs=[000x333] Type=TORUS        # 4x4x4 = 64 midplanes
 .br
@@ -268,12 +284,12 @@ BPs=[332] Type=TORUS          # 1x1x1 =  1
 BPs=[333] Type=SMALL 32CNBlocks=4 128CNBlocks=3 # 1/16 * 4 + 1/4 * 3
 
 .SH "COPYING"
-Copyright (C) 2006 The Regents of the University of California.
+Copyright (C) 2006\-2010 The Regents of the University of California.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
-For details, see <https://computing.llnl.gov/linux/slurm/>.
+For details, see <http://www.schedmd.com/slurmdocs/>.
 .LP
 SLURM is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
@@ -288,4 +304,4 @@ details.
 /etc/bluegene.conf
 .SH "SEE ALSO"
 .LP
-\fBslurm.conf\fR(5)
+\fBsmap\fR(1), \fBslurm.conf\fR(5)
diff --git a/doc/man/man5/cgroup.conf.5 b/doc/man/man5/cgroup.conf.5
index 998333082..eaf47b281 100644
--- a/doc/man/man5/cgroup.conf.5
+++ b/doc/man/man5/cgroup.conf.5
@@ -1,13 +1,17 @@
-.TH "cgroup.conf" "5" "February 2010" "cgroup.conf 2.2" "Slurm configuration file"
+.TH "cgroup.conf" "5" "December 2010" "cgroup.conf 2.2" \
+"Slurm cgroup configuration file"
 
 .SH "NAME"
 cgroup.conf \- Slurm configuration file for the cgroup support
 
 .SH "DESCRIPTION"
-\fB/etc/cgroup.conf\fP is an ASCII file which defines parameters used by 
-Slurm's proctrack/cgroup plugin in support of Linux cgroups being used as a
-job container. The file will always be located in the same directory as the 
-\fBslurm.conf\fP file.
+
+\fBcgroup.conf\fP is an ASCII file which defines parameters used by 
+Slurm's Linux cgroup related plugins.
+The file location can be modified at system build time using the
+DEFAULT_SLURM_CONF parameter or at execution time by setting the SLURM_CONF
+environment variable. The file will always be located in the
+same directory as the \fBslurm.conf\fP file.
 .LP
 Parameter names are case insensitive.
 Any text following a "#" in the configuration file is treated
@@ -17,78 +21,164 @@ Changes to the configuration file take effect upon restart of
 SLURM daemons, daemon receipt of the SIGHUP signal, or execution
 of the command "scontrol reconfigure" unless otherwise noted.
 .LP
-Slurm cgroup proctrack plugin creates a hierarchical set of
+Two cgroup plugins are currently available in SLURM. The first
+one is a proctrack plugin, the second one a task plugin.
+
+.LP
+The following cgroup.conf parameters are defined to control the general behavior
+of Slurm cgroup plugins.
+
+.TP
+\fBCgroupMountpoint\fR=\fIPATH\fR
+Specify the \fIPATH\fR under which cgroups should be mounted. This
+should be a writable directory which will contain cgroups mounted
+one per subsystem. The default \fIPATH\fR is /cgroup.
+
+.TP
+\fBCgroupAutomount\fR=<yes|no>
+Slurm cgroup plugins require a valid and functional cgroup subsystem to be mounted
+under /cgroup/<subsystem_name>.
+When launched, plugins check their subsystem availability. If not available, 
+the plugin launch fails unless CgroupAutomount is set to yes. In that case, the 
+plugin will first try to mount the required subsystems.
+
+.TP
+\fBCgroupReleaseAgentDir\fR=<path_to_release_agent_directory>
+Used to tune the cgroup system behavior. This parameter identifies the location 
+of the directory containing Slurm cgroup release_agent files. A release_agent file
+is required for each mounted subsystem. The release_agent file name must have the
+following format: release_<subsystem_name>.  For instance, the release_agent file
+for the cpuset subsystem must be named release_cpuset.  See also CLEANUP OF
+CGROUPS below.
+
+.SH "PROCTRACK/CGROUP PLUGIN"
+
+Slurm \fBproctrack/cgroup\fP plugin is used to track processes using the
+freezer control group subsystem. It creates a hierarchical set of
 directories for each step, putting the step tasks into the leaf.
+.LP
 This directory structure is like the following:
 .br 
-/dev/cgroup/slurm/uid_%uid/job_%jobid/step_%stepid
+/cgroup/freezer/uid_%uid/job_%jobid/step_%stepid
 .LP
-Slurm cgroup proctrack plugin can constrain cores, RAM and swap space for 
-jobs and set a variety of default job and job step parameters for cgroups.
+Slurm cgroup proctrack plugin is enabled with the following parameter
+in slurm.conf:
+.br 
+ProctrackType=proctrack/cgroup
+
 .LP
-The configuration parameters available include:
+No particular cgroup.conf parameter is defined to control the behavior
+of this particular plugin.
 
 
-.TP
-\fBAllowedRAMSpace\fR=<number>
-Constrain the job cgroup RAM to this percentage of the allocated memory.
-The default value is 100.
-If the limit is exceeded, the job steps will be killed and a warning message
-will be written to standard error.
-Also see \fBConstrainRAMSpace\fR.
+.SH "TASK/CGROUP PLUGIN"
 
-.TP
-\fBAllowedSwapSpace\fR=<number>
-Constrain the job cgroup swap space to this percentage of the allocated memory.
-The default value is 0.
-If the limit is exceeded, the job steps will be killed and a warning message
-will be written to standard error.
-Also see \fBConstrainSwapSpace\fR.
+.LP
+Slurm \fBtask/cgroup\fP plugin is used to enforce allocated resources 
+constraints, thus preventing tasks from using unallocated resources. It
+currently only uses the cpuset subsystem but could use the memory and devices
+subsystems in the near future too.
 
-.TP
-\fBCgroupAutomount\fR=<yes|no>
-Slurm cgroup plugins require a valid and functional cgroup system mounted on 
-/dev/cgroup. When launched, plugins check cgroup availability. If cgroup is 
-not available, the plugin launch fails unless CgroupAutomount is set to yes. 
-In that case, the plugin will first try to mount the cgroup system.
+.LP
+It creates a hierarchical set of directories for each task and subsystem.
+The directory structure is like the following:
+.br 
+/cgroup/%subsys/uid_%uid/job_%jobid/step_%stepid/task_%taskid
 
-.TP
-\fBCgroupMountOptions\fR=<options>
-Used to tune the cgroup system behavior.
+.LP
+Slurm cgroup task plugin is enabled with the following parameter
+in slurm.conf:
+.br
+TaskPlugin=task/cgroup
 
-.TP
-\fBCgroupReleaseAgent\fR=<path_to_program>
-Used to tune the cgroup system behavior.
+.LP
+The following cgroup.conf parameters are defined to control the behavior
+of this particular plugin:
 
 .TP
 \fBConstrainCores\fR=<yes|no>
 If configured to "yes" then constrain allowed cores to the subset of 
-allocated resources.
+allocated resources. It uses the cpuset subsystem.
+The default value is "no".
+.TP
+\fBTaskAffinity\fR=<yes|no>
+If configured to "yes" then set a default task affinity to bind each step 
+task to a subset of the allocated cores using \fBsched_setaffinity\fP.
 The default value is "no".
 
+.LP
+The following cgroup.conf parameters could be defined to control the behavior
+of this particular plugin in a next version where memory and devices support
+would be added :
+
+.TP
+\fBAllowedRAMSpace\fR=<number>
+Constrain the job cgroup RAM to this percentage of the allocated memory.
+The default value is 100. (If SLURM is not allocating memory to jobs, this limit is not enforced.)
+The percentage supplied may be expressed as a floating point
+number, e.g. 98.5. If the \fBAllowedRAMSpace\fR limit is exceeded, the
+job steps will be killed and a warning message will be written to standard
+error.  Also see \fBConstrainRAMSpace\fR.
+
+.TP
+\fBAllowedSwapSpace\fR=<number>
+Constrain the job cgroup swap space to this percentage of the allocated
+memory.  The default value is 0, which means that RAM+Swap will be limited
+to \fBAllowedRAMSpace\fR. The supplied percentage may be expressed as a
+floating point number, e.g. 50.5.  If the limit is exceeded, the job steps
+will be killed and a warning message will be written to standard error.
+Also see \fBConstrainSwapSpace\fR.
+
 .TP
 \fBConstrainRAMSpace\fR=<yes|no>
-If configured to "yes" then constraing the job's RAM usage.
+If configured to "yes" then constrain the job's RAM usage.
 The default value is "no".
 Also see \fBAllowedRAMSpace\fR.
 
 .TP
 \fBConstrainSwapSpace\fR=<yes|no>
-If configured to "yes" then constraing the job's swap space usage.
+If configured to "yes" then constrain the job's swap space usage.
 The default value is "no".
 Also see \fBAllowedSwapSpace\fR.
 
 .TP
-\fBJobCgroupParams\fR=<options>
-Used to tune job cgroup. The format of the parameter is the following:
-"a=b c=d e=f" where a,c,d corresponds to files under the cgroup 
-directory and b,d,f the values to write in these files.
+\fBMaxRAMPercent\fR=\fIPERCENT\fR
+Set an upper bound in percent of total RAM on the RAM constraint for a job.
+This will be the memory constraint applied to jobs that are not explicitly
+allocated memory by SLURM. The \fIPERCENT\fR may be an arbitrary floating
+point number. The default value is 100.
 
 .TP
-\fBJobStepCgroupParams\fR=<options>
-Used to tune job step cgroup. The format of the parameter is the following:
-"a=b c=d e=f" where a,c,d corresponds to files under the cgroup 
-directory and b,d,f the values to write in these files.
+\fBMaxSwapPercent\fR=\fIPERCENT\fR
+Set an upper bound (in percent of total RAM) on the amount of RAM+Swap
+that may be used for a job. This will be the swap limit applied to jobs
+on systems where memory is not being explicitly allocated to jobs. The
+\fIPERCENT\fR may be an arbitrary floating point number between 0 and 100.
+The default value is 100.
+
+.TP
+\fBMinRAMSpace\fR=<number>
+Set a lower bound (in MB) on the memory limits defined by
+\fBAllowedRAMSpace\fR and \fBAllowedSwapSpace\fR. This prevents
+accidentally creating a memory cgroup with such a low limit that slurmstepd
+is immediately killed due to lack of RAM. The default limit is 30M.
+
+.TP
+\fBConstrainDevices\fR=<yes|no>
+If configured to "yes" then constrain the job's allowed devices based on GRES
+allocated resources. It uses the devices subsystem for that.
+The default value is "no".
+
+.TP
+\fBAllowedDevicesFile\fR=<path_to_allowed_devices_file>
+If the ConstrainDevices field is set to "yes" then this file has to be used to declare 
+the devices that need to be allowed by default for all the jobs. The current implementation 
+of the cgroup devices subsystem works as a whitelist of entries, which means that in order
+to restrict a job's access to particular devices we need to first allow access to all
+the devices supported by default and then deny access to those that the job does not have
+permission to use.
+the file accepts one device per line and it permits lines like /dev/sda* or /dev/cpu/*/*. 
+See also an example of this file in etc/allowed_devices_file.conf.example.
 
 
 .SH "EXAMPLE"
@@ -102,41 +192,75 @@ directory and b,d,f the values to write in these files.
 .br
 CgroupAutomount=yes
 .br
-CgroupMountOptions="memory,cpuset"
-.br
-CgroupReleaseAgent="/etc/slurm/cgroup.release_agent"
+CgroupReleaseAgentDir="/etc/slurm/cgroup"
 .br
-JobCgroupParams="memory.swappiness=30"
-.br
-JobStepCgroupParams=""
+ConstrainCores=yes
 .br
 #
+
+.SH "NOTES"
+.LP
+Only one instance of a cgroup subsystem is valid at a time in the kernel.
+If you try to mount another cgroup hierarchy that uses the same cpuset 
+subsystem it will fail.
+However you can mount another cgroup hierarchy for a different cpuset 
+subsystem.
+
+.SH CLEANUP OF CGROUPS
+.LP
+To allow cgroups to be removed automatically when they are no longer in use
+the notify_on_release flag is set in each cgroup when the cgroup is
+instantiated. The release_agent file for each subsystem is set up when the
+subsystem is mounted.  The name of each release_agent file is 
+release_<subsystem name>. The directory is specified via the 
+CgroupReleaseAgentDir parameter in cgroup.conf. A simple release agent 
+mechanism to remove slurm cgroups when they become empty may be set up by 
+creating the release agent files for each required subsystem as symbolic 
+links to a common release agent script, as shown in the example below:
+
+[sulu] (slurm) etc> cat cgroup.conf | grep CgroupReleaseAgentDir
 .br
-# Constrain RAM at 100% of allocation and 
+CgroupReleaseAgentDir="/etc/slurm/cgroup"
 .br
-# Total space (RAM + swap) at 110% of allocation
+
+[sulu] (slurm) etc> ls \-al /etc/slurm/cgroup
 .br
-#
+total 12
 .br
-ConstrainRAMSpace=yes
+drwxr-xr-x 2 root root 4096 2010-04-23 14:55 .
 .br
-AllowedRAMSpace=100
+drwxr-xr-x 4 root root 4096 2010-07-22 14:48 ..
 .br
-ConstrainSwapSpace=yes
+\-rwxrwxrwx 1 root root  234 2010-04-23 14:52 release_common
 .br
-AllowedSwapSpace=10
+lrwxrwxrwx 1 root root   32 2010-04-23 11:04 release_cpuset -> /etc/slurm/cgroup/release_common
 .br
-ConstrainCores=yes
+lrwxrwxrwx 1 root root   32 2010-04-23 11:03 release_freezer -> /etc/slurm/cgroup/release_common
 
-.SH "NOTES"
-Proctrack/cgroup is not compatible with the task/affinity plugin configured
-to use cpusets, although this may be addressed in the future.
-.LP
-Only one instance of a cgroup subsystem is valid at a time in the kernel.
-If you try to mount another cgroup hierarchy that uses the same cpuset 
-subsystem it will fail.
-However you can mount another cgroup hierarchy for a different cpuset 
-subsystem.
+[sulu] (slurm) etc> cat /etc/slurm/cgroup/release_common
+.br
+#!/bin/bash
+.br
+base_path=/cgroup
+.br
+progname=$(basename $0)
+.br
+subsystem=${progname##*_}
+.br
+.br
+rmcg=${base_path}/${subsystem}$@
+.br
+uidcg=${rmcg%/job*}
+.br
+if [[ \-d ${base_path}/${subsystem} ]]
+.br
+then
+.br
+     flock \-x ${uidcg} \-c "rmdir ${rmcg}"
+.br
+fi
+.br
+[sulu] (slurm) etc>
 
 .SH "COPYING"
 Copyright (C) 2010 Lawrence Livermore National Security.
@@ -144,7 +268,7 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
-For details, see <https://computing.llnl.gov/linux/slurm/>.
+For details, see <http://www.schedmd.com/slurmdocs/>.
 .LP
 SLURM is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
diff --git a/doc/man/man5/cray.conf.5 b/doc/man/man5/cray.conf.5
new file mode 100644
index 000000000..718460341
--- /dev/null
+++ b/doc/man/man5/cray.conf.5
@@ -0,0 +1,112 @@
+.TH "cray.conf" "5" "August 2011" "cray.conf 2.3" "Slurm configuration file"
+
+.SH "NAME"
+cray.conf \- Slurm configuration file for the Cray\-specific information
+
+.SH "DESCRIPTION"
+\fBcray.conf\fP is an ASCII file which defines parameters used by 
+Slurm's select/cray plugin in support of Cray systems.
+The file location can be modified at system build time using the
+DEFAULT_SLURM_CONF parameter or at execution time by setting the SLURM_CONF
+environment variable. The file will always be located in the
+same directory as the \fBslurm.conf\fP file.
+The default configuration parameters will work properly in a typical
+installation and this file will not be required.
+.LP
+Parameter names are case insensitive.
+Any text following a "#" in the configuration file is treated
+as a comment through the end of that line.
+The size of each line in the file is limited to 1024 characters.
+Changes to the configuration file take effect upon restart of
+SLURM daemons, daemon receipt of the SIGHUP signal, or execution
+of the command "scontrol reconfigure" unless otherwise noted.
+.LP
+The configuration parameters available include:
+
+.TP
+\fBAlpsDir\fR=<pathname>
+Fully qualified pathname of the directory in which ALPS is installed.
+The default value is \fI/usr\fR.
+
+.TP
+\fBapbasil\fR=<pathname>
+Fully qualified pathname to the apbasil command.
+The default value is \fI/usr/bin/apbasil\fR.
+
+.TP
+\fBapkill\fR=<pathname>
+Fully qualified pathname to the apkill command.
+The default value is \fI/usr/bin/apkill\fR.
+
+.TP
+\fBSDBdb\fR=<dbname>
+Name of the ALPS database.
+The default value is \fIXTAdmin\fR.
+
+.TP
+\fBSDBhost\fR=<hostname>
+Hostname of the database server.
+The default value is \fIsdb\fR.
+
+.TP
+\fBSDBpass\fR=<password>
+Password used to access the ALPS database.
+The default value is NULL, which will load the password from the \fImy.cnf\fR file.
+
+.TP
+\fBSDBport\fR=<port_number>
+Port used to access the ALPS database.
+The default value is 0.
+
+.TP
+\fBSDBuser\fR=<user_name>
+Name of user used to access the ALPS database.
+The default value is NULL, which will load the user name from the \fImy.cnf\fR file.
+
+.TP
+\fBSyncTimeout\fR=<seconds>
+SLURM does not normally schedule jobs while its job or node state information
+is out of synchronization with that of ALPS. This parameter specifies a maximum
+time to defer job scheduling while waiting for consistent state.  The
+inconsistent state might be caused by a variety of hardware or software
+failures and proceeding could result in more failures. The default value is
+3600 (one hour). A value of zero will wait indefinitely for consistent state.
+
+.SH "EXAMPLE"
+.LP
+.br
+###
+.br
+# Slurm Cray support configuration file
+.br
+###
+.br
+apbasil=/opt/alps_simulator_40_r6768/apbasil.sh
+.br
+SDBhost=localhost
+.br
+SDBuser=alps_user
+.br
+SDBdb=XT5istanbul
+
+.SH "COPYING"
+Copyright (C) 2011 SchedMD LLC.
+Produced at SchedMD LLC (cf, DISCLAIMER).
+CODE\-OCEC\-09\-009. All rights reserved.
+.LP
+This file is part of SLURM, a resource management program.
+For details, see <http://www.schedmd.com/slurmdocs/>.
+.LP
+SLURM is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2 of the License, or (at your option)
+any later version.
+.LP
+SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+details.
+
+.SH "SEE ALSO"
+.LP
+\fBslurm.conf\fR(5)
diff --git a/doc/man/man5/gres.conf.5 b/doc/man/man5/gres.conf.5
index 1b2382456..9cfb8f11e 100644
--- a/doc/man/man5/gres.conf.5
+++ b/doc/man/man5/gres.conf.5
@@ -1,14 +1,16 @@
-.TH "gres.conf" "5" "September 2010" "gres.conf 2.2" "Slurm configuration file"
+.TH "gres.conf" "5" "September 2011" "gres.conf 2.3" "Slurm configuration file"
 .SH "NAME"
 gres.conf \- Slurm configuration file for generic resource management.
 
 .SH "DESCRIPTION"
-\fB/etc/gres.conf\fP is an ASCII file which describes the configuration
+\fBgres.conf\fP is an ASCII file which describes the configuration
 of generic resources on each compute node. Each node must contain a
 gres.conf file if generic resources are to be scheduled by SLURM.
 The file location can be modified at system build time using the
-DEFAULT_SLURM_CONF parameter. The file will always be located in the
-same directory as the \fBslurm.conf\fP file.
+DEFAULT_SLURM_CONF parameter or at execution time by setting the SLURM_CONF
+environment variable. The file will always be located in the
+same directory as the \fBslurm.conf\fP file. If generic resource counts are
+set by the gres plugin function node_config_load(), this file may be optional.
 .LP
 Parameter names are case insensitive.
 Any text following a "#" in the configuration file is treated
@@ -30,7 +32,7 @@ to mulitply the number by 1024, 1048576 or 1073741824 respectively.
 .TP
 \fBCPUs\fR
 Specify the CPU index numbers for the specific CPUs which can
-use this resources. For example, it may be strongly preferable
+use this resource. For example, it may be strongly preferable
 to use specific CPUs with specific devices (e.g. on a NUMA
 architecture). Multiple CPUs may be specified using a comma
 delimited list or a range may be specified using a "\-" separator
@@ -42,7 +44,7 @@ If any CPU can be used with the resources, then do not specify the
 .TP
 \fBFile\fR
 Fully qualified pathname of the device files associated with a resource. 
-The name can include a numberic range suffix to be interpretted by SLURM
+The name can include a numeric range suffix to be interpreted by SLURM
 (e.g. \fIFile=/dev/nvidia[0\-3]\fR).
 This field is generally required if enforcement of generic resource
 allocations is to be supported (i.e. prevents a users from making
@@ -99,7 +101,7 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
-For details, see <https://computing.llnl.gov/linux/slurm/>.
+For details, see <http://www.schedmd.com/slurmdocs/>.
 .LP
 SLURM is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
diff --git a/doc/man/man5/slurm.conf.5 b/doc/man/man5/slurm.conf.5
index a86c0c51a..1a582ae61 100644
--- a/doc/man/man5/slurm.conf.5
+++ b/doc/man/man5/slurm.conf.5
@@ -1,25 +1,20 @@
-.TH "slurm.conf" "5" "August 2010" "slurm.conf 2.2" "Slurm configuration file"
+.TH "slurm.conf" "5" "September 2011" "slurm.conf 2.3" "Slurm configuration file"
 
 .SH "NAME"
 slurm.conf \- Slurm configuration file
 .SH "DESCRIPTION"
-\fB/etc/slurm.conf\fP is an ASCII file which describes general SLURM
+\fBslurm.conf\fP is an ASCII file which describes general SLURM
 configuration information, the nodes to be managed, information about
 how those nodes are grouped into partitions, and various scheduling
 parameters associated with those partitions. This file should be
 consistent across all nodes in the cluster.
 .LP
-You can use the \fBSLURM_CONF\fR environment variable to override the built\-in
-location of this file. The SLURM daemons also allow you to override
+The file location can be modified at system build time using the
+DEFAULT_SLURM_CONF parameter or at execution time by setting the SLURM_CONF
+environment variable. The SLURM daemons also allow you to override
 both the built\-in and environment\-provided location using the "\-f"
 option on the command line.
 .LP
-Note the while SLURM daemons create log files and other files as needed,
-it treats the lack of parent directories as a fatal error.
-This prevents the daemons from running if critical file systems are
-not mounted and will minimize the risk of cold\-starting (starting
-without preserving jobs).
-.LP
 The contents of the file are case insensitive except for the names of nodes
 and partitions. Any text following a "#" in the configuration file is treated
 as a comment through the end of that line.
@@ -37,7 +32,7 @@ Note on file permissions:
 The \fIslurm.conf\fR file must be readable by all users of SLURM, since it
 is used by many of the SLURM commands.  Other files that are defined
 in the \fIslurm.conf\fR file, such as log files and job accounting files,
-may need to be created/owned by the "SlurmUser" uid to be successfully
+may need to be created/owned by the user "SlurmUser" to be successfully
 accessed.  Use the "chown" and "chmod" commands to set the ownership
 and permissions appropriately.
 See the section \fBFILE AND DIRECTORY PERMISSIONS\fR for information
@@ -133,6 +128,12 @@ The user account for accessing the accounting storage database.
 Only used for database type storage plugins, ignored otherwise.
 Also see \fBDefaultStorageUser\fR.
 
+.TP
+\fBAccountingStoreJobComment\fR
+If set to "YES" then include the job's comment field in the job
+complete message sent to the Accounting Storage database.  The default
+is "YES".
+
 .TP
 \fBAuthType\fR
 The authentication method for communications between SLURM
@@ -214,9 +215,6 @@ no checkpoint support (default)
 .TP
 \fBcheckpoint/ompi\fR
 OpenMPI (version 1.3 or higher)
-.TP
-\fBcheckpoint/xlch\fR
-XLCH (requires that SlurmUser be root)
 .RE
 
 .TP
@@ -297,6 +295,9 @@ BlueGene block wiring (switch state details)
 \fBCPU_Bind\fR
 CPU binding details for jobs and steps
 .TP
+\fBFrontEnd\fR
+Front end node details
+.TP
 \fBGres\fR
 Generic resource details
 .TP
@@ -416,6 +417,7 @@ If set to "YES" then jobs which exceed a partition's size and/or time limits
 will be rejected at submission time. If set to "NO" then the job will be
 accepted and remain queued until the partition limits are altered.
 The default value is "NO".
+NOTE: If set, then a job's QOS can not be used to exceed partition limits.
 
 .TP
 \fBEpilog\fR
@@ -488,6 +490,7 @@ specific requested value. Job id values generated will incremented by 1
 for each subsequent job. This may be used to provide a meta\-scheduler
 with a job id space which is disjoint from the interactive jobs.
 The default value is 1.
+Also see \fBMaxJobId\fR
 
 .TP
 \fBGetEnvTimeout\fR
@@ -691,16 +694,17 @@ These are intended to be site\-specific plugins which can be used to set
 default job parameters and/or logging events.
 Sample plugins available in the distribution include "cnode", "defaults",
 "logging", "lua", and "partition".
-See the SLURM code in "src/plugins/job_submit" and modify the code to satisfy
-your needs.
+For examples of use, see the SLURM code in "src/plugins/job_submit" and
+"contribs/lua/job_submit*.lua" then modify the code to satisfy your needs.
 No job submission plugins are used by default.
 
 .TP
 \fBKillOnBadExit\fR
 If set to 1, the job will be terminated immediately when one of the
-processes is crashed or aborted. With default value of 0, if one of
+processes is crashed or aborted. With the default value of 0, if one of
 the processes is crashed or aborted the other processes will continue
-to run.
+to run. The user can override this configuration parameter by using srun's
+\fB\-K\fR, \fB\-\-kill\-on\-bad\-exit\fR.
 
 .TP
 \fBKillWait\fR
@@ -739,6 +743,16 @@ jobs will fail. The default value is 10000 jobs. This value may not
 be reset via "scontrol reconfig". It only takes effect upon restart
 of the slurmctld daemon.
 
+.TP
+\fBMaxJobId\fR
+The maximum job id to be used for jobs submitted to SLURM without a
+specific requested value. Job id values generated will be incremented by 1
+for each subsequent job. This may be used to provide a meta\-scheduler
+with a job id space which is disjoint from the interactive jobs.
+Once \fBMaxJobId\fR is reached, the next job will be assigned \fBFirstJobId\fR.
+The default value is 4294901760 (0xffff0000).
+Also see \fBFirstJobId\fR.
+
 .TP
 \fBMaxMemPerCPU\fR
 Maximum real memory size available per allocated CPU in MegaBytes.
@@ -766,6 +780,12 @@ NOTE: Enforcement of memory limits currently requires enabling of
 accounting, which samples memory use on a periodic basis (data need
 not be stored, just collected).
 
+.TP
+\fBMaxStepCount\fR
+The maximum number of steps that any job can initiate. This parameter
+is intended to limit the effect of bad batch scripts.
+The default value is 40000 steps.
+
 .TP
 \fBMaxTasksPerNode\fR
 Maximum number of tasks SLURM will allow a job step to spawn
@@ -801,7 +821,7 @@ Currently supported versions include:
 \fBnone\fR (default, which works for many other versions of MPI) and
 \fBopenmpi\fR.
 More information about MPI use is available here
-<https://computing.llnl.gov/linux/slurm/mpi_guide.html>.
+<http://www.schedmd.com/slurmdocs/mpi_guide.html>.
 
 .TP
 \fBMpiParams\fR
@@ -819,7 +839,7 @@ limit, at which point the job is canceled.
 This is particularly useful for backfill scheduling, which bases upon
 each job's soft time limit.
 The default value is zero.
-Man not exceed exceed 65533 minutes.
+May not exceed 65533 minutes.
 A value of "UNLIMITED" is also supported.
 
 .TP
@@ -866,6 +886,9 @@ preempts jobs by checkpointing them (if possible) or canceling them.
 .TP
 \fBGANG\fR
 enables gang scheduling (time slicing) of jobs in the same partition.
+NOTE: Gang scheduling is performed independently for each partition, so
+configuring partitions with overlapping nodes and gang scheduling is generally
+not recommended.
 .TP
 \fBREQUEUE\fR
 preempts jobs by requeuing them (if possible) or canceling them.
@@ -875,7 +898,10 @@ preempts jobs by suspending them.
 A suspended job will resume execution once the high priority job
 preempting it completes.
 The \fBSUSPEND\fR may only be used with the \fBGANG\fR option
-(the gang scheduler module performs the job resume operation).
+(the gang scheduler module performs the job resume operation)
+and with \fBPreemptType=preempt/partition_prio\fR (the logic to 
+suspend and resume jobs currently only has the data structures to
+support partitions).
 .RE
 
 .TP
@@ -1073,12 +1099,15 @@ Acceptable values at present include:
 which uses an AIX kernel extension and is the default for AIX systems
 .TP
 \fBproctrack/cgroup\fR
-which uses linux cgroups to constrain and track processes. 
+which uses linux cgroups to constrain and track processes.
 NOTE: see "man cgroup.conf" for configuration details
 .TP
 \fBproctrack/linuxproc\fR
 which uses linux process tree using parent process IDs
 .TP
+\fBproctrack/lua\fR
+which uses a site\-specific LUA script to track processes
+.TP
 \fBproctrack/rms\fR
 which uses Quadrics kernel patch and is the default if "SwitchType=switch/elan"
 .TP
@@ -1165,7 +1194,7 @@ All limits listed below
 No limits listed below
 .TP
 \fBAS\fR
-The maximum address space for a processes
+The maximum address space for a process
 .TP
 \fBCORE\fR
 The maximum size of core file
@@ -1177,7 +1206,9 @@ The maximum amount of CPU time
 The maximum size of a process's data segment
 .TP
 \fBFSIZE\fR
-The maximum size of files created
+The maximum size of files created. Note that if the user sets FSIZE to less
+than the current size of the slurmd.log, job launches will fail with 
+a 'File size limit exceeded' error.
 .TP
 \fBMEMLOCK\fR
 The maximum size that may be locked into memory
@@ -1225,7 +1256,7 @@ Related configuration options include \fBResumeTimeout\fR, \fBResumeRate\fR,
 \fBSuspendRate\fR, \fBSuspendTime\fR, \fBSuspendTimeout\fR, \fBSuspendProgram\fR,
 \fBSuspendExcNodes\fR, and \fBSuspendExcParts\fR.
 More information is available at the SLURM web site
-(https://computing.llnl.gov/linux/slurm/power_save.html).
+( http://www.schedmd.com/slurmdocs/power_save.html ).
 
 .TP
 \fBResumeRate\fR
@@ -1251,7 +1282,7 @@ Related configuration options include \fBResumeProgram\fR, \fBResumeRate\fR,
 \fBSuspendRate\fR, \fBSuspendTime\fR, \fBSuspendTimeout\fR, \fBSuspendProgram\fR,
 \fBSuspendExcNodes\fR and \fBSuspendExcParts\fR.
 More information is available at the SLURM web site
-(https://computing.llnl.gov/linux/slurm/power_save.html).
+( http://www.schedmd.com/slurmdocs/power_save.html ).
 
 .TP
 \fBResvOverRun\fR
@@ -1280,12 +1311,13 @@ and resumes communications).
 A DOWN node will become available for use upon registration with a
 valid configuration only if it was set DOWN due to being non\-responsive.
 If the node was set DOWN for any other reason (low memory, prolog failure,
-epilog failure, silently rebooting, etc.), its state will not automatically
+epilog failure, unexpected reboot, etc.), its state will not automatically
 be changed.
 .TP
 \fB2\fR
 A DOWN node will become available for use upon registration with a
 valid configuration.  The node could have been set DOWN for any reason.
+(Disabled on Cray systems.)
 .RE
 
 .TP
@@ -1337,6 +1369,13 @@ Higher values result in less overhead and better responsiveness.
 The default value is 30 seconds.
 This option applies only to \fBSchedulerType=sched/backfill\fR.
 .TP
+\fBbf_resolution=#\fR
+The number of seconds in the resolution of data maintained about when jobs
+begin and end.
+Higher values result in less overhead and better responsiveness.
+The default value is 60 seconds.
+This option applies only to \fBSchedulerType=sched/backfill\fR.
+.TP
 \fBbf_window=#\fR
 The number of minutes into the future to look when considering jobs to schedule.
 Higher values result in more overhead and less responsiveness.
@@ -1354,6 +1393,10 @@ In the case of large clusters (more than 1000 nodes) configured with
 \fBSelectType=select/cons_res\fR, configuring a relatively small value may be
 desirable.
 This option applies only to \fBSchedulerType=sched/backfill\fR.
+.TP
+\fBmax_switch_wait=#\fR
+Maximum number of seconds that a job can delay execution waiting for the
+specified desired switch count. The default value is 60 seconds.
 .RE
 
 .TP
@@ -1375,6 +1418,7 @@ scheduling module "sched/backfill" (see \fBSchedulerType\fR).
 \fBSchedulerTimeSlice\fR
 Number of seconds in each time slice when gang scheduling is enabled
 (\fBPreemptMode=GANG\fR).
+The value must be between 5 seconds and 65533 seconds.
 The default value is 30 seconds.
 
 .TP
@@ -1419,6 +1463,9 @@ for the Wiki interface to the Moab Cluster Suite
 .TP
 \fBSelectType\fR
 Identifies the type of resource selection algorithm to be used.
+Changing this value can only be done by restarting the slurmctld daemon
+and will result in the loss of all job information (running and pending) 
+since the job state save format used by each plugin is different. 
 Acceptable values include
 .RS
 .TP
@@ -1438,6 +1485,10 @@ See the partition \fBShared\fR parameter for more information.
 \fBselect/bluegene\fR
 for a three\-dimensional BlueGene system.
 The default value is "select/bluegene" for BlueGene systems.
+.TP
+\fBselect/cray\fR
+for a Cray system.
+The default value is "select/cray" for all Cray systems.
 .RE
 
 .TP
@@ -1457,7 +1508,7 @@ CPUs are consumable resources.
 There is no notion of sockets, cores or threads;
 do not define those values in the node specification.  If these
 are defined, unexpected results will happen when hyper\-threading
-is enabled Procs= should be used instead.
+is enabled. CPUs= should be used instead.
 On a multi\-core system, each core will be considered a CPU.
 On a multi\-core and hyper\-threaded system, each thread will be
 considered a CPU.
@@ -1468,7 +1519,7 @@ CPUs and memory are consumable resources.
 There is no notion of sockets, cores or threads;
 do not define those values in the node specification.  If these
 are defined, unexpected results will happen when hyper\-threading
-is enabled Procs= should be used instead.
+is enabled. CPUs= should be used instead.
 Setting a value for \fBDefMemPerCPU\fR is strongly recommended.
 .TP
 \fBCR_Core\fR
@@ -1545,7 +1596,8 @@ The default value is 3.
 Fully qualified pathname of a file into which the \fBslurmctld\fR daemon's
 logs are written.
 The default value is none (performs logging via syslog).
-
+.br
+See the section \fBLOGGING\fR if a pathname is specified.
 .TP
 \fBSlurmctldPidFile\fR
 Fully qualified pathname of a file into which the  \fBslurmctld\fR daemon
@@ -1585,6 +1637,8 @@ logs are written.
 The default value is none (performs logging via syslog).
 Any "%h" within the name is replaced with the hostname on which the
 \fBslurmd\fR is running.
+.br
+See the section \fBLOGGING\fR if a pathname is specified.
 
 .TP
 \fBSlurmdPidFile\fR
@@ -1750,7 +1804,7 @@ Related configuration options include \fBResumeProgram\fR, \fBResumeRate\fR,
 \fBResumeTimeout\fR, \fBSuspendRate\fR, \fBSuspendTime\fR, \fBSuspendProgram\fR,
 \fBSuspendExcNodes\fR and \fBSuspendExcParts\fR.
 More information is available at the SLURM web site
-(https://computing.llnl.gov/linux/slurm/power_save.html).
+( http://www.schedmd.com/slurmdocs/power_save.html ).
 
 .TP
 \fBSwitchType\fR
@@ -1776,18 +1830,30 @@ See \fBTaskProlog\fR for execution order details.
 \fBTaskPlugin\fR
 Identifies the type of task launch plugin, typically used to provide
 resource management within a node (e.g. pinning tasks to specific
-processors).
-Acceptable values include
-"task/none" for systems requiring no special handling and
-"task/affinity" to enable the \-\-cpu_bind and/or \-\-mem_bind
-srun options.
-The default value is "task/none".
-If you "task/affinity" and encounter problems, it may be due to
+processors). More than one task plugin can be specified in a comma separated
+list. The prefix of "task/" is optional. Acceptable values include:
+.RS
+.TP 15
+\fBtask/affinity\fR
+enables resource containment using CPUSETs.
+This enables the \-\-cpu_bind and/or \-\-mem_bind srun options.
+If you use "task/affinity" and encounter problems, it may be due to
 the variety of system calls used to implement task affinity on
 different operating systems.
-If that is the case, you may want to use Portable Linux
+If that is the case, you may want to install Portable Linux
 Process Affinity (PLPA, see http://www.open-mpi.org/software/plpa),
 which is supported by SLURM.
+.TP
+\fBtask/cgroup\fR
+enables resource containment using Linux control cgroups.
+This enables the \-\-cpu_bind and/or \-\-mem_bind srun options.
+NOTE: see "man cgroup.conf" for configuration details.
+.TP
+\fBtask/none\fR
+for systems requiring no special handling of user tasks.
+Lacks support for the \-\-cpu_bind and/or \-\-mem_bind srun options.
+The default value is "task/none".
+.RE
 
 .TP
 \fBTaskPluginParam\fR
@@ -1906,9 +1972,9 @@ default for Sun Constellation
 systems, best\-fit logic over three\-dimensional topology
 .TP
 \fBtopology/node_rank\fR
-default for Cray computers, orders nodes based upon information in the
-ALPS database and then performs a best\-fit algorithm over over those
-ordered nodes
+orders nodes based upon information in a node_rank field in the node record
+as generated by a select plugin. SLURM performs a best\-fit algorithm over
+those ordered nodes
 .TP
 \fBtopology/none\fR
 default for other systems, best\-fit logic over one\-dimensional topology
@@ -1926,9 +1992,12 @@ Characterization Key.  Must be set to track wckey usage.
 \fBTreeWidth\fR
 \fBSlurmd\fR daemons use a virtual tree network for communications.
 \fBTreeWidth\fR specifies the width of the tree (i.e. the fanout).
-The default value is 50, meaning each slurmd daemon can communicate
-with up to 50 other slurmd daemons and over 2500 nodes can be contacted
-with two message hops.
+On architectures with a front end node running the slurmd daemon, the value
+must always be equal to or greater than the number of front end nodes which
+eliminates the need for message forwarding between the slurmd daemons.
+On other architectures the default value is 50, meaning each slurmd daemon can
+communicate with up to 50 other slurmd daemons and over 2500 nodes can be
+contacted with two message hops.
 The default value will work well for most clusters.
 Optimal system performance can typically be achieved if \fBTreeWidth\fR
 is set to the square root of the number of nodes in the cluster for
@@ -2018,7 +2087,7 @@ scheduling process by permitting it to compare job requirements
 against these (relatively few) configuration parameters and
 possibly avoid having to check job requirements
 against every individual node's configuration.
-The resources checked at node registration time are: Procs,
+The resources checked at node registration time are: CPUs,
 RealMemory and TmpDisk.
 While baseline values for each of these can be established
 in the configuration file, the actual values upon node
@@ -2136,6 +2205,13 @@ logical number of processors per socket.
 need to specify this parameter in order to optimize scheduling.
 The default value is 1.
 
+.TP
+\fBCPUs\fR
+Number of logical processors on the node (e.g. "2").
+If \fBCPUs\fR is omitted, it will be set equal to the product of
+\fBSockets\fR, \fBCoresPerSocket\fR, and \fBThreadsPerCore\fR.
+The default value is 1.
+
 .TP
 \fBFeature\fR
 A comma delimited list of arbitrary strings indicative of some
@@ -2152,9 +2228,9 @@ Also see \fBGres\fR.
 A comma delimited list of generic resources specifications for a node.
 Each resource specification consists of a name followed by an optional
 colon with a numeric value (default value is one)
-(e.g. "Gres=bandwidth:10000,gpus:2").
+(e.g. "Gres=bandwidth:10000,gpu:2").
 A suffix of "K", "M" or "G" may be used to mulitply the number by 1024,
-1048576 or 1073741824 respectively (e.g. "Gres=bandwidth:4G,gpus:4")..
+1048576 or 1073741824 respectively (e.g. "Gres=bandwidth:4G,gpu:4").
 By default a node has no generic resources.
 Also see \fBFeature\fR.
 
@@ -2168,10 +2244,7 @@ recommended except for development or testing purposes.
 
 .TP
 \fBProcs\fR
-Number of logical processors on the node (e.g. "2").
-If \fBProcs\fR is omitted, it will set equal to the product of
-\fBSockets\fR, \fBCoresPerSocket\fR, and \fBThreadsPerCore\fR.
-The default value is 1.
+See \fBCPUs\fR.
 
 .TP
 \fBRealMemory\fR
@@ -2188,7 +2261,7 @@ Use quotes to enclose a reason having more than one word.
 \fBSockets\fR
 Number of physical processor sockets/chips on the node (e.g. "2").
 If Sockets is omitted, it will be inferred from
-\fBProcs\fR, \fBCoresPerSocket\fR, and \fBThreadsPerCore\fR.
+\fBCPUs\fR, \fBCoresPerSocket\fR, and \fBThreadsPerCore\fR.
 \fBNOTE\fR: If you have multi\-core processors, you will likely
 need to specify these parameters.
 The default value is 1.
@@ -2218,7 +2291,7 @@ Note that the SLURM can allocate resources to jobs down to the
 resolution of a core. If your system is configured with more than
 one thread per core, execution of a different job on each thread
 is not supported unless you configure \fBSelectTypeParameters=CR_CPU\fR
-plus \fBProcs\fR; do not configure \fBSockets\fR, \fBCoresPerSocket\fR or
+plus \fBCPUs\fR; do not configure \fBSockets\fR, \fBCoresPerSocket\fR or
 \fBThreadsPerCore\fR.
 A job can execute a one task per thread from within one job step or
 execute a distinct job step on each of the threads.
@@ -2319,6 +2392,81 @@ registers.
 The default value is "UNKNOWN".
 .RE
 
+.LP
+On computers where frontend nodes are used to execute batch scripts
+rather than compute nodes (BlueGene or Cray systems), one may
+configure one or more frontend nodes using the configuration parameters
+defined below. These options are very similar to those used in configuring
+compute nodes. These options may only be used on systems configured and built
+with the appropriate parameters (\-\-have\-front\-end,
+\-\-enable\-bluegene\-emulation) or a system determined to have the
+appropriate architecture by the configure script (BlueGene or Cray systems).
+The front end configuration specifies the following information:
+
+.TP
+\fBFrontendName\fR
+Name that SLURM uses to refer to a frontend node.
+Typically this would be the string that "/bin/hostname \-s" returns.
+It may also be the fully qualified domain name as returned by "/bin/hostname \-f"
+(e.g. "foo1.bar.com"), or any valid domain name associated with the host
+through the host database (/etc/hosts) or DNS, depending on the resolver
+settings.  Note that if the short form of the hostname is not used, it
+may prevent use of hostlist expressions (the numeric portion in brackets
+must be at the end of the string).
+If the \fBFrontendName\fR is "DEFAULT", the values specified
+with that record will apply to subsequent node specifications
+unless explicitly set to other values in that frontend node record or
+replaced with a different set of default values.
+Note that since the naming of front end nodes would typically not follow that
+of the compute nodes (e.g. lacking X, Y and Z coordinates found in the compute
+node naming scheme), each front end node name should be listed separately and
+without a hostlist expression (i.e. "frontend00,frontend01" rather than
+"frontend[00-01]").
+
+.TP
+\fBFrontendAddr\fR
+Name that a frontend node should be referred to in establishing
+a communications path. This name will be used as an
+argument to the gethostbyname() function for identification.
+As with \fBFrontendName\fR, list the individual node addresses rather than
+using a hostlist expression.
+The number of \fBFrontendAddr\fR records per line must equal the number of
+\fBFrontendName\fR records per line (i.e. you can't map two node names to
+one address).
+\fBFrontendAddr\fR may also contain IP addresses.
+By default, the \fBFrontendAddr\fR will be identical in value to
+\fBFrontendName\fR.
+
+.TP
+\fBPort\fR
+The port number that the SLURM compute node daemon, \fBslurmd\fR, listens
+to for work on this particular frontend node. By default there is a single port
+number for all \fBslurmd\fR daemons on all frontend nodes as defined by the
+\fBSlurmdPort\fR configuration parameter. Use of this option is not generally
+recommended except for development or testing purposes.
+
+.TP
+\fBReason\fR
+Identifies the reason for a frontend node being in state "DOWN", "DRAINED",
+"DRAINING", "FAIL" or "FAILING".
+Use quotes to enclose a reason having more than one word.
+
+.TP
+\fBState\fR
+State of the frontend node with respect to the initiation of user jobs.
+Acceptable values are "DOWN", "DRAIN", "FAIL", "FAILING" and "UNKNOWN".
+"DOWN" indicates the frontend node has failed and is unavailable to be
+allocated work.
+"DRAIN" indicates the frontend node is unavailable to be allocated work.
+"FAIL" indicates the frontend node is expected to fail soon, has
+no jobs allocated to it, and will not be allocated to any new jobs.
+"FAILING" indicates the frontend node is expected to fail soon, has
+one or more jobs allocated to it, but will not be allocated to any new jobs.
+"UNKNOWN" indicates the frontend node's state is undefined (BUSY or IDLE),
+but will be established when the \fBslurmd\fR daemon on that node registers.
+The default value is "UNKNOWN".
+Also see the \fBDownNodes\fR parameter below.
+
 .LP
 The partition configuration permits you to establish different job
 limits or access controls for various groups (or partitions) of nodes.
@@ -2335,6 +2483,9 @@ configuration file and the default values can be reset multiple times
 in the configuration file with multiple entries where "PartitionName=DEFAULT".
 The "PartitionName=" specification must be placed on every line
 describing the configuration of partitions.
+If a partition that is in use is deleted from the configuration and SLURM
+is restarted or reconfigured (scontrol reconfigure), jobs using the partition
+are canceled.
 \fBNOTE:\fR Put all parameters for each partition on a single line.
 Each line of partition configuration information should
 represent a different partition.
@@ -2350,7 +2501,7 @@ The default value is "ALL".
 
 .TP
 \fBAllowGroups\fR
-Comma separated list of group IDs which may execute jobs in the partition.
+Comma separated list of group names which may execute jobs in the partition.
 If at least one group associated with the user attempting to execute the
 job is in AllowGroups, he will be permitted to use this partition.
 Jobs executed as user root can use any partition without regard to
@@ -2381,6 +2532,33 @@ specification will utilize this partition.
 Possible values are "YES" and "NO".
 The default value is "NO".
 
+.TP
+\fBDefMemPerCPU\fR
+Default real memory size available per allocated CPU in MegaBytes.
+Used to avoid over\-subscribing memory and causing paging.
+\fBDefMemPerCPU\fR would generally be used if individual processors
+are allocated to jobs (\fBSelectType=select/cons_res\fR).
+If not set, the \fBDefMemPerCPU\fR value for the entire cluster will be used.
+Also see \fBDefMemPerNode\fR and \fBMaxMemPerCPU\fR.
+\fBDefMemPerCPU\fR and \fBDefMemPerNode\fR are mutually exclusive.
+NOTE: Enforcement of memory limits currently requires enabling of
+accounting, which samples memory use on a periodic basis (data need
+not be stored, just collected).
+
+.TP
+\fBDefMemPerNode\fR
+Default real memory size available per allocated node in MegaBytes.
+Used to avoid over\-subscribing memory and causing paging.
+\fBDefMemPerNode\fR would generally be used if whole nodes
+are allocated to jobs (\fBSelectType=select/linear\fR) and
+resources are shared (\fBShared=yes\fR or \fBShared=force\fR).
+If not set, the \fBDefMemPerNode\fR value for the entire cluster will be used.
+Also see \fBDefMemPerCPU\fR and \fBMaxMemPerNode\fR.
+\fBDefMemPerCPU\fR and \fBDefMemPerNode\fR are mutually exclusive.
+NOTE: Enforcement of memory limits currently requires enabling of
+accounting, which samples memory use on a periodic basis (data need
+not be stored, just collected).
+
 .TP
 \fBDefaultTime\fR
 Run time limit used for jobs that don't specify a value. If not set
@@ -2395,6 +2573,14 @@ The default value will be the value of \fBDisableRootJobs\fR set
 outside of a partition specification (which is "NO", allowing user
 root to execute jobs).
 
+.TP
+\fIGraceTime\fP
+Specifies, in units of seconds, the preemption grace time
+to be extended to a job which has been selected for preemption.
+The default value is zero, no preemption grace time is allowed on
+this partition.
+(Meaningful only for PreemptMode=CANCEL)
+
 .TP
 \fBHidden\fR
 Specifies if the partition and its jobs are to be hidden by default.
@@ -2404,6 +2590,33 @@ The default value is "NO".
 Note that partitions that a user lacks access to by virtue of the
 \fBAllowGroups\fR parameter will also be hidden by default.
 
+.TP
+\fBMaxMemPerCPU\fR
+Maximum real memory size available per allocated CPU in MegaBytes.
+Used to avoid over\-subscribing memory and causing paging.
+\fBMaxMemPerCPU\fR would generally be used if individual processors
+are allocated to jobs (\fBSelectType=select/cons_res\fR).
+If not set, the \fBMaxMemPerCPU\fR value for the entire cluster will be used.
+Also see \fBDefMemPerCPU\fR and \fBMaxMemPerNode\fR.
+\fBMaxMemPerCPU\fR and \fBMaxMemPerNode\fR are mutually exclusive.
+NOTE: Enforcement of memory limits currently requires enabling of
+accounting, which samples memory use on a periodic basis (data need
+not be stored, just collected).
+
+.TP
+\fBMaxMemPerNode\fR
+Maximum real memory size available per allocated node in MegaBytes.
+Used to avoid over\-subscribing memory and causing paging.
+\fBMaxMemPerNode\fR would generally be used if whole nodes
+are allocated to jobs (\fBSelectType=select/linear\fR) and
+resources are shared (\fBShared=yes\fR or \fBShared=force\fR).
+If not set, the \fBMaxMemPerNode\fR value for the entire cluster will be used.
+Also see \fBDefMemPerNode\fR and \fBMaxMemPerCPU\fR.
+\fBMaxMemPerCPU\fR and \fBMaxMemPerNode\fR are mutually exclusive.
+NOTE: Enforcement of memory limits currently requires enabling of
+accounting, which samples memory use on a periodic basis (data need
+not be stored, just collected).
+
 .TP
 \fBMaxNodes\fR
 Maximum count of nodes which may be allocated to any single job.
@@ -2499,13 +2712,13 @@ The default value is "NO".
 For more information see the following web pages:
 .br
 .na
-\fIhttps://computing.llnl.gov/linux/slurm/cons_res.html\fR,
+\fIhttp://www.schedmd.com/slurmdocs/cons_res.html\fR,
 .br
-\fIhttps://computing.llnl.gov/linux/slurm/cons_res_share.html\fR,
+\fIhttp://www.schedmd.com/slurmdocs/cons_res_share.html\fR,
 .br
-\fIhttps://computing.llnl.gov/linux/slurm/gang_scheduling.html\fR, and
+\fIhttp://www.schedmd.com/slurmdocs/gang_scheduling.html\fR, and
 .br
-\fIhttps://computing.llnl.gov/linux/slurm/preempt.html\fR.
+\fIhttp://www.schedmd.com/slurmdocs/preempt.html\fR.
 .ad
 
 .RS
@@ -2602,7 +2815,7 @@ to all of the programs.
 .TP
 \fBBASIL_RESERVATION_ID\fR
 Basil reservation ID.
-Available on Cray XT systems only.
+Available on Cray XT/XE systems only.
 .TP
 \fBMPIRUN_PARTITION\fR
 BlueGene partition name.
@@ -2660,7 +2873,7 @@ SLURM is able to optimize job allocations to minimize network contention.
 Special SLURM logic is used to optimize allocations on systems with a
 three\-dimensional interconnect (BlueGene, Sun Constellation, etc.)
 and information about configuring those systems are available on
-web pages available here: <https://computing.llnl.gov/linux/slurm/>.
+web pages available here: <http://www.schedmd.com/slurmdocs/>.
 For a hierarchical network, SLURM needs to have detailed information
 about how nodes are configured on the network switches.
 .LP
@@ -2673,7 +2886,7 @@ The \fBTopologyPlugin\fR parameter controls which plugin is used to
 collect network topology information.
 The only values presently supported are
 "topology/3d_torus" (default for IBM BlueGene, Sun Constellation and
-Cray XT systems, performs best\-fit logic over three\-dimensional topology),
+Cray XT/XE systems, performs best\-fit logic over three\-dimensional topology),
 "topology/none" (default for other systems,
 best\-fit logic over one\-dimensional topology),
 "topology/tree" (determine the network topology based
@@ -2791,7 +3004,7 @@ JobCredentialPublicCertificate=/usr/local/slurm/public.cert
 .br
 #
 .br
-NodeName=DEFAULT Procs=2 RealMemory=2000 TmpDisk=64000
+NodeName=DEFAULT CPUs=2 RealMemory=2000 TmpDisk=64000
 .br
 NodeName=DEFAULT State=UNKNOWN
 .br
@@ -2939,6 +3152,72 @@ The file must exist on every compute node.
 Must be executable by user \fBSlurmUser\fR.
 The file must be accessible by the primary and backup control machines.
 
+.SH "LOGGING"
+.LP
+Note that while SLURM daemons create log files and other files as needed,
+it treats the lack of parent directories as a fatal error.
+This prevents the daemons from running if critical file systems are
+not mounted and will minimize the risk of cold\-starting (starting
+without preserving jobs).
+.LP
+Log files and job accounting files,
+may need to be created/owned by the "SlurmUser" uid to be successfully
+accessed.  Use the "chown" and "chmod" commands to set the ownership
+and permissions appropriately.
+See the section \fBFILE AND DIRECTORY PERMISSIONS\fR for information
+about the various files and directories used by SLURM.
+.LP
+It is recommended that the logrotate utility be used to ensure that
+various log files do not become too large. 
+This also applies to text files used for accounting,
+process tracking, and the slurmdbd log if they are used.
+.LP
+Here is a sample logrotate configuration. Make appropriate site modifications
+and save as /etc/logrotate.d/slurm on all nodes.
+See the \fBlogrotate\fR man page for more details.
+.LP
+## 
+.br
+# SLURM Logrotate Configuration 
+.br
+## 
+.br
+/var/log/slurm/*log {
+.br
+    compress 
+.br
+    missingok 
+.br
+    nocopytruncate 
+.br
+    nocreate 
+.br
+    nodelaycompress 
+.br
+    nomail 
+.br
+    notifempty 
+.br
+    noolddir 
+.br
+    rotate 5 
+.br
+    sharedscripts 
+.br
+    size=5M 
+.br
+    create 640 slurm root 
+.br
+    postrotate 
+.br
+        /etc/init.d/slurm reconfig 
+.br
+    endscript 
+.br
+}
+.br
+
+
 .SH "COPYING"
 Copyright (C) 2002\-2007 The Regents of the University of California.
 Copyright (C) 2008\-2010 Lawrence Livermore National Security.
@@ -2947,7 +3226,7 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
-For details, see <https://computing.llnl.gov/linux/slurm/>.
+For details, see <http://www.schedmd.com/slurmdocs/>.
 .LP
 SLURM is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
@@ -2964,8 +3243,8 @@ details.
 
 .SH "SEE ALSO"
 .LP
-\fBbluegene.conf\fR(5), \fBcgroup.conf\fR(5), \fBgethostbyname\fR(3),
-\fBgetrlimit\fR(2), \fBgres.conf\fR(5), \fBgroup\fR(5), \fBhostname\fR(1),
+\fBbluegene.conf\fR(5), \fBcgroup.conf\fR(5), \fBgethostbyname\fR (3),
+\fBgetrlimit\fR (2), \fBgres.conf\fR(5), \fBgroup\fR (5), \fBhostname\fR (1),
 \fBscontrol\fR(1), \fBslurmctld\fR(8), \fBslurmd\fR(8),
 \fBslurmdbd\fR(8), \fBslurmdbd.conf\fR(5), \fBsrun(1)\fR,
-\fBspank(8)\fR, \fBsyslog\fR(2), \fBtopology.conf\fR(5), \fBwiki.conf\fR(5)
+\fBspank(8)\fR, \fBsyslog\fR (2), \fBtopology.conf\fR(5), \fBwiki.conf\fR(5)
diff --git a/doc/man/man5/slurmdbd.conf.5 b/doc/man/man5/slurmdbd.conf.5
index f60fe70c5..bfcf88c66 100644
--- a/doc/man/man5/slurmdbd.conf.5
+++ b/doc/man/man5/slurmdbd.conf.5
@@ -3,10 +3,11 @@
 slurmdbd.conf \- Slurm Database Daemon (SlurmDBD) configuration file
 
 .SH "DESCRIPTION"
-\fB/etc/slurmdb.conf\fP is an ASCII file which describes Slurm Database
+\fBslurmdbd.conf\fP is an ASCII file which describes Slurm Database
 Daemon (SlurmDBD) configuration information.
-You can use the \fBSLURM_CONF\fR environment variable to override the built\-in
-location of this file.
+The file location can be modified at system build time using the
+DEFAULT_SLURM_CONF parameter or at execution time by setting the SLURM_CONF
+environment variable.
 .LP
 The contents of the file are case insensitive except for the names of nodes
 and files. Any text following a "#" in the configuration file is treated
@@ -158,6 +159,9 @@ unless something is explicitly set by the admin with the create.
 Fully qualified pathname of a file into which the Slurm Database Daemon's
 logs are written.
 The default value is none (performs logging via syslog).
+.br
+See the section \fBLOGGING\fR in the slurm.conf man page
+if a pathname is specified.
 
 .TP
 \fBMessageTimeout\fR
@@ -324,6 +328,12 @@ with to store the job accounting data.
 Boolean yes or no.  Used to set display and track of the Workload
 Characterization Key. Must be set to track wckey usage.
 
+.TP
+\fBTrackSlurmctldDown\fR
+Boolean yes or no.  If set, the slurmdbd will mark all idle resources on the
+cluster as down when a slurmctld disconnects or is no longer reachable.  The
+default is no.
+
 .SH "EXAMPLE"
 .LP
 #
@@ -376,7 +386,7 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
-For details, see <https://computing.llnl.gov/linux/slurm/>.
+For details, see <http://www.schedmd.com/slurmdocs/>.
 .LP
 SLURM is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
@@ -395,4 +405,4 @@ details.
 .LP
 \fBslurm.conf\fR(5),
 \fBslurmctld\fR(8), \fBslurmdbd\fR(8)
-\fBsyslog\fR(2)
+\fBsyslog\fR (2)
diff --git a/doc/man/man5/topology.conf.5 b/doc/man/man5/topology.conf.5
index f73a4ffe1..7bae1d7d4 100644
--- a/doc/man/man5/topology.conf.5
+++ b/doc/man/man5/topology.conf.5
@@ -4,10 +4,11 @@
 topology.conf \- Slurm configuration file for defining the network topology
 
 .SH "DESCRIPTION"
-\fB/etc/topology.conf\fP is an ASCII file which describes the
+\fBtopology.conf\fP is an ASCII file which describes the
 cluster's network topology for optimized job resource allocation.
 The file location can be modified at system build time using the
-DEFAULT_SLURM_CONF parameter. The file will always be located in the
+DEFAULT_SLURM_CONF parameter or at execution time by setting the SLURM_CONF
+environment variable. The file will always be located in the
 same directory as the \fBslurm.conf\fP file.
 .LP
 Parameter names are case insensitive.
@@ -71,7 +72,7 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
-For details, see <https://computing.llnl.gov/linux/slurm/>.
+For details, see <http://www.schedmd.com/slurmdocs/>.
 .LP
 SLURM is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
diff --git a/doc/man/man5/wiki.conf.5 b/doc/man/man5/wiki.conf.5
index 1cf069ad7..962fa873d 100644
--- a/doc/man/man5/wiki.conf.5
+++ b/doc/man/man5/wiki.conf.5
@@ -2,10 +2,11 @@
 .SH "NAME"
 wiki.conf \- Slurm configuration file for wiki and wiki2 scheduler plugins
 .SH "DESCRIPTION"
-\fB/etc/wiki.conf\fP is an ASCII file which describes wiki and wiki2
+\fBwiki.conf\fP is an ASCII file which describes wiki and wiki2
 scheduler specific SLURM configuration information.
 The file location can be modified at system build time using the
-DEFAULT_SLURM_CONF parameter. The file will always be located in the
+DEFAULT_SLURM_CONF parameter or at execution time by setting the SLURM_CONF
+environment variable. The file will always be located in the
 same directory as the \fBslurm.conf\fP file.
 .LP
 Parameter names are case insensitive.
@@ -188,7 +189,7 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
-For details, see <https://computing.llnl.gov/linux/slurm/>.
+For details, see <http://www.schedmd.com/slurmdocs/>.
 .LP
 SLURM is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
diff --git a/doc/man/man8/slurmctld.8 b/doc/man/man8/slurmctld.8
index 6da4a41f5..2828f07d5 100644
--- a/doc/man/man8/slurmctld.8
+++ b/doc/man/man8/slurmctld.8
@@ -97,7 +97,7 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
-For details, see <https://computing.llnl.gov/linux/slurm/>.
+For details, see <http://www.schedmd.com/slurmdocs/>.
 .LP
 SLURM is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
diff --git a/doc/man/man8/slurmd.8 b/doc/man/man8/slurmd.8
index 09e42ceae..985273195 100644
--- a/doc/man/man8/slurmd.8
+++ b/doc/man/man8/slurmd.8
@@ -40,9 +40,9 @@ Help; print a brief summary of command options.
 Write log messages to the specified file.
 .TP
 \fB\-M\fR
-Lock slurmd pages into system memory using mlockall(2) to disable
+Lock slurmd pages into system memory using mlockall (2) to disable
 paging of the slurmd process. This may help in cases where nodes are
-marked DOWN during periods of heavy swap activity. If the mlockall(2)
+marked DOWN during periods of heavy swap activity. If the mlockall (2)
 system call is not available, an error will be printed to the log
 and slurmd will continue as normal.
 
@@ -89,7 +89,7 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
-For details, see <https://computing.llnl.gov/linux/slurm/>.
+For details, see <http://www.schedmd.com/slurmdocs/>.
 .LP
 SLURM is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
diff --git a/doc/man/man8/slurmdbd.8 b/doc/man/man8/slurmdbd.8
index 9e881e7fb..077bdfa10 100644
--- a/doc/man/man8/slurmdbd.8
+++ b/doc/man/man8/slurmdbd.8
@@ -42,7 +42,7 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
-For details, see <https://computing.llnl.gov/linux/slurm/>.
+For details, see <http://www.schedmd.com/slurmdocs/>.
 .LP
 SLURM is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
diff --git a/doc/man/man8/slurmstepd.8 b/doc/man/man8/slurmstepd.8
index 3bbd3a711..3ad55a124 100644
--- a/doc/man/man8/slurmstepd.8
+++ b/doc/man/man8/slurmstepd.8
@@ -16,7 +16,7 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
-For details, see <https://computing.llnl.gov/linux/slurm/>.
+For details, see <http://www.schedmd.com/slurmdocs/>.
 .LP
 SLURM is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
diff --git a/doc/man/man8/spank.8 b/doc/man/man8/spank.8
index 34eef669e..233edffc7 100644
--- a/doc/man/man8/spank.8
+++ b/doc/man/man8/spank.8
@@ -73,12 +73,12 @@ Called for each task just after fork, but before all elevated privileges
 are dropped. (remote context only)
 .TP
 \fBslurm_spank_task_init\fR
-Called for each task just before execve(2). (remote context only)
+Called for each task just before execve (2). (remote context only)
 .TP
 \fBslurm_spank_task_post_fork\fR
-Called for each task from parent process after fork(2) is complete.
+Called for each task from parent process after fork (2) is complete.
 Due to the fact that \fBslurmd\fR does not exec any tasks until all
-tasks have completed fork(2), this call is guaranteed to run before
+tasks have completed fork (2), this call is guaranteed to run before
 the user task is executed. (remote context only)
 .TP
 \fBslurm_spank_task_exit\fR
@@ -166,8 +166,8 @@ the job's environment. The prototypes are:
 .fi
 .LP
 These are only necessary in remote context since modifications of
-the standard process environment using \fBsetenv\fR(3), \fBgetenv\fR(3),
-and \fBunsetenv\fR(3) may be used in local context.
+the standard process environment using \fBsetenv\fR (3), \fBgetenv\fR (3),
+and \fBunsetenv\fR (3) may be used in local context.
 .LP
 Functions are also available from within the \fBSPANK\fR plugins to
 establish environment variables to be exported to the SLURM
@@ -244,7 +244,7 @@ is a short description of the option suitable for \-\-help output.
 .TP
 .I has_arg
 0 if option takes no argument, 1 if option takes an argument, and
-2 if the option takes an optional argument. (See \fBgetopt_long\fR(3)).
+2 if the option takes an optional argument. (See \fBgetopt_long\fR (3)).
 .TP
 .I val
 A plugin\-local value to return to the option callback function.
@@ -549,7 +549,7 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
-For details, see <https://computing.llnl.gov/linux/slurm/>.
+For details, see <http://www.schedmd.com/slurmdocs/>.
 .LP
 SLURM is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
diff --git a/etc/bluegene.conf.example b/etc/bluegene.conf.example
index d35e068aa..f94fe3896 100644
--- a/etc/bluegene.conf.example
+++ b/etc/bluegene.conf.example
@@ -99,7 +99,7 @@ BridgeAPIVerbose=0
 #   will be based upon the NodeName defined in slurm.conf.
 ###############################################################################
 # LEAVE NEXT LINE AS A COMMENT, Full-system bgblock, implicitly created
-# BP=[000x333] Type=TORUS         # 4x4x4 = 64 midplanes
+# BPs=[000x333] Type=TORUS         # 4x4x4 = 64 midplanes
 ###############################################################################
 # smap bgblock layout here:
 BPs=[000x133] Type=TORUS          # 2x4x4 = 32
diff --git a/etc/cgroup.conf.example b/etc/cgroup.conf.example
index 615faa6c9..816c18283 100644
--- a/etc/cgroup.conf.example
+++ b/etc/cgroup.conf.example
@@ -2,70 +2,11 @@
 #
 # Slurm cgroup support configuration file
 #
-###
-
-#--
-# Slurm cgroup plugins require a valid and functional 
-# cgroup system mounted on /dev/cgroup
-# When launched, plugins check cgroup availability
-# If cgroup is not available, the plugin launch fails
-# unless CgroupAutomount is set to yes. In that case,
-# the plugin will first try to mount the cgroup system.
-# CgroupMountOptions and CgroupReleaseAgent can be used to
-# tune the cgroup system behavior
+# See man slurm.conf and man cgroup.conf for further
+# information on cgroup configuration parameters
 #--
 CgroupAutomount=yes
-CgroupMountOptions="memory,cpuset"
-CgroupReleaseAgent="/etc/slurm/cgroup.release_agent"
-
-#--
-# Slurm cgroup proctrack plugin creates a hierarchical set of
-# directories for each step, putting the step tasks into the leaf
-#
-# This directory structure is like the following : 
-#     /dev/cgroup/slurm/uid_%uid/job_%jobid/step_%stepid
-#
-# job cgroup and jobstep cgroup can be tuned using the two next 
-# parameters. The format of the parameter is the following :
-#
-# "a=b c=d e=f" where a,c,d corresponds to files under the cgroup 
-# directory and b,d,f the values to write in these files
-#--
-JobCgroupParams="memory.swappiness=30"
-JobStepCgroupParams=""
+CgroupReleaseAgent="/etc/slurm/cgroup"
 
-#--
-# Slurm cgroup proctrack plugin can constrain memory usage at the job
-# level. The constraints correspond to the amount of RAM space allowed
-# to the whole job as well as the amount of additional swap space.
-#
-# The amount of space of these two notions are expressed in percent of
-# the memory limit set to the job on the execution node.
-#
-# Thus, the following configuration :
-#     ConstrainRAMSpace=no
-#     ConstrainSwapSpace=yes
-# will request that no constraint are set for the jobs,
-#     ConstrainRAMSpace=yes
-#     AllowedRAMSpace=100
-#     ConstrainSwapSpace=yes
-#     AllowedSwapSpace=10
-# will request to constrain RAM and Swap space letting the job use
-# as many RAM space than memory asked in slurm but not more than
-# 110% of this limit in both RAM+Swap space
-#
-# Warning: setting ConstrainSwapSpace to yes automatically set 
-# ConstrainRAMSpace to yes and the corresponding limit to 100%
-# of the memory limit + the configured percent of Swap space
-#--
-ConstrainRAMSpace=yes
-AllowedRAMSpace=100
-ConstrainSwapSpace=yes
-AllowedSwapSpace=10
-
-#--
-# Slurm cgroup proctrack plugin can constrain allowed cores to 
-# the subset of allocated resources.
-# To do that, you just have to set to yes the following parameter
-#--
-ConstrainCores=yes
+ConstrainCores=no
+ConstrainRAMSpace=no
diff --git a/etc/cgroup.release_agent b/etc/cgroup.release_agent
deleted file mode 100644
index cd8679c9e..000000000
--- a/etc/cgroup.release_agent
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/bash
-rmdir /dev/cgroup/$@
diff --git a/etc/cgroup.release_common.example b/etc/cgroup.release_common.example
new file mode 100644
index 000000000..f431d2685
--- /dev/null
+++ b/etc/cgroup.release_common.example
@@ -0,0 +1,155 @@
+#!/bin/bash
+#
+# Generic release agent for SLURM cgroup usage
+#
+# Manage cgroup hierarchy like :
+#
+# /cgroup/subsystem/uid_%/job_%/step_%/task_%
+#
+# Automatically sync uid_% cgroups to be coherent
+# with remaining job children when one of them is removed
+# by a call to this release agent.
+# The synchronisation is made in a flock on the root cgroup
+# to ensure coherency of the cgroups contents.
+#
+
+progname=$(basename $0)
+subsystem=${progname##*_}
+
+get_mount_dir()
+{
+    local lssubsys=$(type -p lssubsys)
+    if [ -x $lssubsys ]; then
+        $lssubsys -m $subsystem | awk '{print $2}'
+    else
+        awk "/release_agent=$0/ { print \$2 }"
+    fi
+}
+
+mountdir=$(get_mount_dir)
+
+if [[ $# -eq 0 ]]
+then
+    echo "Usage: $(basename $0) [sync] cgroup"
+    exit 1
+fi
+
+# build orphan cg path
+if [[ $# -eq 1 ]]
+then
+    rmcg=${mountdir}$1
+else
+    rmcg=${mountdir}$2
+fi
+slurmcg=${rmcg%/uid_*}
+if [[ ${slurmcg} == ${rmcg} ]]
+then
+    # not a slurm job pattern, perhaps the slurmcg, just remove 
+    # the dir with a lock and exit
+    flock -x ${mountdir} -c "rmdir ${rmcg}"
+    exit $?
+fi
+orphancg=${slurmcg}/orphan
+
+# make sure orphan cgroup is existing
+if [[ ! -d ${orphancg} ]]
+then
+    mkdir ${orphancg}
+    case ${subsystem} in 
+	cpuset)
+	    cat ${mountdir}/cpuset.cpus > ${orphancg}/cpuset.cpus
+	    cat ${mountdir}/cpuset.mems > ${orphancg}/cpuset.mems
+	    ;;
+	*)
+	    ;;
+    esac
+fi
+    
+# kernel call
+if [[ $# -eq 1 ]]
+then
+
+    rmcg=${mountdir}$@
+
+    # try to extract the uid cgroup from the input one
+    # ( extract /uid_% from /uid%/job_*...)
+    uidcg=${rmcg%/job_*}
+    if [[ ${uidcg} == ${rmcg} ]]
+    then
+	# not a slurm job pattern, perhaps the uidcg, just remove 
+	# the dir with a lock and exit
+	flock -x ${mountdir} -c "rmdir ${rmcg}"
+	exit $?
+    fi
+
+    if [[ -d ${mountdir} ]]
+    then
+	flock -x ${mountdir} -c "$0 sync $@"
+    fi
+
+    exit $?
+
+# sync subcall (called using flock by the kernel hook to be sure
+# that no one is manipulating the hierarchy, i.e. PAM, SLURM, ...)
+elif [[ $# -eq 2 ]] && [[ $1 == "sync" ]]
+then
+
+    shift
+    rmcg=${mountdir}$@
+    uidcg=${rmcg%/job_*}
+
+    # remove this cgroup
+    if [[ -d ${rmcg} ]]
+    then
+        case ${subsystem} in
+            memory)
+		# help to correctly remove lazy cleaning memcg
+		# but still not perfect
+                sleep 1
+                ;;
+            *)
+		;;
+        esac
+	rmdir ${rmcg}
+    fi
+    if [[ ${uidcg} == ${rmcg} ]]
+    then
+	## not a slurm job pattern exit now do not sync
+	exit 0
+    fi
+
+    # sync the user cgroup based on targeted subsystem
+    # and the remaining job
+    if [[ -d ${uidcg} ]]
+    then
+	case ${subsystem} in 
+	    cpuset)
+		cpus=$(cat ${uidcg}/job_*/cpuset.cpus 2>/dev/null)
+		if [[ -n ${cpus} ]]
+		then
+		    cpus=$(scontrol show hostnames $(echo ${cpus} | tr ' ' ','))
+		    cpus=$(echo ${cpus} | tr ' ' ',')
+		    echo ${cpus} > ${uidcg}/cpuset.cpus
+		else
+		    # first move the remaining processes to 
+		    # a cgroup reserved for orphaned processes
+		    for t in $(cat ${uidcg}/tasks)
+		    do
+			echo $t > ${orphancg}/tasks
+		    done
+		    # then remove the remaining cpus from the cgroup
+		    echo "" > ${uidcg}/cpuset.cpus
+		fi
+		;;
+	    *)
+		;;
+	esac
+    fi
+
+# error
+else
+    echo "Usage: $(basename $0) [sync] cgroup"
+    exit 1
+fi
+
+exit 0
diff --git a/etc/cgroup_allowed_devices_file.conf.example b/etc/cgroup_allowed_devices_file.conf.example
new file mode 100644
index 000000000..dcd22949d
--- /dev/null
+++ b/etc/cgroup_allowed_devices_file.conf.example
@@ -0,0 +1,6 @@
+/dev/null
+/dev/urandom
+/dev/zero
+/dev/sda*
+/dev/cpu/*/*
+/dev/pts/*
diff --git a/etc/init.d.slurm b/etc/init.d.slurm
index be89800c7..74a52a93f 100644
--- a/etc/init.d.slurm
+++ b/etc/init.d.slurm
@@ -67,6 +67,10 @@ if [ -d /bgl/BlueLight/ppcfloor ]; then
 fi
 
 # Source slurm specific configuration
+# This can be used to alter limits for users jobs or set daemon options.
+# For example, the limits for user jobs could be higher or lower than the
+# default limits for user root (e.g. "ulimit -t unlimited" sets an unlimited
+# CPU time limit for spawned user jobs).
 # SLURMCTLD_OPTIONS defines slurmctld command line options. See "man slurmctld"
 # SLURMD_OPTIONS defines slurmd command line options. See "man slurmd"
 if [ -f /etc/sysconfig/slurm ] ; then
@@ -106,7 +110,15 @@ stop() {
 startall() {
     for prog in `$BINDIR/scontrol show daemons`; do
 	optvar=`echo ${prog}_OPTIONS | tr "a-z" "A-Z"`
-	start $prog ${!optvar}
+	if [[ ${MULTIPLE_SLURMD} == yes ]] && [[ ${prog} == slurmd ]]
+	then
+	    for node in $(scontrol show aliases)
+	    do
+		start $prog -N ${node} ${!optvar}
+	    done
+	else
+	    start $prog ${!optvar}
+	fi
     done
 }
 
@@ -120,6 +132,8 @@ slurmstatus() {
     local pid
     local rpid
     local pidfile
+    local pidfiles
+    local rc
 
     pidfile=`grep -i ${base}pid $CONFDIR/slurm.conf | grep -v '^ *#'`
     if [ $? = 0 ]; then
@@ -132,36 +146,64 @@ slurmstatus() {
     pid=`pidof -o $$ -o $$PPID -o %PPID -x $1 || \
 	 pidof -o $$ -o $$PPID -o %PPID -x ${base}`
 
-    if [ -f $pidfile ]; then
-	read rpid < $pidfile
-	if [ "$rpid" != "" -a "$pid" != "" ]; then
-	    for i in $pid ; do
-		if [ "$i" = "$rpid" ]; then
-		    echo $"${base} (pid $pid) is running..."
-		    return 0
-		fi
+    if [ "$base" == "slurmd" ] ; then
+	echo ${pidfile} | grep -q %n
+	if [[ $? -eq 0 ]]
+	then
+	    for n in $(scontrol show aliases)
+	    do
+		pidfiles="${pidfiles} $(echo ${pidfile} | sed "s/%n/$n/g")"
 	    done
-	elif [ "$rpid" != "" -a "$pid" = "" ]; then
+	else
+	    pidfiles=${pidfile}
+	fi
+    else
+	pidfiles=${pidfile}
+    fi
+
+    RETVAL=0
+    for pidfile in ${pidfiles}
+    do
+	rc=1
+	if [ -f $pidfile ]; then
+	    read rpid < $pidfile
+	    if [ "$rpid" != "" -a "$pid" != "" ]; then
+		for i in $pid ; do
+		    if [ "$i" = "$rpid" ]; then
+			echo $"${base} (pid $rpid) is running..."
+			rc=0
+			break
+		    fi
+		done
+	    elif [ "$rpid" != "" -a "$pid" = "" ]; then
 #           Due to change in user id, pid file may persist
 #           after slurmctld terminates
-	    if [ "$base" != "slurmctld" ] ; then
-	       echo $"${base} dead but pid file exists"
-	    else
-	       echo $"${base} is stopped"
+		if [ "$base" != "slurmctld" ] ; then
+		    echo $"${base} dead but pid file exists"
+		else
+		    echo $"${base} is stopped"
+		fi
+		RETVAL=1
 	    fi
-	    return 1
+	    
 	fi
 
-    fi
+	if [[ $rc -eq 0 ]]
+	then
+	    continue
+	fi
 
-    if [ "$base" = "slurmctld" -a "$pid" != "" ] ; then
-	echo $"${base} (pid $pid) is running..."
-	return 0
-    fi
+	if [ "$base" = "slurmctld" -a "$pid" != "" ] ; then
+	    echo $"${base} (pid $pid) is running..."
+	    continue
+	fi
+	
+	echo $"${base} is stopped"
+	RETVAL=1
 
-    echo $"${base} is stopped"
+    done
 
-    return 3
+    return $RETVAL
 }
 
 #
@@ -221,7 +263,17 @@ case "$1" in
 	if [ -f /var/lock/subsys/slurm ]; then
 	    for prog in `$BINDIR/scontrol show daemons`; do
 		 stop $prog
-		 start $prog
+		 sleep 1
+		 optvar=`echo ${prog}_OPTIONS | tr "a-z" "A-Z"`
+		 if [[ ${MULTIPLE_SLURMD} == yes ]] && [[ ${prog} == slurmd ]]
+		 then
+		     for node in $(scontrol show aliases)
+		     do
+			 start $prog -N ${node}
+		     done
+		 else
+		     start $prog ${!optvar}
+		 fi
 	    done
 	fi
 	;;
diff --git a/slurm.spec b/slurm.spec
index 37a89df7e..58e92eae6 100644
--- a/slurm.spec
+++ b/slurm.spec
@@ -22,6 +22,7 @@
 # --without readline %_without_readline 1    don't require readline-devel RPM to be installed
 # --with sgijob      %_with_sgijob      1    build proctrack-sgi-job RPM
 # --with sun_const   %_with_sun_const   1    build for Sun Constellation system
+# --with srun2aprun  %_with_srun2aprun   1    build srun as aprun wrapper
 
 #
 #  Allow defining --with and --without build options or %_with and %without in .rpmmacors
@@ -43,6 +44,7 @@
 %slurm_without_opt debug
 %slurm_without_opt elan
 %slurm_without_opt sun_const
+%slurm_without_opt srun2aprun
 
 # These options are only here to force there to be these on the build.
 # If they are not set they will still be compiled if the packages exist.
@@ -86,16 +88,16 @@
 %endif
 
 Name:    slurm
-Version: 2.2.7
+Version: 2.3.2
 Release: 1%{?dist}
 
 Summary: Simple Linux Utility for Resource Management
 
 License: GPL
 Group: System Environment/Base
-Source: slurm-2.2.7.tar.bz2
+Source: slurm-2.3.2.tar.bz2
 BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}
-URL: https://computing.llnl.gov/linux/slurm/
+URL: http://www.schedmd.com/slurmdocs/
 
 Requires: slurm-plugins
 
@@ -288,6 +290,15 @@ Requires: slurm-perlapi
 %description torque
 Torque wrapper scripts used for helping migrate from Torque/PBS to SLURM.
 
+%if %{slurm_with srun2aprun}
+%package srun2aprun
+Summary: SLURM srun command is a wrapper for Cray/ALPS aprun command.
+Group: Development/System
+Requires: slurm-perlapi
+%description srun2aprun
+SLURM srun command is a wrapper for Cray/ALPS aprun command.
+%endif
+
 %package sjobexit
 Summary: SLURM job exit code management tools.
 Group: Development/System
@@ -368,7 +379,7 @@ Gives the ability for SLURM to use Berkeley Lab Checkpoint/Restart
 #############################################################################
 
 %prep
-%setup -n slurm-2.2.7
+%setup -n slurm-2.3.2
 
 %build
 %configure --program-prefix=%{?_program_prefix:%{_program_prefix}} \
@@ -406,9 +417,13 @@ DESTDIR="$RPM_BUILD_ROOT" make install-contrib
       install -D -m755 etc/init.d.slurmdbd $RPM_BUILD_ROOT/etc/init.d/slurmdbd
    fi
 %endif
-install -D -m644 etc/cgroup.conf.example ${RPM_BUILD_ROOT}%{_sysconfdir}/cgroup.conf.example
-install -D -m755 etc/cgroup.release_agent ${RPM_BUILD_ROOT}%{_sysconfdir}/cgroup.release_agent
 install -D -m644 etc/slurm.conf.example ${RPM_BUILD_ROOT}%{_sysconfdir}/slurm.conf.example
+install -D -m644 etc/cgroup.conf.example ${RPM_BUILD_ROOT}%{_sysconfdir}/cgroup.conf.example
+install -D -m755 etc/cgroup_allowed_devices_file.conf.example ${RPM_BUILD_ROOT}%{_sysconfdir}/cgroup_allowed_devices_file.conf.example
+install -D -m755 etc/cgroup.release_common.example ${RPM_BUILD_ROOT}%{_sysconfdir}/cgroup.release_common.example
+install -D -m755 etc/cgroup.release_common.example ${RPM_BUILD_ROOT}%{_sysconfdir}/cgroup/release_freezer
+install -D -m755 etc/cgroup.release_common.example ${RPM_BUILD_ROOT}%{_sysconfdir}/cgroup/release_cpuset
+install -D -m755 etc/cgroup.release_common.example ${RPM_BUILD_ROOT}%{_sysconfdir}/cgroup/release_memory
 install -D -m644 etc/slurmdbd.conf.example ${RPM_BUILD_ROOT}%{_sysconfdir}/slurmdbd.conf.example
 install -D -m755 etc/slurm.epilog.clean ${RPM_BUILD_ROOT}%{_sysconfdir}/slurm.epilog.clean
 install -D -m755 contribs/sjstat ${RPM_BUILD_ROOT}%{_bindir}/sjstat
@@ -429,7 +444,18 @@ rm -f $RPM_BUILD_ROOT/lib64/security/pam_slurm.{a,la}
 rm -f $RPM_BUILD_ROOT/%{_libdir}/slurm/auth_none.so
 %endif
 %if ! %{slurm_with bluegene}
+rm -f $RPM_BUILD_ROOT/%{_libdir}/slurm/job_submit_cnode.so
+rm -f $RPM_BUILD_ROOT/%{_libdir}/slurm/libsched_if.so
+rm -f $RPM_BUILD_ROOT/%{_libdir}/slurm/libsched_if64.so
+rm -f $RPM_BUILD_ROOT/%{_libdir}/slurm/runjob_plugin.so
 rm -f $RPM_BUILD_ROOT/%{_mandir}/man5/bluegene*
+rm -f $RPM_BUILD_ROOT/%{_sbindir}/sfree
+rm -f $RPM_BUILD_ROOT/%{_sbindir}/slurm_epilog
+rm -f $RPM_BUILD_ROOT/%{_sbindir}/slurm_prolog
+%endif
+%if ! %{slurm_with munge}
+rm -f $RPM_BUILD_ROOT/%{_libdir}/slurm/auth_munge.so
+rm -f $RPM_BUILD_ROOT/%{_libdir}/slurm/crypto_munge.so
 %endif
 rm -f $RPM_BUILD_ROOT/%{_perldir}/auto/Slurm/.packlist
 rm -f $RPM_BUILD_ROOT/%{_perlarchlibdir}/perllocal.pod
@@ -438,25 +464,32 @@ rm -f $RPM_BUILD_ROOT/%{_perldir}/auto/Slurmdb/.packlist
 
 %if ! %{slurm_with blcr}
 # remove these if they exist
-rm -f ${RPM_BUILD_ROOT}%{_mandir}/man1/srun_cr* ${RPM_BUILD_ROOT}%{_bindir}/srun_cr ${RPM_BUILD_ROOT}%{_libexecdir}/slurm/cr_*
+rm -f ${RPM_BUILD_ROOT}%{_mandir}/man1/srun_cr*
+rm -f ${RPM_BUILD_ROOT}%{_bindir}/srun_cr
+rm -f ${RPM_BUILD_ROOT}%{_libexecdir}/slurm/cr_*
 %endif
 
 # Build man pages that are generated directly by the tools
 rm -f $RPM_BUILD_ROOT/%{_mandir}/man1/sjobexitmod.1
 ${RPM_BUILD_ROOT}%{_bindir}/sjobexitmod --roff > $RPM_BUILD_ROOT/%{_mandir}/man1/sjobexitmod.1
+%if %{slurm_with srun2aprun}
+    rm -f $RPM_BUILD_ROOT/%{_mandir}/man1/srun.1
+    pod2man --section=1 contribs/cray/srun.pl > $RPM_BUILD_ROOT/%{_mandir}/man1/srun.1
+%endif
 
 # Build conditional file list for main package
 LIST=./slurm.files
 touch $LIST
-test -f $RPM_BUILD_ROOT/etc/init.d/slurm                       &&
-  echo /etc/init.d/slurm                               >> $LIST
+test -f $RPM_BUILD_ROOT/etc/init.d/slurm			&&
+  echo /etc/init.d/slurm				>> $LIST
+test -f $RPM_BUILD_ROOT/%{_bindir}/sview			&&
+  echo %{_bindir}/sview					>> $LIST
 
 %if %{slurm_with aix}
 install -D -m644 etc/federation.conf.example ${RPM_BUILD_ROOT}%{_sysconfdir}/federation.conf.example
 %endif
 
 %if %{slurm_with bluegene}
-rm -f ${RPM_BUILD_ROOT}%{_bindir}/srun
 install -D -m644 etc/bluegene.conf.example ${RPM_BUILD_ROOT}%{_sysconfdir}/bluegene.conf.example
 mkdir -p ${RPM_BUILD_ROOT}/etc/ld.so.conf.d
 echo "%{_libdir}/slurm" > ${RPM_BUILD_ROOT}/etc/ld.so.conf.d/slurm.conf
@@ -468,6 +501,8 @@ test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/libsched_if.so &&
    echo %{_libdir}/slurm/libsched_if.so >> $LIST
 test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/libsched_if64.so &&
    echo %{_libdir}/slurm/libsched_if64.so >> $LIST
+test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/runjob_plugin.so &&
+   echo %{_libdir}/slurm/runjob_plugin.so >> $LIST
 
 %endif
 
@@ -495,10 +530,14 @@ test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/jobcomp_pgsql.so            &&
    echo %{_libdir}/slurm/jobcomp_pgsql.so            >> $LIST
 
 LIST=./plugins.files
+test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/select_bluegene.so          &&
+   echo %{_libdir}/slurm/select_bluegene.so          >> $LIST
 test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/crypto_openssl.so           &&
    echo %{_libdir}/slurm/crypto_openssl.so           >> $LIST
 test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/task_affinity.so            &&
    echo %{_libdir}/slurm/task_affinity.so            >> $LIST
+test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/task_cgroup.so              &&
+   echo %{_libdir}/slurm/task_cgroup.so              >> $LIST
 
 LIST=./pam.files
 touch $LIST
@@ -523,7 +562,7 @@ rm -rf $RPM_BUILD_ROOT
 %defattr(-,root,root,0755)
 %doc AUTHORS
 %doc NEWS
-%doc README
+%doc README.rst
 %doc RELEASE_NOTES
 %doc DISCLAIMER
 %doc COPYING
@@ -539,6 +578,7 @@ rm -rf $RPM_BUILD_ROOT
 %{_libdir}/slurm/src/*
 %{_mandir}/man1/*
 %{_mandir}/man5/cgroup.*
+%{_mandir}/man5/cray.*
 %{_mandir}/man5/gres.*
 %{_mandir}/man5/slurm.*
 %{_mandir}/man5/topology.*
@@ -549,15 +589,22 @@ rm -rf $RPM_BUILD_ROOT
 %{_mandir}/man8/spank*
 %dir %{_sysconfdir}
 %dir %{_libdir}/slurm/src
-%config %{_sysconfdir}/cgroup.conf.example
-%config %{_sysconfdir}/cgroup.release_agent
 %config %{_sysconfdir}/slurm.conf.example
+%config %{_sysconfdir}/cgroup.conf.example
+%config %{_sysconfdir}/cgroup_allowed_devices_file.conf.example
+%config %{_sysconfdir}/cgroup.release_common.example
+%config %{_sysconfdir}/cgroup/release_freezer
+%config %{_sysconfdir}/cgroup/release_cpuset
+%config %{_sysconfdir}/cgroup/release_memory
 %config %{_sysconfdir}/slurm.epilog.clean
 %exclude %{_mandir}/man1/sjobexit*
 %if %{slurm_with blcr}
 %exclude %{_mandir}/man1/srun_cr*
 %exclude %{_bindir}/srun_cr
 %endif
+%if %{slurm_with srun2aprun}
+%exclude %{_bindir}/srun*
+%endif
 #############################################################################
 
 %files devel
@@ -653,7 +700,6 @@ rm -rf $RPM_BUILD_ROOT
 %{_libdir}/slurm/accounting_storage_slurmdbd.so
 %{_libdir}/slurm/checkpoint_none.so
 %{_libdir}/slurm/checkpoint_ompi.so
-%{_libdir}/slurm/checkpoint_xlch.so
 %{_libdir}/slurm/gres_gpu.so
 %{_libdir}/slurm/gres_nic.so
 %{_libdir}/slurm/jobacct_gather_aix.so
@@ -686,9 +732,7 @@ rm -rf $RPM_BUILD_ROOT
 %{_libdir}/slurm/sched_hold.so
 %{_libdir}/slurm/sched_wiki.so
 %{_libdir}/slurm/sched_wiki2.so
-%{_libdir}/slurm/select_bluegene.so
 %{_libdir}/slurm/select_cray.so
-%{_libdir}/slurm/select_bgq.so
 %{_libdir}/slurm/select_cons_res.so
 %{_libdir}/slurm/select_linear.so
 %{_libdir}/slurm/switch_none.so
@@ -710,6 +754,14 @@ rm -rf $RPM_BUILD_ROOT
 %{_bindir}/mpiexec
 #############################################################################
 
+%if %{slurm_with srun2aprun}
+%files srun2aprun
+
+%defattr(-,root,root)
+%{_bindir}/srun
+%endif
+#############################################################################
+
 %files sjobexit
 %defattr(-,root,root)
 %{_bindir}/sjobexitmod
@@ -786,27 +838,14 @@ if [ -x /sbin/ldconfig ]; then
 	[ -x /sbin/chkconfig ] && /sbin/chkconfig --add slurm
     fi
 fi
-if [ ! -f %{_sysconfdir}/slurm.conf ]; then
-    echo "You need to build and install a slurm.conf file"
-    echo "Edit %{_sysconfdir}/slurm.conf.example and copy it to slurm.conf or"
-    echo "Build a new one using http://www.llnl.gov/linux/slurm/configurator.html"
-fi
 
 %post slurmdbd
-if [ ! -f %{_sysconfdir}/slurmdbd.conf ]; then
-    echo "You need to build and install a slurmdbd.conf file"
-    echo "Edit %{_sysconfdir}/slurmdbd.conf.example and copy it to slurmdbd.conf"
-fi
 
 %if %{slurm_with bluegene}
 %post bluegene
 if [ -x /sbin/ldconfig ]; then
     /sbin/ldconfig %{_libdir}/slurm
 fi
-if [ ! -f %{_sysconfdir}/bluegene.conf ]; then
-    echo "You need to build and install a bluegene.conf file"
-    echo "Edit %{_sysconfdir}/bluegene.conf.example and copy it to bluegene.conf"
-fi
 %endif
 
 %preun
diff --git a/slurm/slurm.h.in b/slurm/slurm.h.in
index b3d60be87..2293f57bc 100644
--- a/slurm/slurm.h.in
+++ b/slurm/slurm.h.in
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -77,7 +77,7 @@
 
 /* Number of dimensions the system has */
 #define SYSTEM_DIMENSIONS 1
-#define HIGHEST_DIMENSIONS 4
+#define HIGHEST_DIMENSIONS 5
 
 /* Define to 1 if you have the `elan3' library (-lelan3). */
 #undef HAVE_ELAN
@@ -231,6 +231,7 @@ enum job_states {
 	JOB_FAILED,		/* completed execution unsuccessfully */
 	JOB_TIMEOUT,		/* terminated on reaching time limit */
 	JOB_NODE_FAIL,		/* terminated on node failure */
+	JOB_PREEMPTED,		/* terminated due to preemption */
 	JOB_END			/* not a real state, last entry in table */
 };
 #define	JOB_STATE_BASE	0x00ff	/* Used for job_states above */
@@ -286,7 +287,10 @@ enum job_state_reason {
 	FAIL_INACTIVE_LIMIT,	/* reached slurm InactiveLimit */
 	FAIL_ACCOUNT,   	/* invalid account */
 	FAIL_QOS,        	/* invalid QOS */
-	WAIT_QOS_THRES        	/* required QOS threshold has been breached */
+	WAIT_QOS_THRES,        	/* required QOS threshold has been breached */
+	WAIT_QOS_JOB_LIMIT,	/* QOS job limit reached */
+	WAIT_QOS_RESOURCE_LIMIT,/* QOS resource limit reached */
+	WAIT_QOS_TIME_LIMIT	/* QOS time limit reached */
 };
 
 enum job_acct_types {
@@ -330,7 +334,7 @@ enum select_jobdata_type {
 	SELECT_JOBDATA_BLOCK_ID,	/* data-> char *bg_block_id */
 	SELECT_JOBDATA_NODES,	/* data-> char *nodes */
 	SELECT_JOBDATA_IONODES,	/* data-> char *ionodes */
-	SELECT_JOBDATA_NODE_CNT,	/* data-> uint32_t node_cnt */
+	SELECT_JOBDATA_NODE_CNT,	/* data-> uint32_t cnode_cnt */
 	SELECT_JOBDATA_ALTERED,    /* data-> uint16_t altered */
 	SELECT_JOBDATA_BLRTS_IMAGE,/* data-> char *blrtsimage */
 	SELECT_JOBDATA_LINUX_IMAGE,/* data-> char *linuximage */
@@ -338,7 +342,13 @@ enum select_jobdata_type {
 	SELECT_JOBDATA_RAMDISK_IMAGE,/* data-> char *ramdiskimage */
 	SELECT_JOBDATA_REBOOT,	/* data-> uint16_t reboot */
 	SELECT_JOBDATA_RESV_ID,	/* data-> uint32_t reservation_id */
+	SELECT_JOBDATA_PAGG_ID,	/* data-> uint64_t job container ID */
 	SELECT_JOBDATA_PTR,	/* data-> select_jobinfo_t *jobinfo */
+	SELECT_JOBDATA_BLOCK_PTR, /* data-> bg_record_t *bg_record */
+	SELECT_JOBDATA_DIM_CNT, /* data-> uint16_t dim_cnt */
+	SELECT_JOBDATA_BLOCK_NODE_CNT,	/* data-> uint32_t block_cnode_cnt */
+	SELECT_JOBDATA_START_LOC, /* data-> uint16_t
+				   * start_loc[SYSTEM_DIMENSIONS] */
 };
 
 enum select_nodedata_type {
@@ -369,17 +379,18 @@ enum select_print_mode {
 	SELECT_PRINT_RAMDISK_IMAGE,/* Print just the RAMDISK IMAGE */
 	SELECT_PRINT_REBOOT,	/* Print just the REBOOT */
 	SELECT_PRINT_RESV_ID,	/* Print just Cray/BASIL reservation ID */
+	SELECT_PRINT_START_LOC,	/* Print just the start location */
 };
 
 enum select_node_cnt {
 	SELECT_GET_NODE_SCALING,      /* Give scaling factor for node count */
 	SELECT_GET_NODE_CPU_CNT,      /* Give how many cpus are on a node */
-	SELECT_GET_BP_CPU_CNT,        /* Give how many cpus are on a
+	SELECT_GET_MP_CPU_CNT,        /* Give how many cpus are on a
 				       * base partition */
 	SELECT_APPLY_NODE_MIN_OFFSET, /* Apply min offset to variable */
 	SELECT_APPLY_NODE_MAX_OFFSET, /* Apply max offset to variable */
 	SELECT_SET_NODE_CNT,	      /* Set altered node cnt */
-	SELECT_SET_BP_CNT             /* Given a node cnt return the
+	SELECT_SET_MP_CNT             /* Given a node cnt return the
 				       * base partition count */
 };
 
@@ -888,7 +899,7 @@ typedef struct job_descriptor {	/* For submit, allocate, and update requests */
 				 * 0 otherwise,default=0 */
 	char *cpu_bind;		/* binding map for map/mask_cpu */
 	uint16_t cpu_bind_type;	/* see cpu_bind_type_t */
-	char *dependency;	/* syncrhonize job execution with other jobs */
+	char *dependency;	/* synchronize job execution with other jobs */
 	time_t end_time;	/* time by which job must complete, used for
 				 * job update only now, possible deadline
 				 * scheduling in the future */
@@ -990,7 +1001,7 @@ typedef struct job_descriptor {	/* For submit, allocate, and update requests */
  */
 	uint16_t geometry[HIGHEST_DIMENSIONS];	/* node count in various
 						 * dimensions, e.g. X, Y, and Z */
-	uint16_t conn_type;	/* see enum connection_type */
+	uint16_t conn_type[HIGHEST_DIMENSIONS];	/* see enum connection_type */
 	uint16_t reboot;	/* force node reboot before startup */
 	uint16_t rotate;	/* permit geometry rotation if set */
 	char *blrtsimage;       /* BlrtsImage for block */
@@ -999,12 +1010,13 @@ typedef struct job_descriptor {	/* For submit, allocate, and update requests */
 	char *ramdiskimage;     /* RamDiskImage for block */
 
 /* End of Blue Gene specific values */
-
+	uint32_t req_switch;    /* Minimum number of switches */
 	dynamic_plugin_data_t *select_jobinfo; /* opaque data type,
 					   * SLURM internal use only */
 	char *std_err;		/* pathname of stderr */
 	char *std_in;		/* pathname of stdin */
 	char *std_out;		/* pathname of stdout */
+	uint32_t wait4switch;   /* Maximum time to wait for minimum switches */
 	char *wckey;            /* wckey for job */
 } job_desc_msg_t;
 
@@ -1014,12 +1026,14 @@ typedef struct job_info {
 	uint32_t alloc_sid;	/* local sid making resource alloc */
 	uint32_t assoc_id;	/* association id for job */
 	uint16_t batch_flag;	/* 1 if batch: queued job with script */
+	char *batch_host;	/* name of host running batch script */
+	char *batch_script;	/* contents of batch script */
 	char *command;		/* command to be executed */
 	char *comment;		/* arbitrary comment (used by Moab scheduler) */
 	uint16_t contiguous;	/* 1 if job requires contiguous nodes */
 	uint16_t cpus_per_task;	/* number of processors required for
 				 * each task */
-	char *dependency;	/* syncrhonize job execution with other jobs */
+	char *dependency;	/* synchronize job execution with other jobs */
 	uint32_t derived_ec;	/* highest exit code of all job steps */
 	time_t eligible_time;	/* time job is eligible for running */
 	time_t end_time;	/* time of termination, actual or expected */
@@ -1064,6 +1078,7 @@ typedef struct job_info {
 	int *req_node_inx;	/* required list index pairs into node_table:
 				 * start_range_1, end_range_1,
 				 * start_range_2, .., -1  */
+	uint32_t req_switch;    /* Minimum number of switches */
 	uint16_t requeue;       /* enable or disable job requeue option */
 	time_t resize_time;	/* time of latest size change */
 	uint16_t restart_cnt;	/* count of job restarts */
@@ -1084,6 +1099,8 @@ typedef struct job_info {
 	uint32_t time_limit;	/* maximum run time in minutes or INFINITE */
 	uint32_t time_min;	/* minimum run time in minutes or INFINITE */
 	uint32_t user_id;	/* user the job runs as */
+	time_t 	preempt_time;	/* preemption signal time */
+	uint32_t wait4switch;   /* Maximum time to wait for minimum switches */
 	char *wckey;            /* wckey for job */
 	char *work_dir;		/* pathname of working directory */
 } job_info_t;
@@ -1101,6 +1118,9 @@ typedef struct step_update_request_msg {
 } step_update_request_msg_t;
 
 typedef struct slurm_step_layout {
+	char *front_end;	/* If a front-end architecture, the name of
+				 * the node running all tasks,
+				 * NULL otherwise */
 	uint32_t node_cnt;	/* node count */
 	char *node_list;        /* list of nodes in step */
 	uint16_t plane_size;	/* plane size when task_dist =
@@ -1178,6 +1198,16 @@ typedef struct srun_step_missing_msg {
 	uint32_t step_id;	/* step_id or NO_VAL */
 } srun_step_missing_msg_t;
 
+enum suspend_opts {
+	SUSPEND_JOB,		/* Suspend a job now */
+	RESUME_JOB		/* Resume a job now */
+};
+
+typedef struct suspend_msg {
+	uint16_t op;            /* suspend operation, see enum suspend_opts */
+	uint32_t job_id;        /* slurm job_id */
+} suspend_msg_t;
+
 typedef struct {
 	uint16_t ckpt_interval;	/* checkpoint interval in minutes */
 	uint32_t cpu_count;	/* number of required processors */
@@ -1273,6 +1303,7 @@ typedef struct {
 	void (*timeout)(srun_timeout_msg_t *);
 	void (*user_msg)(srun_user_msg_t *);
 	void (*node_fail)(srun_node_fail_msg_t *);
+	void (*job_suspend)(suspend_msg_t *);
 } slurm_allocation_callbacks_t;
 
 typedef struct {
@@ -1299,6 +1330,10 @@ typedef struct {
 	char *partition;	/* name of assigned partition */
 	char *resv_ports;	/* ports allocated for MPI */
 	time_t run_time;	/* net run time (factor out time suspended) */
+	dynamic_plugin_data_t *select_jobinfo; /* opaque data type,
+						* process using
+						* slurm_get_select_jobinfo()
+						*/
 	time_t start_time;	/* step start time */
 	uint32_t step_id;	/* step ID */
 	uint32_t time_limit;	/* step time limit */
@@ -1344,7 +1379,9 @@ typedef struct node_info {
 				 * the node */
 	char *features;		/* list of a node's features */
 	char *gres;		/* list of a node's generic resources */
-	char *name;		/* node name */
+	char *name;		/* node name to slurm */
+	char *node_addr;	/* communication name (optional) */
+	char *node_hostname;	/* node's hostname (optional) */
 	uint16_t node_state;	/* see enum node_states */
 	char *os;		/* operating system currently running */
 	uint32_t real_memory;	/* configured MB of real memory on the node */
@@ -1376,6 +1413,26 @@ typedef struct node_info_msg {
 	node_info_t *node_array;	/* the node records */
 } node_info_msg_t;
 
+typedef struct front_end_info {
+	time_t boot_time;		/* Time of node boot,
+					 * computed from up_time */
+	char *name;			/* node name */
+	uint16_t node_state;		/* see enum node_states */
+	char *reason;			/* reason for node being DOWN or
+					 * DRAINING */
+	time_t reason_time;		/* Time stamp when reason was set,
+					 * ignore if no reason is set. */
+	uint32_t reason_uid;   		/* User that set the reason,
+					 * ignore if no reason is set. */
+	time_t slurmd_start_time;	/* Time of slurmd startup */
+} front_end_info_t;
+
+typedef struct front_end_info_msg {
+	time_t last_update;		/* time of latest info */
+	uint32_t record_count;		/* number of records */
+	front_end_info_t *front_end_array;	/* the front_end records */
+} front_end_info_msg_t;
+
 typedef struct topo_info {
 	uint16_t level;			/* level in hierarchy, leaf=0 */
 	uint32_t link_speed;		/* link speed, arbitrary units */
@@ -1413,8 +1470,11 @@ typedef struct partition_info {
 	char *allow_groups;	/* comma delimited list of groups,
 				 * null indicates all */
 	char *alternate; 	/* name of alternate partition */
+	uint32_t def_mem_per_cpu; /* default MB memory per allocated CPU */
 	uint32_t default_time;	/* minutes, NO_VAL or INFINITE */
 	uint16_t flags;		/* see PART_FLAG_* above */
+	uint32_t grace_time; 	/* preemption grace time in seconds */
+	uint32_t max_mem_per_cpu; /* maximum MB memory per allocated CPU */
 	uint32_t max_nodes;	/* per job or INFINITE */
 	uint16_t max_share;	/* number of jobs to gang schedule */
 	uint32_t max_time;	/* minutes or INFINITE */
@@ -1447,6 +1507,9 @@ typedef struct resource_allocation_response_msg {
 						 * use
 						 * slurm_get_select_jobinfo()
 						 * to access contents */
+	uint32_t pn_min_memory;  /* minimum real memory per node OR
+				  * real memory per CPU | MEM_PER_CPU,
+				  * default=0 (no limit) */
 } resource_allocation_response_msg_t;
 
 typedef struct job_alloc_info_response_msg {
@@ -1473,23 +1536,42 @@ typedef struct partition_info_msg {
 
 /* BLUEGENE specific information */
 
+typedef struct {
+	char *cnodes;           /* used for sub-block jobs */
+	int *cnode_inx;         /* list index pairs for cnodes in the
+				 * node listed for *cnodes:
+				 * start_range_1, end_range_1,
+				 * start_range_2, .., -1  used for
+				 * sub-block jobs */
+	uint32_t job_id;        /* job id */
+	void *job_ptr;          /* internal use only, not packed. */
+	uint32_t user_id;       /* ID of the user running the job */
+	char *user_name;        /* name of the user running the job */
+} block_job_info_t;
+
 typedef struct {
 	char *bg_block_id;
 	char *blrtsimage;       /* BlrtsImage for this block */
-	int *bp_inx;            /* list index pairs into node_table for *nodes:
-				 * start_range_1, end_range_1,
-				 * start_range_2, .., -1  */
-	uint16_t conn_type;
-	char *ionodes;
+	uint16_t conn_type[HIGHEST_DIMENSIONS];
+	uint32_t cnode_cnt;
 	int *ionode_inx;        /* list index pairs for ionodes in the
-				 * node listed for *ionodes:
+				 * node listed for *ionode_str:
 				 * start_range_1, end_range_1,
 				 * start_range_2, .., -1  */
+	char *ionode_str;
+	List job_list;          /* List of running jobs on each block */
 	uint32_t job_running;
 	char *linuximage;       /* LinuxImage for this block */
 	char *mloaderimage;     /* mloaderImage for this block */
-	char *nodes;
-	uint32_t node_cnt;
+	int *mp_inx;            /* list index pairs into node_table for *mp_str:
+				 * start_range_1, end_range_1,
+				 * start_range_2, .., -1  */
+	char *mp_str;
+	int *mp_used_inx;       /* list index pairs into node_table
+				 * for used *mp_str:
+				 * start_range_1, end_range_1,
+				 * start_range_2, .., -1  */
+	char *mp_used_str;
 	uint16_t node_use;
 	char *owner_name;
 	char *ramdiskimage;     /* RamDiskImage for this block */
@@ -1579,6 +1661,8 @@ void slurm_init_update_block_msg PARAMS((update_block_msg_t *update_block_msg));
 #define RESERVE_FLAG_NO_WEEKLY	0x0020	/* Clear WEEKLY flag */
 #define RESERVE_FLAG_IGN_JOBS	0x0040	/* Ignore running jobs */
 #define RESERVE_FLAG_NO_IGN_JOB	0x0080	/* Clear ignore running jobs */
+#define RESERVE_FLAG_LIC_ONLY	0x0100	/* Reserve licenses only, any nodes */
+#define RESERVE_FLAG_NO_LIC_ONLY 0x0200	/* Clear reserve licenses only flag */
 #define RESERVE_FLAG_OVERLAP	0x4000	/* Permit to overlap others */
 #define RESERVE_FLAG_SPEC_NODES	0x8000	/* Contains specific nodes */
 
@@ -1648,6 +1732,7 @@ typedef struct reservation_name_msg {
 #define DEBUG_FLAG_BACKFILL	0x00001000	/* debug for sched/backfill */
 #define DEBUG_FLAG_GANG		0x00002000	/* debug gang scheduler */
 #define DEBUG_FLAG_RESERVATION	0x00004000	/* advanced reservations */
+#define DEBUG_FLAG_FRONT_END	0x00008000	/* front-end nodes */
 
 #define GROUP_FORCE		0x8000	/* if set, update group membership
 					 * info even if no updates to
@@ -1679,6 +1764,7 @@ typedef struct slurm_ctl_conf {
 	uint32_t accounting_storage_port;/* node accountinging storage port */
 	char *accounting_storage_type; /* accounting storage type */
 	char *accounting_storage_user; /* accounting storage user */
+	uint16_t acctng_store_job_comment; /* send job comment to accounting */
 	char *authtype;		/* authentication type */
 	char *backup_addr;	/* comm path of slurmctld secondary server */
 	char *backup_controller;/* name of slurmctld secondary server */
@@ -1734,7 +1820,9 @@ typedef struct slurm_ctl_conf {
 	char *licenses;		/* licenses available on this cluster */
 	char *mail_prog;	/* pathname of mail program */
 	uint32_t max_job_cnt;	/* maximum number of active jobs */
+	uint32_t max_job_id;	/* maximum job id before using first_job_id */
 	uint32_t max_mem_per_cpu; /* maximum MB memory per allocated CPU */
+	uint32_t max_step_cnt;	/* maximum number of steps per job */
 	uint16_t max_tasks_per_node; /* maximum tasks per node */
 	uint16_t min_job_age;	/* COMPLETED jobs over this age (secs)
 				 * purged from in memory records */
@@ -1885,6 +1973,14 @@ typedef struct slurm_update_node_msg {
 	uint32_t weight;	/* new weight for node */
 } update_node_msg_t;
 
+typedef struct slurm_update_front_end_msg {
+	char *name;		/* comma separated list of front end nodes */
+	uint16_t node_state;	/* see enum node_states */
+	char *reason;		/* reason for node being DOWN or DRAINING */
+	uint32_t reason_uid;	/* user ID of sending (needed if user
+				 * root is sending message) */
+} update_front_end_msg_t;
+
 typedef struct partition_info update_part_msg_t;
 
 typedef struct job_sbcast_cred_msg {
@@ -1903,6 +1999,7 @@ typedef struct slurm_step_ctx_struct slurm_step_ctx_t;
 #define TRIGGER_RES_TYPE_SLURMCTLD      0x0003
 #define TRIGGER_RES_TYPE_SLURMDBD       0x0004
 #define TRIGGER_RES_TYPE_DATABASE       0x0005
+#define TRIGGER_RES_TYPE_FRONT_END      0x0006
 #define TRIGGER_TYPE_UP                 0x00000001
 #define TRIGGER_TYPE_DOWN               0x00000002
 #define TRIGGER_TYPE_FAIL               0x00000004
@@ -2612,6 +2709,7 @@ extern void slurm_print_job_step_info PARAMS(
  */
 extern slurm_step_layout_t *slurm_job_step_layout_get PARAMS(
 	(uint32_t job_id, uint32_t step_id));
+
 /*
  * slurm_sprint_job_step_info - output information about a specific Slurm
  *	job step based upon message as loaded using slurm_get_job_steps
@@ -2674,7 +2772,7 @@ extern int slurm_load_node PARAMS(
 	 uint16_t show_flags));
 
 /*
- * slurm_free_node_info - free the node information response message
+ * slurm_free_node_info_msg - free the node information response message
  * IN msg - pointer to node information response message
  * NOTE: buffer is loaded by slurm_load_node.
  */
@@ -2731,6 +2829,77 @@ void slurm_init_update_node_msg PARAMS((update_node_msg_t * update_node_msg));
 extern int slurm_update_node PARAMS((update_node_msg_t * node_msg));
 
 
+/*****************************************************************************\
+ *	SLURM FRONT_END CONFIGURATION READ/PRINT/UPDATE FUNCTIONS
+\*****************************************************************************/
+
+/*
+ * slurm_load_front_end - issue RPC to get slurm all front_end configuration
+ *	information if changed since update_time
+ * IN update_time - time of current configuration data
+ * IN front_end_info_msg_pptr - place to store a front_end configuration pointer
+ * RET 0 or a slurm error code
+ * NOTE: free the response using slurm_free_front_end_info_msg
+ */
+extern int slurm_load_front_end PARAMS(
+	(time_t update_time, front_end_info_msg_t **resp));
+
+/*
+ * slurm_free_front_end_info_msg - free the front_end information response
+ *	message
+ * IN msg - pointer to front_end information response message
+ * NOTE: buffer is loaded by slurm_load_front_end.
+ */
+extern void slurm_free_front_end_info_msg PARAMS(
+	(front_end_info_msg_t * front_end_buffer_ptr));
+
+/*
+ * slurm_print_front_end_info_msg - output information about all Slurm
+ *	front_ends based upon message as loaded using slurm_load_front_end
+ * IN out - file to write to
+ * IN front_end_info_msg_ptr - front_end information message pointer
+ * IN one_liner - print as a single line if true
+ */
+extern void slurm_print_front_end_info_msg PARAMS(
+	(FILE * out, front_end_info_msg_t * front_end_info_msg_ptr,
+	 int one_liner));
+/*
+ * slurm_print_front_end_table - output information about a specific Slurm
+ *	front_ends based upon message as loaded using slurm_load_front_end
+ * IN out - file to write to
+ * IN front_end_ptr - an individual front_end information record pointer
+ * IN one_liner - print as a single line if true
+ */
+extern void slurm_print_front_end_table PARAMS(
+	(FILE * out, front_end_info_t * front_end_ptr, int one_liner));
+
+/*
+ * slurm_sprint_front_end_table - output information about a specific Slurm
+ *	front_end based upon message as loaded using slurm_load_front_end
+ * IN front_end_ptr - an individual front_end information record pointer
+ * IN one_liner - print as a single line if true
+ * RET out - char * containing formatted output (must be freed after call)
+ *           NULL is returned on failure.
+ */
+extern char *slurm_sprint_front_end_table PARAMS(
+	(front_end_info_t * front_end_ptr, int one_liner));
+
+/*
+ * slurm_init_update_front_end_msg - initialize front_end node update message
+ * OUT update_front_end_msg - user defined node descriptor
+ */
+void slurm_init_update_front_end_msg PARAMS(
+	(update_front_end_msg_t * update_front_end_msg));
+
+/*
+ * slurm_update_front_end - issue RPC to a front_end node's configuration per
+ *	request, only usable by user root
+ * IN front_end_msg - description of front_end node updates
+ * RET 0 on success, otherwise return -1 and set errno to indicate the error
+ */
+extern int slurm_update_front_end PARAMS(
+	(update_front_end_msg_t * front_end_msg));
+
 /*****************************************************************************\
  *	SLURM SWITCH TOPOLOGY CONFIGURATION READ/PRINT FUNCTIONS
 \*****************************************************************************/
@@ -3021,6 +3190,15 @@ extern int slurm_shutdown PARAMS((uint16_t options));
  */
 extern int slurm_takeover PARAMS((void));
 
+/*
+ * slurm_set_debugflags - issue RPC to set slurm controller debug flags
+ * IN debug_flags_plus  - debug flags to be added
+ * IN debug_flags_minus - debug flags to be removed
+ * RET 0 on success, otherwise return -1 and set errno to indicate the error
+ */
+extern int slurm_set_debugflags PARAMS((uint32_t debug_flags_plus,
+					uint32_t debug_flags_minus));
+
 /*
  * slurm_set_debug_level - issue RPC to set slurm controller debug level
  * IN debug_level - requested debug level
@@ -3247,6 +3425,7 @@ extern int slurm_pull_trigger PARAMS((trigger_info_t * trigger_pull));
  * slurm_get_triggers()
  */
 extern void slurm_free_trigger_msg PARAMS((trigger_info_msg_t * trigger_free));
+
 END_C_DECLS
 
 #endif
diff --git a/slurm/slurm_errno.h b/slurm/slurm_errno.h
index 144d69c04..e26b5a933 100644
--- a/slurm/slurm_errno.h
+++ b/slurm/slurm_errno.h
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -121,7 +121,7 @@ enum {
 	ESLURM_ERROR_ON_DESC_TO_RECORD_COPY,
 	ESLURM_JOB_MISSING_SIZE_SPECIFICATION,
 	ESLURM_JOB_SCRIPT_MISSING,
-	ESLURM_USER_ID_MISSING,
+	ESLURM_USER_ID_MISSING =			2010,
 	ESLURM_DUPLICATE_JOB_ID,
 	ESLURM_PATHNAME_TOO_LONG,
 	ESLURM_NOT_TOP_PRIORITY,
@@ -131,7 +131,7 @@ enum {
 	ESLURM_INVALID_JOB_ID,
 	ESLURM_INVALID_NODE_NAME,
 	ESLURM_WRITING_TO_FILE,
-	ESLURM_TRANSITION_STATE_NO_UPDATE,
+	ESLURM_TRANSITION_STATE_NO_UPDATE =		2020,
 	ESLURM_ALREADY_DONE,
 	ESLURM_INTERCONNECT_FAILURE,
 	ESLURM_BAD_DIST,
@@ -141,7 +141,7 @@ enum {
 	ESLURM_IN_STANDBY_MODE,
 	ESLURM_INVALID_NODE_STATE,
 	ESLURM_INVALID_FEATURE,
-	ESLURM_INVALID_AUTHTYPE_CHANGE,
+	ESLURM_INVALID_AUTHTYPE_CHANGE =		2030,
 	ESLURM_INVALID_CHECKPOINT_TYPE_CHANGE,
 	ESLURM_INVALID_SCHEDTYPE_CHANGE,
 	ESLURM_INVALID_SELECTTYPE_CHANGE,
@@ -151,7 +151,7 @@ enum {
 	ESLURM_DISABLED,
 	ESLURM_DEPENDENCY,
 	ESLURM_BATCH_ONLY,
-	ESLURM_TASKDIST_ARBITRARY_UNSUPPORTED,
+	ESLURM_TASKDIST_ARBITRARY_UNSUPPORTED =		2040,
 	ESLURM_TASKDIST_REQUIRES_OVERCOMMIT,
 	ESLURM_JOB_HELD,
 	ESLURM_INVALID_CRYPTO_TYPE_CHANGE,
@@ -161,7 +161,7 @@ enum {
 	ESLURM_SAME_PARENT_ACCOUNT,
 	ESLURM_INVALID_LICENSES,
 	ESLURM_NEED_RESTART,
-	ESLURM_ACCOUNTING_POLICY,
+	ESLURM_ACCOUNTING_POLICY =			2050,
 	ESLURM_INVALID_TIME_LIMIT,
 	ESLURM_RESERVATION_ACCESS,
 	ESLURM_RESERVATION_INVALID,
@@ -171,7 +171,7 @@ enum {
 	ESLURM_INVALID_WCKEY,
 	ESLURM_RESERVATION_OVERLAP,
 	ESLURM_PORTS_BUSY,
-	ESLURM_PORTS_INVALID,
+	ESLURM_PORTS_INVALID =				2060,
 	ESLURM_PROLOG_RUNNING,
 	ESLURM_NO_STEPS,
 	ESLURM_INVALID_BLOCK_STATE,
@@ -181,11 +181,14 @@ enum {
 	ESLURM_QOS_PREEMPTION_LOOP,
 	ESLURM_NODE_NOT_AVAIL,
 	ESLURM_INVALID_CPU_COUNT,
-	ESLURM_PARTITION_NOT_AVAIL,
+	ESLURM_PARTITION_NOT_AVAIL =			2070,
 	ESLURM_CIRCULAR_DEPENDENCY,
 	ESLURM_INVALID_GRES,
 	ESLURM_JOB_NOT_PENDING,
 	ESLURM_QOS_THRES,
+	ESLURM_PARTITION_IN_USE,
+	ESLURM_STEP_LIMIT,
+	ESLURM_JOB_SUSPENDED,
 
 	/* switch specific error codes, specific values defined in plugin module */
 	ESLURM_SWITCH_MIN = 3000,
diff --git a/slurm/slurmdb.h b/slurm/slurmdb.h
index 0e9b3c839..608a83002 100644
--- a/slurm/slurmdb.h
+++ b/slurm/slurmdb.h
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -529,7 +529,7 @@ typedef struct {
 	char	*blockid;
 	char    *cluster;
 	uint32_t derived_ec;
-	char	*derived_es;
+	char	*derived_es; /* aka "comment" */
 	uint32_t elapsed;
 	time_t eligible;
 	time_t end;
@@ -572,6 +572,7 @@ typedef struct {
 	uint32_t id;
 	uint32_t flags; /* flags for various things to enforce or
 			   override other limits */
+	uint32_t grace_time; /* preemption grace time */
 	uint64_t grp_cpu_mins; /* max number of cpu minutes all jobs
 				* running under this qos can run for */
 	uint64_t grp_cpu_run_mins; /* max number of cpu minutes all jobs
@@ -594,10 +595,14 @@ typedef struct {
 				   * using this qos */
 	uint32_t max_cpus_pj; /* max number of cpus a job can
 			       * allocate with this qos */
+	uint32_t max_cpus_pu; /* max number of cpus a user can
+			       * allocate with this qos at one time */
 	uint32_t max_jobs_pu;	/* max number of jobs a user can
 				 * run with this qos at one time */
 	uint32_t max_nodes_pj; /* max number of nodes a job can
 				* allocate with this qos at one time */
+	uint32_t max_nodes_pu; /* max number of nodes a user can
+				* allocate with this qos at one time */
 	uint32_t max_submit_jobs_pu; /* max number of jobs a user can
 					submit with this qos at once */
 	uint32_t max_wall_pj; /* longest time this
@@ -720,7 +725,9 @@ typedef struct {
 typedef struct {
 	uint64_t cpu_run_mins; /* how many cpu mins are allocated
 				* currently */
+	uint32_t cpus; /* count of CPUs allocated */
 	uint32_t jobs;	/* count of active jobs */
+	uint32_t nodes;	/* count of nodes allocated */
 	uint32_t submit_jobs; /* count of jobs pending or running */
 	uint32_t uid;
 } slurmdb_used_limits_t;
@@ -731,7 +738,7 @@ typedef struct {
 	slurmdb_association_cond_t *assoc_cond; /* use user_list here for
 						   names and acct_list for
 						   default accounts */
-	List def_acct_list; /* list of char * (We can't readly use
+	List def_acct_list; /* list of char * (We can't really use
 			     * the assoc_cond->acct_list for this
 			     * because then it is impossible for us
 			     * to tell which accounts are defaults
@@ -1102,7 +1109,7 @@ extern List slurmdb_coord_remove(void *db_conn, List acct_list,
 
 /*
  * get info from the storage
- * RET: List of config_key_pairs_t *
+ * RET: List of config_key_pair_t *
  * note List needs to be freed with slurm_list_destroy() when called
  */
 extern List slurmdb_config_get(void *db_conn);
diff --git a/slurm/spank.h b/slurm/spank.h
index eca9ea6a7..8881ffc0d 100644
--- a/slurm/spank.h
+++ b/slurm/spank.h
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/Makefile.am b/src/Makefile.am
index 92aec49a4..f9c8dcc04 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -1,14 +1,18 @@
-if WITH_BLCR
-SRUN_CR = srun_cr
-else
-SRUN_CR =
-endif
-
 SUBDIRS = common api db_api database \
-	slurmctld slurmd slurmdbd plugins srun sbcast \
+	slurmctld slurmd slurmdbd plugins sbcast \
 	scontrol scancel squeue sinfo smap sview salloc \
 	sbatch sattach strigger sacct sacctmgr sreport sstat \
-	sshare sprio $(SRUN_CR)
+	sshare sprio
+
+if !BUILD_SRUN2APRUN
+if !REAL_BG_L_P_LOADED
+SUBDIRS += srun
+endif
+endif
+
+if WITH_BLCR
+SUBDIRS += srun_cr
+endif
 
 
 
diff --git a/src/Makefile.in b/src/Makefile.in
index 1f61d756b..1e37bab4e 100644
--- a/src/Makefile.in
+++ b/src/Makefile.in
@@ -34,6 +34,8 @@ POST_UNINSTALL = :
 build_triplet = @build@
 host_triplet = @host@
 target_triplet = @target@
+@BUILD_SRUN2APRUN_FALSE@@REAL_BG_L_P_LOADED_FALSE@am__append_1 = srun
+@WITH_BLCR_TRUE@am__append_2 = srun_cr
 subdir = src
 DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in
 ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
@@ -60,6 +62,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -70,6 +73,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -95,9 +99,9 @@ AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \
 ETAGS = etags
 CTAGS = ctags
 DIST_SUBDIRS = common api db_api database slurmctld slurmd slurmdbd \
-	plugins srun sbcast scontrol scancel squeue sinfo smap sview \
-	salloc sbatch sattach strigger sacct sacctmgr sreport sstat \
-	sshare sprio srun_cr
+	plugins sbcast scontrol scancel squeue sinfo smap sview salloc \
+	sbatch sattach strigger sacct sacctmgr sreport sstat sshare \
+	sprio srun srun_cr
 DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
 am__relativize = \
   dir0=`pwd`; \
@@ -134,7 +138,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -171,6 +178,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -228,6 +236,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -263,6 +272,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
@@ -315,14 +325,10 @@ target_vendor = @target_vendor@
 top_build_prefix = @top_build_prefix@
 top_builddir = @top_builddir@
 top_srcdir = @top_srcdir@
-@WITH_BLCR_FALSE@SRUN_CR = 
-@WITH_BLCR_TRUE@SRUN_CR = srun_cr
-SUBDIRS = common api db_api database \
-	slurmctld slurmd slurmdbd plugins srun sbcast \
-	scontrol scancel squeue sinfo smap sview salloc \
-	sbatch sattach strigger sacct sacctmgr sreport sstat \
-	sshare sprio $(SRUN_CR)
-
+SUBDIRS = common api db_api database slurmctld slurmd slurmdbd plugins \
+	sbcast scontrol scancel squeue sinfo smap sview salloc sbatch \
+	sattach strigger sacct sacctmgr sreport sstat sshare sprio \
+	$(am__append_1) $(am__append_2)
 all: all-recursive
 
 .SUFFIXES:
diff --git a/src/api/Makefile.am b/src/api/Makefile.am
index 12eac1e6f..b03bf4eef 100644
--- a/src/api/Makefile.am
+++ b/src/api/Makefile.am
@@ -44,7 +44,7 @@ current = $(SLURM_API_CURRENT)
 age     = $(SLURM_API_AGE)
 rev     = $(SLURM_API_REVISION)
 
-# libpmi version informaiton
+# libpmi version information
 #
 # The libpmi interface shouldn't be changing any time soon, so for SLURM's
 # libpmi only the library REVISION and AGE should change (and it is debatable
@@ -75,19 +75,22 @@ BUILT_SOURCES = $(VERSION_SCRIPT) $(PMI_VERSION_SCRIPT) libslurm.la
 # Also, libslurmhelper, libslurm.o are for convenience, they are not installed.
 noinst_LTLIBRARIES = libslurmhelper.la
 noinst_PROGRAMS = libslurm.o
+# This is needed if compiling on windows
+EXEEXT=
 
 slurmapi_src =           \
 	allocate.c       \
 	allocate_msg.c   \
+	block_info.c     \
 	cancel.c         \
 	checkpoint.c     \
 	complete.c       \
 	config_info.c    \
+	front_end_info.c \
 	init_msg.c       \
 	job_info.c job_info.h \
 	job_step_info.c  \
 	node_info.c      \
-	block_info.c \
 	partition_info.c \
 	reservation_info.c \
 	signal.c         \
@@ -159,6 +162,7 @@ $(VERSION_SCRIPT) :
 	(echo "{ global:";   \
 	 echo "   islurm_*;"; \
 	 echo "   slurm_*;"; \
+	 echo "   slurmdb_*;"; \
 	 echo "  local: *;"; \
 	 echo "};") > $(VERSION_SCRIPT)
 
diff --git a/src/api/Makefile.in b/src/api/Makefile.in
index 134310305..71bb6c0e5 100644
--- a/src/api/Makefile.in
+++ b/src/api/Makefile.in
@@ -66,6 +66,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -76,6 +77,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -122,13 +124,13 @@ libslurm_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
 am__DEPENDENCIES_1 = $(common_dir)/libcommon.la \
 	$(common_dir)/libspank.la $(common_dir)/libeio.la
 libslurmhelper_la_DEPENDENCIES = $(am__DEPENDENCIES_1)
-am__objects_1 = allocate.lo allocate_msg.lo cancel.lo checkpoint.lo \
-	complete.lo config_info.lo init_msg.lo job_info.lo \
-	job_step_info.lo node_info.lo block_info.lo partition_info.lo \
-	reservation_info.lo signal.lo slurm_hostlist.lo slurm_pmi.lo \
-	step_ctx.lo step_io.lo step_launch.lo pmi_server.lo submit.lo \
-	suspend.lo topo_info.lo triggers.lo reconfigure.lo \
-	update_config.lo
+am__objects_1 = allocate.lo allocate_msg.lo block_info.lo cancel.lo \
+	checkpoint.lo complete.lo config_info.lo front_end_info.lo \
+	init_msg.lo job_info.lo job_step_info.lo node_info.lo \
+	partition_info.lo reservation_info.lo signal.lo \
+	slurm_hostlist.lo slurm_pmi.lo step_ctx.lo step_io.lo \
+	step_launch.lo pmi_server.lo submit.lo suspend.lo topo_info.lo \
+	triggers.lo reconfigure.lo update_config.lo
 am_libslurmhelper_la_OBJECTS = $(am__objects_1)
 libslurmhelper_la_OBJECTS = $(am_libslurmhelper_la_OBJECTS)
 PROGRAMS = $(noinst_PROGRAMS)
@@ -167,7 +169,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -193,7 +198,8 @@ ECHO_N = @ECHO_N@
 ECHO_T = @ECHO_T@
 EGREP = @EGREP@
 ELAN_LIBS = @ELAN_LIBS@
-EXEEXT = @EXEEXT@
+# This is needed if compiling on windows
+EXEEXT = 
 FEDERATION_LDFLAGS = @FEDERATION_LDFLAGS@
 FGREP = @FGREP@
 GREP = @GREP@
@@ -204,6 +210,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -261,6 +268,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -296,6 +304,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
@@ -391,7 +400,7 @@ current = $(SLURM_API_CURRENT)
 age = $(SLURM_API_AGE)
 rev = $(SLURM_API_REVISION)
 
-# libpmi version informaiton
+# libpmi version information
 #
 # The libpmi interface shouldn't be changing any time soon, so for SLURM's
 # libpmi only the library REVISION and AGE should change (and it is debatable
@@ -423,15 +432,16 @@ noinst_LTLIBRARIES = libslurmhelper.la
 slurmapi_src = \
 	allocate.c       \
 	allocate_msg.c   \
+	block_info.c     \
 	cancel.c         \
 	checkpoint.c     \
 	complete.c       \
 	config_info.c    \
+	front_end_info.c \
 	init_msg.c       \
 	job_info.c job_info.h \
 	job_step_info.c  \
 	node_info.c      \
-	block_info.c \
 	partition_info.c \
 	reservation_info.c \
 	signal.c         \
@@ -601,6 +611,7 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/checkpoint.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/complete.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/config_info.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/front_end_info.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/init_msg.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/job_info.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/job_step_info.Plo@am__quote@
@@ -868,6 +879,7 @@ $(VERSION_SCRIPT) :
 	(echo "{ global:";   \
 	 echo "   islurm_*;"; \
 	 echo "   slurm_*;"; \
+	 echo "   slurmdb_*;"; \
 	 echo "  local: *;"; \
 	 echo "};") > $(VERSION_SCRIPT)
 
diff --git a/src/api/allocate.c b/src/api/allocate.c
index ca4c9c5a9..022c25b43 100644
--- a/src/api/allocate.c
+++ b/src/api/allocate.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -55,9 +55,9 @@
 extern pid_t getsid(pid_t pid);		/* missing from <unistd.h> */
 #endif
 
-#include <slurm/slurm.h>
 #include <stdlib.h>
 
+#include "slurm/slurm.h"
 #include "src/common/read_config.h"
 #include "src/common/slurm_protocol_api.h"
 #include "src/common/hostlist.h"
diff --git a/src/api/allocate_msg.c b/src/api/allocate_msg.c
index 5513517ad..a4342b307 100644
--- a/src/api/allocate_msg.c
+++ b/src/api/allocate_msg.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -50,7 +50,7 @@
 #include <signal.h>
 #include <pthread.h>
 
-#include <slurm/slurm.h>
+#include "slurm/slurm.h"
 
 #include "src/common/slurm_protocol_defs.h"
 #include "src/common/slurm_protocol_api.h"
@@ -74,9 +74,9 @@ static void _handle_msg(void *arg, slurm_msg_t *msg);
 static pthread_mutex_t msg_thr_start_lock = PTHREAD_MUTEX_INITIALIZER;
 static pthread_cond_t msg_thr_start_cond = PTHREAD_COND_INITIALIZER;
 static struct io_operations message_socket_ops = {
-	readable:	&eio_message_socket_readable,
-	handle_read:	&eio_message_socket_accept,
-	handle_msg:     &_handle_msg
+	.readable = &eio_message_socket_readable,
+	.handle_read = &eio_message_socket_accept,
+	.handle_msg = &_handle_msg
 };
 
 static void *_msg_thr_internal(void *arg)
@@ -224,6 +224,7 @@ static void _handle_ping(struct allocation_msg_thread *msg_thr,
 
 	slurm_free_srun_ping_msg(msg->data);
 }
+
 static void _handle_job_complete(struct allocation_msg_thread *msg_thr,
 				 slurm_msg_t *msg)
 {
@@ -236,6 +237,18 @@ static void _handle_job_complete(struct allocation_msg_thread *msg_thr,
 	slurm_free_srun_job_complete_msg(msg->data);
 }
 
+static void _handle_suspend(struct allocation_msg_thread *msg_thr,
+			    slurm_msg_t *msg)
+{
+	suspend_msg_t *sus_msg = (suspend_msg_t *)msg->data;
+	debug3("received suspend message");
+
+	if (msg_thr->callback.job_suspend != NULL)
+		(msg_thr->callback.job_suspend)(sus_msg);
+
+	slurm_free_suspend_msg(msg->data);
+}
+
 static void
 _handle_msg(void *arg, slurm_msg_t *msg)
 {
@@ -266,6 +279,9 @@ _handle_msg(void *arg, slurm_msg_t *msg)
 	case SRUN_NODE_FAIL:
 		_handle_node_fail(msg_thr, msg);
 		break;
+	case SRUN_REQUEST_SUSPEND:
+		_handle_suspend(msg_thr, msg);
+		break;
 	default:
 		error("received spurious message type: %d",
 		      msg->msg_type);
diff --git a/src/api/block_info.c b/src/api/block_info.c
index b50629571..06aa3f86a 100644
--- a/src/api/block_info.c
+++ b/src/api/block_info.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -55,15 +55,13 @@
 #include <arpa/inet.h>
 #include <unistd.h>
 
-#include <slurm/slurm.h>
+#include "slurm/slurm.h"
 
 #include "src/common/node_select.h"
 #include "src/common/parse_time.h"
 #include "src/common/slurm_protocol_api.h"
 #include "src/common/xmalloc.h"
 #include "src/common/xstring.h"
-#include "src/plugins/select/bluegene/plugin/bluegene.h"
-
 
 /*
  * slurm_print_block_info_msg - output information about all Bluegene
@@ -117,7 +115,7 @@ char *slurm_sprint_block_info(
 	block_info_t * block_ptr, int one_liner)
 {
 	int j;
-	char tmp1[16];
+	char tmp1[16], *tmp_char = NULL;
 	char *out = NULL;
 	char *line_end = "\n   ";
 	uint32_t cluster_flags = slurmdb_setup_cluster_flags();
@@ -126,7 +124,7 @@ char *slurm_sprint_block_info(
 		line_end = " ";
 
 	/****** Line 1 ******/
-	convert_num_unit((float)block_ptr->node_cnt, tmp1, sizeof(tmp1),
+	convert_num_unit((float)block_ptr->cnode_cnt, tmp1, sizeof(tmp1),
 			 UNIT_NONE);
 
 	out = xstrdup_printf("BlockName=%s TotalNodes=%s State=%s%s",
@@ -139,10 +137,10 @@ char *slurm_sprint_block_info(
 		xstrfmtcat(out, "JobRunning=%u ", block_ptr->job_running);
 	else
 		xstrcat(out, "JobRunning=NONE ");
-
+	tmp_char = conn_type_string_full(block_ptr->conn_type);
 	xstrfmtcat(out, "User=%s ConnType=%s",
-		   block_ptr->owner_name,
-		   conn_type_string(block_ptr->conn_type));
+		   block_ptr->owner_name, tmp_char);
+	xfree(tmp_char);
 	if(cluster_flags & CLUSTER_FLAG_BGL)
 		xstrfmtcat(out, " NodeUse=%s",
 			   node_use_string(block_ptr->node_use));
@@ -150,19 +148,19 @@ char *slurm_sprint_block_info(
 	xstrcat(out, line_end);
 
 	/****** Line 3 ******/
-	if(block_ptr->ionodes)
-		xstrfmtcat(out, "BasePartitions=%s[%s] BPIndices=",
-			   block_ptr->nodes, block_ptr->ionodes);
+	if(block_ptr->ionode_str)
+		xstrfmtcat(out, "MidPlanes=%s[%s] MPIndices=",
+			   block_ptr->mp_str, block_ptr->ionode_str);
 	else
-		xstrfmtcat(out, "BasePartitions=%s BPIndices=",
-			   block_ptr->nodes);
+		xstrfmtcat(out, "MidPlanes=%s MPIndices=",
+			   block_ptr->mp_str);
 	for (j = 0;
-	     (block_ptr->bp_inx && (block_ptr->bp_inx[j] != -1));
+	     (block_ptr->mp_inx && (block_ptr->mp_inx[j] != -1));
 	     j+=2) {
 		if (j > 0)
 			xstrcat(out, ",");
-		xstrfmtcat(out, "%d-%d", block_ptr->bp_inx[j],
-			   block_ptr->bp_inx[j+1]);
+		xstrfmtcat(out, "%d-%d", block_ptr->mp_inx[j],
+			   block_ptr->mp_inx[j+1]);
 	}
 	xstrcat(out, line_end);
 
@@ -170,7 +168,7 @@ char *slurm_sprint_block_info(
 	xstrfmtcat(out, "MloaderImage=%s%s",
 		   block_ptr->mloaderimage, line_end);
 
-	if(cluster_flags & CLUSTER_FLAG_BGL) {
+	if (cluster_flags & CLUSTER_FLAG_BGL) {
 		/****** Line 5 ******/
 		xstrfmtcat(out, "BlrtsImage=%s%s", block_ptr->blrtsimage,
 			   line_end);
@@ -179,13 +177,14 @@ char *slurm_sprint_block_info(
 			   line_end);
 		/****** Line 7 ******/
 		xstrfmtcat(out, "RamdiskImage=%s", block_ptr->ramdiskimage);
-	} else {
+	} else if (cluster_flags & CLUSTER_FLAG_BGP) {
 		/****** Line 5 ******/
 		xstrfmtcat(out, "CnloadImage=%s%s", block_ptr->linuximage,
 			   line_end);
 		/****** Line 6 ******/
 		xstrfmtcat(out, "IoloadImage=%s", block_ptr->ramdiskimage);
 	}
+
 	if (one_liner)
 		xstrcat(out, "\n");
 	else
diff --git a/src/api/cancel.c b/src/api/cancel.c
index 3633bf8d3..1f6f366dc 100644
--- a/src/api/cancel.c
+++ b/src/api/cancel.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -45,7 +45,7 @@
 #include <stdio.h>
 #include <stdlib.h>
 
-#include <slurm/slurm.h>
+#include "slurm/slurm.h"
 
 #include "src/common/macros.h"
 #include "src/common/slurm_protocol_api.h"
diff --git a/src/api/checkpoint.c b/src/api/checkpoint.c
index edb0550c0..33c783b27 100644
--- a/src/api/checkpoint.c
+++ b/src/api/checkpoint.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -47,8 +47,8 @@
 #include <sys/stat.h>
 #include <unistd.h>
 #include <stdlib.h>
-#include <slurm/slurm.h>
 
+#include "slurm/slurm.h"
 #include "src/common/checkpoint.h"
 #include "src/common/slurm_protocol_api.h"
 
diff --git a/src/api/complete.c b/src/api/complete.c
index a6277de17..c124884ca 100644
--- a/src/api/complete.c
+++ b/src/api/complete.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -45,7 +45,7 @@
 #include <stdio.h>
 #include <stdlib.h>
 
-#include <slurm/slurm.h>
+#include "slurm/slurm.h"
 
 #include "src/common/read_config.h"
 #include "src/common/slurm_protocol_api.h"
diff --git a/src/api/config_info.c b/src/api/config_info.c
index e52d499e2..c0b9359ab 100644
--- a/src/api/config_info.c
+++ b/src/api/config_info.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -46,7 +46,7 @@
 #include <stdio.h>
 #include <stdlib.h>
 
-#include <slurm/slurm.h>
+#include "slurm/slurm.h"
 
 #include "src/common/parse_time.h"
 #include "src/common/read_config.h"
@@ -68,48 +68,6 @@ extern long slurm_api_version (void)
 	return (long) SLURM_API_VERSION;
 }
 
-
-static char *
-_select_info(uint16_t select_type_param)
-{
-	static char select_str[64];
-
-	select_str[0] = '\0';
-	if ((select_type_param & CR_CPU) &&
-	    (select_type_param & CR_MEMORY))
-		strcat(select_str, "CR_CPU_MEMORY");
-	else if ((select_type_param & CR_CORE) &&
-		 (select_type_param & CR_MEMORY))
-		strcat(select_str, "CR_CORE_MEMORY");
-	else if ((select_type_param & CR_SOCKET) &&
-		 (select_type_param & CR_MEMORY))
-		strcat(select_str, "CR_SOCKET_MEMORY");
-	else if (select_type_param & CR_CPU)
-		strcat(select_str, "CR_CPU");
-	else if (select_type_param & CR_CORE)
-		strcat(select_str, "CR_CORE");
-	else if (select_type_param & CR_SOCKET)
-		strcat(select_str, "CR_SOCKET");
-	else if (select_type_param & CR_MEMORY)
-		strcat(select_str, "CR_MEMORY");
-
-	if (select_type_param & CR_ONE_TASK_PER_CORE) {
-		if (select_str[0])
-			strcat(select_str, ",");
-		strcat(select_str, "CR_ONE_TASK_PER_CORE");
-	}
-	if (select_type_param & CR_CORE_DEFAULT_DIST_BLOCK) {
-		if (select_str[0])
-			strcat(select_str, ",");
-		strcat(select_str, "CR_CORE_DEFAULT_DIST_BLOCK");
-	}
-
-	if (select_str[0] == '\0')
-		strcat(select_str, "NONE");
-
-	return select_str;
-}
-
 static char *
 _reset_period_str(uint16_t reset_period)
 {
@@ -226,6 +184,14 @@ extern void *slurm_ctl_conf_2_key_pairs (slurm_ctl_conf_t* slurm_ctl_conf_ptr)
 	key_pair->value = xstrdup(slurm_ctl_conf_ptr->accounting_storage_user);
 	list_append(ret_list, key_pair);
 
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("AccountingStoreJobComment");
+	if (slurm_ctl_conf_ptr->acctng_store_job_comment)
+		key_pair->value = xstrdup("YES");
+	else
+		key_pair->value = xstrdup("NO");
+	list_append(ret_list, key_pair);
+
 	key_pair = xmalloc(sizeof(config_key_pair_t));
 	key_pair->name = xstrdup("AuthType");
 	key_pair->value = xstrdup(slurm_ctl_conf_ptr->authtype);
@@ -297,22 +263,24 @@ extern void *slurm_ctl_conf_2_key_pairs (slurm_ctl_conf_t* slurm_ctl_conf_ptr)
 
 	key_pair = xmalloc(sizeof(config_key_pair_t));
 	key_pair->name = xstrdup("DebugFlags");
-	key_pair->value = debug_flags2str(slurm_ctl_conf_ptr->debug_flags);;
+	key_pair->value = debug_flags2str(slurm_ctl_conf_ptr->debug_flags);
 	list_append(ret_list, key_pair);
 
 	key_pair = xmalloc(sizeof(config_key_pair_t));
 	list_append(ret_list, key_pair);
-	key_pair->name = xstrdup("DefMemPerCPU");
 	if (slurm_ctl_conf_ptr->def_mem_per_cpu & MEM_PER_CPU) {
+		key_pair->name = xstrdup("DefMemPerCPU");
 		snprintf(tmp_str, sizeof(tmp_str), "%u",
 			 slurm_ctl_conf_ptr->def_mem_per_cpu &
 			 (~MEM_PER_CPU));
 		key_pair->value = xstrdup(tmp_str);
 	} else if (slurm_ctl_conf_ptr->def_mem_per_cpu) {
+		key_pair->name = xstrdup("DefMemPerNode");
 		snprintf(tmp_str, sizeof(tmp_str), "%u",
 			 slurm_ctl_conf_ptr->def_mem_per_cpu);
 		key_pair->value = xstrdup(tmp_str);
 	} else {
+		key_pair->name = xstrdup("DefMemPerNode");
 		key_pair->value = xstrdup("UNLIMITED");
 	}
 
@@ -391,8 +359,8 @@ extern void *slurm_ctl_conf_2_key_pairs (slurm_ctl_conf_t* slurm_ctl_conf_ptr)
 	key_pair->value = xstrdup(tmp_str);
 	list_append(ret_list, key_pair);
 
-	if(slurm_ctl_conf_ptr->hash_val != NO_VAL) {
-		if(slurm_ctl_conf_ptr->hash_val == slurm_get_hash_val())
+	if (slurm_ctl_conf_ptr->hash_val != NO_VAL) {
+		if (slurm_ctl_conf_ptr->hash_val == slurm_get_hash_val())
 			snprintf(tmp_str, sizeof(tmp_str), "Match");
 		else {
 			snprintf(tmp_str, sizeof(tmp_str),
@@ -401,7 +369,7 @@ extern void *slurm_ctl_conf_2_key_pairs (slurm_ctl_conf_t* slurm_ctl_conf_ptr)
 				 slurm_ctl_conf_ptr->hash_val);
 		}
 		key_pair = xmalloc(sizeof(config_key_pair_t));
-		key_pair->name = xstrdup("HashVal");
+		key_pair->name = xstrdup("HASH_VAL");
 		key_pair->value = xstrdup(tmp_str);
 		list_append(ret_list, key_pair);
 	}
@@ -538,22 +506,38 @@ extern void *slurm_ctl_conf_2_key_pairs (slurm_ctl_conf_t* slurm_ctl_conf_ptr)
 	key_pair->value = xstrdup(tmp_str);
 	list_append(ret_list, key_pair);
 
+	snprintf(tmp_str, sizeof(tmp_str), "%u",
+		 slurm_ctl_conf_ptr->max_job_id);
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("MaxJobId");
+	key_pair->value = xstrdup(tmp_str);
+	list_append(ret_list, key_pair);
+
 	key_pair = xmalloc(sizeof(config_key_pair_t));
 	list_append(ret_list, key_pair);
-	key_pair->name = xstrdup("MaxMemPerCPU");
 	if (slurm_ctl_conf_ptr->max_mem_per_cpu & MEM_PER_CPU) {
+		key_pair->name = xstrdup("MaxMemPerCPU");
 		snprintf(tmp_str, sizeof(tmp_str), "%u",
 			 slurm_ctl_conf_ptr->max_mem_per_cpu & (~MEM_PER_CPU));
 		key_pair->value = xstrdup(tmp_str);
 
 	} else if (slurm_ctl_conf_ptr->max_mem_per_cpu) {
+		key_pair->name = xstrdup("MaxMemPerNode");
 		snprintf(tmp_str, sizeof(tmp_str), "%u",
 			 slurm_ctl_conf_ptr->max_mem_per_cpu);
 		key_pair->value = xstrdup(tmp_str);
 	} else {
+		key_pair->name = xstrdup("MaxMemPerNode");
 		key_pair->value = xstrdup("UNLIMITED");
 	}
 
+	snprintf(tmp_str, sizeof(tmp_str), "%u",
+		 slurm_ctl_conf_ptr->max_step_cnt);
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("MaxStepCount");
+	key_pair->value = xstrdup(tmp_str);
+	list_append(ret_list, key_pair);
+
 	snprintf(tmp_str, sizeof(tmp_str), "%u",
 		 slurm_ctl_conf_ptr->max_tasks_per_node);
 	key_pair = xmalloc(sizeof(config_key_pair_t));
@@ -834,7 +818,8 @@ extern void *slurm_ctl_conf_2_key_pairs (slurm_ctl_conf_t* slurm_ctl_conf_ptr)
 		key_pair = xmalloc(sizeof(config_key_pair_t));
 		key_pair->name = xstrdup("SelectTypeParameters");
 		key_pair->value = xstrdup(
-			_select_info(slurm_ctl_conf_ptr->select_type_param));
+			sched_param_type_string(slurm_ctl_conf_ptr->
+						select_type_param));
 		list_append(ret_list, key_pair);
 	}
 
diff --git a/src/api/front_end_info.c b/src/api/front_end_info.c
new file mode 100644
index 000000000..dfa21d719
--- /dev/null
+++ b/src/api/front_end_info.c
@@ -0,0 +1,218 @@
+/*****************************************************************************\
+ *  front_end_info.c - get/print the state information of slurm
+ *****************************************************************************
+ *  Copyright (C) 2002-2007 The Regents of the University of California.
+ *  Copyright (C) 2008-2010 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Morris Jette <jette1@llnl.gov> et. al.
+ *  CODE-OCEC-09-009. All rights reserved.
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifdef HAVE_CONFIG_H
+#  include "config.h"
+#endif
+
+#ifdef HAVE_SYS_SYSLOG_H
+#  include <sys/syslog.h>
+#endif
+
+#include <arpa/inet.h>
+#include <errno.h>
+#include <netinet/in.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <syslog.h>
+#include <unistd.h>
+
+#include "slurm/slurm.h"
+
+#include "src/common/parse_time.h"
+#include "src/common/slurm_protocol_api.h"
+#include "src/common/uid.h"
+#include "src/common/xmalloc.h"
+#include "src/common/xstring.h"
+
+/*
+ * slurm_print_front_end_info_msg - output information about all Slurm
+ *	front_ends based upon message as loaded using slurm_load_front_end
+ * IN out - file to write to
+ * IN front_end_info_msg_ptr - front_end information message pointer
+ * IN one_liner - print as a single line if true
+ */
+void
+slurm_print_front_end_info_msg (FILE * out,
+				front_end_info_msg_t * front_end_info_msg_ptr,
+				int one_liner)
+{
+	int i;
+	front_end_info_t *front_end_ptr;
+	char time_str[32];
+
+	front_end_ptr = front_end_info_msg_ptr->front_end_array;
+	slurm_make_time_str((time_t *)&front_end_info_msg_ptr->last_update,
+			    time_str, sizeof(time_str));
+	fprintf(out, "front_end data as of %s, record count %d\n",
+		time_str, front_end_info_msg_ptr->record_count);
+
+	for (i = 0; i < front_end_info_msg_ptr-> record_count; i++) {
+		slurm_print_front_end_table(out, &front_end_ptr[i],
+					    one_liner ) ;
+	}
+}
+
+
+/*
+ * slurm_print_front_end_table - output information about a specific Slurm
+ *	front_ends based upon message as loaded using slurm_load_front_end
+ * IN out - file to write to
+ * IN front_end_ptr - an individual front_end information record pointer
+ * IN one_liner - print as a single line if true
+ */
+void
+slurm_print_front_end_table (FILE * out, front_end_info_t * front_end_ptr,
+			     int one_liner)
+{
+	char *print_this = slurm_sprint_front_end_table(front_end_ptr,
+							one_liner);
+	fprintf(out, "%s", print_this);
+	xfree(print_this);
+}
+
+/*
+ * slurm_sprint_front_end_table - output information about a specific Slurm
+ *	front_end based upon message as loaded using slurm_load_front_end
+ * IN front_end_ptr - an individual front_end information record pointer
+ * IN one_liner - print as a single line if true
+ * RET out - char * containing formatted output (must be freed after call)
+ *           NULL is returned on failure.
+ */
+char *
+slurm_sprint_front_end_table (front_end_info_t * front_end_ptr,
+			      int one_liner)
+{
+	uint16_t my_state = front_end_ptr->node_state;
+	char *drain_str = "";
+	char tmp_line[512], time_str[32];
+	char *out = NULL;
+
+	if (my_state & NODE_STATE_DRAIN) {
+		my_state &= (~NODE_STATE_DRAIN);
+		drain_str = "+DRAIN";
+	}
+
+	/****** Line 1 ******/
+	snprintf(tmp_line, sizeof(tmp_line), "FrontendName=%s ",
+		 front_end_ptr->name);
+	xstrcat(out, tmp_line);
+	snprintf(tmp_line, sizeof(tmp_line), "State=%s%s ",
+		 node_state_string(my_state), drain_str);
+	xstrcat(out, tmp_line);
+	if (front_end_ptr->reason_time) {
+		char *user_name = uid_to_string(front_end_ptr->reason_uid);
+		slurm_make_time_str((time_t *)&front_end_ptr->reason_time,
+				    time_str, sizeof(time_str));
+		snprintf(tmp_line, sizeof(tmp_line), "Reason=%s [%s@%s]",
+			 front_end_ptr->reason, user_name, time_str);
+		xstrcat(out, tmp_line);
+		xfree(user_name);
+	} else {
+		snprintf(tmp_line, sizeof(tmp_line), "Reason=%s",
+			 front_end_ptr->reason);
+		xstrcat(out, tmp_line);
+	}
+	if (one_liner)
+		xstrcat(out, " ");
+	else
+		xstrcat(out, "\n   ");
+
+	/****** Line 2 ******/
+	slurm_make_time_str((time_t *)&front_end_ptr->boot_time,
+			    time_str, sizeof(time_str));
+	snprintf(tmp_line, sizeof(tmp_line), "BootTime=%s ", time_str);
+	xstrcat(out, tmp_line);
+	slurm_make_time_str((time_t *)&front_end_ptr->slurmd_start_time,
+			    time_str, sizeof(time_str));
+	snprintf(tmp_line, sizeof(tmp_line), "SlurmdStartTime=%s", time_str);
+	xstrcat(out, tmp_line);
+	if (one_liner)
+		xstrcat(out, "\n");
+	else
+		xstrcat(out, "\n\n");
+
+	return out;
+}
+
+
+/*
+ * slurm_load_front_end - issue RPC to get slurm all front_end configuration
+ *	information if changed since update_time
+ * IN update_time - time of current configuration data
+ * IN front_end_info_msg_pptr - place to store a front_end configuration pointer
+ * RET 0 or a slurm error code
+ * NOTE: free the response using slurm_free_front_end_info_msg
+ */
+int
+slurm_load_front_end (time_t update_time, front_end_info_msg_t **resp)
+{
+	int rc;
+	slurm_msg_t req_msg;
+	slurm_msg_t resp_msg;
+	front_end_info_request_msg_t req;
+
+	slurm_msg_t_init(&req_msg);
+	slurm_msg_t_init(&resp_msg);
+	req.last_update  = update_time;
+	req_msg.msg_type = REQUEST_FRONT_END_INFO;
+	req_msg.data     = &req;
+
+	if (slurm_send_recv_controller_msg(&req_msg, &resp_msg) < 0)
+		return SLURM_ERROR;
+
+	switch (resp_msg.msg_type) {
+	case RESPONSE_FRONT_END_INFO:
+		*resp = (front_end_info_msg_t *) resp_msg.data;
+		break;
+	case RESPONSE_SLURM_RC:
+		rc = ((return_code_msg_t *) resp_msg.data)->return_code;
+		slurm_free_return_code_msg(resp_msg.data);
+		if (rc)
+			slurm_seterrno_ret(rc);
+		*resp = NULL;
+		break;
+	default:
+		slurm_seterrno_ret(SLURM_UNEXPECTED_MSG_ERROR);
+		break;
+	}
+
+	return SLURM_PROTOCOL_SUCCESS;
+}
diff --git a/src/api/init_msg.c b/src/api/init_msg.c
index 305f664b9..80711797f 100644
--- a/src/api/init_msg.c
+++ b/src/api/init_msg.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -45,7 +45,7 @@
 #include <errno.h>
 #include <stdio.h>
 
-#include <slurm/slurm.h>
+#include "slurm/slurm.h"
 
 #include "src/common/slurm_protocol_api.h"
 #include "src/common/forward.h"
@@ -57,32 +57,23 @@
  */
 void slurm_init_job_desc_msg(job_desc_msg_t * job_desc_msg)
 {
-	int i;
-	int dims = slurmdb_setup_cluster_dims();
-
 	memset(job_desc_msg, 0, sizeof(job_desc_msg_t));
 	job_desc_msg->acctg_freq	= (uint16_t) NO_VAL;
 	job_desc_msg->alloc_sid		= NO_VAL;
-	job_desc_msg->conn_type		= (uint16_t) NO_VAL;
+	job_desc_msg->conn_type[0]	= (uint16_t) NO_VAL;
 	job_desc_msg->contiguous	= (uint16_t) NO_VAL;
+	job_desc_msg->cores_per_socket	= (uint16_t) NO_VAL;
 	job_desc_msg->cpu_bind_type	= (uint16_t) NO_VAL;
 	job_desc_msg->cpus_per_task	= (uint16_t) NO_VAL;
-	for (i=0; i<dims; i++)
-		job_desc_msg->geometry[i] = (uint16_t) NO_VAL;
+	job_desc_msg->geometry[0]       = (uint16_t) NO_VAL;
 	job_desc_msg->group_id		= NO_VAL;
 	job_desc_msg->job_id		= NO_VAL;
-	job_desc_msg->pn_min_cpus	= (uint16_t) NO_VAL;
-	job_desc_msg->pn_min_memory    = NO_VAL;
-	job_desc_msg->pn_min_tmp_disk  = NO_VAL;
 	job_desc_msg->kill_on_node_fail = (uint16_t) NO_VAL;
 	job_desc_msg->max_cpus		= NO_VAL;
 	job_desc_msg->max_nodes		= NO_VAL;
 	job_desc_msg->mem_bind_type	= (uint16_t) NO_VAL;
 	job_desc_msg->min_cpus		= NO_VAL;
 	job_desc_msg->min_nodes		= NO_VAL;
-	job_desc_msg->sockets_per_node	= (uint16_t) NO_VAL;
-	job_desc_msg->cores_per_socket	= (uint16_t) NO_VAL;
-	job_desc_msg->threads_per_core	= (uint16_t) NO_VAL;
 	job_desc_msg->nice		= (uint16_t) NO_VAL;
 	job_desc_msg->ntasks_per_core	= (uint16_t) NO_VAL;
 	job_desc_msg->ntasks_per_node	= (uint16_t) NO_VAL;
@@ -90,16 +81,23 @@ void slurm_init_job_desc_msg(job_desc_msg_t * job_desc_msg)
 	job_desc_msg->num_tasks		= NO_VAL;
 	job_desc_msg->overcommit	= (uint8_t) NO_VAL;
 	job_desc_msg->plane_size	= (uint16_t) NO_VAL;
+	job_desc_msg->pn_min_cpus	= (uint16_t) NO_VAL;
+	job_desc_msg->pn_min_memory	= NO_VAL;
+	job_desc_msg->pn_min_tmp_disk	= NO_VAL;
 	job_desc_msg->priority		= NO_VAL;
 	job_desc_msg->reboot		= (uint16_t) NO_VAL;
 	job_desc_msg->requeue		= (uint16_t) NO_VAL;
+	job_desc_msg->req_switch	= NO_VAL;
 	job_desc_msg->rotate		= (uint16_t) NO_VAL;
 	job_desc_msg->shared		= (uint16_t) NO_VAL;
+	job_desc_msg->sockets_per_node	= (uint16_t) NO_VAL;
 	job_desc_msg->task_dist		= (uint16_t) NO_VAL;
+	job_desc_msg->threads_per_core	= (uint16_t) NO_VAL;
 	job_desc_msg->time_limit	= NO_VAL;
 	job_desc_msg->time_min		= NO_VAL;
 	job_desc_msg->user_id		= NO_VAL;
 	job_desc_msg->wait_all_nodes	= (uint16_t) NO_VAL;
+	job_desc_msg->wait4switch	= NO_VAL;
 }
 
 /*
@@ -124,6 +122,9 @@ void slurm_init_part_desc_msg (update_part_msg_t * update_part_msg)
 {
 	memset(update_part_msg, 0, sizeof(update_part_msg_t));
 	update_part_msg->default_time   = (uint32_t) NO_VAL;
+	update_part_msg->def_mem_per_cpu = (uint32_t) NO_VAL;
+	update_part_msg->grace_time     = (uint32_t) NO_VAL;
+	update_part_msg->max_mem_per_cpu = (uint32_t) NO_VAL;
 	update_part_msg->max_nodes 	= NO_VAL;
 	update_part_msg->max_share 	= (uint16_t) NO_VAL;
 	update_part_msg->min_nodes 	= NO_VAL;
@@ -159,6 +160,17 @@ void slurm_init_update_node_msg (update_node_msg_t * update_node_msg)
 	update_node_msg->weight = (uint32_t) NO_VAL;
 }
 
+/*
+ * slurm_init_update_front_end_msg - initialize front_end node update message
+ * OUT update_front_end_msg - user defined node descriptor
+ */
+void slurm_init_update_front_end_msg (update_front_end_msg_t *
+				      update_front_end_msg)
+{
+	memset(update_front_end_msg, 0, sizeof(update_front_end_msg_t));
+	update_front_end_msg->node_state = (uint16_t) NO_VAL;
+}
+
 /*
  * slurm_init_update_block_msg - initialize block update message
  * OUT update_block_msg - user defined block descriptor
@@ -166,10 +178,9 @@ void slurm_init_update_node_msg (update_node_msg_t * update_node_msg)
 void slurm_init_update_block_msg (update_block_msg_t *update_block_msg)
 {
 	memset(update_block_msg, 0, sizeof(update_block_msg_t));
-	update_block_msg->conn_type = (uint16_t)NO_VAL;
+	update_block_msg->conn_type[0] = (uint16_t)NO_VAL;
 	update_block_msg->job_running = NO_VAL;
-	update_block_msg->node_cnt = NO_VAL;
+	update_block_msg->cnode_cnt = NO_VAL;
 	update_block_msg->node_use = (uint16_t)NO_VAL;
 	update_block_msg->state = (uint16_t)NO_VAL;
-
 }
diff --git a/src/api/job_info.c b/src/api/job_info.c
index 659caad55..b872d37ff 100644
--- a/src/api/job_info.c
+++ b/src/api/job_info.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -52,8 +52,8 @@
 #include <sys/wait.h>
 #include <time.h>
 
-#include <slurm/slurm.h>
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm.h"
+#include "slurm/slurm_errno.h"
 
 #include "src/common/forward.h"
 #include "src/common/node_select.h"
@@ -160,7 +160,7 @@ slurm_sprint_job_info ( job_info_t * job_ptr, int one_liner )
 	char select_buf[122];
 	uint32_t cluster_flags = slurmdb_setup_cluster_flags();
 
-	if(cluster_flags & CLUSTER_FLAG_BG) {
+	if (cluster_flags & CLUSTER_FLAG_BG) {
 		nodelist = "BP_List";
 		select_g_select_jobinfo_get(job_ptr->select_jobinfo,
 					    SELECT_JOBDATA_IONODES,
@@ -209,7 +209,7 @@ slurm_sprint_job_info ( job_info_t * job_ptr, int one_liner )
 	if (job_ptr->state_desc) {
 		/* Replace white space with underscore for easier parsing */
 		for (j=0; job_ptr->state_desc[j]; j++) {
-			if (isspace(job_ptr->state_desc[j]))
+			if (isspace((int)job_ptr->state_desc[j]))
 				job_ptr->state_desc[j] = '_';
 		}
 		tmp6_ptr = job_ptr->state_desc;
@@ -356,6 +356,15 @@ line6:
 		xstrcat(out, "\n   ");
 
 	/****** Line 10 ******/
+	if (job_ptr->preempt_time == 0)
+		sprintf(tmp_line, "PreemptTime=None ");
+	else {
+		slurm_make_time_str((time_t *)&job_ptr->preempt_time,
+				    time_str, sizeof(time_str));
+		snprintf(tmp_line, sizeof(tmp_line), "PreemptTime=%s ",
+			 time_str);
+	}
+	xstrcat(out, tmp_line);
 	if (job_ptr->suspend_time) {
 		slurm_make_time_str ((time_t *)&job_ptr->suspend_time,
 				     time_str, sizeof(time_str));
@@ -403,8 +412,19 @@ line6:
 	else
 		xstrcat(out, "\n   ");
 
-	/****** Line 14 ******/
-	if(cluster_flags & CLUSTER_FLAG_BG) {
+	/****** Line 14 (optional) ******/
+	if (job_ptr->batch_host) {
+		snprintf(tmp_line, sizeof(tmp_line), "BatchHost=%s",
+			 job_ptr->batch_host);
+		xstrcat(out, tmp_line);
+		if (one_liner)
+			xstrcat(out, " ");
+		else
+			xstrcat(out, "\n   ");
+	}
+
+	/****** Line 15 ******/
+	if (cluster_flags & CLUSTER_FLAG_BG) {
 		select_g_select_jobinfo_get(job_ptr->select_jobinfo,
 					    SELECT_JOBDATA_NODE_CNT,
 					    &min_nodes);
@@ -457,7 +477,7 @@ line6:
 					if (i < job_resrcs->cpu_array_cnt - 1) {
 						continue;
 					}
-					/* add elipsis before last entry */
+					/* add ellipsis before last entry */
 					xstrcat(out, "...,");
 					length += 4;
 				}
@@ -778,6 +798,31 @@ line15:
 		xstrcat(out, tmp_line);
 	}
 
+	/****** Line 27 (optional) ******/
+	if (job_ptr->batch_script) {
+		if (one_liner)
+			xstrcat(out, " ");
+		else
+			xstrcat(out, "\n   ");
+		xstrcat(out, "BatchScript=\n");
+		xstrcat(out, job_ptr->batch_script);
+	}
+
+	/****** Line 28 (optional) ******/
+	if (job_ptr->req_switch) {
+		char time_buf[32];
+		if (one_liner)
+			xstrcat(out, " ");
+		else
+			xstrcat(out, "\n   ");
+		secs2time_str((time_t) job_ptr->wait4switch, time_buf,
+			      sizeof(time_buf));
+		snprintf(tmp_line, sizeof(tmp_line), "Switches=%u@%s\n",
+			 job_ptr->req_switch, time_buf);
+		xstrcat(out, tmp_line);
+	}
+
+	/****** Line 29 (optional) ******/
 	if (one_liner)
 		xstrcat(out, "\n");
 	else
diff --git a/src/api/job_step_info.c b/src/api/job_step_info.c
index 66c858fe9..d644d35a2 100644
--- a/src/api/job_step_info.c
+++ b/src/api/job_step_info.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -46,8 +46,9 @@
 #include <stdio.h>
 #include <stdlib.h>
 
-#include <slurm/slurm.h>
+#include "slurm/slurm.h"
 
+#include "src/common/node_select.h"
 #include "src/common/parse_time.h"
 #include "src/common/slurm_protocol_api.h"
 #include "src/common/xmalloc.h"
@@ -137,6 +138,7 @@ slurm_sprint_job_step_info ( job_step_info_t * job_step_ptr,
 	char limit_str[32];
 	char tmp_line[128];
 	char *out = NULL;
+	uint32_t cluster_flags = slurmdb_setup_cluster_flags();
 
 	/****** Line 1 ******/
 	slurm_make_time_str ((time_t *)&job_step_ptr->start_time, time_str,
@@ -157,10 +159,23 @@ slurm_sprint_job_step_info ( job_step_info_t * job_step_ptr,
 		xstrcat(out, "\n   ");
 
 	/****** Line 2 ******/
-	snprintf(tmp_line, sizeof(tmp_line),
-		"Partition=%s Nodes=%s Gres=%s",
-		job_step_ptr->partition, job_step_ptr->nodes,
-		job_step_ptr->gres);
+	if (cluster_flags & CLUSTER_FLAG_BG) {
+		char *io_nodes;
+		select_g_select_jobinfo_get(job_step_ptr->select_jobinfo,
+					    SELECT_JOBDATA_IONODES,
+					    &io_nodes);
+		snprintf(tmp_line, sizeof(tmp_line),
+			"Partition=%s BP_List=%s[%s] Gres=%s",
+			job_step_ptr->partition,
+			job_step_ptr->nodes, io_nodes,
+			job_step_ptr->gres);
+		xfree(io_nodes);
+	} else {
+		snprintf(tmp_line, sizeof(tmp_line),
+			"Partition=%s Nodes=%s Gres=%s",
+			job_step_ptr->partition, job_step_ptr->nodes,
+			job_step_ptr->gres);
+	}
 	xstrcat(out, tmp_line);
 	if (one_liner)
 		xstrcat(out, " ");
diff --git a/src/api/node_info.c b/src/api/node_info.c
index f6283854d..4f8f0c87b 100644
--- a/src/api/node_info.c
+++ b/src/api/node_info.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -55,7 +55,7 @@
 #include <arpa/inet.h>
 #include <unistd.h>
 
-#include <slurm/slurm.h>
+#include "slurm/slurm.h"
 
 #include "src/common/parse_time.h"
 #include "src/common/slurm_protocol_api.h"
@@ -131,7 +131,7 @@ slurm_sprint_node_table (node_info_t * node_ptr,
 	int total_used = node_ptr->cpus;
 	uint32_t cluster_flags = slurmdb_setup_cluster_flags();
 
-	if(node_scaling)
+	if (node_scaling)
 		cpus_per_node = node_ptr->cpus / node_scaling;
 
 	if (my_state & NODE_STATE_COMPLETING) {
@@ -150,10 +150,10 @@ slurm_sprint_node_table (node_info_t * node_ptr,
 				  SELECT_NODEDATA_SUBCNT,
 				  NODE_STATE_ALLOCATED,
 				  &alloc_cpus);
-	if(cluster_flags & CLUSTER_FLAG_BG) {
-		if(!alloc_cpus
-		   && (IS_NODE_ALLOCATED(node_ptr)
-		       || IS_NODE_COMPLETING(node_ptr)))
+	if (cluster_flags & CLUSTER_FLAG_BG) {
+		if (!alloc_cpus &&
+		    (IS_NODE_ALLOCATED(node_ptr) ||
+		     IS_NODE_COMPLETING(node_ptr)))
 			alloc_cpus = node_ptr->cpus;
 		else
 			alloc_cpus *= cpus_per_node;
@@ -164,7 +164,7 @@ slurm_sprint_node_table (node_info_t * node_ptr,
 				  SELECT_NODEDATA_SUBCNT,
 				  NODE_STATE_ERROR,
 				  &err_cpus);
-	if(cluster_flags & CLUSTER_FLAG_BG)
+	if (cluster_flags & CLUSTER_FLAG_BG)
 		err_cpus *= cpus_per_node;
 	total_used -= err_cpus;
 
@@ -177,7 +177,7 @@ slurm_sprint_node_table (node_info_t * node_ptr,
 	/****** Line 1 ******/
 	snprintf(tmp_line, sizeof(tmp_line), "NodeName=%s ", node_ptr->name);
 	xstrcat(out, tmp_line);
-	if (node_ptr->arch ) {
+	if (node_ptr->arch) {
 		snprintf(tmp_line, sizeof(tmp_line), "Arch=%s ",
 			 node_ptr->arch);
 		xstrcat(out, tmp_line);
@@ -208,7 +208,19 @@ slurm_sprint_node_table (node_info_t * node_ptr,
 	else
 		xstrcat(out, "\n   ");
 
-	/****** Line 4 ******/
+	/****** Line 4 (optional) ******/
+	if (node_ptr->node_hostname || node_ptr->node_addr) {
+		snprintf(tmp_line, sizeof(tmp_line),
+			 "NodeAddr=%s NodeHostName=%s",
+			 node_ptr->node_addr, node_ptr->node_hostname);
+		xstrcat(out, tmp_line);	
+		if (one_liner)
+			xstrcat(out, " ");
+		else
+			xstrcat(out, "\n   ");
+	}
+
+	/****** Line 5 ******/
 	if (node_ptr->os) {
 		snprintf(tmp_line, sizeof(tmp_line), "OS=%s ", node_ptr->os);
 		xstrcat(out, tmp_line);
@@ -221,7 +233,7 @@ slurm_sprint_node_table (node_info_t * node_ptr,
 	else
 		xstrcat(out, "\n   ");
 
-	/****** Line 5 ******/
+	/****** Line 6 ******/
 
 	snprintf(tmp_line, sizeof(tmp_line),
 		 "State=%s%s%s%s ThreadsPerCore=%u TmpDisk=%u Weight=%u",
@@ -233,7 +245,7 @@ slurm_sprint_node_table (node_info_t * node_ptr,
 	else
 		xstrcat(out, "\n   ");
 
-	/****** Line 6 ******/
+	/****** Line 7 ******/
 	if (node_ptr->boot_time) {
 		slurm_make_time_str ((time_t *)&node_ptr->boot_time,
 				     time_str, sizeof(time_str));
@@ -256,7 +268,7 @@ slurm_sprint_node_table (node_info_t * node_ptr,
 	else
 		xstrcat(out, "\n   ");
 
-	/****** Line 7 ******/
+	/****** Line 8 ******/
 	if (node_ptr->reason_time) {
 		char *user_name = uid_to_string(node_ptr->reason_uid);
 		slurm_make_time_str ((time_t *)&node_ptr->reason_time,
diff --git a/src/api/partition_info.c b/src/api/partition_info.c
index afae5f832..fa19e1229 100644
--- a/src/api/partition_info.c
+++ b/src/api/partition_info.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -46,7 +46,7 @@
 #include <stdio.h>
 #include <stdlib.h>
 
-#include <slurm/slurm.h>
+#include "slurm/slurm.h"
 
 #include "src/common/parse_time.h"
 #include "src/common/slurm_protocol_api.h"
@@ -185,7 +185,8 @@ char *slurm_sprint_partition_info ( partition_info_t * part_ptr,
 	else
 		sprintf(tmp_line, " DisableRootJobs=NO");
 	xstrcat(out, tmp_line);
-
+	sprintf(tmp_line, " GraceTime=%u", part_ptr->grace_time);
+	xstrcat(out, tmp_line);
 	if (part_ptr->flags & PART_FLAG_HIDDEN)
 		sprintf(tmp_line, " Hidden=YES");
 	else
@@ -311,6 +312,33 @@ char *slurm_sprint_partition_info ( partition_info_t * part_ptr,
 
 	sprintf(tmp_line, " TotalNodes=%s", tmp2);
 	xstrcat(out, tmp_line);
+
+	if (part_ptr->def_mem_per_cpu & MEM_PER_CPU) {
+		snprintf(tmp_line, sizeof(tmp_line), " DefMemPerCPU=%u",
+			 part_ptr->def_mem_per_cpu & (~MEM_PER_CPU));
+		xstrcat(out, tmp_line);
+
+	} else if (part_ptr->def_mem_per_cpu == 0) {
+		xstrcat(out, " DefMemPerNode=UNLIMITED");
+	} else {
+		snprintf(tmp_line, sizeof(tmp_line), " DefMemPerNode=%u",
+			 part_ptr->def_mem_per_cpu);
+		xstrcat(out, tmp_line);
+	}
+
+	if (part_ptr->max_mem_per_cpu & MEM_PER_CPU) {
+		snprintf(tmp_line, sizeof(tmp_line), " MaxMemPerCPU=%u",
+			 part_ptr->max_mem_per_cpu & (~MEM_PER_CPU));
+		xstrcat(out, tmp_line);
+
+	} else if (part_ptr->max_mem_per_cpu == 0) {
+		xstrcat(out, " MaxMemPerNode=UNLIMITED");
+	} else {
+		snprintf(tmp_line, sizeof(tmp_line), " MaxMemPerNode=%u",
+			 part_ptr->max_mem_per_cpu);
+		xstrcat(out, tmp_line);
+	}
+
 	if (one_liner)
 		xstrcat(out, "\n");
 	else
diff --git a/src/api/pmi.c b/src/api/pmi.c
index 5693e28a0..d1b26b9d0 100644
--- a/src/api/pmi.c
+++ b/src/api/pmi.c
@@ -53,7 +53,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -90,9 +90,10 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
-#include <slurm/pmi.h>
-#include <slurm/slurm.h>
-#include <slurm/slurm_errno.h>
+
+#include "slurm/pmi.h"
+#include "slurm/slurm.h"
+#include "slurm/slurm_errno.h"
 
 #include "src/api/slurm_pmi.h"
 #include "src/common/macros.h"
diff --git a/src/api/pmi_server.c b/src/api/pmi_server.c
index 4b9b19d77..d4998b7ba 100644
--- a/src/api/pmi_server.c
+++ b/src/api/pmi_server.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -32,7 +32,8 @@
 
 #include <pthread.h>
 #include <stdlib.h>
-#include <slurm/slurm_errno.h>
+
+#include "slurm/slurm_errno.h"
 
 #include "src/api/slurm_pmi.h"
 #include "src/common/macros.h"
@@ -64,8 +65,8 @@ struct barrier_resp {
 	char *hostname;
 };				/* details for barrier task communcations */
 struct barrier_resp *barrier_ptr = NULL;
-uint16_t barrier_resp_cnt = 0;	/* tasks having reached barrier */
-uint16_t barrier_cnt = 0;	/* tasks needing to reach barrier */
+uint32_t barrier_resp_cnt = 0;	/* tasks having reached barrier */
+uint32_t barrier_cnt = 0;	/* tasks needing to reach barrier */
 
 pthread_mutex_t agent_mutex = PTHREAD_MUTEX_INITIALIZER;
 pthread_cond_t  agent_cond  = PTHREAD_COND_INITIALIZER;
@@ -144,7 +145,7 @@ static void _kvs_xmit_tasks(void)
 static void *_msg_thread(void *x)
 {
 	struct msg_arg *msg_arg_ptr = (struct msg_arg *) x;
-	int rc, success = 0, timeout;
+	int rc, timeout;
 	slurm_msg_t msg_send;
 
 	slurm_msg_t_init(&msg_send);
@@ -166,7 +167,6 @@ static void *_msg_thread(void *x)
 			msg_arg_ptr->bar_ptr->hostname, rc);
 	} else {
 		/* successfully transmitted KVS keypairs */
-		success = 1;
 	}
 
 	slurm_mutex_lock(&agent_mutex);
@@ -518,7 +518,7 @@ fini:	pthread_mutex_unlock(&kvs_mutex);
 
 /*
  * Set the maximum number of threads to be used by the PMI server code.
- * The PMI server code is used interally by the slurm_step_launch() function
+ * The PMI server code is used internally by the slurm_step_launch() function
  * to support MPI libraries that bootstrap themselves using PMI.
  */
 extern void pmi_server_max_threads(int max_threads)
diff --git a/src/api/pmi_server.h b/src/api/pmi_server.h
index ac90958be..2ab7fabf4 100644
--- a/src/api/pmi_server.h
+++ b/src/api/pmi_server.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -40,7 +40,7 @@ extern int pmi_kvs_get(kvs_get_msg_t *kvs_get_ptr);
 
 /*
  * Set the maximum number of threads to be used by the PMI server code.
- * The PMI server code is used interally by the slurm_step_launch() function
+ * The PMI server code is used internally by the slurm_step_launch() function
  * to support MPI libraries that bootstrap themselves using PMI.
  */
 extern void pmi_server_max_threads(int max_threads);
diff --git a/src/api/reconfigure.c b/src/api/reconfigure.c
index cb9ff603f..2fd0e027c 100644
--- a/src/api/reconfigure.c
+++ b/src/api/reconfigure.c
@@ -10,7 +10,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -48,7 +48,7 @@
 #include <stdlib.h>
 #include <unistd.h>
 
-#include <slurm/slurm.h>
+#include "slurm/slurm.h"
 
 #include "src/common/slurm_protocol_api.h"
 #include "src/common/forward.h"
@@ -189,12 +189,52 @@ _send_message_controller (enum controller_id dest, slurm_msg_t *req)
         return rc;
 }
 
+/*
+ * slurm_set_debugflags - issue RPC to set slurm controller debug flags
+ * IN debug_flags_plus  - debug flags to be added
+ * IN debug_flags_minus - debug flags to be removed
+ * IN debug_flags_set   - new debug flags value
+ * RET 0 on success, otherwise return -1 and set errno to indicate the error
+ */
+extern int
+slurm_set_debugflags (uint32_t debug_flags_plus, uint32_t debug_flags_minus)
+{
+	int rc;
+	slurm_msg_t req_msg;
+	slurm_msg_t resp_msg;
+	set_debug_flags_msg_t req;
+
+	slurm_msg_t_init(&req_msg);
+	slurm_msg_t_init(&resp_msg);
+
+	req.debug_flags_minus = debug_flags_minus;
+	req.debug_flags_plus  = debug_flags_plus;
+	req_msg.msg_type = REQUEST_SET_DEBUG_FLAGS;
+	req_msg.data     = &req;
+
+	if (slurm_send_recv_controller_msg(&req_msg, &resp_msg) < 0)
+		return SLURM_ERROR;
+
+	switch (resp_msg.msg_type) {
+	case RESPONSE_SLURM_RC:
+		rc = ((return_code_msg_t *) resp_msg.data)->return_code;
+		slurm_free_return_code_msg(resp_msg.data);
+		if (rc)
+			slurm_seterrno_ret(rc);
+		break;
+	default:
+		slurm_seterrno_ret(SLURM_UNEXPECTED_MSG_ERROR);
+		break;
+	}
+        return SLURM_PROTOCOL_SUCCESS;
+}
+
 /*
  * slurm_set_debug_level - issue RPC to set slurm controller debug level
  * IN debug_level - requested debug level
  * RET 0 on success, otherwise return -1 and set errno to indicate the error
  */
-int
+extern int
 slurm_set_debug_level (uint32_t debug_level)
 {
 	int rc;
diff --git a/src/api/reservation_info.c b/src/api/reservation_info.c
index 897c1ad75..d82a248d3 100644
--- a/src/api/reservation_info.c
+++ b/src/api/reservation_info.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -44,7 +44,7 @@
 #include <stdio.h>
 #include <stdlib.h>
 
-#include <slurm/slurm.h>
+#include "slurm/slurm.h"
 
 #include "src/common/parse_time.h"
 #include "src/common/slurm_protocol_api.h"
@@ -105,8 +105,10 @@ char *slurm_sprint_reservation_info ( reserve_info_t * resv_ptr,
 {
 	char tmp1[32], tmp2[32], tmp3[32], *flag_str = NULL;
 	char tmp_line[MAXHOSTRANGELEN];
+	char *state="INACTIVE";
 	char *out = NULL;
 	uint32_t duration;
+	time_t now = time(NULL);
 
 	/****** Line 1 ******/
 	slurm_make_time_str(&resv_ptr->start_time, tmp1, sizeof(tmp1));
@@ -138,9 +140,11 @@ char *slurm_sprint_reservation_info ( reserve_info_t * resv_ptr,
 		xstrcat(out, "\n   ");
 
 	/****** Line 3 ******/
+	if ((resv_ptr->start_time <= now) && (resv_ptr->end_time >= now))
+		state = "ACTIVE";
 	snprintf(tmp_line, sizeof(tmp_line),
-		 "Users=%s Accounts=%s Licenses=%s",
-		 resv_ptr->users, resv_ptr->accounts, resv_ptr->licenses);
+		 "Users=%s Accounts=%s Licenses=%s State=%s",
+		 resv_ptr->users, resv_ptr->accounts, resv_ptr->licenses, state);
 	xstrcat(out, tmp_line);
 	if (one_liner)
 		xstrcat(out, "\n");
diff --git a/src/api/signal.c b/src/api/signal.c
index 8e8b0c2ae..4ea0934e2 100644
--- a/src/api/signal.c
+++ b/src/api/signal.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -46,8 +46,8 @@
 #include <stdlib.h>
 #include <pthread.h>
 
-#include <slurm/slurm.h>
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm.h"
+#include "slurm/slurm_errno.h"
 
 #include "src/common/xmalloc.h"
 #include "src/common/hostlist.h"
@@ -56,18 +56,150 @@
 #include "src/common/slurm_protocol_api.h"
 
 static int _local_send_recv_rc_msgs(const char *nodelist,
-				    slurm_msg_type_t type,
-				    void *data);
-static int _signal_job_step(
-	const job_step_info_t *step,
-	const resource_allocation_response_msg_t *allocation,
-	uint16_t signal);
-static int _signal_batch_script_step(
-	const resource_allocation_response_msg_t *allocation, uint16_t signal);
+				    slurm_msg_type_t type, void *data)
+{
+	List ret_list = NULL;
+	int temp_rc = 0, rc = 0;
+	ret_data_info_t *ret_data_info = NULL;
+	slurm_msg_t *msg = xmalloc(sizeof(slurm_msg_t));
+
+	slurm_msg_t_init(msg);
+	msg->msg_type = type;
+	msg->data = data;
+
+	if ((ret_list = slurm_send_recv_msgs(nodelist, msg, 0, false))) {
+		while ((ret_data_info = list_pop(ret_list))) {
+			temp_rc = slurm_get_return_code(ret_data_info->type,
+							ret_data_info->data);
+			if (temp_rc)
+				rc = temp_rc;
+		}
+	} else {
+		error("slurm_signal_job: no list was returned");
+		rc = SLURM_ERROR;
+	}
+
+	slurm_free_msg(msg);
+	return rc;
+}
+
+static int _signal_batch_script_step(const resource_allocation_response_msg_t 
+				     *allocation, uint16_t signal)
+{
+	slurm_msg_t msg;
+	kill_tasks_msg_t rpc;
+	int rc = SLURM_SUCCESS;
+	char *name = nodelist_nth_host(allocation->node_list, 0);
+	if (!name) {
+		error("_signal_batch_script_step: "
+		      "can't get the first name out of %s",
+		      allocation->node_list);
+		return -1;
+	}
+	rpc.job_id = allocation->job_id;
+	rpc.job_step_id = SLURM_BATCH_SCRIPT;
+	rpc.signal = (uint32_t)signal;
+
+	slurm_msg_t_init(&msg);
+	msg.msg_type = REQUEST_SIGNAL_TASKS;
+	msg.data = &rpc;
+	if (slurm_conf_get_addr(name, &msg.address) == SLURM_ERROR) {
+		error("_signal_batch_script_step: "
+		      "can't find address for host %s, check slurm.conf",
+		      name);
+		free(name);
+		return -1;
+	}
+	free(name);
+	if (slurm_send_recv_rc_msg_only_one(&msg, &rc, 0) < 0) {
+		error("_signal_batch_script_step: %m");
+		rc = -1;
+	}
+	return rc;
+}
+
+static int _signal_job_step(const job_step_info_t *step,
+			    const resource_allocation_response_msg_t *
+			    allocation, uint16_t signal)
+{
+	kill_tasks_msg_t rpc;
+	int rc = SLURM_SUCCESS;
+
+	/* same remote procedure call for each node */
+	rpc.job_id = step->job_id;
+	rpc.job_step_id = step->step_id;
+	rpc.signal = (uint32_t)signal;
+	rc = _local_send_recv_rc_msgs(allocation->node_list,
+				      REQUEST_SIGNAL_TASKS, &rpc);
+	return rc;
+}
+
+static int _terminate_batch_script_step(const resource_allocation_response_msg_t
+					* allocation)
+{
+	slurm_msg_t msg;
+	kill_tasks_msg_t rpc;
+	int rc = SLURM_SUCCESS;
+	int i;
+	char *name = nodelist_nth_host(allocation->node_list, 0);
+	if (!name) {
+		error("_terminate_batch_script_step: "
+		      "can't get the first name out of %s",
+		      allocation->node_list);
+		return -1;
+	}
+
+	rpc.job_id = allocation->job_id;
+	rpc.job_step_id = SLURM_BATCH_SCRIPT;
+	rpc.signal = (uint32_t)-1; /* not used by slurmd */
+
+	slurm_msg_t_init(&msg);
+	msg.msg_type = REQUEST_TERMINATE_TASKS;
+	msg.data = &rpc;
+
+	if (slurm_conf_get_addr(name, &msg.address) == SLURM_ERROR) {
+		error("_terminate_batch_script_step: "
+		      "can't find address for host %s, check slurm.conf",
+		      name);
+		free(name);
+		return -1;
+	}
+	free(name);
+	i = slurm_send_recv_rc_msg_only_one(&msg, &rc, 0);
+	if (i != 0)
+		rc = i;
+
+	return rc;
+}
+
+/*
+ * Send a REQUEST_TERMINATE_TASKS rpc to all nodes in a job step.
+ *
+ * RET Upon successful termination of the job step, 0 shall be returned.
+ * Otherwise, -1 shall be returned and errno set to indicate the error.
+ */
 static int _terminate_job_step(const job_step_info_t *step,
-		       const resource_allocation_response_msg_t *allocation);
-static int _terminate_batch_script_step(
-	const resource_allocation_response_msg_t *allocation);
+			       const resource_allocation_response_msg_t *
+			       allocation)
+{
+	kill_tasks_msg_t rpc;
+	int rc = SLURM_SUCCESS;
+
+	/*
+	 *  Send REQUEST_TERMINATE_TASKS to all nodes of the step
+	 */
+	rpc.job_id = step->job_id;
+	rpc.job_step_id = step->step_id;
+	rpc.signal = (uint32_t)-1; /* not used by slurmd */
+	rc = _local_send_recv_rc_msgs(allocation->node_list,
+				      REQUEST_TERMINATE_TASKS, &rpc);
+	if ((rc == -1) && (errno == ESLURM_ALREADY_DONE)) {
+		rc = 0;
+		errno = 0;
+	}
+
+	return rc;
+}
 
 /*
  * slurm_signal_job - send the specified signal to all steps of an existing job
@@ -160,91 +292,10 @@ fail:
  	return rc ? -1 : 0;
 }
 
-static int
-_local_send_recv_rc_msgs(const char *nodelist, slurm_msg_type_t type,
-			 void *data)
-{
-	List ret_list = NULL;
-	int temp_rc = 0, rc = 0;
-	ret_data_info_t *ret_data_info = NULL;
-	slurm_msg_t *msg = xmalloc(sizeof(slurm_msg_t));
-
-	slurm_msg_t_init(msg);
-	msg->msg_type = type;
-	msg->data = data;
-
-	if((ret_list = slurm_send_recv_msgs(nodelist, msg, 0, false))) {
-		while((ret_data_info = list_pop(ret_list))) {
-			temp_rc = slurm_get_return_code(ret_data_info->type,
-							ret_data_info->data);
-			if(temp_rc)
-				rc = temp_rc;
-		}
-	} else {
-		error("slurm_signal_job: no list was returned");
-		rc = SLURM_ERROR;
-	}
-
-	slurm_free_msg(msg);
-	return rc;
-}
-
-static int
-_signal_job_step(const job_step_info_t *step,
-		 const resource_allocation_response_msg_t *allocation,
-		 uint16_t signal)
-{
-	kill_tasks_msg_t rpc;
-	int rc = SLURM_SUCCESS;
-
-	/* same remote procedure call for each node */
-	rpc.job_id = step->job_id;
-	rpc.job_step_id = step->step_id;
-	rpc.signal = (uint32_t)signal;
-	rc = _local_send_recv_rc_msgs(allocation->node_list,
-				      REQUEST_SIGNAL_TASKS, &rpc);
-	return rc;
-}
-
-static int _signal_batch_script_step(
-	const resource_allocation_response_msg_t *allocation, uint16_t signal)
-{
-	slurm_msg_t msg;
-	kill_tasks_msg_t rpc;
-	int rc = SLURM_SUCCESS;
-	char *name = nodelist_nth_host(allocation->node_list, 0);
-	if(!name) {
-		error("_signal_batch_script_step: "
-		      "can't get the first name out of %s",
-		      allocation->node_list);
-		return -1;
-	}
-	rpc.job_id = allocation->job_id;
-	rpc.job_step_id = SLURM_BATCH_SCRIPT;
-	rpc.signal = (uint32_t)signal;
-
-	slurm_msg_t_init(&msg);
-	msg.msg_type = REQUEST_SIGNAL_TASKS;
-	msg.data = &rpc;
-	if(slurm_conf_get_addr(name, &msg.address) == SLURM_ERROR) {
-		error("_signal_batch_script_step: "
-		      "can't find address for host %s, check slurm.conf",
-		      name);
-		free(name);
-		return -1;
-	}
-	free(name);
-	if (slurm_send_recv_rc_msg_only_one(&msg, &rc, 0) < 0) {
-		error("_signal_batch_script_step: %m");
-		rc = -1;
-	}
-	return rc;
-}
-
 
 /*
  * slurm_terminate_job - terminates all steps of an existing job by sending
- * 	a REQUEST_TERMINATE_JOB rpc to all slurmd in the the job allocation,
+ *	a REQUEST_TERMINATE_JOB rpc to all slurmd in the job allocation,
  *      and then calls slurm_complete_job().
  * IN job_id     - the job's id
  * RET 0 on success, otherwise return -1 and set errno to indicate the error
@@ -337,74 +388,6 @@ fail:
 	return rc ? -1 : 0;
 }
 
-
-/*
- * Send a REQUEST_TERMINATE_TASKS rpc to all nodes in a job step.
- *
- * RET Upon successful termination of the job step, 0 shall be returned.
- * Otherwise, -1 shall be returned and errno set to indicate the error.
- */
-static int
-_terminate_job_step(const job_step_info_t *step,
-		    const resource_allocation_response_msg_t *allocation)
-{
-	kill_tasks_msg_t rpc;
-	int rc = SLURM_SUCCESS;
-
-	/*
-	 *  Send REQUEST_TERMINATE_TASKS to all nodes of the step
-	 */
-	rpc.job_id = step->job_id;
-	rpc.job_step_id = step->step_id;
-	rpc.signal = (uint32_t)-1; /* not used by slurmd */
-	rc = _local_send_recv_rc_msgs(allocation->node_list,
-				      REQUEST_TERMINATE_TASKS, &rpc);
-	if (rc == -1 && errno == ESLURM_ALREADY_DONE) {
-		rc = 0;
-		errno = 0;
-	}
-
-	return rc;
-}
-
-static int _terminate_batch_script_step(
-	const resource_allocation_response_msg_t *allocation)
-{
-	slurm_msg_t msg;
-	kill_tasks_msg_t rpc;
-	int rc = SLURM_SUCCESS;
-	int i;
-	char *name = nodelist_nth_host(allocation->node_list, 0);
-	if(!name) {
-		error("_signal_batch_script_step: "
-		      "can't get the first name out of %s",
-		      allocation->node_list);
-		return -1;
-	}
-
-	rpc.job_id = allocation->job_id;
-	rpc.job_step_id = SLURM_BATCH_SCRIPT;
-	rpc.signal = (uint32_t)-1; /* not used by slurmd */
-
-	slurm_msg_t_init(&msg);
-	msg.msg_type = REQUEST_TERMINATE_TASKS;
-	msg.data = &rpc;
-
-	if(slurm_conf_get_addr(name, &msg.address) == SLURM_ERROR) {
-		error("_signal_batch_script_step: "
-		      "can't find address for host %s, check slurm.conf",
-		      name);
-		free(name);
-		return -1;
-	}
-	free(name);
-	i = slurm_send_recv_rc_msg_only_one(&msg, &rc, 0);
-	if (i != 0)
-		rc = i;
-
-	return rc;
-}
-
 /*
  * slurm_notify_job - send message to the job's stdout,
  *	usable only by user root
diff --git a/src/api/slurm_hostlist.c b/src/api/slurm_hostlist.c
index eee801643..42d726f04 100644
--- a/src/api/slurm_hostlist.c
+++ b/src/api/slurm_hostlist.c
@@ -8,7 +8,7 @@
  *  LLNL-CODE-402394.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/api/slurm_pmi.c b/src/api/slurm_pmi.c
index b534f7209..6dab96371 100644
--- a/src/api/slurm_pmi.c
+++ b/src/api/slurm_pmi.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -38,8 +38,9 @@
 
 #include <stdlib.h>
 #include <sys/time.h>
-#include <slurm/slurm.h>
-#include <slurm/slurm_errno.h>
+
+#include "slurm/slurm.h"
+#include "slurm/slurm_errno.h"
 
 #include "src/api/slurm_pmi.h"
 #include "src/common/slurm_protocol_api.h"
diff --git a/src/api/slurm_pmi.h b/src/api/slurm_pmi.h
index 7d8f2f2a5..083b17214 100644
--- a/src/api/slurm_pmi.h
+++ b/src/api/slurm_pmi.h
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -60,7 +60,7 @@
 #define PMI_MAX_VAL_LEN     256	/* Maximum size of a PMI value */
 
 struct kvs_hosts {
-	uint16_t	task_id;	/* job step's task id */
+	uint32_t	task_id;	/* job step's task id */
 	uint16_t	port;		/* communication port */
 	char *		hostname;	/* communication host */
 };
diff --git a/src/api/step_ctx.c b/src/api/step_ctx.c
index 063b20c7b..077554319 100644
--- a/src/api/step_ctx.c
+++ b/src/api/step_ctx.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -53,7 +53,7 @@
 #include <sys/socket.h>
 #include <sys/types.h>
 
-#include <slurm/slurm.h>
+#include "slurm/slurm.h"
 
 #include "src/common/bitstring.h"
 #include "src/common/hostlist.h"
diff --git a/src/api/step_ctx.h b/src/api/step_ctx.h
index f934d9922..f80bd3c08 100644
--- a/src/api/step_ctx.h
+++ b/src/api/step_ctx.h
@@ -10,7 +10,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -37,7 +37,7 @@
 #include <unistd.h>
 #include <stdint.h>
 
-#include <slurm/slurm.h>
+#include "slurm/slurm.h"
 
 #include "src/api/step_launch.h"
 
diff --git a/src/api/step_io.c b/src/api/step_io.c
index 6ed6c3f18..eae4d139d 100644
--- a/src/api/step_io.c
+++ b/src/api/step_io.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -92,8 +92,8 @@ static bool _listening_socket_readable(eio_obj_t *obj);
 static int _listening_socket_read(eio_obj_t *obj, List objs);
 
 struct io_operations listening_socket_ops = {
-	readable:	&_listening_socket_readable,
-	handle_read:	&_listening_socket_read
+	.readable = &_listening_socket_readable,
+	.handle_read = &_listening_socket_read
 };
 
 /**********************************************************************
@@ -105,10 +105,10 @@ static bool _server_writable(eio_obj_t *obj);
 static int _server_write(eio_obj_t *obj, List objs);
 
 struct io_operations server_ops = {
-        readable:	&_server_readable,
-	handle_read:	&_server_read,
-	writable:       &_server_writable,
-	handle_write:   &_server_write
+	.readable = &_server_readable,
+	.handle_read = &_server_read,
+	.writable = &_server_writable,
+	.handle_write = &_server_write
 };
 
 struct server_io_info {
@@ -138,8 +138,8 @@ static bool _file_writable(eio_obj_t *obj);
 static int _file_write(eio_obj_t *obj, List objs);
 
 struct io_operations file_write_ops = {
-	writable:	&_file_writable,
-	handle_write:	&_file_write,
+	.writable = &_file_writable,
+	.handle_write = &_file_write,
 };
 
 struct file_write_info {
@@ -163,8 +163,8 @@ static bool _file_readable(eio_obj_t *obj);
 static int _file_read(eio_obj_t *obj, List objs);
 
 struct io_operations file_read_ops = {
-	readable:	&_file_readable,
-	handle_read:	&_file_read,
+	.readable = &_file_readable,
+	.handle_read = &_file_read,
 };
 
 struct file_read_info {
@@ -1071,7 +1071,6 @@ client_io_handler_create(slurm_step_io_fds_t fds,
 			 bool label)
 {
 	client_io_t *cio;
-	int len;
 	int i;
 	uint32_t siglen;
 	char *sig;
@@ -1089,8 +1088,6 @@ client_io_handler_create(slurm_step_io_fds_t fds,
 	else
 		cio->label_width = 0;
 
-	len = sizeof(uint32_t) * num_tasks;
-
 	if (slurm_cred_get_signature(cred, &sig, &siglen) < 0) {
 		error("client_io_handler_create, invalid credential");
 		return NULL;
diff --git a/src/api/step_io.h b/src/api/step_io.h
index 0082489b4..5a9023ab5 100644
--- a/src/api/step_io.h
+++ b/src/api/step_io.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -31,7 +31,7 @@
 #include <stdint.h>
 #include <pthread.h>
 
-#include <slurm/slurm.h>
+#include "slurm/slurm.h"
 
 #include "src/common/eio.h"
 #include "src/common/list.h"
diff --git a/src/api/step_launch.c b/src/api/step_launch.c
index 98a46f30c..f8c8d2967 100644
--- a/src/api/step_launch.c
+++ b/src/api/step_launch.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -61,7 +61,7 @@
 #include <sys/un.h>
 #include <netdb.h> /* for gethostbyname */
 
-#include <slurm/slurm.h>
+#include "slurm/slurm.h"
 
 #include "src/common/hostlist.h"
 #include "src/common/slurm_protocol_api.h"
@@ -110,9 +110,9 @@ static int  _start_io_timeout_thread(step_launch_state_t *sls);
 static void *_check_io_timeout(void *_sls);
 
 static struct io_operations message_socket_ops = {
-	readable:	&eio_message_socket_readable,
-	handle_read:	&eio_message_socket_accept,
-	handle_msg:     &_handle_msg
+	.readable = &eio_message_socket_readable,
+	.handle_read = &eio_message_socket_accept,
+	.handle_msg = &_handle_msg
 };
 
 
@@ -181,7 +181,8 @@ int slurm_step_launch (slurm_step_ctx_t *ctx,
 		return SLURM_ERROR;
 	}
 	/* Now, hack the step_layout struct if the following it true.
-	   This looks like an ugly hack to support LAM/MPI's lamboot. */
+	   This looks like an ugly hack to support LAM/MPI's lamboot.
+	   NOTE: This also gets ran for BGQ systems. */
 	if (mpi_hook_client_single_task_per_node()) {
 		for (i = 0; i < ctx->step_resp->step_layout->node_cnt; i++)
 			ctx->step_resp->step_layout->tasks[i] = 1;
@@ -259,6 +260,8 @@ int slurm_step_launch (slurm_step_ctx_t *ctx,
 	launch.cpus_allocated  = ctx->step_resp->step_layout->tasks;
 	launch.global_task_ids = ctx->step_resp->step_layout->tids;
 
+	launch.select_jobinfo  = ctx->step_resp->select_jobinfo;
+
 	launch.user_managed_io = params->user_managed_io ? 1 : 0;
 	ctx->launch_state->user_managed_io = params->user_managed_io;
 
@@ -562,10 +565,10 @@ void slurm_step_launch_fwd_signal(slurm_step_ctx_t *ctx, int signo)
 		active = 0;
 		num_tasks = sls->layout->tasks[node_id];
 		for (j = 0; j < num_tasks; j++) {
-			if(bit_test(sls->tasks_started,
-				    sls->layout->tids[node_id][j]) &&
-			   !bit_test(sls->tasks_exited,
-				     sls->layout->tids[node_id][j])) {
+			if (bit_test(sls->tasks_started,
+				     sls->layout->tids[node_id][j]) &&
+			    !bit_test(sls->tasks_exited,
+				      sls->layout->tids[node_id][j])) {
 				/* this one has active tasks */
 				active = 1;
 				break;
@@ -575,14 +578,21 @@ void slurm_step_launch_fwd_signal(slurm_step_ctx_t *ctx, int signo)
 		if (!active)
 			continue;
 
-		name = nodelist_nth_host(sls->layout->node_list, node_id);
-		hostlist_push(hl, name);
-		free(name);
+		if (ctx->step_resp->step_layout->front_end) {
+			hostlist_push(hl,
+				      ctx->step_resp->step_layout->front_end);
+			break;
+		} else {
+			name = nodelist_nth_host(sls->layout->node_list,
+						 node_id);
+			hostlist_push(hl, name);
+			free(name);
+		}
 	}
 
 	pthread_mutex_unlock(&sls->lock);
 
-	if(!hostlist_count(hl)) {
+	if (!hostlist_count(hl)) {
 		hostlist_destroy(hl);
 		goto nothing_left;
 	}
@@ -638,11 +648,16 @@ struct step_launch_state *step_launch_state_create(slurm_step_ctx_t *ctx)
 
 	sls = xmalloc(sizeof(struct step_launch_state));
 	sls->slurmctld_socket_fd = -1;
+#if defined HAVE_BGQ
+//#if defined HAVE_BGQ && defined HAVE_BG_FILES
+	sls->tasks_requested = 1;
+#else
 	/* Hack for LAM-MPI's lamboot, launch one task per node */
 	if (mpi_hook_client_single_task_per_node())
 		sls->tasks_requested = layout->node_cnt;
 	else
 		sls->tasks_requested = layout->task_cnt;
+#endif
 	sls->tasks_started = bit_alloc(layout->task_cnt);
 	sls->tasks_exited = bit_alloc(layout->task_cnt);
 	sls->node_io_error = bit_alloc(layout->node_cnt);
@@ -945,7 +960,6 @@ _node_fail_handler(struct step_launch_state *sls, slurm_msg_t *fail_msg)
 	srun_node_fail_msg_t *nf = fail_msg->data;
 	hostset_t fail_nodes, all_nodes;
 	hostlist_iterator_t fail_itr;
-	char *node;
 	int num_node_ids;
 	int *node_ids;
 	int i, j;
@@ -962,7 +976,10 @@ _node_fail_handler(struct step_launch_state *sls, slurm_msg_t *fail_msg)
 	all_nodes = hostset_create(sls->layout->node_list);
 	/* find the index number of each down node */
 	for (i = 0; i < num_node_ids; i++) {
-		node = hostlist_next(fail_itr);
+#ifdef HAVE_FRONT_END
+		node_id = 0;
+#else
+		char *node = hostlist_next(fail_itr);
 		node_id = node_ids[i] = hostset_find(all_nodes, node);
 		if (node_id < 0) {
 			error(  "Internal error: bad SRUN_NODE_FAIL message. "
@@ -971,6 +988,7 @@ _node_fail_handler(struct step_launch_state *sls, slurm_msg_t *fail_msg)
 			continue;
 		}
 		free(node);
+#endif
 
 		/* find all of the tasks that should run on this node and
 		 * mark them as having started and exited.  If they haven't
@@ -1319,6 +1337,9 @@ static int _launch_tasks(slurm_step_ctx_t *ctx,
 			 launch_tasks_request_msg_t *launch_msg,
 			 uint32_t timeout)
 {
+#ifdef HAVE_FRONT_END
+	slurm_cred_arg_t cred_args;
+#endif
 	slurm_msg_t msg;
 	List ret_list = NULL;
 	ListIterator ret_itr;
@@ -1331,7 +1352,7 @@ static int _launch_tasks(slurm_step_ctx_t *ctx,
 		char *name = NULL;
 		hostlist_t hl = hostlist_create(launch_msg->complete_nodelist);
 		int i = 0;
-		while((name = hostlist_shift(hl))) {
+		while ((name = hostlist_shift(hl))) {
 			_print_launch_msg(launch_msg, name, i++);
 			free(name);
 		}
@@ -1342,9 +1363,17 @@ static int _launch_tasks(slurm_step_ctx_t *ctx,
 	msg.msg_type = REQUEST_LAUNCH_TASKS;
 	msg.data = launch_msg;
 
-	if(!(ret_list = slurm_send_recv_msgs(
-		     ctx->step_resp->step_layout->node_list,
-		     &msg, timeout, false))) {
+#ifdef HAVE_FRONT_END
+	slurm_cred_get_args(ctx->step_resp->cred, &cred_args);
+	//info("hostlist=%s", cred_args.step_hostlist);
+	ret_list = slurm_send_recv_msgs(cred_args.step_hostlist, &msg, timeout,
+					false);
+	slurm_cred_free_args(&cred_args);
+#else
+	ret_list = slurm_send_recv_msgs(ctx->step_resp->step_layout->node_list,
+					&msg, timeout, false);
+#endif
+	if (ret_list == NULL) {
 		error("slurm_send_recv_msgs failed miserably: %m");
 		return SLURM_ERROR;
 	}
@@ -1379,7 +1408,7 @@ static int _launch_tasks(slurm_step_ctx_t *ctx,
 	list_iterator_destroy(ret_itr);
 	list_destroy(ret_list);
 
-	if(tot_rc != SLURM_SUCCESS)
+	if (tot_rc != SLURM_SUCCESS)
 		return tot_rc;
 	return rc;
 }
@@ -1495,7 +1524,8 @@ _exec_prog(slurm_msg_t *msg)
 	} else {
 		close(pfd[1]);
 		len = read(pfd[0], buf, sizeof(buf));
-		close(pfd[0]);
+		if (len >= 1)
+			close(pfd[0]);
 		waitpid(child, &status, 0);
 		exit_code = WEXITSTATUS(status);
 	}
@@ -1589,12 +1619,9 @@ _check_io_timeout(void *_sls)
 {
 	int ii;
 	time_t now, next_deadline;
-	client_io_t *cio;
 	struct timespec ts = {0, 0};
 	step_launch_state_t *sls = (step_launch_state_t *)_sls;
 
-	cio = sls->io.normal;
-
 	pthread_mutex_lock(&sls->lock);
 
 	while (1) {
diff --git a/src/api/step_launch.h b/src/api/step_launch.h
index c004f8b2b..e52b7fd6e 100644
--- a/src/api/step_launch.h
+++ b/src/api/step_launch.h
@@ -10,7 +10,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -49,7 +49,7 @@
 #include <stdint.h>
 #include <pthread.h>
 
-#include <slurm/slurm.h>
+#include "slurm/slurm.h"
 
 #include "src/common/slurm_step_layout.h"
 #include "src/common/eio.h"
diff --git a/src/api/submit.c b/src/api/submit.c
index a98adda4c..ed38d5da9 100644
--- a/src/api/submit.c
+++ b/src/api/submit.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -50,7 +50,7 @@
 extern pid_t getsid(pid_t pid);		/* missing from <unistd.h> */
 #endif
 
-#include <slurm/slurm.h>
+#include "slurm/slurm.h"
 
 #include "src/common/read_config.h"
 #include "src/common/slurm_protocol_api.h"
diff --git a/src/api/suspend.c b/src/api/suspend.c
index a774cc1e0..5624fcacc 100644
--- a/src/api/suspend.c
+++ b/src/api/suspend.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -41,7 +41,7 @@
 #  include "config.h"
 #endif
 
-#include <slurm/slurm.h>
+#include "slurm/slurm.h"
 #include "src/common/slurm_protocol_api.h"
 
 static int _suspend_op (uint16_t op, uint32_t job_id);
diff --git a/src/api/topo_info.c b/src/api/topo_info.c
index b5ae20e64..ec8ab0edb 100644
--- a/src/api/topo_info.c
+++ b/src/api/topo_info.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -53,7 +53,7 @@
 #include <arpa/inet.h>
 #include <unistd.h>
 
-#include <slurm/slurm.h>
+#include "slurm/slurm.h"
 
 #include "src/common/parse_time.h"
 #include "src/common/slurm_protocol_api.h"
diff --git a/src/api/triggers.c b/src/api/triggers.c
index c06a3f861..b255c230c 100644
--- a/src/api/triggers.c
+++ b/src/api/triggers.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -46,7 +46,7 @@
 #include <unistd.h>
 #include <sys/types.h>
 
-#include <slurm/slurm.h>
+#include "slurm/slurm.h"
 
 #include "src/common/read_config.h"
 #include "src/common/slurm_protocol_api.h"
diff --git a/src/api/update_config.c b/src/api/update_config.c
index 8cc869bf3..31ebc3263 100644
--- a/src/api/update_config.c
+++ b/src/api/update_config.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -47,12 +47,24 @@
 #include <stdlib.h>
 #include <string.h>
 
-#include <slurm/slurm.h>
+#include "slurm/slurm.h"
 
 #include "src/common/slurm_protocol_api.h"
 
 static int _slurm_update (void * data, slurm_msg_type_t msg_type);
 
+/*
+ * slurm_update_front_end - issue RPC to a front_end node's configuration per
+ *	request, only usable by user root
+ * IN front_end_msg - description of front_end node updates
+ * RET 0 on success, otherwise return -1 and set errno to indicate the error
+ */
+int
+slurm_update_front_end (update_front_end_msg_t * front_end_msg)
+{
+	return _slurm_update ((void *) front_end_msg, REQUEST_UPDATE_FRONT_END);
+}
+
 /*
  * slurm_update_job - issue RPC to a job's configuration per request,
  *	only usable by user root or (for some parameters) the job's owner
diff --git a/src/common/Makefile.am b/src/common/Makefile.am
index 7225858e5..6d989b0ee 100644
--- a/src/common/Makefile.am
+++ b/src/common/Makefile.am
@@ -22,6 +22,8 @@ endif
 INCLUDES     = -I$(top_srcdir) $(BG_INCLUDES)
 
 noinst_PROGRAMS = libcommon.o libeio.o libspank.o
+# This is needed if compiling on windows
+EXEEXT=
 
 noinst_LTLIBRARIES = 			\
 	libcommon.la 			\
@@ -30,8 +32,10 @@ noinst_LTLIBRARIES = 			\
 	libspank.la
 
 libcommon_la_SOURCES = 			\
+ 	xcgroup_read_config.c xcgroup_read_config.h		\
+ 	xcgroup.c xcgroup.h 					\
+ 	xcpuinfo.c xcpuinfo.h 					\
 	assoc_mgr.c assoc_mgr.h 	\
-	basil_resv_conf.c basil_resv_conf.h \
 	xmalloc.c xmalloc.h 		\
 	xassert.c xassert.h		\
 	xstring.c xstring.h		\
@@ -40,7 +44,6 @@ libcommon_la_SOURCES = 			\
 	strlcpy.c strlcpy.h		\
 	list.c list.h 			\
 	net.c net.h                     \
-	fd.c fd.h			\
 	log.c log.h			\
 	cbuf.c cbuf.h			\
 	safeopen.c safeopen.h		\
@@ -55,6 +58,7 @@ libcommon_la_SOURCES = 			\
 	read_config.c read_config.h	\
 	node_select.c node_select.h	\
 	env.c env.h      		\
+	fd.c fd.h       		\
 	slurm_cred.h       		\
 	slurm_cred.c			\
 	slurm_errno.c			\
@@ -117,8 +121,7 @@ EXTRA_libcommon_la_SOURCES = 	\
 
 libdaemonize_la_SOURCES =  		\
 	daemonize.c       	 	\
-	daemonize.h        		\
-	fd.c fd.h
+	daemonize.h
 
 libeio_la_SOURCES = 	   		\
 	eio.c eio.h	   		\
diff --git a/src/common/Makefile.in b/src/common/Makefile.in
index 79f4eec0c..ed3a290c1 100644
--- a/src/common/Makefile.in
+++ b/src/common/Makefile.in
@@ -74,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -84,6 +85,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -94,16 +96,17 @@ CONFIG_CLEAN_FILES =
 CONFIG_CLEAN_VPATH_FILES =
 LTLIBRARIES = $(noinst_LTLIBRARIES)
 libcommon_la_DEPENDENCIES =
-am__libcommon_la_SOURCES_DIST = assoc_mgr.c assoc_mgr.h \
-	basil_resv_conf.c basil_resv_conf.h xmalloc.c xmalloc.h \
+am__libcommon_la_SOURCES_DIST = xcgroup_read_config.c \
+	xcgroup_read_config.h xcgroup.c xcgroup.h xcpuinfo.c \
+	xcpuinfo.h assoc_mgr.c assoc_mgr.h xmalloc.c xmalloc.h \
 	xassert.c xassert.h xstring.c xstring.h xsignal.c xsignal.h \
 	forward.c forward.h strlcpy.c strlcpy.h list.c list.h net.c \
-	net.h fd.c fd.h log.c log.h cbuf.c cbuf.h safeopen.c \
-	safeopen.h bitstring.c bitstring.h mpi.c mpi.h pack.c pack.h \
+	net.h log.c log.h cbuf.c cbuf.h safeopen.c safeopen.h \
+	bitstring.c bitstring.h mpi.c mpi.h pack.c pack.h \
 	parse_config.c parse_config.h parse_spec.c parse_spec.h \
 	plugin.c plugin.h plugrack.c plugrack.h print_fields.c \
 	print_fields.h read_config.c read_config.h node_select.c \
-	node_select.h env.c env.h slurm_cred.h slurm_cred.c \
+	node_select.h env.c env.h fd.c fd.h slurm_cred.h slurm_cred.c \
 	slurm_errno.c slurm_priority.c slurm_priority.h \
 	slurm_protocol_api.c slurm_protocol_api.h \
 	slurm_protocol_pack.c slurm_protocol_pack.h \
@@ -131,32 +134,33 @@ am__libcommon_la_SOURCES_DIST = assoc_mgr.c assoc_mgr.h \
 	proc_args.c proc_args.h slurm_strcasestr.c slurm_strcasestr.h \
 	node_conf.h node_conf.c gres.h gres.c
 @HAVE_UNSETENV_FALSE@am__objects_1 = unsetenv.lo
-am_libcommon_la_OBJECTS = assoc_mgr.lo basil_resv_conf.lo xmalloc.lo \
-	xassert.lo xstring.lo xsignal.lo forward.lo strlcpy.lo list.lo \
-	net.lo fd.lo log.lo cbuf.lo safeopen.lo bitstring.lo mpi.lo \
-	pack.lo parse_config.lo parse_spec.lo plugin.lo plugrack.lo \
-	print_fields.lo read_config.lo node_select.lo env.lo \
-	slurm_cred.lo slurm_errno.lo slurm_priority.lo \
-	slurm_protocol_api.lo slurm_protocol_pack.lo \
-	slurm_protocol_util.lo slurm_protocol_socket_implementation.lo \
-	slurm_protocol_defs.lo slurm_rlimits_info.lo slurmdb_defs.lo \
-	slurmdb_pack.lo slurmdbd_defs.lo working_cluster.lo uid.lo \
-	util-net.lo slurm_auth.lo jobacct_common.lo \
-	slurm_accounting_storage.lo slurm_jobacct_gather.lo \
-	slurm_jobcomp.lo slurm_topology.lo switch.lo arg_desc.lo \
-	malloc.lo getopt.lo getopt1.lo $(am__objects_1) \
-	slurm_selecttype_info.lo slurm_resource_info.lo hostlist.lo \
-	slurm_step_layout.lo checkpoint.lo job_resources.lo \
-	parse_time.lo job_options.lo global_defaults.lo timers.lo \
-	stepd_api.lo write_labelled_message.lo proc_args.lo \
-	slurm_strcasestr.lo node_conf.lo gres.lo
+am_libcommon_la_OBJECTS = xcgroup_read_config.lo xcgroup.lo \
+	xcpuinfo.lo assoc_mgr.lo xmalloc.lo xassert.lo xstring.lo \
+	xsignal.lo forward.lo strlcpy.lo list.lo net.lo log.lo cbuf.lo \
+	safeopen.lo bitstring.lo mpi.lo pack.lo parse_config.lo \
+	parse_spec.lo plugin.lo plugrack.lo print_fields.lo \
+	read_config.lo node_select.lo env.lo fd.lo slurm_cred.lo \
+	slurm_errno.lo slurm_priority.lo slurm_protocol_api.lo \
+	slurm_protocol_pack.lo slurm_protocol_util.lo \
+	slurm_protocol_socket_implementation.lo slurm_protocol_defs.lo \
+	slurm_rlimits_info.lo slurmdb_defs.lo slurmdb_pack.lo \
+	slurmdbd_defs.lo working_cluster.lo uid.lo util-net.lo \
+	slurm_auth.lo jobacct_common.lo slurm_accounting_storage.lo \
+	slurm_jobacct_gather.lo slurm_jobcomp.lo slurm_topology.lo \
+	switch.lo arg_desc.lo malloc.lo getopt.lo getopt1.lo \
+	$(am__objects_1) slurm_selecttype_info.lo \
+	slurm_resource_info.lo hostlist.lo slurm_step_layout.lo \
+	checkpoint.lo job_resources.lo parse_time.lo job_options.lo \
+	global_defaults.lo timers.lo stepd_api.lo \
+	write_labelled_message.lo proc_args.lo slurm_strcasestr.lo \
+	node_conf.lo gres.lo
 am__EXTRA_libcommon_la_SOURCES_DIST = unsetenv.c unsetenv.h
 libcommon_la_OBJECTS = $(am_libcommon_la_OBJECTS)
 libcommon_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
 	$(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
 	$(libcommon_la_LDFLAGS) $(LDFLAGS) -o $@
 libdaemonize_la_LIBADD =
-am_libdaemonize_la_OBJECTS = daemonize.lo fd.lo
+am_libdaemonize_la_OBJECTS = daemonize.lo
 libdaemonize_la_OBJECTS = $(am_libdaemonize_la_OBJECTS)
 libeio_la_LIBADD =
 am_libeio_la_OBJECTS = eio.lo io_hdr.lo
@@ -209,7 +213,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -235,7 +242,8 @@ ECHO_N = @ECHO_N@
 ECHO_T = @ECHO_T@
 EGREP = @EGREP@
 ELAN_LIBS = @ELAN_LIBS@
-EXEEXT = @EXEEXT@
+# This is needed if compiling on windows
+EXEEXT = 
 FEDERATION_LDFLAGS = @FEDERATION_LDFLAGS@
 FGREP = @FGREP@
 GREP = @GREP@
@@ -246,6 +254,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -303,6 +312,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -338,6 +348,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
@@ -403,8 +414,10 @@ noinst_LTLIBRARIES = \
 	libspank.la
 
 libcommon_la_SOURCES = \
+ 	xcgroup_read_config.c xcgroup_read_config.h		\
+ 	xcgroup.c xcgroup.h 					\
+ 	xcpuinfo.c xcpuinfo.h 					\
 	assoc_mgr.c assoc_mgr.h 	\
-	basil_resv_conf.c basil_resv_conf.h \
 	xmalloc.c xmalloc.h 		\
 	xassert.c xassert.h		\
 	xstring.c xstring.h		\
@@ -413,7 +426,6 @@ libcommon_la_SOURCES = \
 	strlcpy.c strlcpy.h		\
 	list.c list.h 			\
 	net.c net.h                     \
-	fd.c fd.h			\
 	log.c log.h			\
 	cbuf.c cbuf.h			\
 	safeopen.c safeopen.h		\
@@ -428,6 +440,7 @@ libcommon_la_SOURCES = \
 	read_config.c read_config.h	\
 	node_select.c node_select.h	\
 	env.c env.h      		\
+	fd.c fd.h       		\
 	slurm_cred.h       		\
 	slurm_cred.c			\
 	slurm_errno.c			\
@@ -490,8 +503,7 @@ EXTRA_libcommon_la_SOURCES = \
 
 libdaemonize_la_SOURCES = \
 	daemonize.c       	 	\
-	daemonize.h        		\
-	fd.c fd.h
+	daemonize.h
 
 libeio_la_SOURCES = \
 	eio.c eio.h	   		\
@@ -584,7 +596,6 @@ distclean-compile:
 
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arg_desc.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/assoc_mgr.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/basil_resv_conf.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/bitstring.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/cbuf.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/checkpoint.Plo@am__quote@
@@ -652,6 +663,9 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/working_cluster.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/write_labelled_message.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/xassert.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/xcgroup.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/xcgroup_read_config.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/xcpuinfo.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/xmalloc.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/xsignal.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/xstring.Plo@am__quote@
diff --git a/src/common/arg_desc.c b/src/common/arg_desc.c
index 1d91a146e..57d7a741e 100644
--- a/src/common/arg_desc.c
+++ b/src/common/arg_desc.c
@@ -6,7 +6,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/common/arg_desc.h b/src/common/arg_desc.h
index 0dce2dcfd..5aa72c86c 100644
--- a/src/common/arg_desc.h
+++ b/src/common/arg_desc.h
@@ -6,7 +6,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/common/assoc_mgr.c b/src/common/assoc_mgr.c
index 13117d6ad..7350a2703 100644
--- a/src/common/assoc_mgr.c
+++ b/src/common/assoc_mgr.c
@@ -7,7 +7,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -101,6 +101,8 @@ static int _addto_used_info(slurmdb_association_rec_t *assoc1,
 	assoc1->usage->grp_used_cpus += assoc2->usage->grp_used_cpus;
 	assoc1->usage->grp_used_nodes += assoc2->usage->grp_used_nodes;
 	assoc1->usage->grp_used_wall += assoc2->usage->grp_used_wall;
+	assoc1->usage->grp_used_cpu_run_secs +=
+		assoc2->usage->grp_used_cpu_run_secs;
 
 	assoc1->usage->used_jobs += assoc2->usage->used_jobs;
 	assoc1->usage->used_submit_jobs += assoc2->usage->used_submit_jobs;
@@ -116,6 +118,7 @@ static int _clear_used_assoc_info(slurmdb_association_rec_t *assoc)
 
 	assoc->usage->grp_used_cpus = 0;
 	assoc->usage->grp_used_nodes = 0;
+	assoc->usage->grp_used_cpu_run_secs = 0;
 
 	assoc->usage->used_jobs  = 0;
 	assoc->usage->used_submit_jobs = 0;
@@ -127,6 +130,33 @@ static int _clear_used_assoc_info(slurmdb_association_rec_t *assoc)
 	return SLURM_SUCCESS;
 }
 
+static void _clear_qos_user_limit_info(slurmdb_qos_rec_t *qos_ptr)
+{
+	slurmdb_used_limits_t *used_limits = NULL;
+	ListIterator itr = NULL;
+
+	if (!qos_ptr->usage->user_limit_list
+	    || !list_count(qos_ptr->usage->user_limit_list))
+		return;
+
+	itr = list_iterator_create(qos_ptr->usage->user_limit_list);
+	while ((used_limits = list_next(itr))) {
+		used_limits->cpu_run_mins = 0; /* Currently isn't used
+						  in the code but put
+						  here for future
+						  reference when/if it
+						  is.
+					       */
+		used_limits->cpus = 0;
+		used_limits->jobs = 0;
+		used_limits->nodes = 0;
+		used_limits->submit_jobs = 0;
+	}
+	list_iterator_destroy(itr);
+
+	return;
+}
+
 static int _clear_used_qos_info(slurmdb_qos_rec_t *qos)
 {
 	if (!qos || !qos->usage)
@@ -134,6 +164,7 @@ static int _clear_used_qos_info(slurmdb_qos_rec_t *qos)
 
 	qos->usage->grp_used_cpus = 0;
 	qos->usage->grp_used_nodes = 0;
+	qos->usage->grp_used_cpu_run_secs = 0;
 
 	qos->usage->grp_used_jobs  = 0;
 	qos->usage->grp_used_submit_jobs = 0;
@@ -142,6 +173,8 @@ static int _clear_used_qos_info(slurmdb_qos_rec_t *qos)
 	 * else where since sometimes we call this and do not want
 	 * shares reset */
 
+	_clear_qos_user_limit_info(qos);
+
 	return SLURM_SUCCESS;
 }
 
@@ -1048,7 +1081,8 @@ static void _wr_wrunlock(lock_datatype_t datatype)
 	slurm_mutex_unlock(&locks_mutex);
 }
 
-extern int assoc_mgr_init(void *db_conn, assoc_init_args_t *args)
+extern int assoc_mgr_init(void *db_conn, assoc_init_args_t *args,
+			  int db_conn_errno)
 {
 	static uint16_t enforce = 0;
 	static uint16_t cache_level = ASSOC_MGR_CACHE_ALL;
@@ -1065,7 +1099,9 @@ extern int assoc_mgr_init(void *db_conn, assoc_init_args_t *args)
 	}
 
 	if (args) {
+		cache_level = args->cache_level;
 		enforce = args->enforce;
+
 		if (args->remove_assoc_notify)
 			remove_assoc_notify = args->remove_assoc_notify;
 		if (args->remove_qos_notify)
@@ -1076,7 +1112,6 @@ extern int assoc_mgr_init(void *db_conn, assoc_init_args_t *args)
 			update_qos_notify = args->update_qos_notify;
 		if (args->update_resvs)
 			update_resvs = args->update_resvs;
-		cache_level = args->cache_level;
 		assoc_mgr_refresh_lists(db_conn, args);
 	}
 
@@ -1092,8 +1127,9 @@ extern int assoc_mgr_init(void *db_conn, assoc_init_args_t *args)
 		assoc_mgr_cluster_name = slurm_get_cluster_name();
 	}
 
-	/* check if we can't talk to the db yet */
-	if (errno == ESLURM_ACCESS_DENIED)
+	/* check if we can't talk to the db yet (Do this after all
+	 * the initialization above) */
+	if (db_conn_errno != SLURM_SUCCESS)
 		return SLURM_ERROR;
 
 	/* get qos before association since it is used there */
@@ -1496,6 +1532,8 @@ extern int assoc_mgr_fill_in_assoc(void *db_conn,
 	/* if (!assoc->usage->childern_list) */
 	/* 	assoc->usage->childern_list = ret_assoc->usage->childern_list; */
 	/* assoc->usage->grp_used_cpus   = ret_assoc->usage->grp_used_cpus; */
+	/* assoc->usage->grp_used_cpu_run_mins  = */
+	/* 	ret_assoc->usage->grp_used_cpu_run_mins; */
 	/* assoc->usage->grp_used_nodes  = ret_assoc->usage->grp_used_nodes; */
 	/* assoc->usage->grp_used_wall   = ret_assoc->usage->grp_used_wall; */
 
@@ -1637,6 +1675,7 @@ extern int assoc_mgr_fill_in_qos(void *db_conn, slurmdb_qos_rec_t *qos,
 
 	qos->id = found_qos->id;
 
+	qos->grace_time      = found_qos->grace_time;
 	qos->grp_cpu_mins    = found_qos->grp_cpu_mins;
 	qos->grp_cpu_run_mins= found_qos->grp_cpu_run_mins;
 	qos->grp_cpus        = found_qos->grp_cpus;
@@ -1648,8 +1687,10 @@ extern int assoc_mgr_fill_in_qos(void *db_conn, slurmdb_qos_rec_t *qos,
 	qos->max_cpu_mins_pj = found_qos->max_cpu_mins_pj;
 	qos->max_cpu_run_mins_pu = found_qos->max_cpu_run_mins_pu;
 	qos->max_cpus_pj     = found_qos->max_cpus_pj;
+	qos->max_cpus_pu     = found_qos->max_cpus_pu;
 	qos->max_jobs_pu     = found_qos->max_jobs_pu;
 	qos->max_nodes_pj    = found_qos->max_nodes_pj;
+	qos->max_nodes_pu    = found_qos->max_nodes_pu;
 	qos->max_submit_jobs_pu = found_qos->max_submit_jobs_pu;
 	qos->max_wall_pj     = found_qos->max_wall_pj;
 
@@ -1670,6 +1711,8 @@ extern int assoc_mgr_fill_in_qos(void *db_conn, slurmdb_qos_rec_t *qos,
 	   use the pointer that is returned. */
 
 	/* qos->usage->grp_used_cpus   = found_qos->usage->grp_used_cpus; */
+	/* qos->usage->grp_used_cpu_run_mins  = */
+	/* 	found_qos->usage->grp_used_cpu_run_mins; */
 	/* qos->usage->grp_used_jobs   = found_qos->usage->grp_used_jobs; */
 	/* qos->usage->grp_used_nodes  = found_qos->usage->grp_used_nodes; */
 	/* qos->usage->grp_used_submit_jobs = */
@@ -2063,7 +2106,7 @@ extern List assoc_mgr_get_shares(void *db_conn,
 			/* We only calculate user effective usage when
 			 * we need it
 			 */
-			if (assoc->usage->usage_efctv == (long double)NO_VAL)
+			if (fuzzy_equal(assoc->usage->usage_efctv, NO_VAL))
 				priority_g_set_assoc_usage(assoc);
 
 			share->name = xstrdup(assoc->user);
@@ -2496,8 +2539,11 @@ extern int assoc_mgr_update_assocs(slurmdb_update_object_t *update)
 					goto is_user;
 				itr2 = list_iterator_create(
 					object->usage->childern_list);
-				while ((rec = list_next(itr2)))
-					count += rec->shares_raw;
+				while ((rec = list_next(itr2))) {
+					if (rec->shares_raw
+					    != SLURMDB_FS_USE_PARENT)
+						count += rec->shares_raw;
+				}
 				list_iterator_reset(itr2);
 				while ((rec = list_next(itr2)))
 					rec->usage->level_shares = count;
@@ -2877,6 +2923,8 @@ extern int assoc_mgr_update_qos(slurmdb_update_object_t *update)
 					rec->flags = object->flags;
 			}
 
+			if (object->grace_time != NO_VAL)
+				rec->grace_time = object->grace_time;
 			if (object->grp_cpu_mins != (uint64_t)NO_VAL)
 				rec->grp_cpu_mins = object->grp_cpu_mins;
 			if (object->grp_cpu_run_mins != (uint64_t)NO_VAL)
@@ -2908,12 +2956,20 @@ extern int assoc_mgr_update_qos(slurmdb_update_object_t *update)
 				update_jobs = true;
 				rec->max_cpus_pj = object->max_cpus_pj;
 			}
+			if (object->max_cpus_pu != NO_VAL) {
+				update_jobs = true;
+				rec->max_cpus_pu = object->max_cpus_pu;
+			}
 			if (object->max_jobs_pu != NO_VAL)
 				rec->max_jobs_pu = object->max_jobs_pu;
 			if (object->max_nodes_pj != NO_VAL) {
 				update_jobs = true;
 				rec->max_nodes_pj = object->max_nodes_pj;
 			}
+			if (object->max_nodes_pu != NO_VAL) {
+				update_jobs = true;
+				rec->max_nodes_pu = object->max_nodes_pu;
+			}
 			if (object->max_submit_jobs_pu != NO_VAL)
 				rec->max_submit_jobs_pu =
 					object->max_submit_jobs_pu;
@@ -2953,11 +3009,10 @@ extern int assoc_mgr_update_qos(slurmdb_update_object_t *update)
 					_set_qos_norm_priority(rec);
 			}
 
-			if (object->usage_factor != (double)NO_VAL)
-				rec->usage_factor =
-					object->usage_factor;
+			if (!fuzzy_equal(object->usage_factor, NO_VAL))
+				rec->usage_factor = object->usage_factor;
 
-			if (object->usage_thres != (double)NO_VAL)
+			if (!fuzzy_equal(object->usage_thres, NO_VAL))
 				rec->usage_thres = object->usage_thres;
 
 			if (update_jobs && update_qos_notify) {
@@ -3450,7 +3505,7 @@ extern int dump_assoc_mgr_state(char *state_save_location)
 
 extern int load_assoc_usage(char *state_save_location)
 {
-	int data_allocated, data_read = 0, error_code = SLURM_SUCCESS;
+	int data_allocated, data_read = 0;
 	uint32_t data_size = 0;
 	uint16_t ver = 0;
 	int state_fd;
@@ -3472,7 +3527,6 @@ extern int load_assoc_usage(char *state_save_location)
 	state_fd = open(state_file, O_RDONLY);
 	if (state_fd < 0) {
 		debug2("No Assoc usage file (%s) to recover", state_file);
-		error_code = ENOENT;
 	} else {
 		data_allocated = BUF_SIZE;
 		data = xmalloc(data_allocated);
@@ -3557,7 +3611,7 @@ unpack_error:
 
 extern int load_qos_usage(char *state_save_location)
 {
-	int data_allocated, data_read = 0, error_code = SLURM_SUCCESS;
+	int data_allocated, data_read = 0;
 	uint32_t data_size = 0;
 	uint16_t ver = 0;
 	int state_fd;
@@ -3579,7 +3633,6 @@ extern int load_qos_usage(char *state_save_location)
 	state_fd = open(state_file, O_RDONLY);
 	if (state_fd < 0) {
 		debug2("No Qos usage file (%s) to recover", state_file);
-		error_code = ENOENT;
 	} else {
 		data_allocated = BUF_SIZE;
 		data = xmalloc(data_allocated);
@@ -3824,7 +3877,8 @@ extern int assoc_mgr_refresh_lists(void *db_conn, assoc_init_args_t *args)
 
 	/* get qos before association since it is used there */
 	if (cache_level & ASSOC_MGR_CACHE_QOS)
-		if (_refresh_assoc_mgr_qos_list(db_conn, enforce) == SLURM_ERROR)
+		if (_refresh_assoc_mgr_qos_list(db_conn, enforce)
+		    == SLURM_ERROR)
 			return SLURM_ERROR;
 
 	/* get user before association/wckey since it is used there */
diff --git a/src/common/assoc_mgr.h b/src/common/assoc_mgr.h
index 305b3fe2f..c280ee6b9 100644
--- a/src/common/assoc_mgr.h
+++ b/src/common/assoc_mgr.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -49,8 +49,8 @@
 #include "src/common/slurmdbd_defs.h"
 #include "src/slurmctld/slurmctld.h"
 #include "src/slurmctld/locks.h"
-#include <slurm/slurm.h>
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm.h"
+#include "slurm/slurm_errno.h"
 
 #define ASSOC_MGR_CACHE_ASSOC 0x0001
 #define ASSOC_MGR_CACHE_QOS 0x0002
@@ -106,6 +106,8 @@ struct assoc_mgr_association_usage {
 				  * (DON'T PACK) */
 	double grp_used_wall;   /* group count of time used in
 				 * running jobs (DON'T PACK) */
+	uint64_t grp_used_cpu_run_secs; /* count of running cpu secs
+					 * (DON'T PACK) */
 
 	uint32_t level_shares;  /* number of shares on this level of
 				 * the tree (DON'T PACK) */
@@ -120,8 +122,6 @@ struct assoc_mgr_association_usage {
 	long double usage_norm;	/* normalized usage (DON'T PACK) */
 	long double usage_raw;	/* measure of resource usage (DON'T PACK) */
 
-	uint64_t used_cpu_run_secs; /* count of running cpu secs
-				     * (DON'T PACK) */
 	uint32_t used_jobs;	/* count of active jobs (DON'T PACK) */
 	uint32_t used_submit_jobs; /* count of jobs pending or running
 				    * (DON'T PACK) */
@@ -163,7 +163,8 @@ extern uint32_t g_qos_max_priority; /* max priority in all qos's */
 extern uint32_t g_qos_count; /* count used for generating qos bitstr's */
 
 
-extern int assoc_mgr_init(void *db_conn, assoc_init_args_t *args);
+extern int assoc_mgr_init(void *db_conn, assoc_init_args_t *args,
+			  int db_conn_errno);
 extern int assoc_mgr_fini(char *state_save_location);
 extern void assoc_mgr_lock(assoc_mgr_lock_t *locks);
 extern void assoc_mgr_unlock(assoc_mgr_lock_t *locks);
diff --git a/src/common/basil_resv_conf.c b/src/common/basil_resv_conf.c
deleted file mode 100644
index bd0ff24bc..000000000
--- a/src/common/basil_resv_conf.c
+++ /dev/null
@@ -1,74 +0,0 @@
-/*****************************************************************************\
- *  basil_resv_conf.h - user interface to BASIL for confirming a resource
- *	reservation. BASIL is Cray's Batch Application Scheduler Interface
- *	Layer.
- *****************************************************************************
- *  Copyright (C) 2009 Lawrence Livermore National Security.
- *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
- *  Written by Morris Jette <jette1@llnl.gov>
- *  CODE-OCEC-09-009. All rights reserved.
- *
- *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
- *  Please also read the included file: DISCLAIMER.
- *
- *  SLURM is free software; you can redistribute it and/or modify it under
- *  the terms of the GNU General Public License as published by the Free
- *  Software Foundation; either version 2 of the License, or (at your option)
- *  any later version.
- *
- *  In addition, as a special exception, the copyright holders give permission
- *  to link the code of portions of this program with the OpenSSL library under
- *  certain conditions as described in each individual source file, and
- *  distribute linked combinations including the two. You must obey the GNU
- *  General Public License in all respects for all of the code used other than
- *  OpenSSL. If you modify file(s) with this exception, you may extend this
- *  exception to your version of the file(s), but you are not obligated to do
- *  so. If you do not wish to do so, delete this exception statement from your
- *  version.  If you delete this exception statement from all source files in
- *  the program, then also delete it here.
- *
- *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
- *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
- *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
- *  details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with SLURM; if not, write to the Free Software Foundation, Inc.,
- *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
-\*****************************************************************************/
-
-#if HAVE_CONFIG_H
-#  include "config.h"
-#  if HAVE_INTTYPES_H
-#    include <inttypes.h>
-#  else
-#    if HAVE_STDINT_H
-#      include <stdint.h>
-#    endif
-#  endif		/* HAVE_INTTYPES_H */
-#endif
-
-#include <slurm/slurm_errno.h>
-
-#include "src/common/log.h"
-
-#define BASIL_DEBUG 1
-
-/*
- * basil_resv_conf - confirm a previously created BASIL resource reservation.
- *	This must be called from the same container from which the user
- *	application is to run. The container is normally a Linux Process
- *	Group or SGI Process Aggregate (see http://oss.sgi.com/projects/pagg).
- * IN reservation_id - ID of reservation conform
- * IN job_id - SLURM job ID
- * RET 0 or error code
- */
-extern int basil_resv_conf(uint32_t reservation_id, uint32_t job_id)
-{
-	int error_code = SLURM_SUCCESS;
-#ifdef HAVE_CRAY
-	/* Issue the BASIL CONFIRM request  - To Be Done */
-#endif
-	return error_code;
-}
diff --git a/src/common/basil_resv_conf.h b/src/common/basil_resv_conf.h
deleted file mode 100644
index db22239a6..000000000
--- a/src/common/basil_resv_conf.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/*****************************************************************************\
- *  basil_resv_conf.h - user interface to BASIL for confirming a resource
- *	reservation. BASIL is Cray's Batch Application Scheduler Interface
- *	Layer.
- *****************************************************************************
- *  Copyright (C) 2009 Lawrence Livermore National Security.
- *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
- *  Written by Morris Jette <jette1@llnl.gov>
- *  CODE-OCEC-09-009. All rights reserved.
- *
- *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
- *  Please also read the included file: DISCLAIMER.
- *
- *  SLURM is free software; you can redistribute it and/or modify it under
- *  the terms of the GNU General Public License as published by the Free
- *  Software Foundation; either version 2 of the License, or (at your option)
- *  any later version.
- *
- *  In addition, as a special exception, the copyright holders give permission
- *  to link the code of portions of this program with the OpenSSL library under
- *  certain conditions as described in each individual source file, and
- *  distribute linked combinations including the two. You must obey the GNU
- *  General Public License in all respects for all of the code used other than
- *  OpenSSL. If you modify file(s) with this exception, you may extend this
- *  exception to your version of the file(s), but you are not obligated to do
- *  so. If you do not wish to do so, delete this exception statement from your
- *  version.  If you delete this exception statement from all source files in
- *  the program, then also delete it here.
- *
- *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
- *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
- *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
- *  details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with SLURM; if not, write to the Free Software Foundation, Inc.,
- *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
-\*****************************************************************************/
-
-#ifndef _HAVE_BASIL_RESV_CONF_H
-#define _HAVE_BASIL_RESV_CONF_H
-
-#if HAVE_CONFIG_H
-#  include "config.h"
-#  if HAVE_INTTYPES_H
-#    include <inttypes.h>
-#  else
-#    if HAVE_STDINT_H
-#      include <stdint.h>
-#    endif
-#  endif		/* HAVE_INTTYPES_H */
-#endif
-
-/*
- * basil_resv_conf - confirm a previously created BASIL resource reservation.
- *	This must be called from the same container from which the user
- *	application is to run. The container is normally a Linux Process
- *	Group or SGI Process Aggregate (see http://oss.sgi.com/projects/pagg).
- * IN reservation_id - ID of reservation conform
- * IN job_id - SLURM job ID
- * RET 0 or error code
- */
-extern int basil_resv_conf(uint32_t reservation_id, uint32_t job_id);
-
-#endif	/* !_HAVE_BASIL_RESV_CONF_H */
diff --git a/src/common/bitstring.c b/src/common/bitstring.c
index 6363ddf9d..1607eb6fc 100644
--- a/src/common/bitstring.c
+++ b/src/common/bitstring.c
@@ -10,7 +10,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -1117,7 +1117,7 @@ int bit_unfmt_hexmask(bitstr_t * bitmap, const char* str)
 	int bit_index = 0, len = strlen(str);
 	int rc = 0;
 	const char *curpos = str + len - 1;
-	char current;
+	int current;
 	bitoff_t bitsize = bit_size(bitmap);
 
 	bit_nclear(bitmap, 0, bitsize - 1);
diff --git a/src/common/bitstring.h b/src/common/bitstring.h
index a0cc63a2d..fcbca5851 100644
--- a/src/common/bitstring.h
+++ b/src/common/bitstring.h
@@ -11,7 +11,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/common/checkpoint.c b/src/common/checkpoint.c
index 38557b992..ac7b3e204 100644
--- a/src/common/checkpoint.c
+++ b/src/common/checkpoint.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/common/checkpoint.h b/src/common/checkpoint.h
index 43431c7dd..0eae38780 100644
--- a/src/common/checkpoint.h
+++ b/src/common/checkpoint.h
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/common/daemonize.c b/src/common/daemonize.c
index f92774514..09ab391e2 100644
--- a/src/common/daemonize.c
+++ b/src/common/daemonize.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -51,16 +51,6 @@
 #include "src/common/macros.h"
 #include "src/common/xassert.h"
 
-/* closeall FDs >= a specified value */
-static void
-closeall(int fd)
-{
-	int fdlimit = sysconf(_SC_OPEN_MAX);
-
-	while (fd < fdlimit)
-		close(fd++);
-}
-
 /* detach and go into background.
  * caller is responsible for umasks
  *
diff --git a/src/common/daemonize.h b/src/common/daemonize.h
index 2d30d2d33..42e9fb99a 100644
--- a/src/common/daemonize.h
+++ b/src/common/daemonize.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/common/eio.c b/src/common/eio.c
index e913cbd09..82858c617 100644
--- a/src/common/eio.c
+++ b/src/common/eio.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -235,7 +235,8 @@ static int _eio_wakeup_handler(eio_handle_t *eio)
 	/* move new eio objects from the new_objs to the obj_list */
 	list_transfer(eio->obj_list, eio->new_objs);
 
-	if (rc < 0) return error("eio_clear: read: %m");
+	if (rc < 0)
+		return error("eio_clear: read: %m");
 
 	return 0;
 }
@@ -301,8 +302,10 @@ _poll_internal(struct pollfd *pfds, unsigned int nfds)
 	int n;
 	while ((n = poll(pfds, nfds, -1)) < 0) {
 		switch (errno) {
-		case EINTR : return 0;
-		case EAGAIN: continue;
+		case EINTR :
+			return 0;
+		case EAGAIN:
+			continue;
 		default:
 			error("poll: %m");
 			return -1;
diff --git a/src/common/eio.h b/src/common/eio.h
index 8e7b3dcdd..12bdbe961 100644
--- a/src/common/eio.h
+++ b/src/common/eio.h
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/common/env.c b/src/common/env.c
index 8963a639a..7814062d7 100644
--- a/src/common/env.c
+++ b/src/common/env.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -90,29 +90,45 @@ static int _setup_particulars(uint32_t cluster_flags,
 	int rc = SLURM_SUCCESS;
 	if (cluster_flags & CLUSTER_FLAG_BG) {
 		char *bg_part_id = NULL;
+		uint32_t node_cnt = 0;
 		select_g_select_jobinfo_get(select_jobinfo,
 					    SELECT_JOBDATA_BLOCK_ID,
 					    &bg_part_id);
 		if (bg_part_id) {
 			/* check to see if this is a HTC block or not. */
 			if (cluster_flags & CLUSTER_FLAG_BGP) {
-				uint16_t conn_type =
-					(uint16_t)NO_VAL;
+				uint16_t conn_type[HIGHEST_DIMENSIONS];
 				select_g_select_jobinfo_get(
 					select_jobinfo,
 					SELECT_JOBDATA_CONN_TYPE,
 					&conn_type);
-				if (conn_type > SELECT_SMALL) {
+				if (conn_type[0] > SELECT_SMALL) {
 					/* SUBMIT_POOL over rides
 					   HTC_SUBMIT_POOL */
 					setenvf(dest, "SUBMIT_POOL", "%s",
 						bg_part_id);
 				}
 			}
+			select_g_select_jobinfo_get(
+				select_jobinfo,
+				SELECT_JOBDATA_BLOCK_NODE_CNT,
+				&node_cnt);
+			if (node_cnt)
+				setenvf(dest, "SLURM_BLOCK_NUM_NODES",
+					"%u", node_cnt);
+
 			setenvf(dest, "MPIRUN_PARTITION", "%s", bg_part_id);
 			setenvf(dest, "MPIRUN_NOFREE", "%d", 1);
 			setenvf(dest, "MPIRUN_NOALLOCATE", "%d", 1);
 			xfree(bg_part_id);
+			select_g_select_jobinfo_get(select_jobinfo,
+						    SELECT_JOBDATA_IONODES,
+						    &bg_part_id);
+			if (bg_part_id) {
+				setenvf(dest, "SLURM_JOB_SUB_MP", "%s",
+					bg_part_id);
+				xfree(bg_part_id);
+			}
 		} else
 			rc = SLURM_FAILURE;
 
@@ -947,6 +963,34 @@ env_array_for_job(char ***dest, const resource_allocation_response_msg_t *alloc,
 	env_array_overwrite_fmt(dest, "SLURM_JOB_CPUS_PER_NODE", "%s", tmp);
 	xfree(tmp);
 
+	if (alloc->pn_min_memory & MEM_PER_CPU) {
+		uint32_t tmp_mem = alloc->pn_min_memory & (~MEM_PER_CPU);
+		env_array_overwrite_fmt(dest, "SLURM_MEM_PER_CPU", "%u",
+					tmp_mem);
+#ifdef HAVE_CRAY
+		env_array_overwrite_fmt(dest, "CRAY_AUTO_APRUN_OPTIONS",
+					"\"-m%u\"", tmp_mem);
+#endif
+	} else if (alloc->pn_min_memory) {
+		uint32_t tmp_mem = alloc->pn_min_memory;
+#ifdef HAVE_CRAY
+		uint32_t i, max_cpus_per_node = 1;
+		for (i = 0; i < alloc->num_cpu_groups; i++) {
+			if ((i == 0) ||
+			    (max_cpus_per_node < alloc->cpus_per_node[i])) {
+				max_cpus_per_node = alloc->cpus_per_node[i];
+			}
+		}
+#endif
+		env_array_overwrite_fmt(dest, "SLURM_MEM_PER_NODE", "%u",
+					tmp_mem);
+#ifdef HAVE_CRAY
+		tmp_mem /= max_cpus_per_node;
+		env_array_overwrite_fmt(dest, "CRAY_AUTO_APRUN_OPTIONS",
+					"\"-m%u\"", tmp_mem);
+#endif
+	}
+
 	/* OBSOLETE, but needed by MPI, do not remove */
 	env_array_overwrite_fmt(dest, "SLURM_JOBID", "%u", alloc->job_id);
 	env_array_overwrite_fmt(dest, "SLURM_NNODES", "%u", node_cnt);
@@ -1065,15 +1109,11 @@ env_array_for_batch_job(char ***dest, const batch_job_launch_msg_t *batch,
 	env_array_overwrite_fmt(dest, "SLURM_NNODES", "%u", num_nodes);
 	env_array_overwrite_fmt(dest, "SLURM_NODELIST", "%s", batch->nodes);
 
-	if((batch->cpus_per_task != 0) &&
-	   (batch->cpus_per_task != (uint16_t) NO_VAL))
+	if ((batch->cpus_per_task != 0) &&
+	    (batch->cpus_per_task != (uint16_t) NO_VAL))
 		cpus_per_task = batch->cpus_per_task;
 	else
 		cpus_per_task = 1;	/* default value */
-	if (cpus_per_task > 1) {
-		env_array_overwrite_fmt(dest, "SLURM_CPUS_PER_TASK", "%u",
-					cpus_per_task);
-	}
 
 	if(num_tasks) {
 		env_array_overwrite_fmt(dest, "SLURM_NTASKS", "%u",
@@ -1107,6 +1147,35 @@ env_array_for_batch_job(char ***dest, const batch_job_launch_msg_t *batch,
 	slurm_step_layout_destroy(step_layout);
 	env_array_overwrite_fmt(dest, "SLURM_TASKS_PER_NODE", "%s", tmp);
 	xfree(tmp);
+
+	if (batch->pn_min_memory & MEM_PER_CPU) {
+		uint32_t tmp_mem = batch->pn_min_memory & (~MEM_PER_CPU);
+		env_array_overwrite_fmt(dest, "SLURM_MEM_PER_CPU", "%u",
+					tmp_mem);
+#ifdef HAVE_CRAY
+		env_array_overwrite_fmt(dest, "CRAY_AUTO_APRUN_OPTIONS",
+					"\"-m%u\"", tmp_mem);
+#endif
+	} else if (batch->pn_min_memory) {
+		uint32_t tmp_mem = batch->pn_min_memory;
+#ifdef HAVE_CRAY
+		uint32_t i, max_cpus_per_node = 1;
+		for (i = 0; i < batch->num_cpu_groups; i++) {
+			if ((i == 0) ||
+			    (max_cpus_per_node < batch->cpus_per_node[i])) {
+				max_cpus_per_node = batch->cpus_per_node[i];
+			}
+		}
+#endif
+		env_array_overwrite_fmt(dest, "SLURM_MEM_PER_NODE", "%u",
+					tmp_mem);
+#ifdef HAVE_CRAY
+		tmp_mem /= max_cpus_per_node;
+		env_array_overwrite_fmt(dest, "CRAY_AUTO_APRUN_OPTIONS",
+					"\"-m%u\"", tmp_mem);
+#endif
+	}
+
 	return SLURM_SUCCESS;
 
 }
@@ -1128,6 +1197,7 @@ env_array_for_batch_job(char ***dest, const batch_job_launch_msg_t *batch,
  *	SLURM_STEP_LAUNCHER_PORT
  *	SLURM_STEP_LAUNCHER_IPADDR
  *	SLURM_STEP_RESV_PORTS
+ *      SLURM_STEP_SUB_MP
  *
  * Sets OBSOLETE variables:
  *	SLURM_STEPID
@@ -1145,18 +1215,42 @@ env_array_for_step(char ***dest,
 		   uint16_t launcher_port,
 		   bool preserve_env)
 {
-	char *tmp;
+	char *tmp, *tpn;
+	uint32_t node_cnt = step->step_layout->node_cnt;
+	uint32_t cluster_flags = slurmdb_setup_cluster_flags();
 
-	tmp = _uint16_array_to_str(step->step_layout->node_cnt,
+	tpn = _uint16_array_to_str(step->step_layout->node_cnt,
 				   step->step_layout->tasks);
 	env_array_overwrite_fmt(dest, "SLURM_STEP_ID", "%u", step->job_step_id);
 	env_array_overwrite_fmt(dest, "SLURM_STEP_NODELIST",
 				"%s", step->step_layout->node_list);
+	if (cluster_flags & CLUSTER_FLAG_BG) {
+		char geo_char[HIGHEST_DIMENSIONS+1];
+
+		select_g_select_jobinfo_get(step->select_jobinfo,
+					    SELECT_JOBDATA_NODE_CNT,
+					    &node_cnt);
+		if (!node_cnt)
+			node_cnt = step->step_layout->node_cnt;
+
+		select_g_select_jobinfo_sprint(step->select_jobinfo,
+					       geo_char, sizeof(geo_char),
+					       SELECT_PRINT_GEOMETRY);
+		if (geo_char[0] != '0')
+			env_array_overwrite_fmt(dest, "SLURM_STEP_GEO",
+						"%s", geo_char);
+		select_g_select_jobinfo_sprint(step->select_jobinfo,
+					       geo_char, sizeof(geo_char),
+					       SELECT_PRINT_START_LOC);
+		env_array_overwrite_fmt(dest, "SLURM_STEP_START_LOC",
+					"%s", geo_char);
+	}
+
 	env_array_overwrite_fmt(dest, "SLURM_STEP_NUM_NODES",
-				"%hu", step->step_layout->node_cnt);
+				"%u", node_cnt);
 	env_array_overwrite_fmt(dest, "SLURM_STEP_NUM_TASKS",
 				"%u", step->step_layout->task_cnt);
-	env_array_overwrite_fmt(dest, "SLURM_STEP_TASKS_PER_NODE", "%s", tmp);
+	env_array_overwrite_fmt(dest, "SLURM_STEP_TASKS_PER_NODE", "%s", tpn);
 	env_array_overwrite_fmt(dest, "SLURM_STEP_LAUNCHER_PORT",
 				"%hu", launcher_port);
 	if (step->resv_ports) {
@@ -1164,23 +1258,32 @@ env_array_for_step(char ***dest,
 					"%s", step->resv_ports);
 	}
 
-	/* OBSOLETE, but needed by MPI, do not remove */
+	tmp = NULL;
+	select_g_select_jobinfo_get(step->select_jobinfo,
+				    SELECT_JOBDATA_IONODES,
+				    &tmp);
+	if (tmp) {
+		setenvf(dest, "SLURM_STEP_SUB_MP", "%s", tmp);
+		xfree(tmp);
+	}
+
+	/* OBSOLETE, but needed by some MPI implementations, do not remove */
 	env_array_overwrite_fmt(dest, "SLURM_STEPID", "%u", step->job_step_id);
 	if (!preserve_env) {
 		env_array_overwrite_fmt(dest, "SLURM_NNODES",
-					"%hu", step->step_layout->node_cnt);
+					"%u", node_cnt);
 		env_array_overwrite_fmt(dest, "SLURM_NTASKS", "%u",
 					step->step_layout->task_cnt);
 		/* keep around for old scripts */
 		env_array_overwrite_fmt(dest, "SLURM_NPROCS",
 					"%u", step->step_layout->task_cnt);
 		env_array_overwrite_fmt(dest, "SLURM_TASKS_PER_NODE", "%s",
-					tmp);
+					tpn);
 	}
 	env_array_overwrite_fmt(dest, "SLURM_SRUN_COMM_PORT",
 				"%hu", launcher_port);
 
-	xfree(tmp);
+	xfree(tpn);
 }
 
 /*
diff --git a/src/common/env.h b/src/common/env.h
index 81e251e35..776e01ae3 100644
--- a/src/common/env.h
+++ b/src/common/env.h
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -29,9 +29,9 @@
 
 #include <sys/types.h>
 #include <unistd.h>
-#include <slurm/slurm.h>
 #include <sys/utsname.h>
 
+#include "slurm/slurm.h"
 #include "src/common/macros.h"
 #include "src/common/slurm_protocol_api.h"
 
diff --git a/src/common/fd.h b/src/common/fd.h
index 109735caa..cf7fda6aa 100644
--- a/src/common/fd.h
+++ b/src/common/fd.h
@@ -49,6 +49,15 @@
 
 #include "src/common/macros.h"
 
+/* close all FDs >= a specified value */
+static inline void closeall(int fd)
+{
+	int fdlimit = sysconf(_SC_OPEN_MAX);
+
+	while (fd < fdlimit)
+		close(fd++);
+}
+
 void fd_set_close_on_exec(int fd);
 /*
  *  Sets the file descriptor (fd) to be closed on exec().
diff --git a/src/common/forward.c b/src/common/forward.c
index b0f68411f..91aca3af2 100644
--- a/src/common/forward.c
+++ b/src/common/forward.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -47,7 +47,7 @@
 #include <stdlib.h>
 #include <sys/types.h>
 
-#include <slurm/slurm.h>
+#include "slurm/slurm.h"
 
 #include "src/common/forward.h"
 #include "src/common/xmalloc.h"
@@ -84,7 +84,6 @@ void *_forward_thread(void *arg)
 {
 	forward_msg_t *fwd_msg = (forward_msg_t *)arg;
 	Buf buffer = init_buf(fwd_msg->buf_len);
-	int i=0;
 	List ret_list = NULL;
 	slurm_fd_t fd = -1;
 	ret_data_info_t *ret_data_info = NULL;
@@ -182,7 +181,6 @@ void *_forward_thread(void *arg)
 			list_push(fwd_msg->ret_list, ret_data_info);
 			ret_data_info->node_name = xstrdup(name);
 			free(name);
-			i=0;
 			while((name = hostlist_shift(hl))) {
 				ret_data_info =
 					xmalloc(sizeof(ret_data_info_t));
@@ -315,9 +313,9 @@ void *_fwd_tree_thread(void *arg)
 	send_msg.data = fwd_tree->orig_msg->data;
 
 	/* repeat until we are sure the message was sent */
-	while((name = hostlist_shift(fwd_tree->tree_hl))) {
-		if(slurm_conf_get_addr(name, &send_msg.address)
-		   == SLURM_ERROR) {
+	while ((name = hostlist_shift(fwd_tree->tree_hl))) {
+		if (slurm_conf_get_addr(name, &send_msg.address)
+		    == SLURM_ERROR) {
 			error("fwd_tree_thread: can't find address for host "
 			      "%s, check slurm.conf", name);
 			slurm_mutex_lock(fwd_tree->tree_mutex);
diff --git a/src/common/forward.h b/src/common/forward.h
index a7ad7d7cd..987cab5bb 100644
--- a/src/common/forward.h
+++ b/src/common/forward.h
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/common/gres.c b/src/common/gres.c
index 897251b15..a0e4523e3 100644
--- a/src/common/gres.c
+++ b/src/common/gres.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -67,10 +67,10 @@
 
 #include <stdio.h>
 #include <stdlib.h>
-#include <slurm/slurm.h>
-#include <slurm/slurm_errno.h>
 #include <sys/stat.h>
 
+#include "slurm/slurm.h"
+#include "slurm/slurm_errno.h"
 #include "src/common/gres.h"
 #include "src/common/list.h"
 #include "src/common/macros.h"
@@ -133,6 +133,7 @@ static uint32_t	_get_gres_cnt(char *orig_config, char *gres_name,
 			      char *gres_name_colon, int gres_name_colon_len);
 static char *	_get_gres_conf(void);
 static uint32_t	_get_tot_gres_cnt(uint32_t plugin_id, uint32_t *set_cnt);
+static int	_gres_find_id(void *x, void *key);
 static void	_gres_job_list_delete(void *list_element);
 extern int	_job_alloc(void *job_gres_data, void *node_gres_data,
 			   int node_cnt, int node_offset, uint32_t cpu_cnt,
@@ -207,6 +208,15 @@ static uint32_t	_build_id(char *gres_name)
 	return id;
 }
 
+static int _gres_find_id(void *x, void *key)
+{
+	uint32_t *plugin_id = (uint32_t *)key;
+	gres_state_t *state_ptr = (gres_state_t *) x;
+	if (state_ptr->plugin_id == *plugin_id)
+		return 1;
+	return 0;
+}
+
 /* Variant of strcmp that will accept NULL string pointers */
 static int  _strcmp(const char *s1, const char *s2)
 {
@@ -468,7 +478,8 @@ extern int gres_plugin_reconfig(bool *did_change)
 	char *plugin_names = slurm_get_gres_plugins();
 	bool plugin_change;
 
-	*did_change = false;
+	if (did_change)
+		*did_change = false;
 	slurm_mutex_lock(&gres_context_lock);
 	if (slurm_get_debug_flags() & DEBUG_FLAG_GRES)
 		gres_debug = true;
@@ -485,7 +496,8 @@ extern int gres_plugin_reconfig(bool *did_change)
 		error("GresPlugins changed from %s to %s ignored",
 		     gres_plugin_list, plugin_names);
 		error("Restart the slurmctld daemon to change GresPlugins");
-		*did_change = true;
+		if (did_change)
+			*did_change = true;
 #if 0
 		/* This logic would load new plugins, but we need the old
 		 * plugins to persist in order to process old state
@@ -736,6 +748,85 @@ static void _validate_config(slurm_gres_context_t *context_ptr)
 	list_iterator_destroy(iter);
 }
 
+extern int gres_plugin_node_config_devices_path(char **dev_path,
+						char **gres_name,
+						int array_len)
+{
+	static s_p_options_t _gres_options[] = {
+		{"Name", S_P_ARRAY, _parse_gres_config, NULL},
+		{NULL}
+	};
+
+	int count, i;
+	struct stat config_stat;
+	s_p_hashtbl_t *tbl;
+	gres_slurmd_conf_t **gres_array;
+	char *gres_conf_file;
+
+	gres_plugin_init();
+	gres_conf_file = _get_gres_conf();
+	if (stat(gres_conf_file, &config_stat) < 0) {
+		error("can't stat gres.conf file %s: %m", gres_conf_file);
+		xfree(gres_conf_file);
+		return 0;
+	}
+
+	slurm_mutex_lock(&gres_context_lock);
+	tbl = s_p_hashtbl_create(_gres_options);
+	if (s_p_parse_file(tbl, NULL, gres_conf_file, false) == SLURM_ERROR)
+		fatal("error opening/reading %s", gres_conf_file);
+	FREE_NULL_LIST(gres_conf_list);
+	gres_conf_list = list_create(_destroy_gres_slurmd_conf);
+	if (gres_conf_list == NULL)
+		fatal("list_create: malloc failure");
+	if (s_p_get_array((void ***) &gres_array, &count, "Name", tbl)) {
+		if (count > array_len) {
+			error("GRES device count exceeds array size (%d > %d)",
+			      count, array_len);
+			count = array_len;
+		}
+		for (i = 0; i < count; i++) {
+			if ((gres_array[i]) && (gres_array[i]->file)) {
+				dev_path[i]   = gres_array[i]->file;
+				gres_name[i]  = gres_array[i]->name;
+				gres_array[i] = NULL;
+			}
+		}
+	}
+	s_p_hashtbl_destroy(tbl);
+	slurm_mutex_unlock(&gres_context_lock);
+
+	xfree(gres_conf_file);
+	return count;
+}
+
+/* No gres.conf file found.
+ * Initialize gres table with zero counts of all resources.
+ * Counts can be altered by node_config_load() in the gres plugin. */
+static int _no_gres_conf(uint32_t cpu_cnt)
+{
+	int i, rc = SLURM_SUCCESS;
+	gres_slurmd_conf_t *p;
+
+	slurm_mutex_lock(&gres_context_lock);
+	FREE_NULL_LIST(gres_conf_list);
+	gres_conf_list = list_create(_destroy_gres_slurmd_conf);
+	if (gres_conf_list == NULL)
+		fatal("list_create: malloc failure");
+	p = xmalloc(sizeof(gres_slurmd_conf_t *) * gres_context_cnt);
+	for (i = 0; ((i < gres_context_cnt) && (rc == SLURM_SUCCESS)); i++) {
+		p = xmalloc(sizeof(gres_slurmd_conf_t));
+		p->cpu_cnt	= cpu_cnt;
+		p->name		= xstrdup(gres_context[i].gres_name);
+		p->plugin_id	= gres_context[i].plugin_id;
+		list_append(gres_conf_list, p);
+		rc = (*(gres_context[i].ops.node_config_load))(gres_conf_list);
+	}
+	slurm_mutex_unlock(&gres_context_lock);
+
+	return rc;
+}
+
 /*
  * Load this node's configuration (how many resources it has, topology, etc.)
  * IN cpu_cnt - Number of CPUs on configured on this node
@@ -758,13 +849,19 @@ extern int gres_plugin_node_config_load(uint32_t cpu_cnt)
 		return SLURM_SUCCESS;
 
 	gres_conf_file = _get_gres_conf();
+	if (stat(gres_conf_file, &config_stat) < 0) {
+		error("can't stat gres.conf file %s, assuming zero resource "
+		      "counts", gres_conf_file);
+		xfree(gres_conf_file);
+		return _no_gres_conf(cpu_cnt);
+	}
+
 	slurm_mutex_lock(&gres_context_lock);
 	gres_cpu_cnt = cpu_cnt;
-	if (stat(gres_conf_file, &config_stat) < 0)
-		fatal("can't stat gres.conf file %s: %m", gres_conf_file);
 	tbl = s_p_hashtbl_create(_gres_options);
-	if (s_p_parse_file(tbl, NULL, gres_conf_file) == SLURM_ERROR)
+	if (s_p_parse_file(tbl, NULL, gres_conf_file, false) == SLURM_ERROR)
 		fatal("error opening/reading %s", gres_conf_file);
+	FREE_NULL_LIST(gres_conf_list);
 	gres_conf_list = list_create(_destroy_gres_slurmd_conf);
 	if (gres_conf_list == NULL)
 		fatal("list_create: malloc failure");
@@ -2412,6 +2509,12 @@ extern uint32_t _job_test(void *job_gres_data, void *node_gres_data,
 		return NO_VAL;
 	} else if (job_gres_ptr->gres_cnt_alloc && node_gres_ptr->topo_cnt) {
 		/* Need to determine which specific CPUs can be used */
+		gres_avail = node_gres_ptr->gres_cnt_avail;
+		if (!use_total_gres)
+			gres_avail -= node_gres_ptr->gres_cnt_alloc;
+		if (job_gres_ptr->gres_cnt_alloc > gres_avail)
+			return (uint32_t) 0;	/* insufficient gres to use */
+
 		if (cpu_bitmap) {
 			cpus_ctld = cpu_end_bit - cpu_start_bit + 1;
 			if (cpus_ctld < 1) {
@@ -2463,9 +2566,10 @@ extern uint32_t _job_test(void *job_gres_data, void *node_gres_data,
 				break;
 			}
 			cpus_avail[top_inx] = 0;
-			i = node_gres_ptr->topo_gres_cnt_avail[top_inx] -
-			    node_gres_ptr->topo_gres_cnt_alloc[top_inx];
-			if (i <= 0) {
+			i = node_gres_ptr->topo_gres_cnt_avail[top_inx];
+			if (!use_total_gres)
+			    i -= node_gres_ptr->topo_gres_cnt_alloc[top_inx];
+			if (i < 0) {
 				error("gres/%s: topology allocation error on "
 				      "node %s", gres_name, node_name);
 				continue;
@@ -2555,7 +2659,12 @@ extern uint32_t gres_plugin_job_test(List job_gres_list, List node_gres_list,
 					    cpu_start_bit, cpu_end_bit,
 					    &topo_set, job_id, node_name,
 					    gres_context[i].gres_name);
-			cpu_cnt = MIN(tmp_cnt, cpu_cnt);
+			if (tmp_cnt != NO_VAL) {
+				if (cpu_cnt == NO_VAL)
+					cpu_cnt = tmp_cnt;
+				else
+					cpu_cnt = MIN(tmp_cnt, cpu_cnt);
+			}
 			break;
 		}
 		if (cpu_cnt == 0)
@@ -2622,7 +2731,6 @@ extern int _job_alloc(void *job_gres_data, void *node_gres_data,
 
 	/*
 	 * Select the specific resources to use for this job.
-	 * We'll need to add topology information in the future
 	 */
 	if (job_gres_ptr->gres_bit_alloc[node_offset]) {
 		/* Resuming a suspended job, resources already allocated */
@@ -2663,6 +2771,18 @@ extern int _job_alloc(void *job_gres_data, void *node_gres_data,
 	} else {
 		node_gres_ptr->gres_cnt_alloc += job_gres_ptr->gres_cnt_alloc;
 	}
+	if (job_gres_ptr->gres_bit_alloc &&
+	    job_gres_ptr->gres_bit_alloc[node_offset] &&
+	    node_gres_ptr->topo_gres_bitmap &&
+	    node_gres_ptr->topo_gres_cnt_alloc) {
+		for (i=0; i<node_gres_ptr->topo_cnt; i++) {
+			gres_cnt = bit_overlap(job_gres_ptr->
+					       gres_bit_alloc[node_offset],
+					       node_gres_ptr->
+					       topo_gres_bitmap[i]);
+			node_gres_ptr->topo_gres_cnt_alloc[i] += gres_cnt;
+		}
+	}
 
 	return SLURM_SUCCESS;
 }
@@ -2745,7 +2865,7 @@ static int _job_dealloc(void *job_gres_data, void *node_gres_data,
 			int node_offset, char *gres_name, uint32_t job_id,
 			char *node_name)
 {
-	int i, len;
+	int i, len, gres_cnt;
 	gres_job_state_t  *job_gres_ptr  = (gres_job_state_t *)  job_gres_data;
 	gres_node_state_t *node_gres_ptr = (gres_node_state_t *) node_gres_data;
 
@@ -2800,6 +2920,19 @@ static int _job_dealloc(void *job_gres_data, void *node_gres_data,
 		      gres_name, job_id, node_name);
 	}
 
+	if (job_gres_ptr->gres_bit_alloc &&
+	    job_gres_ptr->gres_bit_alloc[node_offset] &&
+	    node_gres_ptr->topo_gres_bitmap &&
+	    node_gres_ptr->topo_gres_cnt_alloc) {
+		for (i=0; i<node_gres_ptr->topo_cnt; i++) {
+			gres_cnt = bit_overlap(job_gres_ptr->
+					       gres_bit_alloc[node_offset],
+					       node_gres_ptr->
+					       topo_gres_bitmap[i]);
+			node_gres_ptr->topo_gres_cnt_alloc[i] -= gres_cnt;
+		}
+	}
+
 	return SLURM_SUCCESS;
 }
 
@@ -2874,6 +3007,188 @@ extern int gres_plugin_job_dealloc(List job_gres_list, List node_gres_list,
 	return rc;
 }
 
+/*
+ * Merge one job's gres allocation into another job's gres allocation.
+ * IN from_job_gres_list - List of gres records for the job being merged
+ *			into another job
+ * IN from_job_node_bitmap - bitmap of nodes for the job being merged into
+ *			another job
+ * IN/OUT to_job_gres_list - List of gres records for the job being merged
+ *			into job
+ * IN to_job_node_bitmap - bitmap of nodes for the job being merged into
+ */
+extern void gres_plugin_job_merge(List from_job_gres_list,
+				  bitstr_t *from_job_node_bitmap,
+				  List to_job_gres_list,
+				  bitstr_t *to_job_node_bitmap)
+{
+	ListIterator gres_iter;
+	gres_state_t *gres_ptr, *gres_ptr2;
+	gres_job_state_t *gres_job_ptr, *gres_job_ptr2;
+	int new_node_cnt;
+	int i_first, i_last, i;
+	int from_inx, to_inx, new_inx;
+	bitstr_t **new_gres_bit_alloc, **new_gres_bit_step_alloc;
+	uint32_t *new_gres_cnt_step_alloc;
+
+	(void) gres_plugin_init();
+	new_node_cnt = bit_set_count(from_job_node_bitmap) +
+		       bit_set_count(to_job_node_bitmap) -
+		       bit_overlap(from_job_node_bitmap, to_job_node_bitmap);
+	i_first = MIN(bit_ffs(from_job_node_bitmap),
+		      bit_ffs(to_job_node_bitmap));
+	i_first = MAX(i_first, 0);
+	i_last  = MAX(bit_fls(from_job_node_bitmap),
+		      bit_fls(to_job_node_bitmap));
+	if (i_last == -1) {
+		error("gres_plugin_job_merge: node_bitmaps are empty");
+		return;
+	}
+
+	slurm_mutex_lock(&gres_context_lock);
+
+	/* Step one - Expand the gres data structures in "to" job */
+	if (!to_job_gres_list)
+		goto step2;
+	gres_iter = list_iterator_create(to_job_gres_list);
+	if (!gres_iter)
+		fatal("list_iterator_create: malloc failure");
+	while ((gres_ptr = (gres_state_t *) list_next(gres_iter))) {
+		gres_job_ptr = (gres_job_state_t *) gres_ptr->gres_data;
+		new_gres_bit_alloc = xmalloc(sizeof(bitstr_t *) *
+					     new_node_cnt);
+		new_gres_bit_step_alloc = xmalloc(sizeof(bitstr_t *) *
+						  new_node_cnt);
+		new_gres_cnt_step_alloc = xmalloc(sizeof(uint32_t) *
+						  new_node_cnt);
+		if (!new_gres_bit_alloc || !new_gres_bit_step_alloc ||
+		    !new_gres_cnt_step_alloc)
+			fatal("malloc failure");
+
+		from_inx = to_inx = new_inx = -1;
+		for (i = i_first; i <= i_last; i++) {
+			bool from_match = false, to_match = false;
+			if (bit_test(to_job_node_bitmap, i)) {
+				to_match = true;
+				to_inx++;
+			}
+			if (bit_test(from_job_node_bitmap, i)) {
+				from_match = true;
+				from_inx++;
+			}
+			if (from_match || to_match)
+				new_inx++;
+			if (to_match) {
+				if (gres_job_ptr->gres_bit_alloc) {
+					new_gres_bit_alloc[new_inx] =
+						gres_job_ptr->
+						gres_bit_alloc[to_inx];
+				}
+				if (gres_job_ptr->gres_bit_step_alloc) {
+					new_gres_bit_step_alloc[new_inx] =
+						gres_job_ptr->
+						gres_bit_step_alloc[to_inx];
+				}
+				if (gres_job_ptr->gres_cnt_step_alloc) {
+					new_gres_cnt_step_alloc[new_inx] =
+						gres_job_ptr->
+						gres_cnt_step_alloc[to_inx];
+				}
+			}
+		}
+		gres_job_ptr->node_cnt = new_node_cnt;
+		xfree(gres_job_ptr->gres_bit_alloc);
+		gres_job_ptr->gres_bit_alloc = new_gres_bit_alloc;
+		xfree(gres_job_ptr->gres_bit_step_alloc);
+		gres_job_ptr->gres_bit_step_alloc = new_gres_bit_step_alloc;
+		xfree(gres_job_ptr->gres_cnt_step_alloc);
+		gres_job_ptr->gres_cnt_step_alloc = new_gres_cnt_step_alloc;
+	}
+	list_iterator_destroy(gres_iter);
+
+	/* Step two - Merge the gres information from the "from" job into the
+	 * existing gres information for the "to" job */
+step2:	if (!from_job_gres_list)
+		goto step3;
+	if (!to_job_gres_list) {
+		to_job_gres_list = list_create(_gres_job_list_delete);
+		if (!to_job_gres_list)
+			fatal("list_create: malloc failure");
+	}
+	gres_iter = list_iterator_create(from_job_gres_list);
+	if (!gres_iter)
+		fatal("list_iterator_create: malloc failure");
+	while ((gres_ptr = (gres_state_t *) list_next(gres_iter))) {
+		gres_job_ptr = (gres_job_state_t *) gres_ptr->gres_data;
+		gres_ptr2 = list_find_first(to_job_gres_list, _gres_find_id,
+					    &gres_ptr->plugin_id);
+		if (gres_ptr2) {
+			gres_job_ptr2 = gres_ptr2->gres_data;
+		} else {
+			gres_ptr2 = xmalloc(sizeof(gres_state_t));
+			gres_job_ptr2 = xmalloc(sizeof(gres_job_state_t));
+			gres_ptr2->plugin_id = gres_ptr->plugin_id;
+			gres_ptr2->gres_data = gres_job_ptr2;
+			gres_job_ptr2->gres_cnt_alloc = gres_job_ptr->
+							gres_cnt_alloc;
+			gres_job_ptr2->node_cnt = new_node_cnt;
+			gres_job_ptr2->gres_bit_alloc = 
+				xmalloc(sizeof(bitstr_t *) * new_node_cnt);
+			gres_job_ptr2->gres_bit_step_alloc = 
+				xmalloc(sizeof(bitstr_t *) * new_node_cnt);
+			gres_job_ptr2->gres_cnt_step_alloc = 
+				xmalloc(sizeof(uint32_t) * new_node_cnt);
+			list_append(to_job_gres_list, gres_ptr2);
+		}
+		from_inx = to_inx = new_inx = -1;
+		for (i = i_first; i <= i_last; i++) {
+			bool from_match = false, to_match = false;
+			if (bit_test(to_job_node_bitmap, i)) {
+				to_match = true;
+				to_inx++;
+			}
+			if (bit_test(from_job_node_bitmap, i)) {
+				from_match = true;
+				from_inx++;
+			}
+			if (from_match || to_match)
+				new_inx++;
+			if (from_match) {
+				if (!gres_job_ptr->gres_bit_alloc) {
+					;
+				} else if (gres_job_ptr2->
+					   gres_bit_alloc[new_inx]) {
+					/* Do not merge GRES allocations on
+					 * a node, just keep original job's */
+#if 0
+					bit_or(gres_job_ptr2->
+					       gres_bit_alloc[new_inx],
+					       gres_job_ptr->
+					       gres_bit_alloc[from_inx]);
+#endif
+				} else {
+					gres_job_ptr2->gres_bit_alloc[new_inx] =
+						gres_job_ptr->
+						gres_bit_alloc[from_inx];
+					gres_job_ptr->
+						gres_bit_alloc
+						[from_inx] = NULL;
+				}
+				if (gres_job_ptr->gres_cnt_step_alloc &&
+				    gres_job_ptr->
+				    gres_cnt_step_alloc[from_inx]) {
+					error("Attempt to merge gres, from "
+					      "job has active steps");
+				}
+			}
+		}
+	}
+	list_iterator_destroy(gres_iter);
+
+step3:	slurm_mutex_unlock(&gres_context_lock);
+	return;
+}
+
 /*
  * Set environment variables as required for a batch job
  * IN/OUT job_env_ptr - environment variable array
@@ -2956,6 +3271,40 @@ static void _job_state_log(void *gres_data, uint32_t job_id, char *gres_name)
 	}
 }
 
+/*
+ * Extract from the job record's gres_list the count of allocated resources of
+ * 	the named gres type.
+ * IN job_gres_list  - job record's gres_list.
+ * IN gres_name_type - the name of the gres type to retrieve the associated
+ *	value from.
+ * RET The value associated with the gres type or NO_VAL if not found.
+ */
+extern uint32_t gres_plugin_get_job_value_by_type(List job_gres_list,
+						  char *gres_name_type)
+{
+	uint32_t gres_val, gres_name_type_id;
+	ListIterator  job_gres_iter;
+	gres_state_t *job_gres_ptr;
+
+	if (job_gres_list == NULL)
+		return NO_VAL;
+
+	gres_name_type_id = _build_id(gres_name_type);
+	gres_val = NO_VAL;
+
+	job_gres_iter = list_iterator_create(job_gres_list);
+	while ((job_gres_ptr = (gres_state_t *) list_next(job_gres_iter))) {
+		if (job_gres_ptr->plugin_id == gres_name_type_id) {
+			gres_val = ((gres_job_state_t*)
+				   (job_gres_ptr->gres_data))->gres_cnt_alloc;
+			break;
+		}
+	}
+	list_iterator_destroy(job_gres_iter);
+
+	return gres_val;
+}
+
 /*
  * Log a job's current gres state
  * IN gres_list - generated by gres_plugin_job_state_validate()
@@ -2988,6 +3337,61 @@ extern void gres_plugin_job_state_log(List gres_list, uint32_t job_id)
 	slurm_mutex_unlock(&gres_context_lock);
 }
 
+extern void gres_plugin_job_state_file(List gres_list, int *gres_bit_alloc,
+				       int *gres_count)
+{
+	int i, j, gres_cnt=0, len, p, found=0;
+	ListIterator gres_iter;
+	gres_state_t *gres_ptr;
+	gres_job_state_t *gres_job_ptr;
+
+	if (gres_list == NULL)
+		return;
+	(void) gres_plugin_init();
+
+	slurm_mutex_lock(&gres_context_lock);
+	gres_iter = list_iterator_create(gres_list);
+	if (!gres_iter)
+		fatal("list_iterator_create: malloc failure");
+	
+	for (j=0; j<gres_context_cnt; j++) {
+		found = 0;
+		list_iterator_reset(gres_iter);
+		while ((gres_ptr = (gres_state_t *) list_next(gres_iter))){
+			if (gres_ptr->plugin_id !=
+			    gres_context[j].plugin_id ) {
+				continue;
+			}
+			found = 1;
+			gres_job_ptr = (gres_job_state_t *) gres_ptr->gres_data;
+			if ((gres_job_ptr != NULL) &&
+			    (gres_job_ptr->node_cnt == 1) &&
+			    (gres_job_ptr->gres_bit_alloc != NULL) &&
+			    (gres_job_ptr->gres_bit_alloc[0] != NULL)) {
+			     	len = bit_size(gres_job_ptr->gres_bit_alloc[0]);
+				for (i=0; i<len; i++) {
+					if (!bit_test(gres_job_ptr->
+						      gres_bit_alloc[0], i))
+						gres_bit_alloc[gres_cnt] = 0;
+					else
+						gres_bit_alloc[gres_cnt] = 1;
+					gres_cnt++;
+				}
+			}
+			break;
+		}
+		if (found == 0) {
+			for (p=0; p<gres_count[j]; p++){
+				gres_bit_alloc[gres_cnt] = 0;
+				gres_cnt++;
+			}
+		}
+	}
+	list_iterator_destroy(gres_iter);
+	slurm_mutex_unlock(&gres_context_lock);
+}
+
+
 static void _step_state_delete(void *gres_data)
 {
 	int i;
@@ -3280,6 +3684,8 @@ List gres_plugin_step_state_extract(List gres_list, int node_index)
 
 	slurm_mutex_lock(&gres_context_lock);
 	gres_iter = list_iterator_create(gres_list);
+	if (!gres_iter)
+		fatal("list_iterator_create: malloc failure");
 	while ((gres_ptr = (gres_state_t *) list_next(gres_iter))) {
 		if (node_index == -1)
 			new_gres_data = _step_state_dup(gres_ptr->gres_data);
@@ -3303,6 +3709,104 @@ List gres_plugin_step_state_extract(List gres_list, int node_index)
 	return new_gres_list;
 }
 
+/*
+ * A job allocation size has changed. Update the job step gres information
+ * bitmaps and other data structures.
+ * IN gres_list - List of Gres records for this step to track usage
+ * IN orig_job_node_bitmap - bitmap of nodes in the original job allocation
+ * IN new_job_node_bitmap  - bitmap of nodes in the new job allocation
+ */
+void gres_plugin_step_state_rebase(List gres_list,
+				   bitstr_t *orig_job_node_bitmap,
+				   bitstr_t *new_job_node_bitmap)
+{
+	ListIterator gres_iter;
+	gres_state_t *gres_ptr;
+	gres_step_state_t *gres_step_ptr;
+	int new_node_cnt;
+	int i_first, i_last, i;
+	int old_inx, new_inx;
+	bitstr_t *new_node_in_use;
+	bitstr_t **new_gres_bit_alloc = NULL;
+
+	if (gres_list == NULL)
+		return;
+
+	(void) gres_plugin_init();
+
+	slurm_mutex_lock(&gres_context_lock);
+	gres_iter = list_iterator_create(gres_list);
+	if (!gres_iter)
+		fatal("list_iterator_create: malloc failure");
+	while ((gres_ptr = (gres_state_t *) list_next(gres_iter))) {
+		gres_step_ptr = (gres_step_state_t *) gres_ptr->gres_data;
+		if (!gres_step_ptr)
+			continue;
+		if (!gres_step_ptr->node_in_use) {
+			error("gres_plugin_step_state_rebase: node_in_use is "
+			      "NULL");
+			continue;
+		}
+		new_node_cnt = bit_set_count(new_job_node_bitmap);
+		i_first = MIN(bit_ffs(orig_job_node_bitmap),
+			      bit_ffs(new_job_node_bitmap));
+		i_first = MAX(i_first, 0);
+		i_last  = MAX(bit_fls(orig_job_node_bitmap),
+			      bit_fls(new_job_node_bitmap));
+		if (i_last == -1) {
+			error("gres_plugin_step_state_rebase: node_bitmaps "
+			      "are empty");
+			continue;
+		}
+		new_node_in_use = bit_alloc(new_node_cnt);
+		if (!new_node_in_use)
+			fatal("bit_alloc: malloc failure");
+
+		old_inx = new_inx = -1;
+		for (i = i_first; i <= i_last; i++) {
+			bool old_match = false, new_match = false;
+			if (bit_test(orig_job_node_bitmap, i)) {
+				old_match = true;
+				old_inx++;
+			}
+			if (bit_test(new_job_node_bitmap, i)) {
+				new_match = true;
+				new_inx++;
+			}
+			if (old_match && new_match) {
+				bit_set(new_node_in_use, new_inx);
+				if (gres_step_ptr->gres_bit_alloc) {
+					if (!new_gres_bit_alloc) {
+						new_gres_bit_alloc =
+							xmalloc(
+							sizeof(bitstr_t *) *
+							new_node_cnt);
+					}
+					new_gres_bit_alloc[new_inx] =
+						gres_step_ptr->gres_bit_alloc[old_inx];
+				}
+			} else if (old_match &&
+				   gres_step_ptr->gres_bit_alloc &&
+				   gres_step_ptr->gres_bit_alloc[old_inx]) {
+				/* Node removed from job allocation,
+				 * release step's resources */
+				bit_free(gres_step_ptr->
+					 gres_bit_alloc[old_inx]);
+			}
+		}
+
+		gres_step_ptr->node_cnt = new_node_cnt;
+		bit_free(gres_step_ptr->node_in_use);
+		gres_step_ptr->node_in_use = new_node_in_use;
+		xfree(gres_step_ptr->gres_bit_alloc);
+		gres_step_ptr->gres_bit_alloc = new_gres_bit_alloc;
+	}
+	list_iterator_destroy(gres_iter);
+	slurm_mutex_unlock(&gres_context_lock);
+
+	return;
+}
+
 /*
  * Pack a step's current gres status, called from slurmctld for save/restore
  * IN gres_list - generated by gres_plugin_step_allocate()
@@ -3603,7 +4107,12 @@ extern uint32_t gres_plugin_step_test(List step_gres_list, List job_gres_list,
 					     node_offset, ignore_alloc,
 					     gres_context[i].gres_name,
 					     job_id, step_id);
-			cpu_cnt = MIN(tmp_cnt, cpu_cnt);
+			if (tmp_cnt != NO_VAL) {
+				if (cpu_cnt == NO_VAL)
+					cpu_cnt = tmp_cnt;
+				else
+					cpu_cnt = MIN(tmp_cnt, cpu_cnt);
+			}
 			break;
 		}
 		if (cpu_cnt == 0)
@@ -3926,3 +4435,59 @@ extern int gres_plugin_step_dealloc(List step_gres_list, List job_gres_list,
 
 	return rc;
 }
+
+extern void gres_plugin_step_state_file(List gres_list, int *gres_bit_alloc, 
+					int *gres_count)
+{
+	int i, j, p, gres_cnt = 0, len, found;
+	ListIterator gres_iter;
+	gres_state_t *gres_ptr;
+	gres_step_state_t *gres_step_ptr;
+
+	if (gres_list == NULL)
+		return;
+	(void) gres_plugin_init();
+
+	slurm_mutex_lock(&gres_context_lock);
+	gres_iter = list_iterator_create(gres_list);
+	if (!gres_iter)
+		fatal("list_iterator_create: malloc failure");
+
+	for (j=0; j<gres_context_cnt; j++) {
+		found = 0;
+		list_iterator_reset(gres_iter);
+		while ((gres_ptr = (gres_state_t *) list_next(gres_iter))){
+			if (gres_ptr->plugin_id !=
+			    gres_context[j].plugin_id) {
+				continue;
+			}
+			found = 1;
+			gres_step_ptr = (gres_step_state_t *) gres_ptr->gres_data;
+			if ((gres_step_ptr != NULL) &&
+			    (gres_step_ptr->node_cnt == 1) &&
+			    (gres_step_ptr->gres_bit_alloc != NULL) &&
+			    (gres_step_ptr->gres_bit_alloc[0] != NULL)) {
+				len = bit_size(gres_step_ptr->gres_bit_alloc[0]);
+				for (i=0; i<len; i++) {
+					 if (!bit_test(gres_step_ptr->
+						       gres_bit_alloc[0], i))
+						 gres_bit_alloc[gres_cnt] = 0;
+					 else
+						 gres_bit_alloc[gres_cnt] = 1;
+					gres_cnt++;
+				}
+			}
+			break;
+		}
+		if (found == 0) {
+			for (p=0; p<gres_count[j]; p++){
+				gres_bit_alloc[gres_cnt] = 0;
+				gres_cnt++;
+			}
+		}
+	}
+
+	list_iterator_destroy(gres_iter);
+	slurm_mutex_unlock(&gres_context_lock);
+}
+
diff --git a/src/common/gres.h b/src/common/gres.h
index ec9b2ac64..fd53b2649 100644
--- a/src/common/gres.h
+++ b/src/common/gres.h
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -39,7 +39,7 @@
 #ifndef _GRES_H
 #define _GRES_H
 
-#include <slurm/slurm.h>
+#include "slurm/slurm.h"
 #include "src/common/bitstring.h"
 #include "src/common/pack.h"
 
@@ -91,7 +91,7 @@ typedef struct gres_node_state {
 
 /* Gres job state as used by slurmctld daemon */
 typedef struct gres_job_state {
-	/* Count of resources needed */
+	/* Count of resources needed per node */
 	uint32_t gres_cnt_alloc;
 
 	/* Resources currently allocated to job on each node */
@@ -107,7 +107,7 @@ typedef struct gres_job_state {
 
 /* Gres job step state as used by slurmctld daemon */
 typedef struct gres_step_state {
-	/* Count of resources needed */
+	/* Count of resources needed per node */
 	uint32_t gres_cnt_alloc;
 
 	/* Resources currently allocated to the job step on each node
@@ -171,6 +171,35 @@ extern int gres_plugin_node_config_load(uint32_t cpu_cnt);
  */
 extern int gres_plugin_node_config_pack(Buf buffer);
 
+/*
+ * Return information about the configured gres devices on the node
+ * OUT dev_path - the devices paths as written on gres.conf file
+ * OUT gres_name - the names of the devices (ex. gpu, nic,..)
+ * IN array_len - count of elements in dev_path and gres_name
+ * RET - number of lines of gres.conf file
+ */
+extern int gres_plugin_node_config_devices_path(char **dev_path,
+						char **gres_name,
+						int array_len);
+
+/*
+ * Provide information about the allocated gres devices for a particular job
+ * IN gres_list - job's allocated gres devices
+ * IN gres_count - count of gres.conf records for each gres name
+ * OUT gres_bit_alloc - the exact devices which are allocated
+ */
+extern void gres_plugin_job_state_file(List gres_list, int *gres_bit_alloc,
+				       int *gres_count);
+
+/*
+ * Provide information about the allocated gres devices for a particular step
+ * IN gres_list - step's allocated gres devices
+ * IN gres_count - count of gres.conf records for each gres name
+ * OUT gres_bit_alloc - the exact devices which are allocated
+ */
+extern void gres_plugin_step_state_file(List gres_list, int *gres_bit_alloc,
+					int *gres_count);
+
 /*
  **************************************************************************
  *                 PLUGIN CALLS FOR SLURMCTLD DAEMON                      *
@@ -370,6 +399,21 @@ extern int gres_plugin_job_dealloc(List job_gres_list, List node_gres_list,
 				   int node_offset, uint32_t job_id,
 				   char *node_name);
 
+/*
+ * Merge one job's gres allocation into another job's gres allocation.
+ * IN from_job_gres_list - List of gres records for the job being merged
+ *			into another job
+ * IN from_job_node_bitmap - bitmap of nodes for the job being merged into
+ *			another job
+ * IN/OUT to_job_gres_list - List of gres records for the job being merged
+ *			into job
+ * IN to_job_node_bitmap - bitmap of nodes for the job being merged into
+ */
+extern void gres_plugin_job_merge(List from_job_gres_list,
+				  bitstr_t *from_job_node_bitmap,
+				  List to_job_gres_list,
+				  bitstr_t *to_job_node_bitmap);
+
 /*
  * Set environment variables as required for a batch job
  * IN/OUT job_env_ptr - environment variable array
@@ -377,6 +421,18 @@ extern int gres_plugin_job_dealloc(List job_gres_list, List node_gres_list,
   */
 extern void gres_plugin_job_set_env(char ***job_env_ptr, List job_gres_list);
 
+
+/*
+ * Extract from the job record's gres_list the count of allocated resources of
+ * 	the named gres type.
+ * IN job_gres_list  - job record's gres_list.
+ * IN gres_name_type - the name of the gres type to retrieve the associated
+ *	value from.
+ * RET The value associated with the gres type or NO_VAL if not found.
+ */
+extern uint32_t gres_plugin_get_job_value_by_type(List job_gres_list,
+						  char *gres_name_type);
+
 /*
  * Log a job's current gres state
  * IN gres_list - generated by gres_plugin_job_state_validate()
@@ -412,6 +468,16 @@ List gres_plugin_step_state_dup(List gres_list);
  */
 List gres_plugin_step_state_extract(List gres_list, int node_index);
 
+/*
+ * A job allocation size has changed. Update the job step gres information
+ * bitmaps and other data structures.
+ * IN gres_list - List of Gres records for this step to track usage
+ * IN orig_job_node_bitmap - bitmap of nodes in the original job allocation
+ * IN new_job_node_bitmap - bitmap of nodes in the new job allocation
+ */
+void gres_plugin_step_state_rebase(List gres_list,
+				   bitstr_t *orig_job_node_bitmap,
+				   bitstr_t *new_job_node_bitmap);
 
 /*
  * Pack a step's current gres status, called from slurmctld for save/restore
@@ -486,5 +552,4 @@ extern int gres_plugin_step_alloc(List step_gres_list, List job_gres_list,
  */
 extern int gres_plugin_step_dealloc(List step_gres_list, List job_gres_list,
 				    uint32_t job_id, uint32_t step_id);
-
 #endif /* !_GRES_H */
diff --git a/src/common/hostlist.c b/src/common/hostlist.c
index c624db7ee..d02bdd6be 100644
--- a/src/common/hostlist.c
+++ b/src/common/hostlist.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -67,6 +67,7 @@
 #include <ctype.h>
 #include <sys/param.h>
 #include <unistd.h>
+#include <slurm/slurmdb.h>
 
 #include "src/common/hostlist.h"
 #include "src/common/log.h"
@@ -80,15 +81,20 @@
  * Define slurm-specific aliases for use by plugins, see slurm_xlator.h
  * for details.
  */
+strong_alias(hostlist_create_dims,	slurm_hostlist_create_dims);
 strong_alias(hostlist_create,		slurm_hostlist_create);
 strong_alias(hostlist_copy,		slurm_hostlist_copy);
 strong_alias(hostlist_count,		slurm_hostlist_count);
 strong_alias(hostlist_delete,		slurm_hostlist_delete);
 strong_alias(hostlist_delete_host,	slurm_hostlist_delete_host);
 strong_alias(hostlist_delete_nth,	slurm_hostlist_delete_nth);
+strong_alias(hostlist_deranged_string_dims,
+	                                slurm_hostlist_deranged_string_dims);
 strong_alias(hostlist_deranged_string,	slurm_hostlist_deranged_string);
 strong_alias(hostlist_deranged_string_malloc,
 					slurm_hostlist_deranged_string_malloc);
+strong_alias(hostlist_deranged_string_xmalloc_dims,
+	     slurm_hostlist_deranged_string_xmalloc_dims);
 strong_alias(hostlist_deranged_string_xmalloc,
 					slurm_hostlist_deranged_string_xmalloc);
 strong_alias(hostlist_destroy,		slurm_hostlist_destroy);
@@ -102,11 +108,16 @@ strong_alias(hostlist_nth,		slurm_hostlist_nth);
 strong_alias(hostlist_pop,		slurm_hostlist_pop);
 strong_alias(hostlist_pop_range,	slurm_hostlist_pop_range);
 strong_alias(hostlist_push,		slurm_hostlist_push);
+strong_alias(hostlist_push_host_dims,	slurm_hostlist_push_host_dims);
 strong_alias(hostlist_push_host,	slurm_hostlist_push_host);
 strong_alias(hostlist_push_list,	slurm_hostlist_push_list);
+strong_alias(hostlist_ranged_string_dims,
+	                                slurm_hostlist_ranged_string_dims);
 strong_alias(hostlist_ranged_string,	slurm_hostlist_ranged_string);
 strong_alias(hostlist_ranged_string_malloc,
 					slurm_hostlist_ranged_string_malloc);
+strong_alias(hostlist_ranged_string_xmalloc_dims,
+	     slurm_hostlist_ranged_string_xmalloc_dims);
 strong_alias(hostlist_ranged_string_xmalloc,
 					slurm_hostlist_ranged_string_xmalloc);
 strong_alias(hostlist_remove,		slurm_hostlist_remove);
@@ -174,10 +185,10 @@ extern void * lsd_nomem_error(char *file, int line, char *mesg);
 #define HOSTLIST_CHUNK    16
 
 /* max host range: anything larger will be assumed to be an error */
-#define MAX_RANGE    16384    /* 16K Hosts */
+#define MAX_RANGE    (64*1024)    /* 64K Hosts */
 
 /* max number of ranges that will be processed between brackets */
-#define MAX_RANGES    12288    /* 12K Ranges */
+#define MAX_RANGES   (12*1024)    /* 12K Ranges */
 
 /* size of internal hostname buffer (+ some slop), hostnames will probably
  * be truncated if longer than MAXHOSTNAMELEN */
@@ -279,7 +290,6 @@ struct _range {
 
 /* Multi-dimension system stuff here */
 char *alpha_num = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
-enum {A, B, C, D};
 
 /* logic for block node description */
 
@@ -288,7 +298,7 @@ enum {A, B, C, D};
  * the maximum sized array for each dimension.  This way we can be
  * prepared for any size coming in.
  */
-static bool grid[HIGHEST_BASE*HIGHEST_BASE*HIGHEST_BASE*HIGHEST_BASE];
+static bool grid[HIGHEST_BASE*HIGHEST_BASE*HIGHEST_BASE*HIGHEST_BASE*HIGHEST_BASE];
 
 static int grid_start[HIGHEST_DIMENSIONS];
 static int grid_end[HIGHEST_DIMENSIONS];
@@ -302,31 +312,30 @@ static int _tell_if_used(int dim, int curr,
 			 int *start,
 			 int *end,
 			 int *last,
-			 int *found);
-static int _get_next_box(int *start,
-			 int *end);
-static int _get_boxes(char *buf, int max_len);
+			 int *found, int dims);
+static int _get_next_box(int *start, int *end, int dims);
+static int _get_boxes(char *buf, int max_len, int dims, int brackets);
 static void _set_box_in_grid(int dim, int curr,
 			     int *start,
 			     int *end,
-			     bool value);
+			     bool value, int dims);
 static int _add_box_ranges(int dim,  int curr,
 			   int *start,
 			   int *end,
 			   int *pos,
 			   struct _range *ranges,
-			   int len, int *count);
+			   int len, int *count, int dims);
 static void _set_min_max_of_grid(int dim, int curr,
 				 int *start,
 				 int *end,
 				 int *min,
 				 int *max,
-				 int *pos);
-static void _set_grid(unsigned long start, unsigned long end);
+				 int *pos, int dims);
+static void _set_grid(unsigned long start, unsigned long end, int dims);
 static bool _test_box_in_grid(int dim, int curr,
 			      int *start,
-			      int *end);
-static bool _test_box(int *start, int *end);
+			      int *end, int dims);
+static bool _test_box(int *start, int *end, int dims);
 
 /* ------[ static function prototypes ]------ */
 
@@ -336,7 +345,7 @@ static char * _next_tok(char *, char **);
 static int    _zero_padded(unsigned long, int);
 static int    _width_equiv(unsigned long, int *, unsigned long, int *);
 
-static int           host_prefix_end(const char *);
+static int           host_prefix_end(const char *, int dims);
 static hostname_t    hostname_create(const char *);
 static void          hostname_destroy(hostname_t);
 static int           hostname_suffix_is_valid(hostname_t);
@@ -361,11 +370,12 @@ static int           hostrange_join(hostrange_t, hostrange_t);
 static hostrange_t   hostrange_intersect(hostrange_t, hostrange_t);
 static int           hostrange_hn_within(hostrange_t, hostname_t);
 static size_t        hostrange_to_string(hostrange_t hr, size_t, char *,
-					 char *);
-static size_t        hostrange_numstr(hostrange_t, size_t, char *);
+					 char *, int);
+static size_t        hostrange_numstr(hostrange_t, size_t, char *, int);
 
 static hostlist_t  hostlist_new(void);
-static hostlist_t _hostlist_create_bracketed(const char *, char *, char *);
+static hostlist_t _hostlist_create_bracketed(const char *, char *,
+					     char *, int);
 static int         hostlist_resize(hostlist_t, size_t);
 static int         hostlist_expand(hostlist_t);
 static int         hostlist_push_range(hostlist_t, hostrange_t);
@@ -375,7 +385,7 @@ static int         hostlist_insert_range(hostlist_t, hostrange_t, int);
 static void        hostlist_delete_range(hostlist_t, int n);
 static void        hostlist_coalesce(hostlist_t hl);
 static void        hostlist_collapse(hostlist_t hl);
-static hostlist_t _hostlist_create(const char *, char *, char *);
+static hostlist_t _hostlist_create(const char *, char *, char *, int);
 static void        hostlist_shift_iterators(hostlist_t, int, int, int);
 static int        _attempt_range_join(hostlist_t, int);
 static int        _is_bracket_needed(hostlist_t, int);
@@ -593,54 +603,47 @@ static int _width_equiv(unsigned long n, int *wn, unsigned long m, int *wm)
 /*
  * return the location of the last char in the hostname prefix
  */
-static int host_prefix_end(const char *hostname)
+static int host_prefix_end(const char *hostname, int dims)
 {
-	int idx, len;
-	int dims = slurmdb_setup_cluster_dims();
+	int idx;
 
 	assert(hostname != NULL);
 
-	len = strlen(hostname);
+	if (!dims)
+		dims = slurmdb_setup_cluster_name_dims();
 
-	if(dims > 1) {
-		idx = len - 1;
+	idx = strlen(hostname) - 1;
 
-		while (idx >= 0) {
-			if (((hostname[idx] >= '0')
-			     && (hostname[idx] <= '9')) ||
-			    ((hostname[idx] >= 'A') && (hostname[idx] <= 'Z')))
-				idx--;
-			else
-				break;
-		}
+	if (dims > 1) {
+		while ((idx >= 0) &&
+		       (isdigit((int)hostname[idx]) ||
+		        isupper((int)hostname[idx])))
+			idx--;
 	} else {
-		if (len < 1)
-			return -1;
-		idx = len - 1;
-
-		while (idx >= 0 && isdigit((char) hostname[idx]))
+		while ((idx >= 0) && isdigit((int)hostname[idx]))
 			idx--;
 	}
 
 	return idx;
 }
 
-/*
- * create a hostname_t object from a string hostname
- */
-static hostname_t hostname_create(const char *hostname)
+static hostname_t hostname_create_dims(const char *hostname, int dims)
 {
 	hostname_t hn = NULL;
 	char *p = '\0';
 	int idx = 0;
-	int hostlist_base = hostlist_get_base();
-	int dims = slurmdb_setup_cluster_dims();
+	int hostlist_base;
+
 	assert(hostname != NULL);
 
+	if (!dims)
+		dims = slurmdb_setup_cluster_name_dims();
+	hostlist_base = hostlist_get_base(dims);
+
 	if (!(hn = (hostname_t) malloc(sizeof(*hn))))
   		out_of_memory("hostname create");
 
-	idx = host_prefix_end(hostname);
+	idx = host_prefix_end(hostname, dims);
 
 	if (!(hn->hostname = strdup(hostname))) {
 		free(hn);
@@ -682,6 +685,15 @@ static hostname_t hostname_create(const char *hostname)
 
 	return hn;
 }
+/*
+ * create a hostname_t object from a string hostname
+ */
+static hostname_t hostname_create(const char *hostname)
+{
+	int dims = slurmdb_setup_cluster_name_dims();
+
+	return hostname_create_dims(hostname, dims);
+}
 
 /* free a hostname object
  */
@@ -942,13 +954,15 @@ static char *hostrange_pop(hostrange_t hr)
 {
 	size_t size = 0;
 	char *host = NULL;
-	int dims = slurmdb_setup_cluster_dims();
+	int dims = slurmdb_setup_cluster_name_dims();
 
 	assert(hr != NULL);
 
 	if (hr->singlehost) {
 		hr->lo++;    /* effectively set count == 0 */
 		host = strdup(hr->prefix);
+		if (host == NULL)
+			out_of_memory("hostrange pop");
 	} else if (hostrange_count(hr) > 0) {
 		size = strlen(hr->prefix) + hr->width + 16;
 		if (!(host = (char *) malloc(size * sizeof(char))))
@@ -961,12 +975,12 @@ static char *hostrange_pop(hostrange_t hr)
 			hostlist_parse_int_to_array(hr->hi, coord, dims, 0);
 
 			len = snprintf(host, size, "%s", hr->prefix);
-			for (i2 = 0; i2 < dims; i2++) {
-				if (len <= size)
-					host[len++] = alpha_num[coord[i2]];
+			if (len >= 0 && len + dims < size) {
+				while (i2 < dims)
+					host[len++] = alpha_num[coord[i2++]];
+				host[len] = '\0';
 			}
 			hr->hi--;
-			host[len] = '\0';
 		} else {
 			snprintf(host, size, "%s%0*lu", hr->prefix,
 				 hr->width, hr->hi--);
@@ -981,7 +995,7 @@ static char *hostrange_shift(hostrange_t hr)
 {
 	size_t size = 0;
 	char *host = NULL;
-	int dims = slurmdb_setup_cluster_dims();
+	int dims = slurmdb_setup_cluster_name_dims();
 
 	assert(hr != NULL);
 
@@ -1001,12 +1015,12 @@ static char *hostrange_shift(hostrange_t hr)
 			hostlist_parse_int_to_array(hr->lo, coord, dims, 0);
 
 			len = snprintf(host, size, "%s", hr->prefix);
-			for (i2 = 0; i2 < dims; i2++) {
-				if (len <= size)
-					host[len++] = alpha_num[coord[i2]];
+			if (len >= 0 && len + dims < size) {
+				while (i2 < dims)
+					host[len++] = alpha_num[coord[i2++]];
+				host[len] = '\0';
 			}
 			hr->lo++;
-			host[len] = '\0';
 		} else {
 			snprintf(host, size, "%s%0*lu", hr->prefix,
 				 hr->width, hr->lo++);
@@ -1143,73 +1157,78 @@ static int hostrange_hn_within(hostrange_t hr, hostname_t hn)
  * writing at most n chars including NUL termination
  */
 static size_t
-hostrange_to_string(hostrange_t hr, size_t n, char *buf, char *separator)
+hostrange_to_string(hostrange_t hr, size_t n, char *buf,
+		    char *separator, int dims)
 {
 	unsigned long i;
-	int truncated = 0;
-	int len = 0;
+	int ret, len = 0;
 	char sep = separator == NULL ? ',' : separator[0];
-	int dims = slurmdb_setup_cluster_dims();
+
+	if (!dims)
+		dims = slurmdb_setup_cluster_name_dims();
 
 	if (n == 0)
 		return 0;
 
 	assert(hr != NULL);
 
-	if (hr->singlehost)
-		return snprintf(buf, n, "%s", hr->prefix);
+	if (hr->singlehost) {
+		ret = snprintf(buf, n, "%s", hr->prefix);
+		if (ret < 0 || ret >= n)
+			goto truncated;
+		return ret;
+	}
 
 	for (i = hr->lo; i <= hr->hi; i++) {
-		size_t m = (n - len) <= n ? n - len : 0; /* check for < 0 */
-		int ret = 0;
+		if (i > hr->lo)
+			buf[len++] = sep;
+		if (len >= n)
+			goto truncated;
+
 		if ((dims > 1) && (hr->width == dims)) {
 			int i2 = 0;
 			int coord[dims];
 
 			hostlist_parse_int_to_array(i, coord, dims, 0);
-			ret = snprintf(buf + len, m, "%s", hr->prefix);
-			for (i2 = 0; i2 < dims; i2++) {
-				if (len + ret < n)
-					buf[len+ret] = alpha_num[coord[i2]];
-				ret++;
-			}
+			ret = snprintf(buf + len, n - len, "%s", hr->prefix);
+			if (ret < 0 || (len += ret) >= n || len + dims >= n)
+				goto truncated;
+			while (i2 < dims)
+				buf[len++] = alpha_num[coord[i2++]];
 		} else {
-			ret = snprintf(buf + len, m, "%s%0*lu",
+			ret = snprintf(buf + len, n - len, "%s%0*lu",
 				       hr->prefix, hr->width, i);
+			if (ret < 0 || (len += ret) >= n)
+				goto truncated;
 		}
-
-		if (ret < 0 || ret >= m) {
-			len = n;
-			truncated = 1;
-			break;
-		}
-		len+=ret;
-		buf[len++] = sep;
 	}
 
-	if (truncated) {
-		buf[n-1] = '\0';
-		return -1;
-	} else {
-		/* back up over final separator */
-		buf[--len] = '\0';
-		return len;
-	}
+	buf[len] = '\0';
+	return len;
+truncated:
+	buf[n-1] = '\0';
+	return -1;
 }
 
 /* Place the string representation of the numeric part of hostrange into buf
- * writing at most n chars including NUL termination.
+ * writing at most n chars including NUL termination. The width argument
+ * controls the number of leading zeroes.
  */
-static size_t hostrange_numstr(hostrange_t hr, size_t n, char *buf)
+static size_t hostrange_numstr(hostrange_t hr, size_t n, char *buf, int width)
 {
 	int len = 0;
-	int dims = slurmdb_setup_cluster_dims();
+	int dims = slurmdb_setup_cluster_name_dims();
 
 	assert(buf != NULL);
 	assert(hr != NULL);
 
 	if (hr->singlehost || n == 0)
 		return 0;
+	if (n <= dims)
+		return -1;
+
+	if (width < 0 || width > hr->width)
+		width = hr->width;
 
 	if ((dims > 1) && (hr->width == dims)) {
 		int i2 = 0;
@@ -1217,16 +1236,18 @@ static size_t hostrange_numstr(hostrange_t hr, size_t n, char *buf)
 
 		hostlist_parse_int_to_array(hr->lo, coord, dims, 0);
 
-		for (i2 = 0; i2 < dims; i2++) {
-			if (len <= n)
-				buf[len++] = alpha_num[coord[i2]];
-		}
+		while (i2 < dims)
+			buf[len++] = alpha_num[coord[i2++]];
 		buf[len] = '\0';
 	} else {
-		len = snprintf(buf, n, "%0*lu", hr->width, hr->lo);
+		len = snprintf(buf, n, "%0*lu", hr->width - width, hr->lo);
+		if (len < 0 || len >= n)
+			return -1;
 	}
 
-	if ((len >= 0) && (len < n) && (hr->lo < hr->hi)) {
+	if (hr->lo < hr->hi) {
+		if (n < len + dims + 2)	/* '-' plus 'dims' digits, plus '\0' */
+			return -1;
 		if ((dims > 1) && (hr->width == dims)) {
 			int i2 = 0;
 			int coord[dims];
@@ -1234,18 +1255,14 @@ static size_t hostrange_numstr(hostrange_t hr, size_t n, char *buf)
 			hostlist_parse_int_to_array(hr->hi, coord, dims, 0);
 
 			buf[len++] = '-';
-			for (i2 = 0; i2 < dims; i2++) {
-				if (len <= n)
-					buf[len++] = alpha_num[coord[i2]];
-			}
+			while (i2 < dims)
+				buf[len++] = alpha_num[coord[i2++]];
 			buf[len] = '\0';
 		} else {
 			int len2 = snprintf(buf + len, n - len, "-%0*lu",
-					    hr->width, hr->hi);
-			if (len2 < 0)
-				len = -1;
-			else
-				len += len2;
+					    hr->width - width, hr->hi);
+			if (len2 < 0 || (len += len2) >= n)
+				return -1;
 		}
 	}
 
@@ -1285,7 +1302,7 @@ static hostlist_t hostlist_new(void)
 fail2:
 	free(new);
 fail1:
-	out_of_memory("hostlist_create");
+	out_of_memory("hostlist_new");
 }
 
 
@@ -1451,7 +1468,8 @@ static void hostlist_delete_range(hostlist_t hl, int n)
  * See comment in hostlist.h:hostlist_create() for more info on
  * the different choices for hostlist notation.
  */
-hostlist_t _hostlist_create(const char *hostlist, char *sep, char *r_op)
+hostlist_t _hostlist_create(const char *hostlist, char *sep, char *r_op,
+			    int dims)
 {
 	char *str, *orig;
 	char *tok, *cur;
@@ -1459,7 +1477,7 @@ hostlist_t _hostlist_create(const char *hostlist, char *sep, char *r_op)
 	char prefix[256] = "";
 	int pos = 0;
 	int error = 0;
-	int hostlist_base = hostlist_get_base();
+	int hostlist_base;
 	char range_op = r_op[0];/* XXX support > 1 char range ops in future? */
 
 	hostlist_t new = hostlist_new();
@@ -1467,9 +1485,10 @@ hostlist_t _hostlist_create(const char *hostlist, char *sep, char *r_op)
 	if (hostlist == NULL)
 		return new;
 
-	if(slurmdb_setup_cluster_dims() > 1)
+	if (dims > 1)
 		fatal("WANT_RECKLESS_HOSTRANGE_EXPANSION does not "
-		      "work on Multidimentional systems!!!!");
+		      "work on multi-dimensional systems!!!!");
+	hostlist_base = hostlist_get_base(1);
 
 	orig = str = strdup(hostlist);
 
@@ -1479,7 +1498,7 @@ hostlist_t _hostlist_create(const char *hostlist, char *sep, char *r_op)
 
 	/* Use hostlist_create_bracketed if we see "[" */
 	if (strchr(str, '[') != NULL)
-		return _hostlist_create_bracketed(hostlist, sep, r_op);
+		return _hostlist_create_bracketed(hostlist, sep, r_op, dims);
 
 	while ((tok = _next_tok(sep, &str)) != NULL) {
 
@@ -1574,7 +1593,7 @@ hostlist_t _hostlist_create(const char *hostlist, char *sep, char *r_op)
 			error = 1;
 
 		if (error) {    /* assume this is not a range on any error */
-			hostlist_push_host(new, cur);
+			hostlist_push_host_dims(new, cur, dims);
 		} else {
 			if (high < low)
 				high = low;
@@ -1593,9 +1612,10 @@ done:
 
 #else                /* !WANT_RECKLESS_HOSTRANGE_EXPANSION */
 
-hostlist_t _hostlist_create(const char *hostlist, char *sep, char *r_op)
+hostlist_t _hostlist_create(const char *hostlist, char *sep,
+			    char *r_op, int dims)
 {
-	return _hostlist_create_bracketed(hostlist, sep, r_op);
+	return _hostlist_create_bracketed(hostlist, sep, r_op, dims);
 }
 
 #endif                /* WANT_RECKLESS_HOSTRANGE_EXPANSION */
@@ -1603,9 +1623,8 @@ hostlist_t _hostlist_create(const char *hostlist, char *sep, char *r_op)
 
 
 static int _parse_box_range(char *str, struct _range *ranges,
- 			    int len, int *count)
+ 			    int len, int *count, int dims)
 {
-	int dims = slurmdb_setup_cluster_dims();
 	int start[dims], end[dims],
 		pos[dims];
 	char coord[dims+1];
@@ -1645,19 +1664,18 @@ static int _parse_box_range(char *str, struct _range *ranges,
 	}
 /* 	info("adding ranges in %sx%s", coord, coord2); */
 
-	return _add_box_ranges(A, 0, start, end, pos, ranges, len, count);
+	return _add_box_ranges(0, 0, start, end, pos, ranges, len, count, dims);
 }
 
 /* Grab a single range from str
  * returns 1 if str contained a valid number or range,
  *         0 if conversion of str to a range failed.
  */
-static int _parse_single_range(const char *str, struct _range *range)
+static int _parse_single_range(const char *str, struct _range *range, int dims)
 {
 	char *p, *q;
 	char *orig = strdup(str);
-	int hostlist_base = hostlist_get_base();
-	int dims = slurmdb_setup_cluster_dims();
+	int hostlist_base = hostlist_get_base(dims);
 
 	if (!orig)
 		seterrno_ret(ENOMEM, 0);
@@ -1717,11 +1735,11 @@ error:
  *
  * Return number of ranges created, or -1 on error.
  */
-static int _parse_range_list(char *str, struct _range *ranges, int len)
+static int _parse_range_list(char *str, struct _range *ranges,
+			     int len, int dims)
 {
 	char *p;
 	int count = 0;
-	int dims = slurmdb_setup_cluster_dims();
 
 	while (str) {
 		if (count == len) {
@@ -1737,10 +1755,10 @@ static int _parse_range_list(char *str, struct _range *ranges, int len)
 		if ((dims > 1) &&
 		    (str[dims] == 'x') &&
 		    (strlen(str) == (dims * 2 + 1))) {
-			if (!_parse_box_range(str, ranges, len, &count))
+			if (!_parse_box_range(str, ranges, len, &count, dims))
 				return -1;
 		} else {
-			if (!_parse_single_range(str, &ranges[count++]))
+			if (!_parse_single_range(str, &ranges[count++], dims))
 				return -1;
 		}
 		str = p;
@@ -1753,7 +1771,7 @@ static int _parse_range_list(char *str, struct _range *ranges, int len)
  * RET 0 on success, -1 on failure (invalid prefix) */
 static int
 _push_range_list(hostlist_t hl, char *prefix, struct _range *range,
-		 int n)
+		 int n, int dims)
 {
 	int i, k, nr;
 	char *p, *q;
@@ -1769,7 +1787,7 @@ _push_range_list(hostlist_t hl, char *prefix, struct _range *range,
 		*q++ = '\0';
 		if (strrchr(tmp_prefix, '[') != NULL)
 			return -1;	/* third range is illegal */
-		nr = _parse_range_list(p, prefix_range, MAX_RANGES);
+		nr = _parse_range_list(p, prefix_range, MAX_RANGES, dims);
 		if (nr < 0)
 			return -1;	/* bad numeric expression */
 		for (i = 0; i < nr; i++) {
@@ -1809,7 +1827,8 @@ _push_range_list(hostlist_t hl, char *prefix, struct _range *range,
  * detection of ranges and compressed lists
  */
 static hostlist_t
-_hostlist_create_bracketed(const char *hostlist, char *sep, char *r_op)
+_hostlist_create_bracketed(const char *hostlist, char *sep,
+			   char *r_op, int dims)
 {
 	hostlist_t new = hostlist_new();
 	struct _range ranges[MAX_RANGES];
@@ -1835,10 +1854,12 @@ _hostlist_create_bracketed(const char *hostlist, char *sep, char *r_op)
 				if ((q[1] != ',') && (q[1] != '\0'))
 					goto error;
 				*q = '\0';
-				nr = _parse_range_list(p, ranges, MAX_RANGES);
+				nr = _parse_range_list(p, ranges,
+						       MAX_RANGES, dims);
 				if (nr < 0)
 					goto error;
-				if (_push_range_list(new, prefix, ranges, nr))
+				if (_push_range_list(
+					    new, prefix, ranges, nr, dims))
 					goto error;
 			} else {
 				/* The hostname itself contains a '['
@@ -1848,14 +1869,15 @@ _hostlist_create_bracketed(const char *hostlist, char *sep, char *r_op)
 				 * the end. */
 				strcat(cur_tok, "]");
 				if(prefix && prefix[0])
-					hostlist_push_host(new, cur_tok);
+					hostlist_push_host_dims(
+						new, cur_tok, dims);
 				else
-					hostlist_push_host(new, p);
+					hostlist_push_host_dims(new, p, dims);
 
 			}
 
 		} else
-			hostlist_push_host(new, cur_tok);
+			hostlist_push_host_dims(new, cur_tok, dims);
 	}
 
 	free(orig);
@@ -1870,9 +1892,17 @@ error:
 
 
 
+hostlist_t hostlist_create_dims(const char *str, int dims)
+{
+	if (!dims)
+		dims = slurmdb_setup_cluster_name_dims();
+	return _hostlist_create(str, "\t, ", "-", dims);
+}
+
 hostlist_t hostlist_create(const char *str)
 {
-	return _hostlist_create(str, "\t, ", "-");
+	int dims = slurmdb_setup_cluster_name_dims();
+	return hostlist_create_dims(str, dims);
 }
 
 
@@ -1940,7 +1970,7 @@ int hostlist_push(hostlist_t hl, const char *hosts)
 	return retval;
 }
 
-int hostlist_push_host(hostlist_t hl, const char *str)
+int hostlist_push_host_dims(hostlist_t hl, const char *str, int dims)
 {
 	hostrange_t hr;
 	hostname_t hn;
@@ -1948,7 +1978,10 @@ int hostlist_push_host(hostlist_t hl, const char *str)
 	if (!str || !hl)
 		return 0;
 
-	hn = hostname_create(str);
+	if (!dims)
+		dims = slurmdb_setup_cluster_name_dims();
+
+	hn = hostname_create_dims(str, dims);
 
 	if (hostname_suffix_is_valid(hn))
 		hr = hostrange_create(hn->prefix, hn->num, hn->num,
@@ -1964,6 +1997,12 @@ int hostlist_push_host(hostlist_t hl, const char *str)
 	return 1;
 }
 
+int hostlist_push_host(hostlist_t hl, const char *str)
+{
+	int dims = slurmdb_setup_cluster_name_dims();
+	return hostlist_push_host_dims(hl, str, dims);
+}
+
 int hostlist_push_list(hostlist_t h1, hostlist_t h2)
 {
 	int i, n = 0;
@@ -2175,24 +2214,29 @@ static char *
 _hostrange_string(hostrange_t hr, int depth)
 {
 	char buf[MAXHOSTNAMELEN + 16];
-	int  len = snprintf(buf, MAXHOSTNAMELEN + 15, "%s", hr->prefix);
-	int dims = slurmdb_setup_cluster_dims();
+	const int size = sizeof(buf);
+	int  len = snprintf(buf, size, "%s", hr->prefix);
+	int dims = slurmdb_setup_cluster_name_dims();
+
+	if (len < 0 || len + dims >= size)
+		return NULL;
 
 	if (!hr->singlehost) {
 		if ((dims > 1) && (hr->width == dims)) {
 			int i2 = 0;
 			int coord[dims];
 
-			hostlist_parse_int_to_array(hr->lo + depth, coord, dims, 0);
+			hostlist_parse_int_to_array(
+				hr->lo + depth, coord, dims, 0);
 
-			for (i2 = 0; i2 < dims; i2++) {
-				if (len <= MAXHOSTNAMELEN + 15)
-					buf[len++] = alpha_num[coord[i2]];
-			}
+			while (i2 < dims)
+				buf[len++] = alpha_num[coord[i2++]];
 			buf[len] = '\0';
 		} else {
-			snprintf(buf+len, MAXHOSTNAMELEN+15 - len, "%0*lu",
-				 hr->width, hr->lo + depth);
+			len = snprintf(buf + len, size - len, "%0*lu",
+				       hr->width, hr->lo + depth);
+			if (len < 0 || len >= size)
+				return NULL;
 		}
 	}
 	return strdup(buf);
@@ -2465,103 +2509,65 @@ char *hostlist_deranged_string_malloc(hostlist_t hl)
 	return buf;
 }
 
-char *hostlist_deranged_string_xmalloc(hostlist_t hl)
+char *hostlist_deranged_string_xmalloc_dims(hostlist_t hl, int dims)
 {
 	int buf_size = 8192;
 	char *buf = xmalloc(buf_size);
-	while (hostlist_deranged_string(hl, buf_size, buf) < 0) {
+
+	if (!dims)
+		dims = slurmdb_setup_cluster_name_dims();
+
+	while (hostlist_deranged_string_dims(hl, buf_size, buf, dims) < 0) {
 		buf_size *= 2;
 		xrealloc(buf, buf_size);
 	}
 	return buf;
 }
 
-ssize_t hostlist_deranged_string(hostlist_t hl, size_t n, char *buf)
+char *hostlist_deranged_string_xmalloc(hostlist_t hl)
+{
+	int dims = slurmdb_setup_cluster_name_dims();
+	return hostlist_deranged_string_xmalloc_dims(hl, dims);
+}
+
+ssize_t hostlist_deranged_string_dims(
+	hostlist_t hl, size_t n, char *buf, int dims)
 {
 	int i;
-	int len = 0;
-	int truncated = 0;
+	int len = 0, ret;
 
 	LOCK_HOSTLIST(hl);
-	for (i = 0; i < hl->nranges; i++) {
-		size_t m = (n - len) <= n ? n - len : 0;
-		int ret = hostrange_to_string(hl->hr[i], m, buf + len, ",");
-		if (ret < 0 || ret > m) {
-			len = n;
-			truncated = 1;
-			break;
-		}
-		len+=ret;
-		buf[len++] = ',';
+	for (i = 0; i < hl->nranges && len < n; i++) {
+		if (i)
+			buf[len++] = ',';
+		if (len >= n)
+			goto truncated;
+		ret = hostrange_to_string(hl->hr[i], n - len, buf + len, ",", dims);
+		if (ret < 0)
+			goto truncated;
+		len += ret;
 	}
 	UNLOCK_HOSTLIST(hl);
-
-	buf[len > 0 ? --len : 0] = '\0';
-	if (len == n)
-		truncated = 1;
-
-	return truncated ? -1 : len;
+	return len;
+truncated:
+	UNLOCK_HOSTLIST(hl);
+	buf[n-1] = '\0';
+	return -1;
 }
 
-int hostlist_get_base()
+ssize_t hostlist_deranged_string(hostlist_t hl, size_t n, char *buf)
 {
-	int hostlist_base;
-
-	if(slurmdb_setup_cluster_dims() > 1)
-		hostlist_base = 36;
-	else
-		hostlist_base = 10;
-
-	return hostlist_base;
+	int dims = slurmdb_setup_cluster_name_dims();
+	return hostlist_deranged_string_dims(hl, n, buf, dims);
 }
 
-
-void hostlist_parse_int_to_array(int in, int *out, int dims, int hostlist_base)
+/* convert 'in' polynomial of base 'base' to 'out' array of 'dim' dimensions */
+void hostlist_parse_int_to_array(int in, int *out, int dims, int base)
 {
-	int a;
-
-	static int my_start_pow_minus = 0;
-	static int my_start_pow = 0;
-	static int last_dims = 0;
-        int my_pow_minus = my_start_pow_minus;
-	int my_pow = my_start_pow;
-
-	if(!hostlist_base)
-		hostlist_base = hostlist_get_base();
-
-	if(!my_start_pow || (last_dims != dims)) {
-		/* this will never change so just calculate it once */
-		my_start_pow = 1;
-
-		/* To avoid having to use the pow function and include
-		   the math lib everywhere just do this. */
-		for(a = 0; a<dims; a++)
-			my_start_pow *= hostlist_base;
+	int hostlist_base = base ? base : hostlist_get_base(dims);
 
-		my_pow = my_start_pow;
-		my_pow_minus = my_start_pow_minus =
-			my_start_pow / hostlist_base;
-		last_dims = dims;
-	}
-
-	for(a = 0; a<dims; a++) {
-		out[a] = (int)in % my_pow;
-		/* This only needs to be done until we get a 0 here
-		   meaning we are on the last dimension. This avoids
-		   dividing by 0. */
-		if(dims - a) {
-			out[a] /= my_pow_minus;
-			/* set this up for the next dimension */
-			my_pow = my_pow_minus;
-			my_pow_minus /= hostlist_base;
-		}
-		if(out[a] < 0) {
-			error("Dim %d returned negative %d from %d %d %d",
-			      a, out[a], in, my_pow, my_pow_minus);
-			xassert(0);
-			out[a] = 0;
-		}
-	}
+	for ( ; --dims >= 0; in /= hostlist_base)
+		out[dims] = in % hostlist_base;
 }
 
 /* return true if a bracket is needed for the range at i in hostlist hl */
@@ -2587,39 +2593,51 @@ _get_bracketed_list(hostlist_t hl, int *start, const size_t n, char *buf)
 	int i = *start;
 	int m, len = 0;
 	int bracket_needed = _is_bracket_needed(hl, i);
+	int zeropad = 0;
+	uint32_t cluster_flags = slurmdb_setup_cluster_flags();
 
-	len = snprintf(buf, n, "%s", hr[i]->prefix);
+	if (cluster_flags & CLUSTER_FLAG_CRAYXT) {
+		/*
+		 * Find minimum common zero-padding prefix. Cray has nid%05u
+		 * syntax, factoring this out makes host strings much shorter.
+		 */
+		zeropad = _zero_padded(hr[i]->hi, hr[i]->width);
+
+		/* Find the minimum common zero-padding prefix. */
+		for (m = i + 1; zeropad && m < hl->nranges; m++) {
+			int pad = 0;
 
-	if ((len < 0) || (len > n))
-		return n; /* truncated, buffer filled */
+			if (!hostrange_within_range(hr[m], hr[m-1]))
+				break;
+			if (hl->hr[m]->width == hl->hr[m-1]->width)
+				pad = _zero_padded(hr[m]->hi, hr[m]->width);
+			if (pad < zeropad)
+				zeropad = pad;
+		}
+	}
 
-	if (bracket_needed && len < n && len >= 0)
+	if (zeropad)
+		len = snprintf(buf, n, "%s%0*u", hr[i]->prefix, zeropad, 0);
+	else
+		len = snprintf(buf, n, "%s", hr[i]->prefix);
+	if (len < 0 || len + 4 >= n)	/* min: '[', <digit>, ']', '\0' */
+		return n;		/* truncated, buffer filled */
+
+	if (bracket_needed)
 		buf[len++] = '[';
 
 	do {
-		m = (n - len) <= n ? n - len : 0;
-		len += hostrange_numstr(hr[i], m, buf + len);
-		if (len >= n)
-			break;
-		if (bracket_needed) /* Only need commas inside brackets */
+		if (i > *start)
 			buf[len++] = ',';
+		m = hostrange_numstr(hr[i], n - len, buf + len, zeropad);
+		if (m < 0 || (len += m) >= n - 1)	/* insufficient space */
+			return n;
 	} while (++i < hl->nranges && hostrange_within_range(hr[i], hr[i-1]));
-	if (bracket_needed && len < n && len > 0) {
-
-		/* Add trailing bracket (change trailing "," from above to "]" */
-		buf[len - 1] = ']';
-
-		/* NUL terminate for safety, but do not add terminator to len */
-		buf[len]   = '\0';
-	} else if (len >= n) {
-		if (n > 0)
-			buf[n-1] = '\0';
 
-	} else {
-		/* If len is > 0, NUL terminate (but do not add to len) */
-		buf[len > 0 ? len : 0] = '\0';
-	}
+	if (bracket_needed)
+		buf[len++] = ']';
 
+	buf[len] = '\0';
 	*start = i;
 	return len;
 }
@@ -2627,11 +2645,11 @@ _get_bracketed_list(hostlist_t hl, int *start, const size_t n, char *buf)
 static int _tell_if_used(int dim, int curr,
 			 int *start,
 			 int *end,
-			 int *last, int *found)
+			 int *last, int *found, int dims)
 {
 	int rc = 1;
 	int start_curr = curr;
-	int dims = slurmdb_setup_cluster_dims();
+
 /* 	int i; */
 /* 	char coord[dims+1]; */
 /* 	memset(coord, 0, sizeof(coord)); */
@@ -2677,7 +2695,7 @@ static int _tell_if_used(int dim, int curr,
 		} else {
 			if((rc = _tell_if_used(dim+1, curr,
 					       start, end,
-					       last, found)) != 1) {
+					       last, found, dims)) != 1) {
 				return rc;
 			}
 			if((*found) >= dim) {
@@ -2697,27 +2715,25 @@ end_it:
 	return rc;
 }
 
-static int _get_next_box(int *start,
-			 int *end)
+static int _get_next_box(int *start, int *end, int dims)
 {
-	int hostlist_base = hostlist_get_base();
-	int dims = slurmdb_setup_cluster_dims();
+	int hostlist_base = hostlist_get_base(dims);
 	static int orig_grid_end[HIGHEST_DIMENSIONS];
 	static int last[HIGHEST_DIMENSIONS];
 	int pos[dims];
-/* 	int i; */
-/* 	char coord[dims+1]; */
-/* 	char coord2[dims+1]; */
+	/* int i; */
+	/* char coord[dims+1]; */
+	/* char coord2[dims+1]; */
 	int found = -1;
 	int rc = 0;
 	int new_min[dims];
 	int new_max[dims];
 
-/* 	memset(coord, 0, sizeof(coord)); */
-/* 	memset(coord2, 0, sizeof(coord2)); */
+	/* memset(coord, 0, sizeof(coord)); */
+	/* memset(coord2, 0, sizeof(coord2)); */
 
 again:
-	if(start[A] == -1) {
+	if(start[0] == -1) {
 		memcpy(start, grid_start, dim_grid_size);
 		/* We need to keep track of this to make sure we get
 		   all the nodes marked since this could change based
@@ -2730,43 +2746,43 @@ again:
 	memcpy(end, start, dim_grid_size);
 
 
-/* 	for(i = 0; i<dims; i++) { */
-/* 		coord[i] = alpha_num[start[i]]; */
-/* 	}	 */
-/* 	info("beginning with %s", coord); */
+	/* for(i = 0; i<dims; i++) { */
+	/* 	coord[i] = alpha_num[start[i]]; */
+	/* } */
+	/* info("beginning with %s dims %d", coord, dims); */
 
-	_tell_if_used(A, 0, start, end, last, &found);
+	_tell_if_used(0, 0, start, end, last, &found, dims);
 
-/* 	for(i = 0; i<dims; i++) { */
-/* 		coord[i] = alpha_num[grid_start[i]]; */
-/* 		coord2[i] = alpha_num[grid_end[i]]; */
-/* 	}	 */
-/* 	info("current grid is %sx%s", coord, coord2); */
+	/* for(i = 0; i<dims; i++) { */
+	/* 	coord[i] = alpha_num[grid_start[i]]; */
+	/* 	coord2[i] = alpha_num[grid_end[i]]; */
+	/* } */
+	/* info("current grid is %sx%s", coord, coord2); */
 
 	/* remove what we just did */
-	_set_box_in_grid(A, 0, start, end, false);
+	_set_box_in_grid(0, 0, start, end, false, dims);
 
 	/* set the new min max of the grid */
 	memset(new_min, hostlist_base, dim_grid_size);
 	memset(new_max, -1, dim_grid_size);
 
 	/* send the orid_grid_end so we don't miss anything that was set. */
-	_set_min_max_of_grid(A, 0, grid_start, orig_grid_end,
-			     new_min, new_max, pos);
-
-	if(new_max[A] != -1) {
-/* 		for(i = 0; i<dims; i++) { */
-/* 			coord[i] = alpha_num[new_min[i]]; */
-/* 			coord2[i] = alpha_num[new_max[i]]; */
-/* 		}	 */
-/* 		info("here with %sx%s", coord, coord2); */
+	_set_min_max_of_grid(0, 0, grid_start, orig_grid_end,
+			     new_min, new_max, pos, dims);
+
+	if(new_max[0] != -1) {
+		/* for(i = 0; i<dims; i++) { */
+		/* 	coord[i] = alpha_num[new_min[i]]; */
+		/* 	coord2[i] = alpha_num[new_max[i]]; */
+		/* } */
+		/* info("here with %sx%s", coord, coord2); */
 		memcpy(grid_start, new_min, dim_grid_size);
 		memcpy(grid_end, new_max, dim_grid_size);
 		memcpy(last, grid_start, dim_grid_size);
 
-/* 		for(i = 0; i<dims; i++)  */
-/* 			coord[i] = alpha_num[last[i]]; */
-/* 		info("next start %s", coord); */
+		/* for(i = 0; i<dims; i++) */
+		/* 	coord[i] = alpha_num[last[i]]; */
+		/* info("next start %s", coord); */
 		if(found == -1) {
 			/* There are still nodes set in the grid, so we need
 			   to go through them again to make sure we got all
@@ -2792,10 +2808,9 @@ again:
  * Assumes hostlist is locked.
  */
 static int
-_get_boxes(char *buf, int max_len)
+_get_boxes(char *buf, int max_len, int dims, int brackets)
 {
 	int len=0, i;
-	int dims = slurmdb_setup_cluster_dims();
 	int curr_min[dims], curr_max[dims];
 /* 	char coord[dims+1]; */
 /* 	char coord2[dims+1]; */
@@ -2803,14 +2818,14 @@ _get_boxes(char *buf, int max_len)
 /* 	memset(coord2, 0, sizeof(coord2)); */
 
 	/* this means we are at the beginning */
-	curr_min[A] = -1;
+	curr_min[0] = -1;
 
 /* 	for(i=0; i<HOSTLIST_BASE*HOSTLIST_BASE*HOSTLIST_BASE*HOSTLIST_BASE; i++) { */
 /* 		if(grid[i]) */
 /* 			info("got one at %d", i); */
 /* 	} */
 
-	while(_get_next_box(curr_min, curr_max)) {
+	while(_get_next_box(curr_min, curr_max, dims)) {
 /* 		for(i = 0; i<dims; i++) { */
 /* 			coord[i] = alpha_num[curr_min[i]]; */
 /* 			coord2[i] = alpha_num[curr_max[i]]; */
@@ -2845,8 +2860,10 @@ _get_boxes(char *buf, int max_len)
 		}
 	}
 
-	buf[len - 1] = ']';
-
+	if (brackets)
+		buf[len - 1] = ']';
+	else
+		buf[len - 1] = '\0';
 end_it:
 	/* NUL terminate for safety, but do not add terminator to len */
 	buf[len]   = '\0';
@@ -2856,18 +2873,17 @@ end_it:
 
 static void
 _set_box_in_grid(int dim, int curr, int *start,
-		 int *end, bool value)
+		 int *end, bool value, int dims)
 {
 	int i;
 	int start_curr = curr;
-	int dims = slurmdb_setup_cluster_dims();
 
 	for (i=start[dim]; i<=end[dim]; i++) {
 		curr = start_curr + (i * offset[dim]);
 		if(dim == (dims-1))
 			grid[curr] = value;
 		else
-			_set_box_in_grid(dim+1, curr, start, end, value);
+			_set_box_in_grid(dim+1, curr, start, end, value, dims);
 
 	}
 }
@@ -2877,11 +2893,10 @@ static int _add_box_ranges(int dim,  int curr,
 			   int *end,
 			   int *pos,
 			   struct _range *ranges,
-			   int len, int *count)
+			   int len, int *count, int dims)
 {
 	int i;
 	int start_curr = curr;
-	int dims = slurmdb_setup_cluster_dims();
 
 	for (pos[dim]=start[dim]; pos[dim]<=end[dim]; pos[dim]++) {
 		curr = start_curr + (pos[dim] * offset[dim]);
@@ -2906,13 +2921,13 @@ static int _add_box_ranges(int dim,  int curr,
 			new_str[dims+i+1] = alpha_num[end[i]];
 
 /* 			info("got %s", new_str); */
-			if (!_parse_single_range(new_str,
-						 &ranges[*count]))
+			if (!_parse_single_range(
+				    new_str, &ranges[*count], dims))
 				return 0;
 			(*count)++;
 		} else
 			if(!_add_box_ranges(dim+1, curr, start, end, pos,
-					    ranges, len, count))
+					    ranges, len, count, dims))
 				return 0;
 	}
 	return 1;
@@ -2923,11 +2938,11 @@ static void _set_min_max_of_grid(int dim, int curr,
 				 int *end,
 				 int *min,
 				 int *max,
-				 int *pos)
+				 int *pos,
+				 int dims)
 {
 	int i;
 	int start_curr = curr;
-	int dims = slurmdb_setup_cluster_dims();
 
 	for (pos[dim]=start[dim]; pos[dim]<=end[dim]; pos[dim]++) {
 		curr = start_curr + (pos[dim] * offset[dim]);
@@ -2940,14 +2955,13 @@ static void _set_min_max_of_grid(int dim, int curr,
 			}
 		} else
 			_set_min_max_of_grid(dim+1, curr, start, end,
-					     min, max, pos);
+					     min, max, pos, dims);
 	}
 }
 
 static void
-_set_grid(unsigned long start, unsigned long end)
+_set_grid(unsigned long start, unsigned long end, int dims)
 {
-	int dims = slurmdb_setup_cluster_dims();
 	int sent_start[dims], sent_end[dims];
 	int i;
 /* 	char coord[dims+1]; */
@@ -2966,16 +2980,15 @@ _set_grid(unsigned long start, unsigned long end)
 	}
 /* 	info("going to set %sx%s", coord, coord2); */
 
-	_set_box_in_grid(A, 0, sent_start, sent_end, true);
+	_set_box_in_grid(0, 0, sent_start, sent_end, true, dims);
 }
 
 static bool
 _test_box_in_grid(int dim, int curr,
-		  int *start, int *end)
+		  int *start, int *end, int dims)
 {
 	int i;
 	int start_curr = curr;
-	int dims = slurmdb_setup_cluster_dims();
 
 	for (i=start[dim]; i<=end[dim]; i++) {
 		curr = start_curr + (i * offset[dim]);
@@ -2983,7 +2996,7 @@ _test_box_in_grid(int dim, int curr,
 			if(!grid[curr])
 				return false;
 		} else {
-			if(!_test_box_in_grid(dim+1, curr, start, end))
+			if(!_test_box_in_grid(dim+1, curr, start, end, dims))
 				return false;
 		}
 	}
@@ -2992,19 +3005,18 @@ _test_box_in_grid(int dim, int curr,
 }
 
 static bool
-_test_box(int *start, int *end)
+_test_box(int *start, int *end, int dims)
 {
 	int i;
-	int dims = slurmdb_setup_cluster_dims();
 
-	if(!memcmp(start, end, dim_grid_size)) /* single node */
+	if (!memcmp(start, end, dim_grid_size)) /* single node */
 		return false;
 
-	for(i = 0; i<dims; i++)
+	for (i = 0; i<dims; i++)
 		if (start[i] > end[i])
 			return false;
 
-	return _test_box_in_grid(A, 0, start, end);
+	return _test_box_in_grid(0, 0, start, end, dims);
 }
 
 char *hostlist_ranged_string_malloc(hostlist_t hl)
@@ -3018,55 +3030,69 @@ char *hostlist_ranged_string_malloc(hostlist_t hl)
 	return buf;
 }
 
-char *hostlist_ranged_string_xmalloc(hostlist_t hl)
+char *hostlist_ranged_string_xmalloc_dims(
+	hostlist_t hl, int dims, int brackets)
 {
 	int buf_size = 8192;
 	char *buf = xmalloc(buf_size);
-	while (hostlist_ranged_string(hl, buf_size, buf) < 0) {
+	while (hostlist_ranged_string_dims(
+		       hl, buf_size, buf, dims, brackets) < 0) {
 		buf_size *= 2;
 		xrealloc(buf, buf_size);
 	}
 	return buf;
 }
 
-ssize_t hostlist_ranged_string(hostlist_t hl, size_t n, char *buf)
+char *hostlist_ranged_string_xmalloc(hostlist_t hl)
+{
+	int dims = slurmdb_setup_cluster_name_dims();
+	return hostlist_ranged_string_xmalloc_dims(hl, dims, 1);
+}
+
+ssize_t hostlist_ranged_string_dims(hostlist_t hl, size_t n,
+				    char *buf, int dims, int brackets)
 {
 	int i = 0;
 	int len = 0;
 	int truncated = 0;
 	bool box = false;
-	int hostlist_base = hostlist_get_base();
-	int dims = slurmdb_setup_cluster_dims();
+	int hostlist_base;
+	static int last_dims = -1;
+
 	DEF_TIMERS;
 
+	if (!dims)
+		dims = slurmdb_setup_cluster_name_dims();
+	hostlist_base = hostlist_get_base(dims);
+
 	START_TIMER;
 	LOCK_HOSTLIST(hl);
 
-	if(dims > 1) {	/* logic for block node description */
+	if (dims > 1 && hl->nranges) {	/* logic for block node description */
 		slurm_mutex_lock(&multi_dim_lock);
 
-		/* compute things that only need to be calculated once */
-		if(dim_grid_size == -1) {
-			int i;
-
+		/* compute things that only need to be calculated once
+		 * (unless you change the dimensions of the
+		 * hostlist.  This can happen on a BGQ system.
+		 */
+		if ((last_dims != dims) || (dim_grid_size == -1)) {
+			last_dims = dims;
 			dim_grid_size = sizeof(int) * dims;
 
 			/* the last one is always 1 */
 			offset[dims-1] = 1;
-			for(i=(dims-2); i>=0; i--)
+			for (i=(dims-2); i>=0; i--)
 				offset[i] = offset[i+1] * hostlist_base;
 		}
 
-		if (hl->nranges < 1)
-			goto notbox;	/* no data */
-
 		memset(grid, 0, sizeof(grid));
 		memset(grid_start, hostlist_base, dim_grid_size);
 		memset(grid_end, -1, dim_grid_size);
 
-		for (i=0;i<hl->nranges;i++) {
-			/*info("got %s %d %d-%d", hl->hr[i]->prefix, hl->hr[i]->width, */
-			/*     hl->hr[i]->lo, hl->hr[i]->hi); */
+		for (i=0; i<hl->nranges; i++) {
+			/* info("got %s %d %d-%d", hl->hr[i]->prefix, */
+			/*      hl->hr[i]->width, hl->hr[i]->lo, */
+			/*      hl->hr[i]->hi); */
 			if (hl->hr[i]->width != dims) {
 				/* We use this logic to build task
 				 * list ranges, so this does not
@@ -3089,50 +3115,50 @@ ssize_t hostlist_ranged_string(hostlist_t hl, size_t n, char *buf)
 				}
 				goto notbox;
 			}
-			_set_grid(hl->hr[i]->lo, hl->hr[i]->hi);
+			_set_grid(hl->hr[i]->lo, hl->hr[i]->hi, dims);
 		}
 		if (!memcmp(grid_start, grid_end, dim_grid_size)) {
-			len += snprintf(buf, n, "%s", hl->hr[0]->prefix);
-			for(i = 0; i<dims; i++) {
-				if(len > n)
-					goto too_long;
+			len = snprintf(buf, n, "%s", hl->hr[0]->prefix);
+			if (len < 0 || ((len + dims) >= n))
+				goto too_long;
+			for (i = 0; i < dims; i++)
 				buf[len++] = alpha_num[grid_start[i]];
-			}
-		} else if (!_test_box(grid_start, grid_end)) {
-			sprintf(buf, "%s[", hl->hr[0]->prefix);
-			len = strlen(hl->hr[0]->prefix) + 1;
-			len += _get_boxes(buf + len, (n-len));
+		} else if (!_test_box(grid_start, grid_end, dims)) {
+			len = snprintf(buf, n, "%s", hl->hr[0]->prefix);
+			if (len < 0 || (len+1) >= n)
+				goto too_long;
+			if (brackets)
+				buf[len++] = '[';
+			len += _get_boxes(buf + len, (n-len), dims, brackets);
 		} else {
-			len += snprintf(buf, n, "%s[", hl->hr[0]->prefix);
-			for(i = 0; i<dims; i++) {
-				if(len > n)
-					goto too_long;
+			len = snprintf(buf, n, "%s", hl->hr[0]->prefix);
+			if (len < 0 || ((len + 3 + (dims * 2)) >= n))
+				goto too_long;
+			if (brackets)
+				buf[len++] = '[';
+
+			for (i = 0; i < dims; i++)
 				buf[len++] = alpha_num[grid_start[i]];
-			}
-			if(len <= n)
-				buf[len++] = 'x';
+			buf[len++] = 'x';
 
-			for(i = 0; i<dims; i++) {
-				if(len > n)
-					goto too_long;
+			for (i = 0; i < dims; i++)
 				buf[len++] = alpha_num[grid_end[i]];
-			}
-			if(len <= n)
+			if (brackets)
 				buf[len++] = ']';
 		}
 		if ((len < 0) || (len > n))
 		too_long:
 			len = n;	/* truncated */
 		box = true;
-	}
 notbox:
+		slurm_mutex_unlock(&multi_dim_lock);
+	}
 
 	if (!box) {
-		i=0;
-		while (i < hl->nranges && len < n) {
-			len += _get_bracketed_list(hl, &i, n - len, buf + len);
-			if ((len > 0) && (len < n) && (i < hl->nranges))
+		for (i = 0; i < hl->nranges && len < n;) {
+			if (i)
 				buf[len++] = ',';
+			len += _get_bracketed_list(hl, &i, n - len, buf + len);
 		}
 	}
 
@@ -3144,16 +3170,21 @@ notbox:
 		if (n > 0)
 			buf[n-1] = '\0';
 	} else
-		buf[len > 0 ? len : 0] = '\0';
+		buf[len] = '\0';
 
 	END_TIMER;
-	if(dims > 1)	/* logic for block node description */
-		slurm_mutex_unlock(&multi_dim_lock);
 
 //	info("time was %s", TIME_STR);
 	return truncated ? -1 : len;
 }
 
+ssize_t hostlist_ranged_string(hostlist_t hl, size_t n, char *buf)
+{
+	int dims = slurmdb_setup_cluster_name_dims();
+
+	return hostlist_ranged_string_dims(hl, n, buf, dims, 1);
+}
+
 /* ----[ hostlist iterator functions ]---- */
 
 static hostlist_iterator_t hostlist_iterator_new(void)
@@ -3257,23 +3288,27 @@ static void _iterator_advance_range(hostlist_iterator_t i)
 	}
 }
 
-char *hostlist_next(hostlist_iterator_t i)
+char *hostlist_next_dims(hostlist_iterator_t i, int dims)
 {
 	char buf[MAXHOSTNAMELEN + 16];
+	const int size = sizeof(buf);
 	int len = 0;
-	int dims = slurmdb_setup_cluster_dims();
 
 	assert(i != NULL);
 	assert(i->magic == HOSTLIST_MAGIC);
 	LOCK_HOSTLIST(i->hl);
 	_iterator_advance(i);
 
-	if (i->idx > i->hl->nranges - 1) {
-		UNLOCK_HOSTLIST(i->hl);
-		return NULL;
-	}
+	if (!dims)
+		dims = slurmdb_setup_cluster_name_dims();
+
+	if (i->idx > i->hl->nranges - 1)
+		goto no_next;
+
+	len = snprintf(buf, size, "%s", i->hr->prefix);
+	if (len < 0 || len + dims >= size)
+		goto no_next;
 
-	len = snprintf(buf, MAXHOSTNAMELEN + 15, "%s", i->hr->prefix);
 	if (!i->hr->singlehost) {
 		if ((dims > 1) && (i->hr->width == dims)) {
 			int i2 = 0;
@@ -3281,18 +3316,28 @@ char *hostlist_next(hostlist_iterator_t i)
 
 			hostlist_parse_int_to_array(i->hr->lo + i->depth,
 						    coord, dims, 0);
-			for (i2 = 0; i2 < dims; i2++) {
-				if (len <= MAXHOSTNAMELEN + 15)
-					buf[len++] = alpha_num[coord[i2]];
-			}
+			while (i2 < dims)
+				buf[len++] = alpha_num[coord[i2++]];
 			buf[len] = '\0';
 		} else {
-			snprintf(buf + len, MAXHOSTNAMELEN + 15 - len, "%0*lu",
-				 i->hr->width, i->hr->lo + i->depth);
+			len = snprintf(buf + len, size - len, "%0*lu",
+				       i->hr->width, i->hr->lo + i->depth);
+			if (len < 0 || len >= size)
+				goto no_next;
 		}
 	}
 	UNLOCK_HOSTLIST(i->hl);
 	return strdup(buf);
+no_next:
+	UNLOCK_HOSTLIST(i->hl);
+	return NULL;
+}
+
+char *hostlist_next(hostlist_iterator_t i)
+{
+	int dims = slurmdb_setup_cluster_name_dims();
+
+	return hostlist_next_dims(i, dims);
 }
 
 char *hostlist_next_range(hostlist_iterator_t i)
diff --git a/src/common/hostlist.h b/src/common/hostlist.h
index d2d61e63e..80d91592e 100644
--- a/src/common/hostlist.h
+++ b/src/common/hostlist.h
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -59,7 +59,9 @@
 #endif
 
 /* largest configured system dimensions */
-#define HIGHEST_DIMENSIONS 4
+#ifndef HIGHEST_DIMENSIONS
+#  define HIGHEST_DIMENSIONS 5
+#endif
 #define HIGHEST_BASE 36
 
 extern char *alpha_num;
@@ -155,6 +157,7 @@ int set_grid(int start, int end, int count);
  * The returned hostlist must be freed with hostlist_destroy()
  *
  */
+hostlist_t hostlist_create_dims(const char *hostlist, int dims);
 hostlist_t hostlist_create(const char *hostlist);
 
 /* hostlist_copy():
@@ -193,6 +196,7 @@ int hostlist_push(hostlist_t hl, const char *hosts);
  *
  * return value is 1 for success, 0 for failure.
  */
+int hostlist_push_host_dims(hostlist_t hl, const char *str, int dims);
 int hostlist_push_host(hostlist_t hl, const char *host);
 
 
@@ -318,13 +322,21 @@ void hostlist_sort(hostlist_t hl);
  */
 void hostlist_uniq(hostlist_t hl);
 
-int hostlist_get_base();
+/* Return the base used for encoding numeric hostlist suffixes */
+#define hostlist_get_base(_dimensions) ((_dimensions) > 1 ? 36 : 10)
 
 /* given a int will parse it into sizes in each dimension */
 void hostlist_parse_int_to_array(int in, int *out, int dims, int hostlist_base);
 
 /* ----[ hostlist print functions ]---- */
 
+/* hostlist_ranged_string_dims():
+ *
+ * do the same thing as hostlist_ranged_string, but provide the
+ * dimensions you are looking for.
+ */
+ssize_t hostlist_ranged_string_dims(hostlist_t hl, size_t n,
+				    char *buf, int dims, int brackets);
 /* hostlist_ranged_string():
  *
  * Write the string representation of the hostlist hl into buf,
@@ -352,6 +364,8 @@ char *hostlist_ranged_string_malloc(hostlist_t hl);
 /* Variant of hostlist_ranged_string().
  * Returns the buffer which must be released using xfree().
  */
+char *hostlist_ranged_string_xmalloc_dims(
+	hostlist_t hl, int dims, int brackets);
 char *hostlist_ranged_string_xmalloc(hostlist_t hl);
 
 /* hostlist_deranged_string():
@@ -363,6 +377,8 @@ char *hostlist_ranged_string_xmalloc(hostlist_t hl);
  * hostlist_deranged_string() will not attempt to write a bracketed
  * hostlist representation. Every hostname will be explicitly written.
  */
+ssize_t hostlist_deranged_string_dims(
+	hostlist_t hl, size_t n, char *buf, int dims);
 ssize_t hostlist_deranged_string(hostlist_t hl, size_t n, char *buf);
 
 /* Variant of hostlist_deranged_string().
@@ -373,6 +389,7 @@ char *hostlist_deranged_string_malloc(hostlist_t hl);
 /* Variant of hostlist_deranged_string().
  * Returns the buffer which must be released using xfree().
  */
+char *hostlist_deranged_string_xmalloc_dims(hostlist_t hl, int dims);
 char *hostlist_deranged_string_xmalloc(hostlist_t hl);
 
 /* ----[ hostlist utility functions ]---- */
@@ -419,6 +436,7 @@ void hostlist_iterator_reset(hostlist_iterator_t i);
  *
  * The caller is responsible for freeing the returned memory.
  */
+char * hostlist_next_dims(hostlist_iterator_t i, int dims);
 char * hostlist_next(hostlist_iterator_t i);
 
 
diff --git a/src/common/io_hdr.c b/src/common/io_hdr.c
index da7b2315e..4ae4d7ee0 100644
--- a/src/common/io_hdr.c
+++ b/src/common/io_hdr.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/common/io_hdr.h b/src/common/io_hdr.h
index f5eb6f6b6..253109994 100644
--- a/src/common/io_hdr.h
+++ b/src/common/io_hdr.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/common/job_options.c b/src/common/job_options.c
index 00ac72bfc..85df63121 100644
--- a/src/common/job_options.c
+++ b/src/common/job_options.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -41,13 +41,12 @@
 #  include "config.h"
 #endif
 
-#include <slurm/slurm.h>
-#include <src/common/xassert.h>
-#include <src/common/xmalloc.h>
-#include <src/common/xstring.h>
-#include <src/common/list.h>
-#include <src/common/pack.h>
-
+#include "slurm/slurm.h"
+#include "src/common/xassert.h"
+#include "src/common/xmalloc.h"
+#include "src/common/xstring.h"
+#include "src/common/list.h"
+#include "src/common/pack.h"
 #include "src/common/job_options.h"
 
 #define JOB_OPTIONS_PACK_TAG "job_options"
diff --git a/src/common/job_options.h b/src/common/job_options.h
index 6ec2b3550..8fcfe754b 100644
--- a/src/common/job_options.h
+++ b/src/common/job_options.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/common/job_resources.c b/src/common/job_resources.c
index a187e7055..dfd6fb9ce 100644
--- a/src/common/job_resources.c
+++ b/src/common/job_resources.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -38,7 +38,8 @@
 
 #include <stdlib.h>
 #include <string.h>
-#include <slurm/slurm_errno.h>
+
+#include "slurm/slurm_errno.h"
 
 #include "src/common/hostlist.h"
 #include "src/common/job_resources.h"
@@ -236,7 +237,10 @@ extern int reset_node_bitmap(job_resources_t *job_resrcs_ptr, uint32_t job_id)
 		error("Invalid nodes (%s) for job_id %u",
 		      job_resrcs_ptr->nodes, job_id);
 		return SLURM_ERROR;
+	} else if (job_resrcs_ptr->nodes == NULL) {
+		job_resrcs_ptr->node_bitmap = bit_alloc(node_record_count);
 	}
+
 	i = bit_set_count(job_resrcs_ptr->node_bitmap);
 	if (job_resrcs_ptr->nhosts != i) {
 		error("Invalid change in resource allocation node count for "
@@ -529,15 +533,13 @@ extern void pack_job_resources(job_resources_t *job_resrcs_ptr, Buf buffer,
 {
 	uint32_t cluster_flags = slurmdb_setup_cluster_flags();
 
-	if(protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
 		if (job_resrcs_ptr == NULL) {
 			uint32_t empty = NO_VAL;
 			pack32(empty, buffer);
 			return;
 		}
 
-		xassert(job_resrcs_ptr->nhosts);
-
 		pack32(job_resrcs_ptr->nhosts, buffer);
 		pack32(job_resrcs_ptr->ncpus, buffer);
 		pack8(job_resrcs_ptr->node_req, buffer);
@@ -616,8 +618,6 @@ extern void pack_job_resources(job_resources_t *job_resrcs_ptr, Buf buffer,
 			return;
 		}
 
-		xassert(job_resrcs_ptr->nhosts);
-
 		pack32(job_resrcs_ptr->nhosts, buffer);
 		pack32(job_resrcs_ptr->ncpus, buffer);
 		pack8(job_resrcs_ptr->node_req, buffer);
@@ -907,6 +907,89 @@ extern int set_job_resources_bit(job_resources_t *job_resrcs_ptr,
 	return SLURM_SUCCESS;
 }
 
+/* For every core bitmap and core_bitmap_used set in the "from" resources
+ * structure at from_node_offset, set the corresponding bit in the "new"
+ * resources structure at new_node_offset */
+extern int job_resources_bits_copy(job_resources_t *new_job_resrcs_ptr,
+				   uint16_t new_node_offset,
+				   job_resources_t *from_job_resrcs_ptr,
+				   uint16_t from_node_offset)
+{
+	int i, rc = SLURM_SUCCESS;
+	int new_bit_inx  = 0, new_core_cnt  = 0;
+	int from_bit_inx = 0, from_core_cnt = 0;
+
+	xassert(new_job_resrcs_ptr);
+	xassert(from_job_resrcs_ptr);
+
+	if (new_node_offset >= new_job_resrcs_ptr->nhosts) {
+		error("job_resources_bits_move: new_node_offset invalid "
+		      "(%u is 0 or >=%u)", new_node_offset,
+		      new_job_resrcs_ptr->nhosts);
+		return SLURM_ERROR;
+	}
+	for (i = 0; i < new_job_resrcs_ptr->nhosts; i++) {
+		if (new_job_resrcs_ptr->sock_core_rep_count[i] <=
+		    new_node_offset) {
+			new_bit_inx += new_job_resrcs_ptr->sockets_per_node[i] *
+				new_job_resrcs_ptr->cores_per_socket[i] *
+				new_job_resrcs_ptr->sock_core_rep_count[i];
+			new_node_offset -= new_job_resrcs_ptr->
+					   sock_core_rep_count[i];
+		} else {
+			new_bit_inx += new_job_resrcs_ptr->sockets_per_node[i] *
+				new_job_resrcs_ptr->cores_per_socket[i] *
+				new_node_offset;
+			new_core_cnt = new_job_resrcs_ptr->sockets_per_node[i] *
+				new_job_resrcs_ptr->cores_per_socket[i];
+			break;
+		}
+	}
+
+	if (from_node_offset >= from_job_resrcs_ptr->nhosts) {
+		error("job_resources_bits_move: from_node_offset invalid "
+		      "(%u is 0 or >=%u)", from_node_offset,
+		      from_job_resrcs_ptr->nhosts);
+		return SLURM_ERROR;
+	}
+	for (i = 0; i < from_job_resrcs_ptr->nhosts; i++) {
+		if (from_job_resrcs_ptr->sock_core_rep_count[i] <=
+		    from_node_offset) {
+			from_bit_inx += from_job_resrcs_ptr->sockets_per_node[i] *
+				from_job_resrcs_ptr->cores_per_socket[i] *
+				from_job_resrcs_ptr->sock_core_rep_count[i];
+			from_node_offset -= from_job_resrcs_ptr->
+					    sock_core_rep_count[i];
+		} else {
+			from_bit_inx += from_job_resrcs_ptr->sockets_per_node[i] *
+				from_job_resrcs_ptr->cores_per_socket[i] *
+				from_node_offset;
+			from_core_cnt = from_job_resrcs_ptr->sockets_per_node[i] *
+				from_job_resrcs_ptr->cores_per_socket[i];
+			break;
+		}
+	}
+
+	if (new_core_cnt != from_core_cnt) {
+		error("job_resources_bits_move: core_cnt mis-match (%d != %d)",
+		      new_core_cnt, from_core_cnt);
+		new_core_cnt = MIN(new_core_cnt, from_core_cnt);
+		rc = SLURM_ERROR;
+	}
+
+	for (i = 0; i < new_core_cnt; i++) {
+		if (bit_test(from_job_resrcs_ptr->core_bitmap, from_bit_inx+i))
+			bit_set(new_job_resrcs_ptr->core_bitmap,new_bit_inx+i);
+		if (bit_test(from_job_resrcs_ptr->core_bitmap_used,
+			     from_bit_inx+i)) {
+			bit_set(new_job_resrcs_ptr->core_bitmap_used,
+				new_bit_inx+i);
+		}
+	}
+
+	return rc;
+}
+
 extern int get_job_resources_node(job_resources_t *job_resrcs_ptr,
 				  uint32_t node_id)
 {
@@ -1003,6 +1086,50 @@ extern int clear_job_resources_node(job_resources_t *job_resrcs_ptr,
 	return _change_job_resources_node(job_resrcs_ptr, node_id, false);
 }
 
+/* Return the count of core bitmaps set for the specific node */
+extern int count_job_resources_node(job_resources_t *job_resrcs_ptr,
+				    uint32_t node_id)
+{
+	int i, bit_inx = 0, core_cnt = 0;
+	int set_cnt = 0;
+
+	xassert(job_resrcs_ptr);
+
+	for (i=0; i<job_resrcs_ptr->nhosts; i++) {
+		if (job_resrcs_ptr->sock_core_rep_count[i] <= node_id) {
+			bit_inx += job_resrcs_ptr->sockets_per_node[i] *
+				job_resrcs_ptr->cores_per_socket[i] *
+				job_resrcs_ptr->sock_core_rep_count[i];
+			node_id -= job_resrcs_ptr->sock_core_rep_count[i];
+		} else {
+			bit_inx += job_resrcs_ptr->sockets_per_node[i] *
+				job_resrcs_ptr->cores_per_socket[i] *
+				node_id;
+			core_cnt = job_resrcs_ptr->sockets_per_node[i] *
+				job_resrcs_ptr->cores_per_socket[i];
+			break;
+		}
+	}
+	if (core_cnt < 1) {
+		error("count_job_resources_node: core_cnt=0");
+		return set_cnt;
+	}
+
+	i = bit_size(job_resrcs_ptr->core_bitmap);
+	if ((bit_inx + core_cnt) > i) {
+		error("count_job_resources_node: offset > bitmap size "
+		      "(%d >= %d)", (bit_inx + core_cnt), i);
+		return set_cnt;
+	}
+
+	for (i=0; i<core_cnt; i++) {
+		if (bit_test(job_resrcs_ptr->core_bitmap, bit_inx++))
+			set_cnt++;
+	}
+
+	return set_cnt;
+}
+
 extern int get_job_resources_cnt(job_resources_t *job_resrcs_ptr,
 				 uint32_t node_id, uint16_t *socket_cnt,
 				 uint16_t *cores_per_socket_cnt)
@@ -1106,6 +1233,48 @@ extern void add_job_to_cores(job_resources_t *job_resrcs_ptr,
 	}
 }
 
+/*
+ * Remove job from full-length core_bitmap
+ * IN job_resrcs_ptr - resources allocated to a job
+ * IN/OUT full_bitmap - bitmap of available CPUs, allocate as needed
+ * IN bits_per_node - bits per node in the full_bitmap
+ * RET none (function returns void; *full_core_bitmap is allocated as needed)
+ */
+extern void remove_job_from_cores(job_resources_t *job_resrcs_ptr,
+			     bitstr_t **full_core_bitmap,
+			     const uint16_t *bits_per_node)
+{
+	int full_node_inx = 0;
+	int job_bit_inx  = 0, full_bit_inx  = 0, i;
+
+	if (!job_resrcs_ptr->core_bitmap)
+		return;
+
+	/* allocate the full core bitmap on first use */
+	if (*full_core_bitmap == NULL) {
+		uint32_t size = 0;
+		for (i = 0; i < node_record_count; i++)
+			size += bits_per_node[i];
+		*full_core_bitmap = bit_alloc(size);
+		if (!*full_core_bitmap)
+			fatal("add_job_to_cores: bitmap memory error");
+	}
+
+	for (full_node_inx = 0; full_node_inx < node_record_count;
+	     full_node_inx++) {
+		if (bit_test(job_resrcs_ptr->node_bitmap, full_node_inx)) {
+			for (i = 0; i < bits_per_node[full_node_inx]; i++) {
+				if (!bit_test(job_resrcs_ptr->core_bitmap,
+					      job_bit_inx + i))
+					continue;
+				bit_clear(*full_core_bitmap, full_bit_inx + i);
+			}
+			job_bit_inx += bits_per_node[full_node_inx];
+		}
+		full_bit_inx += bits_per_node[full_node_inx];
+	}
+}
+
 /* Given a job pointer and a global node index, return the index of that
  * node in the job_resrcs_ptr->cpus. Return -1 if invalid */
 extern int job_resources_node_inx_to_cpu_inx(job_resources_t *job_resrcs_ptr,
diff --git a/src/common/job_resources.h b/src/common/job_resources.h
index a3875ec72..d88f24dbb 100644
--- a/src/common/job_resources.h
+++ b/src/common/job_resources.h
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -206,6 +206,13 @@ extern int get_job_resources_bit(job_resources_t *job_resrcs_ptr,
 extern int set_job_resources_bit(job_resources_t *job_resrcs_ptr,
 				 uint32_t node_id, uint16_t socket_id,
 				 uint16_t core_id);
+/* For every core bitmap set in the "from" resources structure at
+ * from_node_offset, set the corresponding bit in the "new" resources structure
+ * at new_node_offset */
+extern int job_resources_bits_copy(job_resources_t *new_job_resrcs_ptr,
+				   uint16_t new_node_offset,
+				   job_resources_t *from_job_resrcs_ptr,
+				   uint16_t from_node_offset);
 
 /* Get/clear/set bit value at specified location for whole node allocations
  *	get is for any socket/core on the specified node
@@ -219,6 +226,10 @@ extern int clear_job_resources_node(job_resources_t *job_resrcs_ptr,
 extern int set_job_resources_node(job_resources_t *job_resrcs_ptr,
 				  uint32_t node_id);
 
+/* Return the count of core bitmaps set for the specific node */
+extern int count_job_resources_node(job_resources_t *job_resrcs_ptr,
+				    uint32_t node_id);
+
 /* Get socket and core count for a specific node_id (zero origin) */
 extern int get_job_resources_cnt(job_resources_t *job_resrcs_ptr,
 				 uint32_t node_id, uint16_t *socket_cnt,
@@ -246,6 +257,17 @@ extern void add_job_to_cores(job_resources_t *job_resrcs_ptr,
 			     bitstr_t **full_core_bitmap,
 			     const uint16_t *bits_per_node);
 
+/*
+ * Remove job from full-length core_bitmap
+ * IN job_resrcs_ptr - resources allocated to a job
+ * IN/OUT full_bitmap - bitmap of available CPUs, allocate as needed
+ * IN bits_per_node - bits per node in the full_bitmap
+ * RET none (function returns void; *full_core_bitmap is allocated as needed)
+ */
+extern void remove_job_from_cores(job_resources_t *job_resrcs_ptr,
+			       bitstr_t **full_core_bitmap,
+			       const uint16_t *bits_per_node);
+
 /* Given a job pointer and a global node index, return the index of that
  * node in the job_resrcs_ptr->cpus. Return -1 if invalid */
 extern int job_resources_node_inx_to_cpu_inx(job_resources_t *job_resrcs_ptr, 
diff --git a/src/common/jobacct_common.c b/src/common/jobacct_common.c
index 997d02505..5bac311b5 100644
--- a/src/common/jobacct_common.c
+++ b/src/common/jobacct_common.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/common/jobacct_common.h b/src/common/jobacct_common.h
index 24f8da11b..be20d3769 100644
--- a/src/common/jobacct_common.h
+++ b/src/common/jobacct_common.h
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -55,7 +55,8 @@
 
 #include <dirent.h>
 #include <sys/stat.h>
-#include <slurm/slurmdb.h>
+
+#include "slurm/slurmdb.h"
 
 #include "src/common/xmalloc.h"
 #include "src/common/list.h"
diff --git a/src/common/log.c b/src/common/log.c
index 0898830c1..d0a6aac94 100644
--- a/src/common/log.c
+++ b/src/common/log.c
@@ -70,7 +70,6 @@
 #  include <stdlib.h>	/* for abort() */
 #endif
 
-#include <slurm/slurm_errno.h>
 #include <sys/poll.h>
 #include <sys/socket.h>
 #include <sys/stat.h>
@@ -78,6 +77,7 @@
 #include <sys/types.h>
 #include <sys/unistd.h>
 
+#include "slurm/slurm_errno.h"
 #include "src/common/log.h"
 #include "src/common/fd.h"
 #include "src/common/macros.h"
diff --git a/src/common/macros.h b/src/common/macros.h
index 8d392dc6d..7f1bcd188 100644
--- a/src/common/macros.h
+++ b/src/common/macros.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -265,4 +265,10 @@ typedef enum {false, true} bool;
 #  define strndup(src,size) strdup(src)
 #endif
 
+/* There are places where we put NO_VAL or INFINITE into a float or double
+ * Use fuzzy_equal below to test for those values rather than a comparison
+ * which could fail due to rounding errors. */
+#define FUZZY_EPSILON 0.00001
+#define fuzzy_equal(v1, v2) ((((v1)-(v2)) > -FUZZY_EPSILON) && (((v1)-(v2)) < FUZZY_EPSILON))
+
 #endif /* !_MACROS_H */
diff --git a/src/common/mpi.c b/src/common/mpi.c
index 283f73da5..8b3a2952e 100644
--- a/src/common/mpi.c
+++ b/src/common/mpi.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -296,8 +296,15 @@ bool mpi_hook_client_single_task_per_node (void)
 {
 	if (_mpi_init(NULL) < 0)
 		return SLURM_ERROR;
-
+#if defined HAVE_BGQ
+//#if defined HAVE_BGQ && defined HAVE_BG_FILES
+	/* On BGQ systems we only want 1 task to be spawned since srun
+	   is wrapping runjob.
+	*/
+	return true;
+#else
 	return (*(g_context->ops.client_single_task))();
+#endif
 }
 
 int mpi_hook_client_fini (mpi_plugin_client_state_t *state)
diff --git a/src/common/mpi.h b/src/common/mpi.h
index 603f5e73b..37d1b08ed 100644
--- a/src/common/mpi.h
+++ b/src/common/mpi.h
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -44,7 +44,7 @@
 #endif
 
 #include <stdbool.h>
-#include <slurm/slurm.h>
+#include "slurm/slurm.h"
 
 typedef struct slurm_mpi_context *slurm_mpi_context_t;
 typedef void mpi_plugin_client_state_t;
@@ -93,8 +93,8 @@ int mpi_hook_slurmstepd_init (char ***env);
  * task.  The process will be running as the user of the job step at that
  * point.
  *
- * If the plugin want to set environment variables for the task,
- * it will add the necessary variables the the env array pointed
+ * If the plugin wants to set environment variables for the task,
+ * it will add the necessary variables the env array pointed
  * to be "env".  If "env" is NULL, a new array will be allocated
  * automaticallly.
  *
@@ -122,7 +122,7 @@ int mpi_hook_client_init (char *mpi_type);
  *
  * If the plugin requires that environment variables be set in the
  * environment of every task, it will add the necessary variables
- * the the env array pointed to be "env".  If "env" is NULL, a new
+ * the env array pointed to be "env".  If "env" is NULL, a new
  * array will be allocated automaticallly.
  *
  * The returned "env" array may be manipulated (and freed) by using
diff --git a/src/common/net.c b/src/common/net.c
index 3f4ad8772..c4fb8c41f 100644
--- a/src/common/net.c
+++ b/src/common/net.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/common/net.h b/src/common/net.h
index 75c5baa6d..7b7ee2f75 100644
--- a/src/common/net.h
+++ b/src/common/net.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/common/node_conf.c b/src/common/node_conf.c
index 4aff65daa..80aa1c35d 100644
--- a/src/common/node_conf.c
+++ b/src/common/node_conf.c
@@ -13,7 +13,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -78,8 +78,9 @@
 #define _DEBUG 0
 
 /* Global variables */
-List config_list  = NULL;		/* list of config_record entries */
-List feature_list = NULL;		/* list of features_record entries */
+List config_list  = NULL;	/* list of config_record entries */
+List feature_list = NULL;	/* list of features_record entries */
+List front_end_list = NULL;	/* list of slurm_conf_frontend_t entries */
 time_t last_node_update = (time_t) 0;	/* time of last update */
 struct node_record *node_record_table_ptr = NULL;	/* node records */
 struct node_record **node_hash_table = NULL;	/* node_record hash table */
@@ -100,7 +101,6 @@ static int	_list_find_config (void *config_entry, void *key);
 static int	_list_find_feature (void *feature_entry, void *key);
 
 
-
 static void _add_config_feature(char *feature, bitstr_t *node_bitmap)
 {
 	struct features_record *feature_ptr;
@@ -150,9 +150,10 @@ static int _build_single_nodeline_info(slurm_conf_node_t *node_ptr,
 	char *hostname = NULL;
 	char *address = NULL;
 	int state_val = NODE_STATE_UNKNOWN;
+	int address_count, alias_count, hostname_count;
 
 	if (node_ptr->state != NULL) {
-		state_val = state_str2int(node_ptr->state);
+		state_val = state_str2int(node_ptr->state, node_ptr->nodenames);
 		if (state_val == NO_VAL)
 			goto cleanup;
 	}
@@ -177,22 +178,27 @@ static int _build_single_nodeline_info(slurm_conf_node_t *node_ptr,
 	}
 
 	/* some sanity checks */
+	address_count  = hostlist_count(address_list);
+	alias_count    = hostlist_count(alias_list);
+	hostname_count = hostlist_count(hostname_list);
 #ifdef HAVE_FRONT_END
-	if ((hostlist_count(hostname_list) != 1) ||
-	    (hostlist_count(address_list)  != 1)) {
-		error("Only one hostname and address allowed "
-		      "in FRONT_END mode");
+	if ((hostname_count != alias_count) && (hostname_count != 1)) {
+		error("NodeHostname count must equal that of NodeName "
+		      "records of there must be no more than one");
+		goto cleanup;
+	}
+	if ((address_count != alias_count) && (address_count != 1)) {
+		error("NodeAddr count must equal that of NodeName "
+		      "records of there must be no more than one");
 		goto cleanup;
 	}
-	hostname = node_ptr->hostnames;
-	address = node_ptr->addresses;
 #else
-	if (hostlist_count(hostname_list) < hostlist_count(alias_list)) {
+	if (hostname_count < alias_count) {
 		error("At least as many NodeHostname are required "
 		      "as NodeName");
 		goto cleanup;
 	}
-	if (hostlist_count(address_list) < hostlist_count(alias_list)) {
+	if (address_count < alias_count) {
 		error("At least as many NodeAddr are required as NodeName");
 		goto cleanup;
 	}
@@ -200,10 +206,18 @@ static int _build_single_nodeline_info(slurm_conf_node_t *node_ptr,
 
 	/* now build the individual node structures */
 	while ((alias = hostlist_shift(alias_list))) {
-#ifndef HAVE_FRONT_END
-		hostname = hostlist_shift(hostname_list);
-		address = hostlist_shift(address_list);
-#endif
+		if (hostname_count > 0) {
+			hostname_count--;
+			if (hostname)
+				free(hostname);
+			hostname = hostlist_shift(hostname_list);
+		}
+		if (address_count > 0) {
+			address_count--;
+			if (address)
+				free(address);
+			address = hostlist_shift(address_list);
+		}
 		/* find_node_record locks this to get the
 		 * alias so we need to unlock */
 		node_rec = find_node_record(alias);
@@ -215,6 +229,7 @@ static int _build_single_nodeline_info(slurm_conf_node_t *node_ptr,
 				node_rec->node_state = state_val;
 			node_rec->last_response = (time_t) 0;
 			node_rec->comm_name = xstrdup(address);
+			node_rec->node_hostname = xstrdup(hostname);
 			node_rec->port      = node_ptr->port;
 			node_rec->weight    = node_ptr->weight;
 			node_rec->features  = xstrdup(node_ptr->feature);
@@ -224,14 +239,14 @@ static int _build_single_nodeline_info(slurm_conf_node_t *node_ptr,
 			error("reconfiguration for node %s, ignoring!", alias);
 		}
 		free(alias);
-#ifndef HAVE_FRONT_END
-		free(hostname);
-		free(address);
-#endif
 	}
 
 	/* free allocated storage */
 cleanup:
+	if (address)
+		free(address);
+	if (hostname)
+		free(hostname);
 	if (alias_list)
 		hostlist_destroy(alias_list);
 	if (hostname_list)
@@ -249,8 +264,9 @@ cleanup:
 static int _delete_config_record (void)
 {
 	last_node_update = time (NULL);
-	(void) list_delete_all (config_list,  &_list_find_config,  NULL);
-	(void) list_delete_all (feature_list, &_list_find_feature, NULL);
+	(void) list_delete_all (config_list,    &_list_find_config,  NULL);
+	(void) list_delete_all (feature_list,   &_list_find_feature, NULL);
+	(void) list_delete_all (front_end_list, &list_find_frontend, NULL);
 	return SLURM_SUCCESS;
 }
 
@@ -359,6 +375,8 @@ static int _hash_index (char *name)
 	for (j = 1; *name; name++, j++)
 		index += (int)*name * j;
 	index %= node_record_count;
+	if (index < 0)
+		index += node_record_count;
 
 	return index;
 }
@@ -408,14 +426,16 @@ static int _list_find_config (void *config_entry, void *key)
 }
 
 /*
- * bitmap2node_name - given a bitmap, build a list of comma separated node
- *	names. names may include regular expressions (e.g. "lx[01-10]")
+ * bitmap2node_name_sortable - given a bitmap, build a list of comma
+ *	separated node names. names may include regular expressions
+ *	(e.g. "lx[01-10]")
  * IN bitmap - bitmap pointer
+ * IN sort   - returned sorted list or not
  * RET pointer to node list or NULL on error
  * globals: node_record_table_ptr - pointer to node table
  * NOTE: the caller must xfree the memory at node_list when no longer required
  */
-char * bitmap2node_name (bitstr_t *bitmap)
+char * bitmap2node_name_sortable (bitstr_t *bitmap, bool sort)
 {
 	int i, first, last;
 	hostlist_t hl;
@@ -437,13 +457,28 @@ char * bitmap2node_name (bitstr_t *bitmap)
 			continue;
 		hostlist_push(hl, node_record_table_ptr[i].name);
 	}
-	hostlist_uniq(hl);
+	if (sort)
+		hostlist_sort(hl);
 	buf = hostlist_ranged_string_xmalloc(hl);
 	hostlist_destroy(hl);
 
 	return buf;
 }
 
+/*
+ * bitmap2node_name - given a bitmap, build a list of sorted, comma
+ *	separated node names. names may include regular expressions
+ *	(e.g. "lx[01-10]")
+ * IN bitmap - bitmap pointer
+ * RET pointer to node list or NULL on error
+ * globals: node_record_table_ptr - pointer to node table
+ * NOTE: the caller must xfree the memory at node_list when no longer required
+ */
+char * bitmap2node_name (bitstr_t *bitmap)
+{
+	return bitmap2node_name_sortable(bitmap, 1);
+}
+
 /*
  * _list_find_feature - find an entry in the feature list, see list.h for
  *	documentation
@@ -463,6 +498,81 @@ static int _list_find_feature (void *feature_entry, void *key)
 	return 0;
 }
 
+#ifdef HAVE_FRONT_END
+/* Log the contents of a frontend record */
+static void _dump_front_end(slurm_conf_frontend_t *fe_ptr)
+{
+	info("fe name:%s addr:%s port:%u state:%u reason:%s",
+	     fe_ptr->frontends, fe_ptr->addresses,
+	     fe_ptr->port, fe_ptr->node_state, fe_ptr->reason);
+}
+#endif
+
+/*
+ * build_all_frontend_info - get a array of slurm_conf_frontend_t structures
+ *	from the slurm.conf reader, build table, and set values
+ * is_slurmd_context: set to true if run from slurmd
+ * RET 0 if no error, error code otherwise
+ */
+extern int build_all_frontend_info (bool is_slurmd_context)
+{
+	slurm_conf_frontend_t **ptr_array;
+#ifdef HAVE_FRONT_END
+	slurm_conf_frontend_t *fe_single, *fe_line;
+	int i, count, max_rc = SLURM_SUCCESS;
+	bool front_end_debug;
+
+	if (slurm_get_debug_flags() & DEBUG_FLAG_FRONT_END)
+		front_end_debug = true;
+	else
+		front_end_debug = false;
+	count = slurm_conf_frontend_array(&ptr_array);
+	if (count == 0)
+		fatal("No FrontendName information available!");
+
+	for (i = 0; i < count; i++) {
+		hostlist_t hl_name, hl_addr;
+		char *fe_name, *fe_addr;
+
+		fe_line = ptr_array[i];
+		hl_name = hostlist_create(fe_line->frontends);
+		if (hl_name == NULL)
+			fatal("Invalid FrontendName:%s", fe_line->frontends);
+		hl_addr = hostlist_create(fe_line->addresses);
+		if (hl_addr == NULL)
+			fatal("Invalid FrontendAddr:%s", fe_line->addresses);
+		if (hostlist_count(hl_name) != hostlist_count(hl_addr)) {
+			fatal("Inconsistent node count between "
+			      "FrontendName(%s) and FrontendAddr(%s)",
+			      fe_line->frontends, fe_line->addresses);
+		}
+		while ((fe_name = hostlist_shift(hl_name))) {
+			fe_addr = hostlist_shift(hl_addr);
+			fe_single = xmalloc(sizeof(slurm_conf_frontend_t));
+			if (list_append(front_end_list, fe_single) == NULL)
+				fatal("list_append: malloc failure");
+			fe_single->frontends = xstrdup(fe_name);
+			fe_single->addresses = xstrdup(fe_addr);
+			free(fe_name);
+			free(fe_addr);
+			fe_single->port = fe_line->port;
+			if (fe_line->reason && fe_line->reason[0])
+				fe_single->reason = xstrdup(fe_line->reason);
+			fe_single->node_state = fe_line->node_state;
+			if (front_end_debug && !is_slurmd_context)
+				_dump_front_end(fe_single);
+		}
+		hostlist_destroy(hl_addr);
+		hostlist_destroy(hl_name);
+	}
+	return max_rc;
+#else
+	if (slurm_conf_frontend_array(&ptr_array) != 0)
+		fatal("FrontendName information configured!");
+	return SLURM_SUCCESS;
+#endif
+}
+
 /*
  * build_all_nodeline_info - get a array of slurm_conf_node_t structures
  *	from the slurm.conf reader, build table, and set values
@@ -543,7 +653,7 @@ extern void  build_config_feature_list(struct config_record *config_ptr)
 		tmp_str = xmalloc(i);
 		/* Remove white space from feature specification */
 		for (i=0, j=0; config_ptr->feature[i]; i++) {
-			if (!isspace(config_ptr->feature[i]))
+			if (!isspace((int)config_ptr->feature[i]))
 				tmp_str[j++] = config_ptr->feature[i];
 		}
 		if (i != j)
@@ -625,7 +735,7 @@ extern struct node_record *create_node_record (
 	node_ptr->threads = config_ptr->threads;
 	node_ptr->real_memory = config_ptr->real_memory;
 	node_ptr->tmp_disk = config_ptr->tmp_disk;
-	node_ptr->select_nodeinfo = select_g_select_nodeinfo_alloc(NO_VAL);
+	node_ptr->select_nodeinfo = select_g_select_nodeinfo_alloc();
 	xassert (node_ptr->magic = NODE_MAGIC)  /* set value */;
 	return node_ptr;
 }
@@ -704,9 +814,11 @@ extern int init_node_conf (void)
 	if (config_list)	/* delete defunct configuration entries */
 		(void) _delete_config_record ();
 	else {
-		config_list  = list_create (_list_delete_config);
-		feature_list = list_create (_list_delete_feature);
-		if ((config_list == NULL) || (feature_list == NULL))
+		config_list    = list_create (_list_delete_config);
+		feature_list   = list_create (_list_delete_feature);
+		front_end_list = list_create (destroy_frontend);
+		if ((config_list == NULL) || (feature_list == NULL) ||
+		    (front_end_list == NULL))
 			fatal("list_create malloc failure");
 	}
 
@@ -725,6 +837,8 @@ extern void node_fini2 (void)
 		config_list = NULL;
 		list_destroy(feature_list);
 		feature_list = NULL;
+		list_destroy(front_end_list);
+		front_end_list = NULL;
 	}
 
 	node_ptr = node_record_table_ptr;
@@ -802,6 +916,7 @@ extern void purge_node_rec (struct node_record *node_ptr)
 	if (node_ptr->gres_list)
 		list_destroy(node_ptr->gres_list);
 	xfree(node_ptr->name);
+	xfree(node_ptr->node_hostname);
 	xfree(node_ptr->os);
 	xfree(node_ptr->part_pptr);
 	xfree(node_ptr->reason);
@@ -838,7 +953,7 @@ extern void rehash_node (void)
 }
 
 /* Convert a node state string to it's equivalent enum value */
-extern int state_str2int(const char *state_str)
+extern int state_str2int(const char *state_str, char *node_name)
 {
 	int state_val = NO_VAL;
 	int i;
@@ -858,7 +973,7 @@ extern int state_str2int(const char *state_str)
 			state_val = NODE_STATE_IDLE | NODE_STATE_FAIL;
 	}
 	if (state_val == NO_VAL) {
-		error("invalid node state %s", state_str);
+		error("node %s has invalid state %s", node_name, state_str);
 		errno = EINVAL;
 	}
 	return state_val;
diff --git a/src/common/node_conf.h b/src/common/node_conf.h
index 0c572563b..8eb96d5d3 100644
--- a/src/common/node_conf.h
+++ b/src/common/node_conf.h
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -81,6 +81,8 @@ struct config_record {
 };
 extern List config_list;	/* list of config_record entries */
 
+extern List front_end_list;	/* list of slurm_conf_frontend_t entries */
+
 struct features_record {
 	uint32_t magic;		/* magic cookie to test data integrity */
 	char *name;		/* name of a feature */
@@ -91,6 +93,7 @@ extern List feature_list;	/* list of features_record entries */
 struct node_record {
 	uint32_t magic;			/* magic cookie for data integrity */
 	char *name;			/* name of the node. NULL==defunct */
+	char *node_hostname;		/* hostname of the node */
 	uint16_t node_state;		/* enum node_states, ORed with
 					 * NODE_STATE_NO_RESPOND if not
 					 * responding */
@@ -146,6 +149,7 @@ struct node_record {
 #ifdef HAVE_CRAY
 	uint32_t basil_node_id;		/* Cray-XT BASIL node ID,
 					 * no need to save/restore */
+	time_t down_time;		/* When first set to DOWN state */
 #endif	/* HAVE_CRAY */
 	dynamic_plugin_data_t *select_nodeinfo; /* opaque data structure,
 						 * use select_g_get_nodeinfo()
@@ -158,6 +162,18 @@ extern time_t last_node_update;		/* time of last node record update */
 
 
 
+/*
+ * bitmap2node_name_sortable - given a bitmap, build a list of comma
+ *	separated node names. names may include regular expressions
+ *	(e.g. "lx[01-10]")
+ * IN bitmap - bitmap pointer
+ * IN sort   - if true, return the node list in sorted order
+ * RET pointer to node list or NULL on error
+ * globals: node_record_table_ptr - pointer to node table
+ * NOTE: the caller must xfree the memory at node_list when no longer required
+ */
+char * bitmap2node_name_sortable (bitstr_t *bitmap, bool sort);
+
 /*
  * bitmap2node_name - given a bitmap, build a list of comma separated node
  *	names. names may include regular expressions (e.g. "lx[01-10]")
@@ -169,13 +185,21 @@ extern time_t last_node_update;		/* time of last node record update */
 char * bitmap2node_name (bitstr_t *bitmap);
 
 /*
- * _build_all_nodeline_info - get a array of slurm_conf_node_t structures
+ * build_all_nodeline_info - get an array of slurm_conf_node_t structures
  *	from the slurm.conf reader, build table, and set values
  * IN set_bitmap - if true, set node_bitmap in config record (used by slurmd)
  * RET 0 if no error, error code otherwise
  */
 extern int build_all_nodeline_info (bool set_bitmap);
 
+/*
+ * build_all_frontend_info - get an array of slurm_conf_frontend_t structures
+ *	from the slurm.conf reader, build table, and set values
+ * is_slurmd_context: set to true if run from slurmd
+ * RET 0 if no error, error code otherwise
+ */
+extern int build_all_frontend_info (bool is_slurmd_context);
+
 /* Given a config_record with it's bitmap already set, update feature_list */
 extern void  build_config_feature_list (struct config_record *config_ptr);
 
@@ -242,6 +266,6 @@ extern void purge_node_rec (struct node_record *node_ptr);
 extern void rehash_node (void);
 
 /* Convert a node state string to it's equivalent enum value */
-extern int state_str2int(const char *state_str);
+extern int state_str2int(const char *state_str, char *node_name);
 
 #endif /* !_HAVE_NODE_CONF_H */
diff --git a/src/common/node_select.c b/src/common/node_select.c
index 13e2f8d76..e999f8aae 100644
--- a/src/common/node_select.c
+++ b/src/common/node_select.c
@@ -16,7 +16,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -58,6 +58,8 @@
 #include "src/slurmctld/slurmctld.h"
 #include "src/common/node_select.h"
 
+strong_alias(destroy_select_ba_request,	slurm_destroy_select_ba_request);
+
 static int select_context_cnt = -1;
 static int select_context_default = -1;
 /* If there is a new select plugin, list it here */
@@ -73,21 +75,29 @@ static int _select_get_ops(char *select_type,
 {
 	/*
 	 * Must be synchronized with slurm_select_ops_t in node_select.h.
+	 * Also must be synchronized with the other_plugin.[c|h] in
+	 * the select/cray plugin.
 	 */
 	static const char *syms[] = {
 		"plugin_id",
 		"select_p_state_save",
 		"select_p_state_restore",
 		"select_p_job_init",
+		"select_p_node_ranking",
 		"select_p_node_init",
 		"select_p_block_init",
 		"select_p_job_test",
 		"select_p_job_begin",
 		"select_p_job_ready",
+		"select_p_job_expand_allow",
+		"select_p_job_expand",
 		"select_p_job_resized",
+		"select_p_job_signal",
 		"select_p_job_fini",
 		"select_p_job_suspend",
 		"select_p_job_resume",
+		"select_p_step_pick_nodes",
+		"select_p_step_finish",
 		"select_p_pack_select_info",
                 "select_p_select_nodeinfo_pack",
                 "select_p_select_nodeinfo_unpack",
@@ -112,6 +122,10 @@ static int _select_get_ops(char *select_type,
 		"select_p_update_node_state",
 		"select_p_alter_node_cnt",
 		"select_p_reconfigure",
+		"select_p_resv_test",
+		"select_p_ba_init",
+		"select_p_ba_fini",
+		"select_p_ba_get_dims",
 	};
 	int n_syms = sizeof( syms ) / sizeof( char * );
 
@@ -196,6 +210,65 @@ static int _select_context_destroy( slurm_select_context_t *c )
 	return rc;
 }
 
+/**
+ * delete a block request
+ */
+extern void destroy_select_ba_request(void *arg)
+{
+	select_ba_request_t *ba_request = (select_ba_request_t *)arg;
+
+	if (ba_request) {
+		xfree(ba_request->save_name);
+		if (ba_request->elongate_geos)
+			list_destroy(ba_request->elongate_geos);
+
+		xfree(ba_request->blrtsimage);
+		xfree(ba_request->linuximage);
+		xfree(ba_request->mloaderimage);
+		xfree(ba_request->ramdiskimage);
+
+		xfree(ba_request);
+	}
+}
+
+/**
+ * print a block request
+ */
+extern void print_select_ba_request(select_ba_request_t* ba_request)
+{
+	int dim;
+	uint32_t cluster_flags = slurmdb_setup_cluster_flags();
+	uint16_t cluster_dims = slurmdb_setup_cluster_name_dims();
+
+	if (ba_request == NULL){
+		error("print_ba_request Error, request is NULL");
+		return;
+	}
+	debug("  ba_request:");
+	debug("    geometry:\t");
+	for (dim=0; dim<cluster_dims; dim++){
+		debug("%d", ba_request->geometry[dim]);
+	}
+	debug("        size:\t%d", ba_request->size);
+	if (cluster_flags & CLUSTER_FLAG_BGQ) {
+		for (dim=0; dim<cluster_dims; dim++)
+			debug("   conn_type:\t%d", ba_request->conn_type[dim]);
+	} else
+		debug("   conn_type:\t%d", ba_request->conn_type[0]);
+
+	debug("      rotate:\t%d", ba_request->rotate);
+	debug("    elongate:\t%d", ba_request->elongate);
+}
+
+extern int select_char2coord(char coord)
+{
+	if ((coord >= '0') && (coord <= '9'))
+		return (coord - '0');
+	if ((coord >= 'A') && (coord <= 'Z'))
+		return ((coord - 'A') + 10);
+	return -1;
+}
+
 /*
  * Initialize context for node selection plugin
  */
@@ -214,35 +287,50 @@ extern int slurm_select_init(bool only_default)
 		goto done;
 
 	select_type = slurm_get_select_type();
-	if(working_cluster_rec) {
+	if (working_cluster_rec) {
 		/* just ignore warnings here */
 	} else {
 #ifdef HAVE_XCPU
-		if(strcasecmp(select_type, "select/linear")) {
+		if (strcasecmp(select_type, "select/linear")) {
 			error("%s is incompatible with XCPU use", select_type);
 			fatal("Use SelectType=select/linear");
 		}
 #endif
-#ifdef HAVE_BG
-#  ifdef HAVE_BGQ
-		if(strcasecmp(select_type, "select/bgq")) {
-			error("%s is incompatible with BlueGene/Q",
-			      select_type);
-			fatal("Use SelectType=select/bgq");
+		if (!strcasecmp(select_type, "select/linear")) {
+			uint16_t cr_type = slurm_get_select_type_param();
+			if ((cr_type & CR_SOCKET) || (cr_type & CR_CORE) ||
+			    (cr_type & CR_CPU))
+				fatal("Invalid SelectTypeParameter "
+				      "for select/linear");
 		}
-#  else
-		if(strcasecmp(select_type, "select/bluegene")) {
+
+#ifdef HAVE_BG
+		if (strcasecmp(select_type, "select/bluegene")) {
 			error("%s is incompatible with BlueGene", select_type);
 			fatal("Use SelectType=select/bluegene");
 		}
-#  endif
+#else
+		if (!strcasecmp(select_type, "select/bluegene")) {
+			fatal("Requested SelectType=select/bluegene "
+			      "in slurm.conf, but not running on a BG[L|P|Q] "
+			      "system.  If looking to emulate a BG[L|P|Q] "
+			      "system use --enable-bgl-emulation or "
+			      "--enable-bgp-emulation respectively.");
+		}
 #endif
 
 #ifdef HAVE_CRAY
-		if(strcasecmp(select_type, "select/cray")) {
+		if (strcasecmp(select_type, "select/cray")) {
 			error("%s is incompatible with Cray", select_type);
 			fatal("Use SelectType=select/cray");
 		}
+#else
+		if (!strcasecmp(select_type, "select/cray")) {
+			fatal("Requested SelectType=select/cray "
+			      "in slurm.conf, but not running on a Cray "
+			      "system.  If looking to emulate a Cray "
+			      "system use --enable-cray-emulation.");
+		}
 #endif
 	}
 
@@ -286,11 +374,21 @@ extern int slurm_select_init(bool only_default)
 			if (strncmp(e->d_name, "select_", 7))
 				continue;
 
-			len = strlen(e->d_name)-3;
+			len = strlen(e->d_name);
+#if defined(__CYGWIN__)
+			len -= 4;
+#else
+			len -= 3;
+#endif
 			/* Check only shared object files */
-			if (strcmp(e->d_name+len, ".so"))
+			if (strcmp(e->d_name+len,
+#if defined(__CYGWIN__)
+				   ".dll"
+#else
+				   ".so"
+#endif
+				    ))
 				continue;
-
 			/* add one for the / */
 			len++;
 			xassert(len<sizeof(full_name));
@@ -300,10 +398,7 @@ extern int slurm_select_init(bool only_default)
 					    select_context[j].select_type))
 					break;
 			}
-			if (j < select_context_cnt) {
-				error("Duplicate plugin %s ignored",
-				      select_context[j].select_type);
-			} else {
+			if (j >= select_context_cnt) {
 				xrealloc(select_context,
 					 (sizeof(slurm_select_context_t) *
 					  (select_context_cnt + 1)));
@@ -382,6 +477,7 @@ fini:	slurm_mutex_unlock(&select_context_lock);
 	return rc;
 }
 
+/* Get this plugin's sequence number in SLURM's internal tables */
 extern int select_get_plugin_id_pos(uint32_t plugin_id)
 {
 	int i;
@@ -390,15 +486,16 @@ extern int select_get_plugin_id_pos(uint32_t plugin_id)
 		return SLURM_ERROR;
 
 	for (i=0; i<select_context_cnt; i++) {
-		if(*(select_context[i].ops.plugin_id) == plugin_id)
+		if (*(select_context[i].ops.plugin_id) == plugin_id)
 			break;
 	}
-	if(i >= select_context_cnt)
+	if (i >= select_context_cnt)
 		return SLURM_ERROR;
 	return i;
 }
 
-extern int select_get_plugin_id()
+/* Get the plugin ID number. Unique for each select plugin type */
+extern int select_get_plugin_id(void)
 {
 	if (slurm_select_init(0) < 0)
 		return 0;
@@ -446,6 +543,21 @@ extern int select_g_job_init(List job_list)
 		(job_list);
 }
 
+/*
+ * Assign a 'node_rank' value to each of the node_ptr entries.
+ * IN node_ptr - current node data
+ * IN node_cnt - number of node entries
+ * Return true if node ranking was performed, false if not.
+ */
+extern bool select_g_node_ranking(struct node_record *node_ptr, int node_cnt)
+{
+	if (slurm_select_init(0) < 0)
+		return SLURM_ERROR;
+
+	return (*(select_context[select_context_default].ops.node_ranking))
+		(node_ptr, node_cnt);
+}
+
 /*
  * Note re/initialization of node record data structure
  * IN node_ptr - current node data
@@ -539,6 +651,34 @@ extern int select_g_job_ready(struct job_record *job_ptr)
 		(job_ptr);
 }
 
+/*
+ * Test if job expansion is supported
+ */
+extern bool select_g_job_expand_allow(void)
+{
+	if (slurm_select_init(0) < 0)
+		return false;
+
+	return (*(select_context[select_context_default].ops.job_expand_allow))
+		();
+}
+
+/*
+ * Move the resource allocated to one job into that of another job.
+ *	All resources are removed from "from_job_ptr" and moved into
+ *	"to_job_ptr". Also see other_job_resized().
+ * RET: 0 or an error code
+ */
+extern int select_g_job_expand(struct job_record *from_job_ptr,
+			       struct job_record *to_job_ptr)
+{
+	if (slurm_select_init(0) < 0)
+		return -1;
+
+	return (*(select_context[select_context_default].ops.job_expand))
+		(from_job_ptr, to_job_ptr);
+}
+
 /*
  * Modify internal data structures for a job that has changed size
  *	Only support jobs shrinking now.
@@ -554,6 +694,21 @@ extern int select_g_job_resized(struct job_record *job_ptr,
 		(job_ptr, node_ptr);
 }
 
+/*
+ * Pass job-step signal to plugin before signalling any job steps, so that
+ * any signal-dependent actions can be taken.
+ * IN job_ptr - job to be signalled
+ * IN signal  - signal(7) number
+ */
+extern int select_g_job_signal(struct job_record *job_ptr, int signal)
+{
+	if (slurm_select_init(0) < 0)
+		return SLURM_ERROR;
+
+	return (*(select_context[select_context_default].ops.job_signal))
+		(job_ptr, signal);
+}
+
 /*
  * Note termination of job is starting. Executed from slurmctld.
  * IN job_ptr - pointer to job being terminated
@@ -570,29 +725,71 @@ extern int select_g_job_fini(struct job_record *job_ptr)
 /*
  * Suspend a job. Executed from slurmctld.
  * IN job_ptr - pointer to job being suspended
+ * IN indf_susp - set if job is being suspended indefinitely by user
+ *                or admin, otherwise suspended for gang scheduling
  * RET SLURM_SUCCESS or error code
  */
-extern int select_g_job_suspend(struct job_record *job_ptr)
+extern int select_g_job_suspend(struct job_record *job_ptr, bool indf_susp)
 {
 	if (slurm_select_init(0) < 0)
 		return SLURM_ERROR;
 
 	return (*(select_context[select_context_default].ops.job_suspend))
-		(job_ptr);
+		(job_ptr, indf_susp);
 }
 
 /*
  * Resume a job. Executed from slurmctld.
  * IN job_ptr - pointer to job being resumed
+ * IN indf_susp - set if job is being resumed from indefinite suspend by user
+ *                or admin, otherwise resume from gang scheduling
  * RET SLURM_SUCCESS or error code
  */
-extern int select_g_job_resume(struct job_record *job_ptr)
+extern int select_g_job_resume(struct job_record *job_ptr, bool indf_susp)
 {
 	if (slurm_select_init(0) < 0)
 		return SLURM_ERROR;
 
 	return (*(select_context[select_context_default].ops.job_resume))
-		(job_ptr);
+		(job_ptr, indf_susp);
+}
+
+/*
+ * Select the "best" nodes for given job step from those available in
+ * a job allocation.
+ *
+ * IN/OUT job_ptr - pointer to job already allocated and running in a
+ *                  block where the step is to run.
+ *                  sets start_time when the job is expected to start
+ * OUT step_jobinfo - Fill in the resources to be used if not
+ *                    full size of job.
+ * IN node_count  - How many nodes we are looking for.
+ * RET map of slurm nodes to be used for step, NULL on failure
+ */
+extern bitstr_t *select_g_step_pick_nodes(struct job_record *job_ptr,
+					  dynamic_plugin_data_t *step_jobinfo,
+					  uint32_t node_count)
+{
+	if (slurm_select_init(0) < 0)
+		return NULL;
+
+	xassert(step_jobinfo);
+
+	return (*(select_context[select_context_default].ops.step_pick_nodes))
+		(job_ptr, step_jobinfo->data, node_count);
+}
+
+/*
+ * clear what happened in select_g_step_pick_nodes
+ * IN/OUT step_ptr - Flush the resources from the job and step.
+ */
+extern int select_g_step_finish(struct step_record *step_ptr)
+{
+	if (slurm_select_init(0) < 0)
+		return SLURM_ERROR;
+
+	return (*(select_context[select_context_default].ops.step_finish))
+		(step_ptr);
 }
 
 extern int select_g_pack_select_info(time_t last_query_time,
@@ -657,16 +854,21 @@ extern int select_g_select_nodeinfo_unpack(dynamic_plugin_data_t **nodeinfo,
 		}
 	} else
 		nodeinfo_ptr->plugin_id = select_context_default;
+	if ((*(select_context[nodeinfo_ptr->plugin_id].ops.nodeinfo_unpack))
+	   ((select_nodeinfo_t **)&nodeinfo_ptr->data, buffer,
+	    protocol_version) != SLURM_SUCCESS)
+		goto unpack_error;
+
+	return SLURM_SUCCESS;
 
-	return (*(select_context[nodeinfo_ptr->plugin_id].ops.nodeinfo_unpack))
-		((select_nodeinfo_t **)&nodeinfo_ptr->data, buffer,
-		 protocol_version);
 unpack_error:
+	select_g_select_nodeinfo_free(nodeinfo_ptr);
+	*nodeinfo = NULL;
 	error("select_g_select_nodeinfo_unpack: unpack error");
 	return SLURM_ERROR;
 }
 
-extern dynamic_plugin_data_t *select_g_select_nodeinfo_alloc(uint32_t size)
+extern dynamic_plugin_data_t *select_g_select_nodeinfo_alloc(void)
 {
 	dynamic_plugin_data_t *nodeinfo_ptr = NULL;
 	uint32_t plugin_id;
@@ -680,7 +882,7 @@ extern dynamic_plugin_data_t *select_g_select_nodeinfo_alloc(uint32_t size)
 	nodeinfo_ptr = xmalloc(sizeof(dynamic_plugin_data_t));
 	nodeinfo_ptr->plugin_id = plugin_id;
 	nodeinfo_ptr->data = (*(select_context[plugin_id].ops.
-				nodeinfo_alloc))(size);
+				nodeinfo_alloc))();
 	return nodeinfo_ptr;
 }
 
@@ -766,10 +968,11 @@ extern int select_g_select_jobinfo_free(dynamic_plugin_data_t *jobinfo)
 
 	if (slurm_select_init(0) < 0)
 		return SLURM_ERROR;
-	if(jobinfo) {
-		if(jobinfo->data)
+	if (jobinfo) {
+		if (jobinfo->data) {
 			rc = (*(select_context[jobinfo->plugin_id].ops.
 				jobinfo_free))(jobinfo->data);
+		}
 		xfree(jobinfo);
 	}
 	return rc;
@@ -905,9 +1108,13 @@ extern int select_g_select_jobinfo_unpack(dynamic_plugin_data_t **jobinfo,
 	} else
 		jobinfo_ptr->plugin_id = select_context_default;
 
-	return (*(select_context[jobinfo_ptr->plugin_id].ops.jobinfo_unpack))
-		((select_jobinfo_t **)&jobinfo_ptr->data, buffer, 
-		 protocol_version);
+	if ((*(select_context[jobinfo_ptr->plugin_id].ops.jobinfo_unpack))
+		((select_jobinfo_t **)&jobinfo_ptr->data, buffer,
+		 protocol_version) != SLURM_SUCCESS)
+		goto unpack_error;
+
+	return SLURM_SUCCESS;
+
 unpack_error:
 	select_g_select_jobinfo_free(jobinfo_ptr);
 	*jobinfo = NULL;
@@ -1029,13 +1236,13 @@ extern int select_g_update_node_config (int index)
  * IN state  - state to update to
  * RETURN SLURM_SUCCESS on success || SLURM_ERROR else wise
  */
-extern int select_g_update_node_state (int index, uint16_t state)
+extern int select_g_update_node_state (struct node_record *node_ptr)
 {
 	if (slurm_select_init(0) < 0)
 		return SLURM_ERROR;
 
 	return (*(select_context[select_context_default].ops.update_node_state))
-		(index, state);
+		(node_ptr);
 }
 
 /*
@@ -1067,4 +1274,72 @@ extern int select_g_reconfigure (void)
 	return (*(select_context[select_context_default].ops.reconfigure))();
 }
 
+/*
+ * select_g_resv_test - Identify the nodes which "best" satisfy a reservation
+ *	request. "best" is defined as either single set of consecutive nodes
+ *	satisfying the request and leaving the minimum number of unused nodes
+ *	OR the fewest number of consecutive node sets
+ * IN avail_bitmap - nodes available for the reservation
+ * IN node_cnt - count of required nodes
+ * RET - nodes selected for use by the reservation
+ */
+extern bitstr_t * select_g_resv_test(bitstr_t *avail_bitmap, uint32_t node_cnt)
+{
+#if 0
+	/* Wait for Danny to checkin select/bgq logic before using new plugin
+	 * function calls. The select_p_resv_test() function is currently only
+	 * available in select/linear and select/cons_res */
+	if (slurm_select_init(0) < 0)
+		return NULL;
+
+	return (*(select_context[select_context_default].ops.resv_test))
+		(avail_bitmap, node_cnt);
+#else
+	return bit_pick_cnt(avail_bitmap, node_cnt);
+#endif
+}
+
+extern void select_g_ba_init(node_info_msg_t *node_info_ptr, bool sanity_check)
+{
+	uint32_t plugin_id;
+
+	if (slurm_select_init(0) < 0)
+		return;
+
+	if (working_cluster_rec)
+		plugin_id = working_cluster_rec->plugin_id_select;
+	else
+		plugin_id = select_context_default;
+
+	(*(select_context[plugin_id].ops.ba_init))(node_info_ptr, sanity_check);
+}
+
+extern void select_g_ba_fini(void)
+{
+	uint32_t plugin_id;
 
+	if (slurm_select_init(0) < 0)
+		return;
+
+	if (working_cluster_rec)
+		plugin_id = working_cluster_rec->plugin_id_select;
+	else
+		plugin_id = select_context_default;
+
+	(*(select_context[plugin_id].ops.ba_fini))();
+}
+
+extern int *select_g_ba_get_dims(void)
+{
+	uint32_t plugin_id;
+
+	if (slurm_select_init(0) < 0)
+		return NULL;
+
+	if (working_cluster_rec)
+		plugin_id = working_cluster_rec->plugin_id_select;
+	else
+		plugin_id = select_context_default;
+
+	return (*(select_context[plugin_id].ops.ba_get_dims))();
+}
diff --git a/src/common/node_select.h b/src/common/node_select.h
index 5cd33ab23..e55b58b27 100644
--- a/src/common/node_select.h
+++ b/src/common/node_select.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -40,12 +40,20 @@
 #ifndef _NODE_SELECT_H
 #define _NODE_SELECT_H
 
-#include <slurm/slurm.h>
-#include <slurm/slurm_errno.h>
+#ifdef HAVE_CONFIG_H
+#  include "config.h"
+#endif /* HAVE_CONFIG_H */
+
+#include "slurm/slurm.h"
+#include "slurm/slurm_errno.h"
 #include "src/common/list.h"
 #include "src/common/plugrack.h"
 #include "src/slurmctld/slurmctld.h"
 
+/* NO_JOB_RUNNING is used by select/bluegene, select/bgq, smap and sview */
+#define NO_JOB_RUNNING -1
+#define NOT_FROM_CONTROLLER -2
+
 typedef struct {
 	bitstr_t *avail_nodes;      /* usable nodes are set on input, nodes
 				     * not required to satisfy the request
@@ -61,6 +69,56 @@ typedef struct {
 	uint32_t req_nodes;         /* requested (or desired) count of nodes */
 } select_will_run_t;
 
+/*
+ * structure that holds the configuration settings for each request
+ */
+typedef struct {
+	bitstr_t *avail_mp_bitmap;   /* pointer to available mps */
+	char *blrtsimage;            /* BlrtsImage for this block */
+	uint16_t conn_type[HIGHEST_DIMENSIONS]; /* mesh, torus, or small */
+	bool elongate;                 /* whether allow elongation or not */
+	int elongate_count;            /* place in elongate_geos list
+					  we are at */
+	List elongate_geos;            /* used on L and P systems for
+					  geos */
+	void *geo_table;               /* pointer to a list of
+					* pointer to different geos */
+	uint16_t geometry[HIGHEST_DIMENSIONS]; /* size of block in geometry */
+	char *linuximage;              /* LinuxImage for this block */
+	char *mloaderimage;            /* mloaderImage for this block */
+	uint16_t deny_pass;            /* PASSTHROUGH_FOUND is set if there are
+					  passthroughs in the block
+					  created you can deny
+					  passthroughs by setting the
+					  appropriate bits */
+	int procs;                     /* Number of Real processors in
+					  block */
+	char *ramdiskimage;            /* RamDiskImage for this block */
+	bool rotate;                   /* whether allow elongation or not */
+	int rotate_count;              /* number of times rotated */
+	char *save_name;               /* name of midplanes in block */
+	int size;                      /* count of midplanes in block */
+	uint16_t small16;              /* number of blocks using 16 cnodes in
+					* block, only used for small
+					* block creation */
+	uint16_t small32;              /* number of blocks using 32 cnodes in
+					* block, only used for small
+					* block creation */
+	uint16_t small64;              /* number of blocks using 64 cnodes in
+					* block, only used for small
+					* block creation */
+	uint16_t small128;             /* number of blocks using 128 cnodes in
+					* block, only used for small
+					* block creation */
+	uint16_t small256;             /* number of blocks using 256 cnodes in
+					* block, only used for small
+					* block creation */
+	uint16_t start[HIGHEST_DIMENSIONS]; /* where to start creation of
+					    block */
+	int start_req;                 /* state there was a start
+					  request */
+} select_ba_request_t;
+
 /*
  * Local data
  */
@@ -69,6 +127,8 @@ typedef struct slurm_select_ops {
 	int		(*state_save)		(char *dir_name);
 	int		(*state_restore)	(char *dir_name);
 	int		(*job_init)		(List job_list);
+	int		(*node_ranking)		(struct node_record *node_ptr,
+						 int node_cnt);
 	int		(*node_init)		(struct node_record *node_ptr,
 						 int node_cnt);
 	int		(*block_init)		(List block_list);
@@ -82,11 +142,22 @@ typedef struct slurm_select_ops {
 						 List *preemptee_job_list);
 	int		(*job_begin)		(struct job_record *job_ptr);
 	int		(*job_ready)		(struct job_record *job_ptr);
+	bool		(*job_expand_allow)	(void);
+	int		(*job_expand)		(struct job_record *from_job_ptr,
+						 struct job_record *to_job_ptr);
 	int		(*job_resized)		(struct job_record *job_ptr,
 						 struct node_record *node_ptr);
+	int		(*job_signal)		(struct job_record *job_ptr,
+						 int signal);
 	int		(*job_fini)		(struct job_record *job_ptr);
-	int		(*job_suspend)		(struct job_record *job_ptr);
-	int		(*job_resume)		(struct job_record *job_ptr);
+	int		(*job_suspend)		(struct job_record *job_ptr,
+						 bool indf_susp);
+	int		(*job_resume)		(struct job_record *job_ptr,
+						 bool indf_susp);
+	bitstr_t *      (*step_pick_nodes)      (struct job_record *job_ptr,
+						 select_jobinfo_t *step_jobinfo,
+						 uint32_t node_count);
+	int             (*step_finish)          (struct step_record *step_ptr);
 	int		(*pack_select_info)	(time_t last_query_time,
 						 uint16_t show_flags,
 						 Buf *buffer_ptr,
@@ -97,7 +168,7 @@ typedef struct slurm_select_ops {
 	int		(*nodeinfo_unpack)	(select_nodeinfo_t **nodeinfo,
 						 Buf buffer,
 						 uint16_t protocol_version);
-	select_nodeinfo_t *(*nodeinfo_alloc)	(uint32_t size);
+	select_nodeinfo_t *(*nodeinfo_alloc)	(void);
 	int		(*nodeinfo_free)	(select_nodeinfo_t *nodeinfo);
 	int		(*nodeinfo_set_all)	(time_t last_query_time);
 	int		(*nodeinfo_set)		(struct job_record *job_ptr);
@@ -137,10 +208,17 @@ typedef struct slurm_select_ops {
 						 struct job_record *job_ptr,
 						 void *data);
 	int		(*update_node_config)	(int index);
-	int		(*update_node_state)	(int index, uint16_t state);
+	int		(*update_node_state)	(struct node_record *node_ptr);
 	int		(*alter_node_cnt)	(enum select_node_cnt type,
 						 void *data);
 	int		(*reconfigure)		(void);
+	bitstr_t *      (*resv_test)            (bitstr_t *avail_bitmap,
+						 uint32_t node_cnt);
+	void            (*ba_init)              (node_info_msg_t *node_info_ptr,
+						 bool sanity_check);
+	void            (*ba_fini)              (void);
+	int *           (*ba_get_dims)          (void);
+
 } slurm_select_ops_t;
 
 typedef struct slurm_select_context {
@@ -151,17 +229,14 @@ typedef struct slurm_select_context {
 	slurm_select_ops_t ops;
 } slurm_select_context_t;
 
+/* Convert a node coordinate character into its equivalent number:
+ * '0' = 0; '9' = 9; 'A' = 10; etc. */
+extern int select_char2coord(char coord);
+
 /*******************************************\
  * GLOBAL SELECT STATE MANAGEMENT FUNCIONS *
 \*******************************************/
 
-extern int node_select_free_block_info(block_info_t *block_info);
-
-extern void node_select_pack_block_info(block_info_t *block_info, Buf buffer,
-					uint16_t protocol_version);
-extern int node_select_unpack_block_info(block_info_t **block_info, Buf buffer,
-					 uint16_t protocol_version);
-
 /*
  * Initialize context for node selection plugin
  */
@@ -172,8 +247,11 @@ extern int slurm_select_init(bool only_default);
  */
 extern int slurm_select_fini(void);
 
+/* Get this plugin's sequence number in SLURM's internal tables */
 extern int select_get_plugin_id_pos(uint32_t plugin_id);
-extern int select_get_plugin_id();
+
+/* Get the plugin ID number. Unique for each select plugin type */
+extern int select_get_plugin_id(void);
 
 /*
  * Save any global state information
@@ -188,11 +266,9 @@ extern int select_g_state_save(char *dir_name);
  */
 extern int select_g_state_restore(char *dir_name);
 
-/*
- * Note the initialization of job records, issued upon restart of
- * slurmctld and used to synchronize any job state.
- */
-extern int select_g_job_init(List job_list);
+/*********************************\
+ * STATE INITIALIZATION FUNCTIONS *
+\*********************************/
 
 /*
  * Note re/initialization of node record data structure
@@ -208,28 +284,74 @@ extern int select_g_node_init(struct node_record *node_ptr, int node_cnt);
 extern int select_g_block_init(List part_list);
 
 /*
- * Update specific block (usually something has gone wrong)
- * IN block_desc_ptr - information about the block
+ * Note the initialization of job records, issued upon restart of
+ * slurmctld and used to synchronize any job state.
+ * IN job_list - List of SLURM jobs from slurmctld
  */
-extern int select_g_update_block (update_block_msg_t *block_desc_ptr);
+extern int select_g_job_init(List job_list);
+
+/* Note reconfiguration or change in partition configuration */
+extern int select_g_reconfigure(void);
+
+/**************************\
+ * NODE SPECIFIC FUNCTIONS *
+\**************************/
 
 /*
- * Update specific sub nodes (usually something has gone wrong)
- * IN block_desc_ptr - information about the block
+ * Allocate a select plugin node record.
+ *
+ * NOTE: Call select_g_select_nodeinfo_free() to release the memory in the
+ * returned value
  */
-extern int select_g_update_sub_node (update_block_msg_t *block_desc_ptr);
+extern dynamic_plugin_data_t *select_g_select_nodeinfo_alloc(void);
 
 /*
- * Get select data from a plugin
- * IN node_pts  - current node record
- * IN dinfo   - type of data to get from the node record
- *                (see enum select_plugindata_info)
- * IN job_ptr   - pointer to the job that's related to this query (may be NULL)
- * IN/OUT data  - the data to get from node record
+ * Pack a select plugin node record into a buffer.
+ * IN nodeinfo - The node record to pack
+ * IN/OUT buffer - The buffer to pack the record into
+ * IN protocol_version - Version used for packing the record
  */
-extern int select_g_get_info_from_plugin (enum select_plugindata_info dinfo,
-					  struct job_record *job_ptr,
-					  void *data);
+extern int select_g_select_nodeinfo_pack(dynamic_plugin_data_t *nodeinfo,
+					 Buf buffer,
+					 uint16_t protocol_version);
+
+/*
+ * Unpack a select plugin node record from a buffer.
+ * OUT nodeinfo - The unpacked node record
+ * IN/OUT buffer - The buffer to unpack the record from
+ * IN protocol_version - Version used for unpacking the record
+ *
+ * NOTE: Call select_g_select_nodeinfo_free() to release the memory in the
+ * returned value
+ */
+extern int select_g_select_nodeinfo_unpack(dynamic_plugin_data_t **nodeinfo,
+					   Buf buffer,
+					   uint16_t protocol_version);
+
+/* Free the memory allocated for a select plugin node record */
+extern int select_g_select_nodeinfo_free(dynamic_plugin_data_t *nodeinfo);
+
+/* Reset select plugin specific information about a job
+ * IN job_ptr - The updated job */
+extern int select_g_select_nodeinfo_set(struct job_record *job_ptr);
+
+/* Update select plugin information about every node as needed (if changed
+ * since the previous query)
+ * IN last_query_time - Time of the previous update */
+extern int select_g_select_nodeinfo_set_all(time_t last_query_time);
+
+/*
+ * Get information from a select plugin node record
+ * IN nodeinfo - The record to get information from
+ * IN dinfo - The data type to be retrieved
+ * IN state - Node state filter to be applied (i.e. only get information
+ *            about ALLOCATED nodes)
+ * OUT data - The retrieved data
+ */
+extern int select_g_select_nodeinfo_get(dynamic_plugin_data_t *nodeinfo,
+					enum select_nodedata_type dinfo,
+					enum node_states state,
+					void *data);
 
 /*
  * Updated a node configuration. This happens when a node registers with
@@ -239,24 +361,48 @@ extern int select_g_get_info_from_plugin (enum select_plugindata_info dinfo,
  */
 extern int select_g_update_node_config (int index);
 
+/*
+ * Assign a 'node_rank' value to each of the node_ptr entries.
+ * IN node_ptr - current node data
+ * IN node_cnt - number of node entries
+ * Return true if node ranking was performed, false if not.
+ */
+extern bool select_g_node_ranking(struct node_record *node_ptr, int node_cnt);
+
 /*
  * Updated a node state in the plugin, this should happen when a node is
  * drained or put into a down state then changed back.
- * IN index  - index into the node record list
- * IN state  - state to update to
+ * IN node_ptr - Pointer to the node that has been updated
  * RETURN SLURM_SUCCESS on success || SLURM_ERROR else wise
  */
-extern int select_g_update_node_state (int index, uint16_t state);
+extern int select_g_update_node_state (struct node_record *node_ptr);
 
 /*
- * Alter the node count for a job given the type of system we are on
- * IN/OUT job_desc  - current job desc
+ * Alter the node count based upon system architecture (i.e. on Bluegene
+ * systems, one node/midplane is equivalent to 512 compute nodes
+ * IN type - an enum describing how to transform the count
+ * IN/OUT data - The data to be modified
  */
 extern int select_g_alter_node_cnt (enum select_node_cnt type, void *data);
 
+/***************************\
+ * BLOCK SPECIFIC FUNCTIONS *
+\***************************/
+
+/*
+ * Update specific sub nodes (usually something has gone wrong)
+ * IN block_desc_ptr - information about the block
+ */
+extern int select_g_update_sub_node (update_block_msg_t *block_desc_ptr);
+
+/*
+ * Update specific block (usually something has gone wrong)
+ * IN block_desc_ptr - information about the block
+ */
+extern int select_g_update_block (update_block_msg_t *block_desc_ptr);
 
 /******************************************************\
- * JOB-SPECIFIC SELECT CREDENTIAL MANAGEMENT FUNCIONS *
+ * JOB SPECIFIC SELECT CREDENTIAL MANAGEMENT FUNCTIONS *
 \******************************************************/
 
 #define SELECT_MODE_BASE         0x00ff
@@ -295,6 +441,84 @@ extern int select_g_alter_node_cnt (enum select_node_cnt type, void *data);
 	((SELECT_IS_MODE_TEST_ONLY(_X) || SELECT_IS_MODE_WILL_RUN(_X))	\
 	 && SELECT_IS_PREEMPT_SET(_X))
 
+/* allocate storage for a select job credential
+ * RET jobinfo - storage for a select job credential
+ * NOTE: storage must be freed using select_g_free_jobinfo
+ */
+extern dynamic_plugin_data_t *select_g_select_jobinfo_alloc(void);
+
+/* copy a select job credential
+ * IN jobinfo - the select job credential to be copied
+ * RET        - the copy or NULL on failure
+ * NOTE: returned value must be freed using select_g_select_jobinfo_free
+ */
+extern dynamic_plugin_data_t *select_g_select_jobinfo_copy(
+	dynamic_plugin_data_t *jobinfo);
+
+/* free storage previously allocated for a select job credential
+ * IN jobinfo  - the select job credential to be freed
+ * RET         - slurm error code
+ */
+extern int select_g_select_jobinfo_free(dynamic_plugin_data_t *jobinfo);
+
+/* pack a select job credential into a buffer in machine independent form
+ * IN jobinfo  - the select job credential to be saved
+ * OUT buffer  - buffer with select credential appended
+ * IN protocol_version - slurm protocol version of client
+ * RET         - slurm error code
+ */
+extern int select_g_select_jobinfo_pack(dynamic_plugin_data_t *jobinfo,
+					Buf buffer,
+					uint16_t protocol_version);
+
+/* unpack a select job credential from a buffer
+ * OUT jobinfo - the select job credential read
+ * IN  buffer  - buffer with select credential read from current pointer loc
+ * IN protocol_version - slurm protocol version of client
+ * RET         - slurm error code
+ * NOTE: returned value must be freed using select_g_select_jobinfo_free
+ */
+extern int select_g_select_jobinfo_unpack(dynamic_plugin_data_t **jobinfo,
+					  Buf buffer,
+					  uint16_t protocol_version);
+
+/* fill in a previously allocated select job credential
+ * IN/OUT jobinfo  - updated select job credential
+ * IN data_type - type of data to enter into job credential
+ * IN data - the data to enter into job credential
+ */
+extern int select_g_select_jobinfo_set(dynamic_plugin_data_t *jobinfo,
+				       enum select_jobdata_type data_type,
+				       void *data);
+
+/* get data from a select job credential
+ * IN jobinfo  - updated select job credential
+ * IN data_type - type of data to enter into job credential
+ * OUT data - the data to get from job credential, caller must xfree
+ *	data for data_type == SELECT_JOBDATA_PART_ID
+ */
+extern int select_g_select_jobinfo_get(dynamic_plugin_data_t *jobinfo,
+				       enum select_jobdata_type data_type,
+				       void *data);
+
+/* write select job info to a string
+ * IN jobinfo - a select job credential
+ * OUT buf    - location to write job info contents
+ * IN size    - byte size of buf
+ * IN mode    - print mode, see enum select_print_mode
+ * RET        - the string, same as buf
+ */
+extern char *select_g_select_jobinfo_sprint(dynamic_plugin_data_t *jobinfo,
+					    char *buf, size_t size, int mode);
+
+/* write select job info to a newly allocated string
+ * IN jobinfo - a select job credential
+ * IN mode    - print mode, see enum select_print_mode
+ * RET        - the string; caller must xfree() it
+ * (unlike select_g_select_jobinfo_sprint, no caller-supplied buffer is used)
+ */
+extern char *select_g_select_jobinfo_xstrdup(dynamic_plugin_data_t *jobinfo,
+					     int mode);
 
 /*
  * Select the "best" nodes for given job from those available
@@ -335,141 +559,118 @@ extern int select_g_job_begin(struct job_record *job_ptr);
  */
 extern int select_g_job_ready(struct job_record *job_ptr);
 
-/*
- * Modify internal data structures for a job that has changed size
- *	Only support jobs shrinking now.
- * RET: 0 or an error code
- */
-extern int select_g_job_resized(struct job_record *job_ptr,
-				struct node_record *node_ptr);
-
 /*
  * Note termination of job is starting. Executed from slurmctld.
  * IN job_ptr - pointer to job being terminated
  */
 extern int select_g_job_fini(struct job_record *job_ptr);
 
+/*
+ * Pass job-step signal to plugin before signalling any job steps, so that
+ * any signal-dependent actions can be taken.
+ * IN job_ptr - job to be signalled
+ * IN signal  - signal(7) number
+ */
+extern int select_g_job_signal(struct job_record *job_ptr, int signal);
+
 /*
  * Suspend a job. Executed from slurmctld.
  * IN job_ptr - pointer to job being suspended
+ * IN indf_susp - set if job is being suspended indefinitely by user
+ *                or admin, otherwise suspended for gang scheduling
  * RET SLURM_SUCCESS or error code
  */
-extern int select_g_job_suspend(struct job_record *job_ptr);
+extern int select_g_job_suspend(struct job_record *job_ptr, bool indf_susp);
 
 /*
  * Resume a job. Executed from slurmctld.
  * IN job_ptr - pointer to job being resumed
+ * IN indf_susp - set if job is being resumed from indefinite suspend by user
+ *                or admin, otherwise resume from gang scheduling
  * RET SLURM_SUCCESS or error code
  */
-extern int select_g_job_resume(struct job_record *job_ptr);
-
-/* allocate storage for a select job credential
- * RET jobinfo - storage for a select job credential
- * NOTE: storage must be freed using select_g_free_jobinfo
- */
-extern dynamic_plugin_data_t *select_g_select_jobinfo_alloc(void);
+extern int select_g_job_resume(struct job_record *job_ptr, bool indf_susp);
 
-/* free storage previously allocated for a select job credential
- * IN jobinfo  - the select job credential to be freed
- * RET         - slurm error code
+/*
+ * Test if job expansion is supported
  */
-extern int select_g_select_jobinfo_free(dynamic_plugin_data_t *jobinfo);
+extern bool select_g_job_expand_allow(void);
 
-/* fill in a previously allocated select job credential
- * IN/OUT jobinfo  - updated select job credential
- * IN data_type - type of data to enter into job credential
- * IN data - the data to enter into job credential
+/*
+ * Move the resource allocated to one job into that of another job.
+ *	All resources are removed from "from_job_ptr" and moved into
+ *	"to_job_ptr". Also see other_job_resized().
+ * RET: 0 or an error code
  */
-extern int select_g_select_jobinfo_set(dynamic_plugin_data_t *jobinfo,
-				       enum select_jobdata_type data_type,
-				       void *data);
+extern int select_g_job_expand(struct job_record *from_job_ptr,
+			       struct job_record *to_job_ptr);
 
-/* get data from a select job credential
- * IN jobinfo  - updated select job credential
- * IN data_type - type of data to enter into job credential
- * OUT data - the data to get from job credential, caller must xfree
- *	data for data_type == SELECT_JOBDATA_PART_ID
+/*
+ * Modify internal data structures for a job that has changed size
+ *	Only support jobs shrinking now.
+ * RET: 0 or an error code
  */
-extern int select_g_select_jobinfo_get(dynamic_plugin_data_t *jobinfo,
-				       enum select_jobdata_type data_type,
-				       void *data);
+extern int select_g_job_resized(struct job_record *job_ptr,
+				struct node_record *node_ptr);
 
-/* copy a select job credential
- * IN jobinfo - the select job credential to be copied
- * RET        - the copy or NULL on failure
- * NOTE: returned value must be freed using select_g_select_jobinfo_free
- */
-extern dynamic_plugin_data_t *select_g_select_jobinfo_copy(
-	dynamic_plugin_data_t *jobinfo);
+/*******************************************************\
+ * STEP SPECIFIC SELECT CREDENTIAL MANAGEMENT FUNCTIONS *
+\*******************************************************/
 
-/* pack a select job credential into a buffer in machine independent form
- * IN jobinfo  - the select job credential to be saved
- * OUT buffer  - buffer with select credential appended
- * IN protocol_version - slurm protocol version of client
- * RET         - slurm error code
+/*
+ * Select the "best" nodes for given job from those available
+ * IN/OUT job_ptr - pointer to job already allocated and running in a
+ *                  block where the step is to run.
+ *                  sets start_time when job expected to start
+ * OUT step_jobinfo - Fill in the resources to be used if not
+ *                    full size of job.
+ * IN node_count  - How many nodes we are looking for.
+ * RET map of slurm nodes to be used for step, NULL if resources not selected
+ *
+ * NOTE: Most select plugins return NULL and use common code in slurmctld to
+ * select resources for a job step. Only on IBM Bluegene systems does the
+ * select plugin need to select resources and take system topology into
+ * consideration.
  */
-extern int select_g_select_jobinfo_pack(dynamic_plugin_data_t *jobinfo,
-					Buf buffer,
-					uint16_t protocol_version);
-
-/* unpack a select job credential from a buffer
- * OUT jobinfo - the select job credential read
- * IN  buffer  - buffer with select credential read from current pointer loc
- * IN protocol_version - slurm protocol version of client
- * RET         - slurm error code
- * NOTE: returned value must be freed using select_g_select_jobinfo_free
+extern bitstr_t * select_g_step_pick_nodes(struct job_record *job_ptr,
+					   dynamic_plugin_data_t *step_jobinfo,
+					   uint32_t node_count);
+/*
+ * clear what happened in select_g_step_pick_nodes
+ * IN/OUT step_ptr - Flush the resources from the job and step.
  */
-extern int select_g_select_jobinfo_unpack(dynamic_plugin_data_t **jobinfo,
-					  Buf buffer,
-					  uint16_t protocol_version);
+extern int select_g_step_finish(struct step_record *step_ptr);
 
-/* write select job info to a string
- * IN jobinfo - a select job credential
- * OUT buf    - location to write job info contents
- * IN size    - byte size of buf
- * IN mode    - print mode, see enum select_print_mode
- * RET        - the string, same as buf
- */
-extern char *select_g_select_jobinfo_sprint(dynamic_plugin_data_t *jobinfo,
-					    char *buf, size_t size, int mode);
+/*********************************\
+ * ADVANCE RESERVATION FUNCTIONS *
+\*********************************/
 
-/* write select job info to a string
- * IN jobinfo - a select job credential
- * OUT buf    - location to write job info contents
- * IN mode    - print mode, see enum select_print_mode
- * RET        - the string, same as buf
+/*
+ * select_g_resv_test - Identify the nodes which "best" satisfy a reservation
+ *	request. "best" is defined as either single set of consecutive nodes
+ *	satisfying the request and leaving the minimum number of unused nodes
+ *	OR the fewest number of consecutive node sets
+ * IN avail_bitmap - nodes available for the reservation
+ * IN node_cnt - count of required nodes
+ * RET - nodes selected for use by the reservation
  */
-extern char *select_g_select_jobinfo_xstrdup(dynamic_plugin_data_t *jobinfo,
-					     int mode);
+extern bitstr_t * select_g_resv_test(bitstr_t *avail_bitmap, uint32_t node_cnt);
 
-/*******************************************************\
- * NODE-SPECIFIC SELECT CREDENTIAL MANAGEMENT FUNCIONS *
-\*******************************************************/
-
-extern int select_g_select_nodeinfo_pack(
-	dynamic_plugin_data_t *nodeinfo, Buf buffer, uint16_t protocol_version);
-
-extern int select_g_select_nodeinfo_unpack(
-	dynamic_plugin_data_t **nodeinfo, Buf buffer,
-	uint16_t protocol_version);
-
-extern dynamic_plugin_data_t *select_g_select_nodeinfo_alloc(uint32_t size);
-
-extern int select_g_select_nodeinfo_free(dynamic_plugin_data_t *nodeinfo);
-
-extern int select_g_select_nodeinfo_set_all(time_t last_query_time);
-
-extern int select_g_select_nodeinfo_set(struct job_record *job_ptr);
+/*****************************\
+ * GET INFORMATION FUNCTIONS *
+\*****************************/
 
-extern int select_g_select_nodeinfo_get(dynamic_plugin_data_t *nodeinfo,
-					enum select_nodedata_type dinfo,
-					enum node_states state,
-					void *data);
-
-
-/******************************************************\
- * NODE-SELECT PLUGIN SPECIFIC INFORMATION FUNCTIONS  *
-\******************************************************/
+/*
+ * Get select data from a plugin
+ * IN node_pts  - current node record
+ * IN dinfo     - type of data to get from the node record
+ *                (see enum select_plugindata_info)
+ * IN job_ptr   - pointer to the job that's related to this query (may be NULL)
+ * IN/OUT data  - the data to get from node record
+ */
+extern int select_g_get_info_from_plugin (enum select_plugindata_info dinfo,
+					  struct job_record *job_ptr,
+					  void *data);
 
 /* pack node-select plugin specific information into a buffer in
  *	machine independent form
@@ -478,12 +679,31 @@ extern int select_g_select_nodeinfo_get(dynamic_plugin_data_t *nodeinfo,
  * OUT buffer - location to hold the data, consumer must free
  * IN protocol_version - slurm protocol version of client
  * RET - slurm error code
+ *
+ * NOTE: The unpack for this is in common/slurm_protocol_pack.c
  */
 extern int select_g_pack_select_info(time_t last_query_time,
 				     uint16_t show_flags, Buf *buffer,
 				     uint16_t protocol_version);
 
-/* Note reconfiguration or change in partition configuration */
-extern int select_g_reconfigure(void);
+/* Free ba_request value's memory which was allocated by
+ * select_g_pack_select_info() */
+extern void destroy_select_ba_request(void *arg);
+
+/* Logs the ba_request value generated by select_g_pack_select_info() */
+extern void print_select_ba_request(select_ba_request_t* ba_request);
+
+/* Get the number of elements in each dimension of a system
+ * RET - An array of element counts, one element per dimension */
+extern int *select_g_ba_get_dims(void);
+
+/* Construct an internal block allocation table
+ * IN node_info_ptr - Node state information read from slurmctld daemon
+ * IN sanity_check - If set, then verify each node's suffix contains values
+ *                   within the system dimension limits */
+extern void select_g_ba_init(node_info_msg_t *node_info_ptr, bool sanity_check);
+
+/* Free storage allocated by select_g_ba_init() */
+extern void select_g_ba_fini(void);
 
 #endif /*__SELECT_PLUGIN_API_H__*/
diff --git a/src/common/optz.c b/src/common/optz.c
index 79106f0ee..99ddb5654 100644
--- a/src/common/optz.c
+++ b/src/common/optz.c
@@ -6,7 +6,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -40,7 +40,8 @@
 #endif
 
 #include <string.h>
-#include <slurm/slurm_errno.h>
+
+#include "slurm/slurm_errno.h"
 
 #include "src/common/optz.h"
 #include "src/common/xmalloc.h"
diff --git a/src/common/optz.h b/src/common/optz.h
index 37d14278c..1b5379502 100644
--- a/src/common/optz.h
+++ b/src/common/optz.h
@@ -6,7 +6,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/common/pack.c b/src/common/pack.c
index b50ad75a7..fbeda0937 100644
--- a/src/common/pack.c
+++ b/src/common/pack.c
@@ -10,7 +10,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -49,7 +49,8 @@
 #include <string.h>
 #include <time.h>
 #include <inttypes.h>
-#include <slurm/slurm_errno.h>
+
+#include "slurm/slurm_errno.h"
 
 #include "src/common/pack.h"
 #include "src/common/macros.h"
@@ -73,6 +74,8 @@ strong_alias(init_buf,		slurm_init_buf);
 strong_alias(xfer_buf_data,	slurm_xfer_buf_data);
 strong_alias(pack_time,		slurm_pack_time);
 strong_alias(unpack_time,	slurm_unpack_time);
+strong_alias(packdouble,	slurm_packdouble);
+strong_alias(unpackdouble,	slurm_unpackdouble);
 strong_alias(pack64,		slurm_pack64);
 strong_alias(unpack64,		slurm_unpack64);
 strong_alias(pack32,		slurm_pack32);
diff --git a/src/common/pack.h b/src/common/pack.h
index 9636b2d1f..f86338249 100644
--- a/src/common/pack.h
+++ b/src/common/pack.h
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -127,7 +127,6 @@ int	unpackmem_array(char *valp, uint32_t size_valp, Buf buffer);
 } while (0)
 
 #define safe_unpack_time(valp,buf) do {			\
-	assert((valp) != NULL); 			\
 	assert(sizeof(*valp) == sizeof(time_t));	\
 	assert(buf->magic == BUF_MAGIC);		\
         if (unpack_time(valp,buf))			\
@@ -141,7 +140,6 @@ int	unpackmem_array(char *valp, uint32_t size_valp, Buf buffer);
 } while (0)
 
 #define safe_unpackdouble(valp,buf) do {		\
-	assert((valp) != NULL); 			\
 	assert(sizeof(*valp) == sizeof(double));        \
 	assert(buf->magic == BUF_MAGIC);		\
         if (unpackdouble(valp,buf))			\
@@ -155,7 +153,6 @@ int	unpackmem_array(char *valp, uint32_t size_valp, Buf buffer);
 } while (0)
 
 #define safe_unpack64(valp,buf) do {			\
-	assert((valp) != NULL); 			\
 	assert(sizeof(*valp) == sizeof(uint64_t));      \
 	assert(buf->magic == BUF_MAGIC);		\
         if (unpack64(valp,buf))				\
@@ -169,7 +166,6 @@ int	unpackmem_array(char *valp, uint32_t size_valp, Buf buffer);
 } while (0)
 
 #define safe_unpack32(valp,buf) do {			\
-	assert((valp) != NULL); 			\
 	assert(sizeof(*valp) == sizeof(uint32_t));      \
 	assert(buf->magic == BUF_MAGIC);		\
         if (unpack32(valp,buf))				\
@@ -183,7 +179,6 @@ int	unpackmem_array(char *valp, uint32_t size_valp, Buf buffer);
 } while (0)
 
 #define safe_unpack16(valp,buf) do {			\
-	assert((valp) != NULL); 			\
 	assert(sizeof(*valp) == sizeof(uint16_t)); 	\
 	assert(buf->magic == BUF_MAGIC);		\
         if (unpack16(valp,buf))				\
@@ -197,7 +192,6 @@ int	unpackmem_array(char *valp, uint32_t size_valp, Buf buffer);
 } while (0)
 
 #define safe_unpack8(valp,buf) do {			\
-	assert((valp) != NULL); 			\
 	assert(sizeof(*valp) == sizeof(uint8_t)); 	\
 	assert(buf->magic == BUF_MAGIC);		\
         if (unpack8(valp,buf))				\
@@ -211,7 +205,6 @@ int	unpackmem_array(char *valp, uint32_t size_valp, Buf buffer);
 } while (0)
 
 #define safe_unpack16_array(valp,size_valp,buf) do {    \
-        assert(valp != NULL);                           \
         assert(sizeof(*size_valp) == sizeof(uint32_t)); \
         assert(buf->magic == BUF_MAGIC);                \
         if (unpack16_array(valp,size_valp,buf))         \
@@ -219,7 +212,6 @@ int	unpackmem_array(char *valp, uint32_t size_valp, Buf buffer);
 } while (0)
 
 #define safe_unpack32_array(valp,size_valp,buf) do {	\
-	assert(valp != NULL);				\
 	assert(sizeof(*size_valp) == sizeof(uint32_t)); \
 	assert(buf->magic == BUF_MAGIC);		\
 	if (unpack32_array(valp,size_valp,buf))		\
@@ -234,7 +226,6 @@ int	unpackmem_array(char *valp, uint32_t size_valp, Buf buffer);
 } while (0)
 
 #define safe_unpackmem(valp,size_valp,buf) do {		\
-	assert(valp != NULL);		                \
 	assert(sizeof(*size_valp) == sizeof(uint32_t)); \
 	assert(buf->magic == BUF_MAGIC);		\
 	if (unpackmem(valp,size_valp,buf))		\
@@ -242,7 +233,6 @@ int	unpackmem_array(char *valp, uint32_t size_valp, Buf buffer);
 } while (0)
 
 #define safe_unpackmem_ptr(valp,size_valp,buf) do {	\
-	assert(valp != NULL);				\
 	assert(sizeof(*size_valp) == sizeof(uint32_t)); \
 	assert(buf->magic == BUF_MAGIC);		\
 	if (unpackmem_ptr(valp,size_valp,buf))		\
@@ -250,7 +240,6 @@ int	unpackmem_array(char *valp, uint32_t size_valp, Buf buffer);
 } while (0)
 
 #define safe_unpackmem_xmalloc(valp,size_valp,buf) do {	\
-	assert(valp != NULL);				\
 	assert(sizeof(*size_valp) == sizeof(uint32_t)); \
 	assert(buf->magic == BUF_MAGIC);		\
 	if (unpackmem_xmalloc(valp,size_valp,buf))	\
@@ -258,7 +247,6 @@ int	unpackmem_array(char *valp, uint32_t size_valp, Buf buffer);
 } while (0)
 
 #define safe_unpackmem_malloc(valp,size_valp,buf) do {	\
-	assert(valp != NULL);				\
 	assert(sizeof(*size_valp) == sizeof(uint32_t)); \
 	assert(buf->magic == BUF_MAGIC);		\
 	if (unpackmem_malloc(valp,size_valp,buf))	\
@@ -377,8 +365,6 @@ int	unpackmem_array(char *valp, uint32_t size_valp, Buf buffer);
 } while (0)
 
 #define safe_unpackstr_array(valp,size_valp,buf) do {	\
-	assert(valp != NULL);				\
-	assert(size_valp != NULL);			\
 	assert(sizeof(*size_valp) == sizeof(uint32_t)); \
 	assert(buf->magic == BUF_MAGIC);		\
 	if (unpackstr_array(valp,size_valp,buf))	\
diff --git a/src/common/parse_config.c b/src/common/parse_config.c
index 0b821605c..acf7e1424 100644
--- a/src/common/parse_config.c
+++ b/src/common/parse_config.c
@@ -10,7 +10,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -61,7 +61,13 @@
 /* #include "src/common/slurm_rlimits_info.h" */
 #include "src/common/parse_config.h"
 
-#include <slurm/slurm.h>
+#include "slurm/slurm.h"
+
+strong_alias(s_p_get_string,		slurm_s_p_get_string);
+strong_alias(s_p_get_uint32,		slurm_s_p_get_uint32);
+strong_alias(s_p_hashtbl_create,	slurm_s_p_hashtbl_create);
+strong_alias(s_p_hashtbl_destroy,	slurm_s_p_hashtbl_destroy);
+strong_alias(s_p_parse_file,		slurm_s_p_parse_file);
 
 #define BUFFER_SIZE 4096
 
@@ -101,7 +107,7 @@ static int _conf_hashtbl_index(const char *key)
 	for (i = 0; i < 10; i++) {
 		if (key[i] == '\0')
 			break;
-		idx += tolower(key[i]);
+		idx += tolower((int)key[i]);
 	}
 	return idx % CONF_HASH_LEN;
 }
@@ -163,6 +169,25 @@ s_p_hashtbl_t *s_p_hashtbl_create(s_p_options_t options[])
 	return hashtbl;
 }
 
+/* Swap the data in two data structures without changing the linked list
+ * pointers */
+static void _conf_hashtbl_swap_data(s_p_values_t *data_1,
+				    s_p_values_t *data_2)
+{
+	s_p_values_t *next_1, *next_2;
+	s_p_values_t tmp_values;
+
+	next_1 = data_1->next;
+	next_2 = data_2->next;
+
+	memcpy(&tmp_values, data_1, sizeof(s_p_values_t));
+	memcpy(data_1, data_2, sizeof(s_p_values_t));
+	memcpy(data_2, &tmp_values, sizeof(s_p_values_t));
+
+	data_1->next = next_1;
+	data_2->next = next_2;
+}
+
 static void _conf_file_values_free(s_p_values_t *p)
 {
 	int i;
@@ -268,7 +293,7 @@ static int _strip_continuation(char *buf, int len)
 	for (ptr = buf+len-1; ptr >= buf; ptr--) {
 		if (*ptr == '\\')
 			bs++;
-		else if (isspace(*ptr) && bs == 0)
+		else if (isspace((int)*ptr) && bs == 0)
 			continue;
 		else
 			break;
@@ -727,7 +752,7 @@ static int _line_is_space(const char *line)
 	}
 	len = strlen(line);
 	for (i = 0; i < len; i++) {
-		if (!isspace(line[i]))
+		if (!isspace((int)line[i]))
 			return 0;
 	}
 
@@ -767,9 +792,10 @@ int s_p_parse_line(s_p_hashtbl_t *hashtbl, const char *line, char **leftover)
 
 /*
  * Returns 1 if the line is parsed cleanly, and 0 otherwise.
+ * IN ignore_new - if set, do not treat unrecognized input as a fatal error
  */
 static int _parse_next_key(s_p_hashtbl_t *hashtbl,
-			   const char *line, char **leftover)
+			   const char *line, char **leftover, bool ignore_new)
 {
 	char *key, *value;
 	s_p_values_t *p;
@@ -782,6 +808,9 @@ static int _parse_next_key(s_p_hashtbl_t *hashtbl,
 			_handle_keyvalue_match(p, value,
 					       new_leftover, &new_leftover);
 			*leftover = new_leftover;
+		} else if (ignore_new) {
+			debug("Parsing error at unrecognized key: %s", key);
+			*leftover = (char *)line;
 		} else {
 			error("Parsing error at unrecognized key: %s", key);
 			xfree(key);
@@ -805,7 +834,8 @@ static int _parse_next_key(s_p_hashtbl_t *hashtbl,
  * no include directive is found.
  */
 static int _parse_include_directive(s_p_hashtbl_t *hashtbl, uint32_t *hash_val,
-				    const char *line, char **leftover)
+				    const char *line, char **leftover,
+				    bool ignore_new)
 {
 	char *ptr;
 	char *fn_start, *fn_stop;
@@ -814,16 +844,16 @@ static int _parse_include_directive(s_p_hashtbl_t *hashtbl, uint32_t *hash_val,
 	*leftover = NULL;
 	if (strncasecmp("include", line, strlen("include")) == 0) {
 		ptr = (char *)line + strlen("include");
-		if (!isspace(*ptr))
+		if (!isspace((int)*ptr))
 			return 0;
-		while (isspace(*ptr))
+		while (isspace((int)*ptr))
 			ptr++;
 		fn_start = ptr;
-		while (!isspace(*ptr))
+		while (!isspace((int)*ptr))
 			ptr++;
 		fn_stop = *leftover = ptr;
 		filename = xstrndup(fn_start, fn_stop-fn_start);
-		if (s_p_parse_file(hashtbl, hash_val, filename)
+		if (s_p_parse_file(hashtbl, hash_val, filename, ignore_new)
 		    == SLURM_SUCCESS) {
 			xfree(filename);
 			return 1;
@@ -836,7 +866,8 @@ static int _parse_include_directive(s_p_hashtbl_t *hashtbl, uint32_t *hash_val,
 	}
 }
 
-int s_p_parse_file(s_p_hashtbl_t *hashtbl, uint32_t *hash_val, char *filename)
+int s_p_parse_file(s_p_hashtbl_t *hashtbl, uint32_t *hash_val, char *filename,
+		   bool ignore_new)
 {
 	FILE *f;
 	char line[BUFFER_SIZE];
@@ -878,9 +909,9 @@ int s_p_parse_file(s_p_hashtbl_t *hashtbl, uint32_t *hash_val, char *filename)
 		}
 
 		inc_rc = _parse_include_directive(hashtbl, hash_val,
-						  line, &leftover);
+						  line, &leftover, ignore_new);
 		if (inc_rc == 0) {
-			_parse_next_key(hashtbl, line, &leftover);
+			_parse_next_key(hashtbl, line, &leftover, ignore_new);
 		} else if (inc_rc < 0) {
 			error("\"Include\" failed in file %s line %d",
 			      filename, line_number);
@@ -893,10 +924,15 @@ int s_p_parse_file(s_p_hashtbl_t *hashtbl, uint32_t *hash_val, char *filename)
 		if (!_line_is_space(leftover)) {
 			char *ptr = xstrdup(leftover);
 			_strip_cr_nl(ptr);
-			error("Parse error in file %s line %d: \"%s\"",
-			      filename, line_number, ptr);
+			if (ignore_new) {
+				debug("Parse error in file %s line %d: \"%s\"",
+				      filename, line_number, ptr);
+			} else {
+				error("Parse error in file %s line %d: \"%s\"",
+				      filename, line_number, ptr);
+				rc = SLURM_ERROR;
+			}
 			xfree(ptr);
-			rc = SLURM_ERROR;
 		}
 		line_number += merged_lines;
 	}
@@ -905,6 +941,55 @@ int s_p_parse_file(s_p_hashtbl_t *hashtbl, uint32_t *hash_val, char *filename)
 	return rc;
 }
 
+/*
+ * s_p_hashtbl_merge
+ *
+ * Merge the contents of two s_p_hashtbl_t data structures. Anything in
+ * from_hashtbl that does not also appear in to_hashtbl is transferred to it.
+ * This is intended primarily to support multiple lines of DEFAULT configuration
+ * information and preserve the default values while adding new defaults.
+ *
+ * IN from_hashtbl - Source of old data
+ * IN to_hashtbl - Destination for old data
+ */
+void s_p_hashtbl_merge(s_p_hashtbl_t *to_hashtbl, s_p_hashtbl_t *from_hashtbl)
+{
+	int i;
+	s_p_values_t **val_pptr, *val_ptr, *match_ptr;
+
+	if (!to_hashtbl || !from_hashtbl)
+		return;
+
+	for (i = 0; i < CONF_HASH_LEN; i++) {
+		val_pptr = &from_hashtbl[i];
+		val_ptr = from_hashtbl[i];
+		while (val_ptr) {
+			if (val_ptr->data_count == 0) {
+				/* No data in from_hashtbl record to move.
+				 * Skip record */
+				val_pptr = &val_ptr->next;
+				val_ptr = val_ptr->next;
+				continue;
+			}
+			match_ptr = _conf_hashtbl_lookup(to_hashtbl,
+							 val_ptr->key);
+			if (match_ptr) {	/* Found matching key */
+				if (match_ptr->data_count == 0) {
+					_conf_hashtbl_swap_data(val_ptr,
+								match_ptr);
+				}
+				val_pptr = &val_ptr->next;
+				val_ptr = val_ptr->next;
+			} else {	/* No match, move record */
+				*val_pptr = val_ptr->next;
+				val_ptr->next = NULL;
+				_conf_hashtbl_insert(to_hashtbl, val_ptr);
+				val_ptr = *val_pptr;
+			}
+		}
+	}
+}
+
 /*
  * s_p_get_string
  *
diff --git a/src/common/parse_config.h b/src/common/parse_config.h
index 571b9bee1..d4d6d5a94 100644
--- a/src/common/parse_config.h
+++ b/src/common/parse_config.h
@@ -11,7 +11,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -52,7 +52,7 @@
  * looking for code specific to slurm.conf, look in
  * src/common/slurm_conf.[hc].
  *
- * In the parsed file, any amount of white-space is allowed between the the
+ * In the parsed file, any amount of white-space is allowed between the
  * key, equal-sign, and value.  The parser handles comments, line
  * continuations, and escaped characters automatically.  Double-quotes can
  * be used to surround an entire value if white-space is needed within
@@ -132,7 +132,7 @@
  * a "handler" function and a "destroy" function.  The prototypes for each
  * are available below in the typedef of s_p_options_t.
  *
- * The "handler" function is given the the "key" string, "value" string, and a
+ * The "handler" function is given the "key" string, "value" string, and a
  * pointer to the remainder of the "line" on which the key-value pair was found
  * (this is the line after the parser has removed comments and concatenated
  * continued lines).  The handler can transform the value any way it desires,
@@ -182,14 +182,30 @@ void s_p_hashtbl_destroy(s_p_hashtbl_t *hashtbl);
 /* Returns SLURM_SUCCESS if file was opened and parse correctly
  * OUT hash_val - cyclic redundancy check (CRC) character-wise value
  *                of file.
+ * IN ignore_new - do not treat unrecognized keywords as a fatal error,
+ *                 print debug() message and continue
  */
-int s_p_parse_file(s_p_hashtbl_t *hashtbl, uint32_t *hash_val, char *filename);
+int s_p_parse_file(s_p_hashtbl_t *hashtbl, uint32_t *hash_val, char *filename,
+		   bool ignore_new);
 
 /*
  * Returns 1 if the line is parsed cleanly, and 0 otherwise.
  */
 int s_p_parse_line(s_p_hashtbl_t *hashtbl, const char *line, char **leftover);
 
+/*
+ * s_p_hashtbl_merge
+ * 
+ * Merge the contents of two s_p_hashtbl_t data structures. Anything in
+ * from_hashtbl that does not also appear in to_hashtbl is transfered to it.
+ * This is intended primary to support multiple lines of DEFAULT configuration
+ * information and preserve the old default values while adding new defaults.
+ *
+ * IN from_hashtbl - Source of old data
+ * IN to_hashtbl - Destination for old data (if new value not already set)
+ */
+void s_p_hashtbl_merge(s_p_hashtbl_t *to_hashtbl, s_p_hashtbl_t *from_hashtbl);
+
 /*
  * s_p_get_string
  *
diff --git a/src/common/parse_spec.c b/src/common/parse_spec.c
index d07abc120..21a4254aa 100644
--- a/src/common/parse_spec.c
+++ b/src/common/parse_spec.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/common/parse_spec.h b/src/common/parse_spec.h
index 6db55110f..f8a4273f7 100644
--- a/src/common/parse_spec.h
+++ b/src/common/parse_spec.h
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/common/parse_time.c b/src/common/parse_time.c
index 24cddb06e..e134c049f 100644
--- a/src/common/parse_time.c
+++ b/src/common/parse_time.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -52,7 +52,7 @@
 #endif
 #include <ctype.h>
 
-#include <slurm/slurm.h>
+#include "slurm/slurm.h"
 #include "src/common/macros.h"
 
 #define _RUN_STAND_ALONE 0
@@ -92,7 +92,7 @@ static int _get_delta(char *time_str, int *pos, long *delta)
 	for (offset = (*pos) + 1;
 	     ((time_str[offset] != '\0') && (time_str[offset] != '\n'));
 	     offset++) {
-		if (isspace(time_str[offset]))
+		if (isspace((int)time_str[offset]))
 			continue;
 		for (i=0; un[i].name; i++) {
 			if (!strncasecmp((time_str + offset),
@@ -180,7 +180,7 @@ _get_time(char *time_str, int *pos, int *hour, int *minute, int * second)
 	} else
 		sec = 0;
 
-	while (isspace(time_str[offset])) {
+	while (isspace((int)time_str[offset])) {
 		offset++;
 	}
 	if (strncasecmp(time_str+offset, "pm", 2)== 0) {
@@ -357,7 +357,7 @@ extern time_t parse_time(char *time_str, int past)
 
 	for (pos=0; ((time_str[pos] != '\0') && (time_str[pos] != '\n'));
 	     pos++) {
-		if (isblank(time_str[pos]) ||
+		if (isblank((int)time_str[pos]) ||
 		    (time_str[pos] == '-') || (time_str[pos] == 'T'))
 			continue;
 		if (strncasecmp(time_str+pos, "today", 5) == 0) {
@@ -409,7 +409,7 @@ extern time_t parse_time(char *time_str, int past)
 						goto prob;
 					break;
 				}
-				if (isblank(time_str[i]))
+				if (isblank((int)time_str[i]))
 					continue;
 				if ((time_str[i] == '\0')
 				    || (time_str[i] == '\n')) {
@@ -534,8 +534,50 @@ int main(int argc, char *argv[])
 #endif
 
 /*
- * slurm_make_time_str - convert time_t to string with a format of
- *	"month/date hour:min:sec" for use in user command output
+ * Smart date for @epoch, relative to current date.
+ * Maximum output length: 12 characters + '\0'
+ *      19 Jan 2003	(distant past or future)
+ *     Ystday 20:13
+ *         12:26:38	(today)
+ *     Tomorr 03:22
+ *        Sat 02:17	(next Saturday)
+ *     18 Jun 13:14	(non-close past or future)
+ *     012345678901
+ * Uses base-10 YYYYddd numbers to compute date distances.
+ */
+static char *_relative_date_fmt(const struct tm *when)
+{
+	static int todays_date;
+	int distance = 1000 * (when->tm_year + 1900) + when->tm_yday;
+
+	if (!todays_date) {
+		time_t now = time(NULL);
+		struct tm tm;
+
+		localtime_r(&now, &tm);
+		todays_date = 1000 * (tm.tm_year + 1900) + tm.tm_yday;
+	}
+
+	distance -= todays_date;
+	if (distance == -1)			/* yesterday */
+		return "Ystday %H:%M";
+	if (distance == 0)			/* same day */
+		return "%H:%M:%S";
+	if (distance == 1)			/* tomorrow */
+		return "Tomorr %H:%M";
+	if (distance < -365 || distance > 365)	/* far distance */
+		return "%-d %b %Y";
+	if (distance < -1 || distance > 6)	/* medium distance */
+		return "%-d %b %H:%M";
+	return "%a %H:%M";			/* near distance */
+}
+
+/*
+ * slurm_make_time_str - convert time_t to formatted string for user output
+ *
+ * The format depends on the environment variable SLURM_TIME_FORMAT, which may
+ * be set to 'standard' (fallback, same as if not set), 'relative' (format is
+ * relative to today's date and optimized for space), or a strftime(3) string.
  *
  * IN time - a time stamp
  * OUT string - pointer user defined buffer
@@ -551,23 +593,39 @@ slurm_make_time_str (time_t *time, char *string, int size)
 	if ((*time == (time_t) 0) || (*time == (time_t) INFINITE)) {
 		snprintf(string, size, "Unknown");
 	} else {
-#ifdef USE_ISO_8601
-		/* Format YYYY-MM-DDTHH:MM:SS, ISO8601 standard format,
-		 * NOTE: This is expected to break Maui, Moab and LSF
-		 * schedulers management of SLURM. */
-		snprintf(string, size,
-			"%4.4u-%2.2u-%2.2uT%2.2u:%2.2u:%2.2u",
-			(time_tm.tm_year + 1900), (time_tm.tm_mon+1),
-			time_tm.tm_mday, time_tm.tm_hour, time_tm.tm_min,
-			time_tm.tm_sec);
+		static char fmt_buf[32];
+		static const char *display_fmt;
+		static bool use_relative_format;
+
+		if (!display_fmt) {
+			char *fmt = getenv("SLURM_TIME_FORMAT");
+
+#if defined USE_ISO_8601/*
+			 * ISO-8601 Standard Format YYYY-MM-DDTHH:MM:SS
+			 * NOTE: This is expected to break Maui, Moab
+			 *       and LSF schedulers management of SLURM.
+			 */
+			display_fmt = "%FT%T";
 #else
-		/* Format MM/DD-HH:MM:SS */
-		snprintf(string, size,
-			"%2.2u/%2.2u-%2.2u:%2.2u:%2.2u",
-			(time_tm.tm_mon+1), time_tm.tm_mday,
-			time_tm.tm_hour, time_tm.tm_min, time_tm.tm_sec);
-
+			/* Format MM/DD-HH:MM:SS */
+			display_fmt = "%m/%d-%T";
 #endif
+			if ((!fmt) || (!*fmt) || (!strcmp(fmt, "standard"))) {
+				;
+			} else if (strcmp(fmt, "relative") == 0) {
+				use_relative_format = true;
+			} else if ((strchr(fmt, '%')  == NULL) ||
+				   (strlen(fmt) >= sizeof(fmt_buf))) {
+				error("invalid SLURM_TIME_FORMAT = '%s'", fmt);
+			} else {
+				strncpy(fmt_buf, fmt, sizeof(fmt_buf));
+				display_fmt = fmt_buf;
+			}
+		}
+		if (use_relative_format)
+			display_fmt = _relative_date_fmt(&time_tm);
+
+		strftime(string, size, display_fmt, &time_tm);
 	}
 }
 
diff --git a/src/common/parse_time.h b/src/common/parse_time.h
index cd31d81e1..5983ebce4 100644
--- a/src/common/parse_time.h
+++ b/src/common/parse_time.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/common/plugin.c b/src/common/plugin.c
index c71469a7b..bb88fa6dc 100644
--- a/src/common/plugin.c
+++ b/src/common/plugin.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -52,7 +52,7 @@
 #include "src/common/plugin.h"
 #include "src/common/xstring.h"
 #include "src/common/slurm_protocol_api.h"
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm_errno.h"
 
 #  if HAVE_UNISTD_H
 #    include <unistd.h>
@@ -217,9 +217,11 @@ plugin_load_and_link(const char *type_name, int n_syms,
 
 	if (!type_name)
 		return plug;
-
+#if defined(__CYGWIN__)
+	so_name = xstrdup_printf("%s.dll", type_name);
+#else
 	so_name = xstrdup_printf("%s.so", type_name);
-
+#endif
 	while(so_name[i]) {
 		if(so_name[i] == '/')
 			so_name[i] = '_';
diff --git a/src/common/plugin.h b/src/common/plugin.h
index 06f6d44f3..85c161548 100644
--- a/src/common/plugin.h
+++ b/src/common/plugin.h
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -55,7 +55,7 @@
 #  include <inttypes.h>
 #endif /* HAVE_CONFIG_H */
 
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm_errno.h"
 
 /*
  * These symbols are required to be defined in any plugin managed by
diff --git a/src/common/plugrack.c b/src/common/plugrack.c
index 1cc2fd09f..f5676c4b1 100644
--- a/src/common/plugrack.c
+++ b/src/common/plugrack.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/common/plugrack.h b/src/common/plugrack.h
index b0ed3f78a..d6c29e1dc 100644
--- a/src/common/plugrack.h
+++ b/src/common/plugrack.h
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/common/plugstack.c b/src/common/plugstack.c
index 98e66f232..e95898979 100644
--- a/src/common/plugstack.c
+++ b/src/common/plugstack.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -62,7 +62,7 @@
 #include "src/slurmd/slurmstepd/slurmstepd_job.h"
 /*#include "src/srun/srun_job.h"*/
 
-#include <slurm/spank.h>
+#include "slurm/spank.h"
 
 #define REQUIRED "required"
 #define OPTIONAL "optional"
@@ -1036,7 +1036,7 @@ _find_word_boundary(char *str, char *from, char **next)
 	 * Back up past any non-whitespace if we are pointing in
 	 *  the middle of a word.
 	 */
-	while ((p != str) && !isspace (*p))
+	while ((p != str) && !isspace ((int)*p))
 		--p;
 
 	/*
@@ -1047,7 +1047,7 @@ _find_word_boundary(char *str, char *from, char **next)
 	/*
 	 * Now move back to the end of the previous word
 	 */
-	while ((p != str) && isspace (*p))
+	while ((p != str) && isspace ((int)*p))
 		--p;
 
 	if (p == str) {
@@ -1182,7 +1182,7 @@ int spank_print_options(FILE * fp, int left_pad, int width)
 
 static char _canonical_char (char c)
 {
-	if (!isalnum (c))
+	if (!isalnum ((int)c))
 		return '_';
 	else
 		return c;
diff --git a/src/common/plugstack.h b/src/common/plugstack.h
index 5d1d5977a..550621394 100644
--- a/src/common/plugstack.h
+++ b/src/common/plugstack.h
@@ -6,7 +6,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/common/print_fields.c b/src/common/print_fields.c
index 97014a486..91e1bf5ff 100644
--- a/src/common/print_fields.c
+++ b/src/common/print_fields.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/common/print_fields.h b/src/common/print_fields.h
index a6fea8e39..92153e05c 100644
--- a/src/common/print_fields.h
+++ b/src/common/print_fields.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -62,7 +62,7 @@
 #include <time.h>
 #include <unistd.h>
 
-#include <slurm/slurm.h>
+#include "slurm/slurm.h"
 
 #include "src/common/xmalloc.h"
 #include "src/common/xstring.h"
diff --git a/src/common/proc_args.c b/src/common/proc_args.c
index e48d43051..b3f999fe7 100644
--- a/src/common/proc_args.c
+++ b/src/common/proc_args.c
@@ -7,7 +7,7 @@
  *  from existing SLURM source code, particularly src/srun/opt.c
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -141,7 +141,7 @@ task_dist_states_t verify_dist_type(const char *arg, uint32_t *plane_size)
 		} else if (strncasecmp(arg, "block", len) == 0) {
 			result = SLURM_DIST_BLOCK;
 		} else if ((strncasecmp(arg, "arbitrary", len) == 0) ||
-		           (strncasecmp(arg, "hostfile", len) == 0)) {
+			   (strncasecmp(arg, "hostfile", len) == 0)) {
 			result = SLURM_DIST_ARBITRARY;
 		}
 	}
@@ -149,25 +149,10 @@ task_dist_states_t verify_dist_type(const char *arg, uint32_t *plane_size)
 	return result;
 }
 
-/*
- * verify that a connection type in arg is of known form
- * returns the connection_type or -1 if not recognized
- */
-uint16_t verify_conn_type(const char *arg)
+static uint16_t _get_conn_type(char *arg, bool bgp)
 {
 	uint16_t len = strlen(arg);
-	bool no_bgl = 1;
-
-	if(working_cluster_rec) {
-		if(working_cluster_rec->flags & CLUSTER_FLAG_BGL)
-			no_bgl = 0;
-	} else {
-#ifdef HAVE_BGL
-		no_bgl = 0;
-#endif
-	}
-
-	if(!len) {
+	if (!len) {
 		/* no input given */
 		error("no conn-type argument given.");
 		return (uint16_t)NO_VAL;
@@ -177,9 +162,11 @@ uint16_t verify_conn_type(const char *arg)
 		return SELECT_TORUS;
 	else if (!strncasecmp(arg, "NAV", len))
 		return SELECT_NAV;
-	else if (no_bgl) {
-		if(!strncasecmp(arg, "HTC", len)
-		   || !strncasecmp(arg, "HTC_S", len))
+	else if (!strncasecmp(arg, "SMALL", len))
+		return SELECT_SMALL;
+	else if (bgp) {
+		if (!strncasecmp(arg, "HTC", len) ||
+		    !strncasecmp(arg, "HTC_S", len))
 			return SELECT_HTC_S;
 		else if (!strncasecmp(arg, "HTC_D", len))
 			return SELECT_HTC_D;
@@ -188,10 +175,56 @@ uint16_t verify_conn_type(const char *arg)
 		else if (!strncasecmp(arg, "HTC_L", len))
 			return SELECT_HTC_L;
 	}
+
 	error("invalid conn-type argument '%s' ignored.", arg);
 	return (uint16_t)NO_VAL;
 }
 
+/*
+ * verify comma separated list of connection types to array of uint16_t
+ * connection_types or NO_VAL if not recognized
+ */
+extern void verify_conn_type(const char *arg, uint16_t *conn_type)
+{
+	bool got_bgp = 0;
+	int inx = 0;
+	int highest_dims = 1;
+	char *arg_tmp = xstrdup(arg), *tok, *save_ptr = NULL;
+
+	if (working_cluster_rec) {
+		if (working_cluster_rec->flags & CLUSTER_FLAG_BGP)
+			got_bgp = 1;
+		else if (working_cluster_rec->flags & CLUSTER_FLAG_BGQ)
+			highest_dims = 4;
+	} else {
+#ifdef HAVE_BGP
+		got_bgp = 1;
+# elif defined HAVE_BGQ
+		highest_dims = 4;
+#endif
+	}
+
+	tok = strtok_r(arg_tmp, ",", &save_ptr);
+	while (tok) {
+		if (inx >= highest_dims) {
+			error("too many conn-type arguments: %s", arg);
+			break;
+		}
+		conn_type[inx++] = _get_conn_type(tok, got_bgp);
+		tok = strtok_r(NULL, ",", &save_ptr);
+	}
+	if (inx == 0)
+		error("invalid conn-type argument '%s' ignored.", arg);
+	/* Fill the rest in with NO_VALS (use HIGHEST_DIMS here
+	 * instead of highest_dims since that is the size of the
+	 * array. */
+	for ( ; inx < HIGHEST_DIMENSIONS; inx++) {
+		conn_type[inx] = (uint16_t)NO_VAL;
+	}
+
+	xfree(arg_tmp);
+}
+
 /*
  * verify geometry arguments, must have proper count
  * returns -1 on error, 0 otherwise
@@ -297,6 +330,10 @@ _str_to_nodes(const char *num_str, char **leftover)
 		num *= 1024;
 		endptr++;
 	}
+	if (*endptr != '\0' && (*endptr == 'm' || *endptr == 'M')) {
+		num *= (1024 * 1024);
+		endptr++;
+	}
 	*leftover = endptr;
 
 	return (int)num;
@@ -378,7 +415,7 @@ bool verify_node_list(char **node_list_pptr, enum task_dist_states dist,
 	   saying, lay it out this way! */
 	if(dist == SLURM_DIST_ARBITRARY)
 		nodelist = slurm_read_hostfile(*node_list_pptr, task_count);
-        else
+	else
 		nodelist = slurm_read_hostfile(*node_list_pptr, NO_VAL);
 
 	if (!nodelist)
@@ -417,7 +454,7 @@ bool get_resource_arg_range(const char *arg, const char *what, int* min,
 	}
 
 	result = strtol(arg, &p, 10);
-        if (*p == 'k' || *p == 'K') {
+	if (*p == 'k' || *p == 'K') {
 		result *= 1024;
 		p++;
 	} else if(*p == 'm' || *p == 'M') {
@@ -445,7 +482,7 @@ bool get_resource_arg_range(const char *arg, const char *what, int* min,
 		p++;
 
 	result = strtol(p, &p, 10);
-        if ((*p == 'k') || (*p == 'K')) {
+	if ((*p == 'k') || (*p == 'K')) {
 		result *= 1024;
 		p++;
 	} else if(*p == 'm' || *p == 'M') {
@@ -514,7 +551,7 @@ bool verify_socket_core_thread_count(const char *arg, int *min_sockets,
 		} else if (j == 2) {
 			*cpu_bind_type |= CPU_BIND_TO_THREADS;
 		}
-        }
+	}
 	buf[j][i] = '\0';
 
 	ret_val = true;
@@ -544,7 +581,7 @@ bool verify_socket_core_thread_count(const char *arg, int *min_sockets,
  * RET true if valid
  */
 bool verify_hint(const char *arg, int *min_sockets, int *min_cores,
-		 int *min_threads, int *ntasks_per_core, 
+		 int *min_threads, int *ntasks_per_core,
 		 cpu_bind_type_t *cpu_bind_type)
 {
 	char *buf, *p, *tok;
@@ -557,7 +594,7 @@ bool verify_hint(const char *arg, int *min_sockets, int *min_cores,
 	/* change all ',' delimiters not followed by a digit to ';'  */
 	/* simplifies parsing tokens while keeping map/mask together */
 	while (p[0] != '\0') {
-		if ((p[0] == ',') && (!isdigit(p[1])))
+		if ((p[0] == ',') && (!isdigit((int)p[1])))
 			p[0] = ';';
 		p++;
 	}
@@ -574,21 +611,21 @@ bool verify_hint(const char *arg, int *min_sockets, int *min_cores,
 "        help            show this help message\n");
 			return 1;
 		} else if (strcasecmp(tok, "compute_bound") == 0) {
-		        *min_sockets = NO_VAL;
-		        *min_cores   = NO_VAL;
-		        *min_threads = 1;
+			*min_sockets = NO_VAL;
+			*min_cores   = NO_VAL;
+			*min_threads = 1;
 			*cpu_bind_type |= CPU_BIND_TO_CORES;
 		} else if (strcasecmp(tok, "memory_bound") == 0) {
-		        *min_cores   = 1;
-		        *min_threads = 1;
+			*min_cores   = 1;
+			*min_threads = 1;
 			*cpu_bind_type |= CPU_BIND_TO_CORES;
 		} else if (strcasecmp(tok, "multithread") == 0) {
-		        *min_threads = NO_VAL;
+			*min_threads = NO_VAL;
 			*cpu_bind_type |= CPU_BIND_TO_THREADS;
 			if (*ntasks_per_core == NO_VAL)
 				*ntasks_per_core = INFINITE;
 		} else if (strcasecmp(tok, "nomultithread") == 0) {
-		        *min_threads = 1;
+			*min_threads = 1;
 			*cpu_bind_type |= CPU_BIND_TO_THREADS;
 		} else {
 			error("unrecognized --hint argument \"%s\", "
@@ -615,7 +652,7 @@ uint16_t parse_mail_type(const char *arg)
 	else if (strcasecmp(arg, "REQUEUE") == 0)
 		rc = MAIL_JOB_REQUEUE;
 	else if (strcasecmp(arg, "ALL") == 0)
-		rc = MAIL_JOB_BEGIN |  MAIL_JOB_END |  MAIL_JOB_FAIL | 
+		rc = MAIL_JOB_BEGIN |  MAIL_JOB_END |  MAIL_JOB_FAIL |
 		     MAIL_JOB_REQUEUE;
 	else
 		rc = 0;		/* failure */
@@ -690,7 +727,7 @@ search_path(char *cwd, char *cmd, bool check_current_dir, int access_mode)
 	char *path, *fullpath = NULL;
 
 	if (  (cmd[0] == '.' || cmd[0] == '/')
-           && (access(cmd, access_mode) == 0 ) ) {
+	   && (access(cmd, access_mode) == 0 ) ) {
 		if (cmd[0] == '.')
 			xstrfmtcat(fullpath, "%s/", cwd);
 		xstrcat(fullpath, cmd);
@@ -813,7 +850,7 @@ int sig_name2num(char *signal_name)
 			return 0;
 	} else {
 		ptr = (char *)signal_name;
-		while (isspace(*ptr))
+		while (isspace((int)*ptr))
 			ptr++;
 		if (strncasecmp(ptr, "SIG", 3) == 0)
 			ptr += 3;
@@ -823,7 +860,7 @@ int sig_name2num(char *signal_name)
 			if (strncasecmp(ptr, sig_name[i],
 					strlen(sig_name[i])) == 0) {
 				/* found the signal name */
-				if (!xstring_is_whitespace(ptr + 
+				if (!xstring_is_whitespace(ptr +
 							   strlen(sig_name[i])))
 					return 0;
 				sig = sig_num[i];
diff --git a/src/common/proc_args.h b/src/common/proc_args.h
index 392a33b8a..c23195b0d 100644
--- a/src/common/proc_args.h
+++ b/src/common/proc_args.h
@@ -7,7 +7,7 @@
  *  from existing SLURM source code, particularly src/srun/opt.c
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -71,8 +71,11 @@ void print_gres_help(void);
 /* verify the requested distribution type */
 task_dist_states_t verify_dist_type(const char *arg, uint32_t *plane_size);
 
-/* verify the requested connection type */
-uint16_t verify_conn_type(const char *arg);
+/*
+ * verify comma separated list of connection types to array of uint16_t 
+ * connection_types or NO_VAL if not recognized
+ */
+extern void verify_conn_type(const char *arg, uint16_t *conn_type);
 
 /* verify the requested geometry arguments */
 int verify_geometry(const char *arg, uint16_t *geometry);
diff --git a/src/common/read_config.c b/src/common/read_config.c
index 1bafc7fb1..d78e8a5eb 100644
--- a/src/common/read_config.c
+++ b/src/common/read_config.c
@@ -10,7 +10,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -58,24 +58,26 @@
 #include <time.h>
 #include <unistd.h>
 
-#include <slurm/slurm.h>
+#include "slurm/slurm.h"
 
 #include "src/common/hostlist.h"
-#include "src/common/slurm_protocol_defs.h"
-#include "src/common/slurm_protocol_api.h"
 #include "src/common/log.h"
 #include "src/common/macros.h"
+#include "src/common/node_conf.h"
+#include "src/common/parse_config.h"
 #include "src/common/parse_spec.h"
+#include "src/common/parse_time.h"
 #include "src/common/read_config.h"
-#include "src/common/xmalloc.h"
-#include "src/common/xstring.h"
+#include "src/common/slurm_protocol_api.h"
+#include "src/common/slurm_protocol_defs.h"
 #include "src/common/slurm_rlimits_info.h"
-#include "src/common/parse_config.h"
-#include "src/common/parse_time.h"
 #include "src/common/slurm_selecttype_info.h"
-#include "src/common/util-net.h"
-#include "src/common/uid.h"
 #include "src/common/strlcpy.h"
+#include "src/common/uid.h"
+#include "src/common/util-net.h"
+#include "src/common/xmalloc.h"
+#include "src/common/xstring.h"
+
 
 /*
 ** Define slurm-specific aliases for use by plugins, see slurm_xlator.h
@@ -93,6 +95,7 @@ static s_p_hashtbl_t *conf_hashtbl = NULL;
 static slurm_ctl_conf_t *conf_ptr = &slurmctld_conf;
 static bool conf_initialized = false;
 
+static s_p_hashtbl_t *default_frontend_tbl;
 static s_p_hashtbl_t *default_nodename_tbl;
 static s_p_hashtbl_t *default_partition_tbl;
 
@@ -114,14 +117,17 @@ typedef struct names_ll_s {
 	struct names_ll_s *next_alias;
 	struct names_ll_s *next_hostname;
 } names_ll_t;
-bool nodehash_initialized = false;
+static bool nodehash_initialized = false;
 static names_ll_t *host_to_node_hashtbl[NAME_HASH_LEN] = {NULL};
 static names_ll_t *node_to_host_hashtbl[NAME_HASH_LEN] = {NULL};
 
+static void _destroy_nodename(void *ptr);
+static int _parse_frontend(void **dest, slurm_parser_enum_t type,
+			   const char *key, const char *value,
+			   const char *line, char **leftover);
 static int _parse_nodename(void **dest, slurm_parser_enum_t type,
 			   const char *key, const char *value,
 			   const char *line, char **leftover);
-static void _destroy_nodename(void *ptr);
 static bool _is_valid_path(char *path, char *msg);
 static int _parse_partitionname(void **dest, slurm_parser_enum_t type,
 				const char *key, const char *value,
@@ -146,6 +152,7 @@ s_p_options_t slurm_conf_options[] = {
 	{"AccountingStoragePort", S_P_UINT32},
 	{"AccountingStorageType", S_P_STRING},
 	{"AccountingStorageUser", S_P_STRING},
+	{"AccountingStoreJobComment", S_P_BOOLEAN},
 	{"AuthType", S_P_STRING},
 	{"BackupAddr", S_P_STRING},
 	{"BackupController", S_P_STRING},
@@ -199,8 +206,10 @@ s_p_options_t slurm_conf_options[] = {
 	{"Licenses", S_P_STRING},
 	{"MailProg", S_P_STRING},
 	{"MaxJobCount", S_P_UINT32},
+	{"MaxJobId", S_P_UINT32},
 	{"MaxMemPerCPU", S_P_UINT32},
 	{"MaxMemPerNode", S_P_UINT32},
+	{"MaxStepCount", S_P_UINT32},
 	{"MaxTasksPerNode", S_P_UINT16},
 	{"MessageTimeout", S_P_UINT16},
 	{"MinJobAge", S_P_UINT16},
@@ -282,6 +291,7 @@ s_p_options_t slurm_conf_options[] = {
 	{"VSizeFactor", S_P_UINT16},
 	{"WaitTime", S_P_UINT16},
 
+	{"FrontendName", S_P_ARRAY, _parse_frontend, destroy_frontend},
 	{"NodeName", S_P_ARRAY, _parse_nodename, _destroy_nodename},
 	{"PartitionName", S_P_ARRAY, _parse_partitionname,
 	 _destroy_partitionname},
@@ -393,6 +403,86 @@ static void _set_node_prefix(const char *nodenames)
 }
 #endif /* SYSTEM_DIMENSIONS > 1 */
 
+static int _parse_frontend(void **dest, slurm_parser_enum_t type,
+			   const char *key, const char *value,
+			   const char *line, char **leftover)
+{
+	s_p_hashtbl_t *tbl, *dflt;
+	slurm_conf_frontend_t *n;
+	char *node_state = NULL;
+	static s_p_options_t _frontend_options[] = {
+		{"FrontendAddr", S_P_STRING},
+		{"Port", S_P_UINT16},
+		{"Reason", S_P_STRING},
+		{"State", S_P_STRING},
+		{NULL}
+	};
+
+#ifndef HAVE_FRONT_END
+	fatal("Use of FrontendName in slurm.conf without SLURM being "
+	      "configured/built with the --enable-front-end option");
+#endif
+
+	tbl = s_p_hashtbl_create(_frontend_options);
+	s_p_parse_line(tbl, *leftover, leftover);
+	/* s_p_dump_values(tbl, _frontend_options); */
+
+	if (strcasecmp(value, "DEFAULT") == 0) {
+		char *tmp;
+		if (s_p_get_string(&tmp, "FrontendAddr", tbl)) {
+			error("FrontendAddr not allowed with "
+			      "FrontendName=DEFAULT");
+			xfree(tmp);
+			s_p_hashtbl_destroy(tbl);
+			return -1;
+		}
+
+		if (default_frontend_tbl != NULL) {
+			s_p_hashtbl_merge(tbl, default_frontend_tbl);
+			s_p_hashtbl_destroy(default_frontend_tbl);
+		}
+		default_frontend_tbl = tbl;
+
+		return 0;
+	} else {
+		n = xmalloc(sizeof(slurm_conf_frontend_t));
+		dflt = default_frontend_tbl;
+
+		n->frontends = xstrdup(value);
+
+		if (!s_p_get_string(&n->addresses, "FrontendAddr", tbl))
+			n->addresses = xstrdup(n->frontends);
+
+		if (!s_p_get_uint16(&n->port, "Port", tbl) &&
+		    !s_p_get_uint16(&n->port, "Port", dflt)) {
+			/* This gets resolved in slurm_conf_get_port()
+			 * and slurm_conf_get_addr(). For now just
+			 * leave with a value of zero */
+			n->port = 0;
+		}
+
+		if (!s_p_get_string(&n->reason, "Reason", tbl))
+			s_p_get_string(&n->reason, "Reason", dflt);
+
+		if (!s_p_get_string(&node_state, "State", tbl) &&
+		    !s_p_get_string(&node_state, "State", dflt))
+			n->node_state = NODE_STATE_UNKNOWN;
+		else {
+			n->node_state = state_str2int(node_state,
+						      (char *) value);
+			if (n->node_state == (uint16_t) NO_VAL)
+				n->node_state = NODE_STATE_UNKNOWN;
+			xfree(node_state);
+		}
+
+		*dest = (void *)n;
+
+		s_p_hashtbl_destroy(tbl);
+		return 1;
+	}
+
+	/* should not get here */
+}
 
 static int _parse_nodename(void **dest, slurm_parser_enum_t type,
 			   const char *key, const char *value,
@@ -403,6 +493,7 @@ static int _parse_nodename(void **dest, slurm_parser_enum_t type,
 	int computed_procs;
 	static s_p_options_t _nodename_options[] = {
 		{"CoresPerSocket", S_P_UINT16},
+		{"CPUs", S_P_UINT16},
 		{"Feature", S_P_STRING},
 		{"Gres", S_P_STRING},
 		{"NodeAddr", S_P_STRING},
@@ -439,8 +530,10 @@ static int _parse_nodename(void **dest, slurm_parser_enum_t type,
 			return -1;
 		}
 
-		if (default_nodename_tbl != NULL)
+		if (default_nodename_tbl != NULL) {
+			s_p_hashtbl_merge(tbl, default_nodename_tbl);
 			s_p_hashtbl_destroy(default_nodename_tbl);
+		}
 		default_nodename_tbl = tbl;
 
 		return 0;
@@ -484,8 +577,10 @@ static int _parse_nodename(void **dest, slurm_parser_enum_t type,
 			n->port = 0;
 		}
 
-		if (!s_p_get_uint16(&n->cpus, "Procs", tbl)
-		    && !s_p_get_uint16(&n->cpus, "Procs", dflt)) {
+		if (!s_p_get_uint16(&n->cpus, "CPUs",  tbl)  &&
+		    !s_p_get_uint16(&n->cpus, "CPUs",  dflt) &&
+		    !s_p_get_uint16(&n->cpus, "Procs", tbl)  &&
+		    !s_p_get_uint16(&n->cpus, "Procs", dflt)) {
 			n->cpus = 1;
 			no_cpus = true;
 		}
@@ -545,18 +640,18 @@ static int _parse_nodename(void **dest, slurm_parser_enum_t type,
 			n->sockets = 1;
 		}
 
-		if (no_cpus) {		/* infer missing Procs= */
+		if (no_cpus) {		/* infer missing CPUs= */
 			n->cpus = n->sockets * n->cores * n->threads;
 		}
 
-		/* if only Procs= and Sockets= specified check for match */
+		/* if only CPUs= and Sockets= specified check for match */
 		if (!no_cpus    &&
 		    !no_sockets &&
 		    no_cores    &&
 		    no_threads) {
 			if (n->cpus != n->sockets) {
 				n->sockets = n->cpus;
-				error("NodeNames=%s Procs doesn't match "
+				error("NodeNames=%s CPUs doesn't match "
 				      "Sockets, setting Sockets to %d",
 				      n->nodenames, n->sockets);
 			}
@@ -566,9 +661,9 @@ static int _parse_nodename(void **dest, slurm_parser_enum_t type,
 		if ((n->cpus != n->sockets) &&
 		    (n->cpus != n->sockets * n->cores) &&
 		    (n->cpus != computed_procs)) {
-			error("NodeNames=%s Procs=%d doesn't match "
+			error("NodeNames=%s CPUs=%d doesn't match "
 			      "Sockets*CoresPerSocket*ThreadsPerCore (%d), "
-			      "resetting Procs",
+			      "resetting CPUs",
 			      n->nodenames, n->cpus, computed_procs);
 			n->cpus = computed_procs;
 		}
@@ -581,6 +676,35 @@ static int _parse_nodename(void **dest, slurm_parser_enum_t type,
 	/* should not get here */
 }
 
+/* Destroy a front_end record built by slurm_conf_frontend_array() */
+extern void destroy_frontend(void *ptr)
+{
+	slurm_conf_frontend_t *n = (slurm_conf_frontend_t *) ptr;
+	xfree(n->frontends);
+	xfree(n->addresses);
+	xfree(n->reason);
+	xfree(ptr);
+}
+
+/*
+ * list_find_frontend - find an entry in the front_end list, see list.h for
+ *	documentation
+ * IN key - is feature name or NULL for all features
+ * RET 1 if found, 0 otherwise
+ */
+extern int list_find_frontend (void *front_end_entry, void *key)
+{
+	slurm_conf_frontend_t *front_end_ptr;
+
+	if (key == NULL)
+		return 1;
+
+	front_end_ptr = (slurm_conf_frontend_t *) front_end_entry;
+	if (strcmp(front_end_ptr->frontends, (char *) key) == 0)
+		return 1;
+	return 0;
+}
+
 static void _destroy_nodename(void *ptr)
 {
 	slurm_conf_node_t *n = (slurm_conf_node_t *)ptr;
@@ -594,6 +718,55 @@ static void _destroy_nodename(void *ptr)
 	xfree(ptr);
 }
 
+int slurm_conf_frontend_array(slurm_conf_frontend_t **ptr_array[])
+{
+	int count;
+	slurm_conf_frontend_t **ptr;
+
+	if (s_p_get_array((void ***)&ptr, &count, "FrontendName",
+			  conf_hashtbl)) {
+		*ptr_array = ptr;
+		return count;
+	} else {
+#ifdef HAVE_FRONT_END
+		/* No FrontendName in slurm.conf. Take the NodeAddr and
+		 * NodeHostName from the first node's record and use that to
+		 * build an equivalent structure to that constructed when
+		 * FrontendName is configured. This is intended for backward
+		 * compatibility with SLURM version 2.2. */
+		static slurm_conf_frontend_t local_front_end;
+		static slurm_conf_frontend_t *local_front_end_array[2] =
+			{NULL, NULL};
+		static char addresses[1024], hostnames[1024];
+
+		if (local_front_end_array[0] == NULL) {
+			slurm_conf_node_t **node_ptr;
+			int node_count = 0;
+			if (!s_p_get_array((void ***)&node_ptr, &node_count,
+					   "NodeName", conf_hashtbl) ||
+			    (node_count == 0))
+				fatal("No front end nodes configured");
+			strncpy(addresses, node_ptr[0]->addresses,
+				sizeof(addresses));
+			strncpy(hostnames, node_ptr[0]->hostnames,
+				sizeof(hostnames));
+			local_front_end.addresses = addresses;
+			local_front_end.frontends = hostnames;
+			local_front_end.port = node_ptr[0]->port;
+			local_front_end.reason = NULL;
+			local_front_end.node_state = NODE_STATE_UNKNOWN;
+			local_front_end_array[0] = &local_front_end;
+		}
+		*ptr_array = local_front_end_array;
+		return 1;
+#else
+		*ptr_array = NULL;
+		return 0;
+#endif
+	}
+}
+
+
 int slurm_conf_nodename_array(slurm_conf_node_t **ptr_array[])
 {
 	int count;
@@ -608,7 +781,6 @@ int slurm_conf_nodename_array(slurm_conf_node_t **ptr_array[])
 	}
 }
 
-
 static int _parse_partitionname(void **dest, slurm_parser_enum_t type,
 			       const char *key, const char *value,
 			       const char *line, char **leftover)
@@ -617,12 +789,18 @@ static int _parse_partitionname(void **dest, slurm_parser_enum_t type,
 	slurm_conf_partition_t *p;
 	char *tmp = NULL;
 	static s_p_options_t _partition_options[] = {
+		{"AllocNodes", S_P_STRING},
 		{"AllowGroups", S_P_STRING},
 		{"Alternate", S_P_STRING},
+		{"DefMemPerCPU", S_P_UINT32},
+		{"DefMemPerNode", S_P_UINT32},
 		{"Default", S_P_BOOLEAN}, /* YES or NO */
 		{"DefaultTime", S_P_STRING},
 		{"DisableRootJobs", S_P_BOOLEAN}, /* YES or NO */
+		{"GraceTime", S_P_UINT32},
 		{"Hidden", S_P_BOOLEAN}, /* YES or NO */
+		{"MaxMemPerCPU", S_P_UINT32},
+		{"MaxMemPerNode", S_P_UINT32},
 		{"MaxTime", S_P_STRING},
 		{"MaxNodes", S_P_UINT32}, /* INFINITE or a number */
 		{"MinNodes", S_P_UINT32},
@@ -632,7 +810,6 @@ static int _parse_partitionname(void **dest, slurm_parser_enum_t type,
 		{"RootOnly", S_P_BOOLEAN}, /* YES or NO */
 		{"Shared", S_P_STRING}, /* YES, NO, or FORCE */
 		{"State", S_P_STRING}, /* UP, DOWN, INACTIVE or DRAIN */
-		{"AllocNodes", S_P_STRING},
 		{NULL}
 	};
 
@@ -642,8 +819,10 @@ static int _parse_partitionname(void **dest, slurm_parser_enum_t type,
 	/* s_p_dump_values(tbl, _partition_options); */
 
 	if (strcasecmp(value, "DEFAULT") == 0) {
-		if (default_partition_tbl != NULL)
+		if (default_partition_tbl != NULL) {
+			s_p_hashtbl_merge(tbl, default_partition_tbl);
 			s_p_hashtbl_destroy(default_partition_tbl);
+		}
 		default_partition_tbl = tbl;
 
 		return 0;
@@ -677,6 +856,34 @@ static int _parse_partitionname(void **dest, slurm_parser_enum_t type,
 		    && !s_p_get_boolean(&p->default_flag, "Default", dflt))
 			p->default_flag = false;
 
+		if (!s_p_get_uint32(&p->def_mem_per_cpu, "DefMemPerNode",
+				    tbl) &&
+		    !s_p_get_uint32(&p->def_mem_per_cpu, "DefMemPerNode", 
+				    dflt)) {
+			if (s_p_get_uint32(&p->def_mem_per_cpu,
+					   "DefMemPerCPU", tbl) ||
+			    s_p_get_uint32(&p->def_mem_per_cpu,
+					   "DefMemPerCPU", dflt)) {
+				p->def_mem_per_cpu |= MEM_PER_CPU;
+			} else {
+				p->def_mem_per_cpu = 0;
+			}
+		}
+
+		if (!s_p_get_uint32(&p->max_mem_per_cpu, "MaxMemPerNode",
+				    tbl) &&
+		    !s_p_get_uint32(&p->max_mem_per_cpu, "MaxMemPerNode", 
+				    dflt)) {
+			if (s_p_get_uint32(&p->max_mem_per_cpu,
+					   "MaxMemPerCPU", tbl) ||
+			    s_p_get_uint32(&p->max_mem_per_cpu,
+					   "MaxMemPerCPU", dflt)) {
+				p->max_mem_per_cpu |= MEM_PER_CPU;
+			} else {
+				p->max_mem_per_cpu = 0;
+			}
+		}
+
 		if (!s_p_get_boolean((bool *)&p->disable_root_jobs,
 				     "DisableRootJobs", tbl))
 			p->disable_root_jobs = (uint16_t)NO_VAL;
@@ -701,6 +908,10 @@ static int _parse_partitionname(void **dest, slurm_parser_enum_t type,
 			xfree(tmp);
 		}
 
+		if (!s_p_get_uint32(&p->grace_time, "GraceTime", tbl) &&
+		    !s_p_get_uint32(&p->grace_time, "GraceTime", dflt))
+			p->grace_time = 0;
+
 		if (!s_p_get_string(&tmp, "DefaultTime", tbl) &&
 		    !s_p_get_string(&tmp, "DefaultTime", dflt))
 			p->default_time = NO_VAL;
@@ -731,7 +942,7 @@ static int _parse_partitionname(void **dest, slurm_parser_enum_t type,
 		else {
 			int i;
 			for (i=0; p->nodes[i]; i++) {
-				if (isspace(p->nodes[i]))
+				if (isspace((int)p->nodes[i]))
 					p->nodes[i] = ',';
 			}
 		}
@@ -858,7 +1069,7 @@ static int _parse_downnodes(void **dest, slurm_parser_enum_t type,
 			   const char *key, const char *value,
 			   const char *line, char **leftover)
 {
-	s_p_hashtbl_t *tbl, *dflt;
+	s_p_hashtbl_t *tbl;
 	slurm_conf_downnodes_t *n;
 	static s_p_options_t _downnodes_options[] = {
 		{"Reason", S_P_STRING},
@@ -871,8 +1082,6 @@ static int _parse_downnodes(void **dest, slurm_parser_enum_t type,
 	/* s_p_dump_values(tbl, _downnodes_options); */
 
 	n = xmalloc(sizeof(slurm_conf_node_t));
-	dflt = default_nodename_tbl;
-
 	n->nodenames = xstrdup(value);
 
 	if (!s_p_get_string(&n->reason, "Reason", tbl))
@@ -911,7 +1120,7 @@ extern int slurm_conf_downnodes_array(slurm_conf_downnodes_t **ptr_array[])
 	}
 }
 
-static void _free_name_hashtbl()
+static void _free_name_hashtbl(void)
 {
 	int i;
 	names_ll_t *p, *q;
@@ -932,18 +1141,30 @@ static void _free_name_hashtbl()
 	nodehash_initialized = false;
 }
 
-static void _init_name_hashtbl()
+static void _init_name_hashtbl(void)
 {
 	return;
 }
 
-static int _get_hash_idx(const char *s)
+static int _get_hash_idx(const char *name)
 {
-	int i;
+	int index = 0;
+	int j;
+
+	if (name == NULL)
+		return 0;	/* degenerate case */
 
-	i = 0;
-	while (*s) i += (int)*s++;
-	return i % NAME_HASH_LEN;
+	/* Multiply each character by its numerical position in the
+	 * name string to add a bit of entropy, because host names such
+	 * as cluster[0001-1000] can cause excessive index collisions.
+	 */
+	for (j = 1; *name; name++, j++)
+		index += (int)*name * j;
+	index %= NAME_HASH_LEN;
+	if (index < 0)
+		index += NAME_HASH_LEN;
+
+	return index;
 }
 
 static void _push_to_hashtbls(char *alias, char *hostname,
@@ -961,8 +1182,8 @@ static void _push_to_hashtbls(char *alias, char *hostname,
 	/* Ensure only one slurmd configured on each host */
 	p = host_to_node_hashtbl[hostname_idx];
 	while (p) {
-		if (strcmp(p->hostname, hostname)==0) {
-			error("Duplicated NodeHostname %s in the config file",
+		if (strcmp(p->hostname, hostname) == 0) {
+			error("Duplicated NodeHostName %s in the config file",
 			      hostname);
 			return;
 		}
@@ -981,7 +1202,7 @@ static void _push_to_hashtbls(char *alias, char *hostname,
 	}
 
 	/* Create the new data structure and link it into the hash tables */
-	new = (names_ll_t *)xmalloc(sizeof(*new));
+	new = (names_ll_t *)xmalloc(sizeof(names_ll_t));
 	new->alias	= xstrdup(alias);
 	new->hostname	= xstrdup(hostname);
 	new->address	= xstrdup(address);
@@ -1028,8 +1249,9 @@ static int _register_conf_node_aliases(slurm_conf_node_t *node_ptr)
 	char *hostname = NULL;
 	char *address = NULL;
 	int error_code = SLURM_SUCCESS;
+	int address_count, alias_count, hostname_count;
 
-	if (node_ptr->nodenames == NULL || *node_ptr->nodenames == '\0')
+	if ((node_ptr->nodenames == NULL) || (node_ptr->nodenames[0] == '\0'))
 		return -1;
 
 	if ((alias_list = hostlist_create(node_ptr->nodenames)) == NULL) {
@@ -1057,23 +1279,27 @@ static int _register_conf_node_aliases(slurm_conf_node_t *node_ptr)
 #endif
 
 	/* some sanity checks */
+	address_count  = hostlist_count(address_list);
+	alias_count    = hostlist_count(alias_list);
+	hostname_count = hostlist_count(hostname_list);
 #ifdef HAVE_FRONT_END
-	if (hostlist_count(hostname_list) != 1
-	    || hostlist_count(address_list) != 1) {
-		error("Only one hostname and address allowed "
-		      "in FRONT_END mode");
+	if ((hostname_count != alias_count) && (hostname_count != 1)) {
+		error("NodeHostname count must equal that of NodeName "
+		      "records or there must be no more than one");
+		goto cleanup;
+	}
+	if ((address_count != alias_count) && (address_count != 1)) {
+		error("NodeAddr count must equal that of NodeName "
+		      "records or there must be no more than one");
 		goto cleanup;
 	}
-
-	hostname = node_ptr->hostnames;
-	address = node_ptr->addresses;
 #else
-	if (hostlist_count(hostname_list) < hostlist_count(alias_list)) {
+	if (hostname_count < alias_count) {
 		error("At least as many NodeHostname are required "
 		      "as NodeName");
 		goto cleanup;
 	}
-	if (hostlist_count(address_list) < hostlist_count(alias_list)) {
+	if (address_count < alias_count) {
 		error("At least as many NodeAddr are required as NodeName");
 		goto cleanup;
 	}
@@ -1081,22 +1307,29 @@ static int _register_conf_node_aliases(slurm_conf_node_t *node_ptr)
 
 	/* now build the individual node structures */
 	while ((alias = hostlist_shift(alias_list))) {
-#ifndef HAVE_FRONT_END
-		hostname = hostlist_shift(hostname_list);
-		address = hostlist_shift(address_list);
-#endif
-
+		if ((address_count > 1)  || (address == NULL))
+			address = hostlist_shift(address_list);
+		if ((hostname_count > 1) || (hostname == NULL))
+			hostname = hostlist_shift(hostname_list);
 		_push_to_hashtbls(alias, hostname, address, node_ptr->port,
 				  node_ptr->cpus, node_ptr->sockets,
 				  node_ptr->cores, node_ptr->threads);
-
 		free(alias);
-#ifndef HAVE_FRONT_END
-		free(hostname);
-		free(address);
-#endif
-
+		if (address_count > 1) {
+			address_count--;
+			free(address);
+			address = NULL;
+		}
+		if (hostname_count > 1) {
+			hostname_count--;
+			free(hostname);
+			hostname = NULL;
+		}
 	}
+	if (address)
+		free(address);
+	if (hostname)
+		free(hostname);
 
 	/* free allocated storage */
 cleanup:
@@ -1109,30 +1342,79 @@ cleanup:
 	return error_code;
 }
 
+static int _register_front_ends(slurm_conf_frontend_t *front_end_ptr)
+{
+	hostlist_t hostname_list = NULL;
+	hostlist_t address_list = NULL;
+	char *hostname = NULL;
+	char *address = NULL;
+	int error_code = SLURM_SUCCESS;
+
+	if ((front_end_ptr->frontends == NULL) ||
+	    (front_end_ptr->frontends[0] == '\0'))
+		return -1;
+
+	if ((hostname_list = hostlist_create(front_end_ptr->frontends))
+	     == NULL) {
+		error("Unable to create FrontendNames list from %s",
+		      front_end_ptr->frontends);
+		error_code = errno;
+		goto cleanup;
+	}
+	if ((address_list = hostlist_create(front_end_ptr->addresses))
+	     == NULL) {
+		error("Unable to create FrontendAddr list from %s",
+		      front_end_ptr->addresses);
+		error_code = errno;
+		goto cleanup;
+	}
+	if (hostlist_count(address_list) != hostlist_count(hostname_list)) {
+		error("Node count mismatch between FrontendNames and "
+		      "FrontendAddr");
+		goto cleanup;
+	}
+
+	while ((hostname = hostlist_shift(hostname_list))) {
+		address = hostlist_shift(address_list);
+
+		_push_to_hashtbls(hostname, hostname, address,
+				  front_end_ptr->port, 1, 1, 1, 1);
+		free(hostname);
+		free(address);
+	}
+
+	/* free allocated storage */
+cleanup:
+	if (hostname_list)
+		hostlist_destroy(hostname_list);
+	if (address_list)
+		hostlist_destroy(address_list);
+	return error_code;
+}
+
 static void _init_slurmd_nodehash(void)
 {
 	slurm_conf_node_t **ptr_array;
-	int count;
-	int i;
+	slurm_conf_frontend_t **ptr_front_end;
+	int count, i;
 
 	if (nodehash_initialized)
 		return;
 	else
 		nodehash_initialized = true;
 
-	if(!conf_initialized) {
+	if (!conf_initialized) {
 		_init_slurm_conf(NULL);
 		conf_initialized = true;
 	}
 
 	count = slurm_conf_nodename_array(&ptr_array);
-	if (count == 0) {
-		return;
-	}
-
-	for (i = 0; i < count; i++) {
+	for (i = 0; i < count; i++)
 		_register_conf_node_aliases(ptr_array[i]);
-	}
+
+	count = slurm_conf_frontend_array(&ptr_front_end);
+	for (i = 0; i < count; i++)
+		_register_front_ends(ptr_front_end[i]);
 }
 
 /*
@@ -1179,25 +1461,43 @@ extern char *slurm_conf_get_hostname(const char *node_name)
  */
 extern char *slurm_conf_get_nodename(const char *node_hostname)
 {
+	char *alias = NULL;
 	int idx;
 	names_ll_t *p;
-
+#ifdef HAVE_FRONT_END
+	slurm_conf_frontend_t *front_end_ptr = NULL;
+ 
+ 	slurm_conf_lock();
+	if (!front_end_list) {
+		debug("front_end_list is NULL");
+	} else {
+		front_end_ptr = list_find_first(front_end_list,
+						list_find_frontend,
+						(char *) node_hostname);
+		if (front_end_ptr) {
+			alias = xstrdup(front_end_ptr->frontends);
+			slurm_conf_unlock();
+			return alias;
+		}
+	}
+#else
 	slurm_conf_lock();
+#endif
+
 	_init_slurmd_nodehash();
 	idx = _get_hash_idx(node_hostname);
 
 	p = host_to_node_hashtbl[idx];
 	while (p) {
 		if (strcmp(p->hostname, node_hostname) == 0) {
-			char *alias = xstrdup(p->alias);
-			slurm_conf_unlock();
-			return alias;
+			alias = xstrdup(p->alias);
+			break;
 		}
 		p = p->next_hostname;
 	}
 	slurm_conf_unlock();
 
-	return NULL;
+	return alias;
 }
 
 /*
@@ -1578,7 +1878,7 @@ init_slurm_conf (slurm_ctl_conf_t *ctl_conf_ptr)
 	xfree (ctl_conf_ptr->epilog);
 	ctl_conf_ptr->epilog_msg_time		= (uint32_t) NO_VAL;
 	ctl_conf_ptr->fast_schedule		= (uint16_t) NO_VAL;
-	ctl_conf_ptr->first_job_id		= (uint32_t) NO_VAL;
+	ctl_conf_ptr->first_job_id		= NO_VAL;
 	ctl_conf_ptr->get_env_timeout		= 0;
 	xfree(ctl_conf_ptr->gres_plugins);
 	ctl_conf_ptr->group_info		= (uint16_t) NO_VAL;
@@ -1602,8 +1902,10 @@ init_slurm_conf (slurm_ctl_conf_t *ctl_conf_ptr)
 	ctl_conf_ptr->kill_wait			= (uint16_t) NO_VAL;
 	xfree (ctl_conf_ptr->licenses);
 	xfree (ctl_conf_ptr->mail_prog);
-	ctl_conf_ptr->max_job_cnt		= (uint16_t) NO_VAL;
+	ctl_conf_ptr->max_job_cnt		= (uint32_t) NO_VAL;
+	ctl_conf_ptr->max_job_id		= NO_VAL;
 	ctl_conf_ptr->max_mem_per_cpu           = 0;
+	ctl_conf_ptr->max_step_cnt		= (uint32_t) NO_VAL;
 	ctl_conf_ptr->min_job_age		= (uint16_t) NO_VAL;
 	xfree (ctl_conf_ptr->mpi_default);
 	xfree (ctl_conf_ptr->mpi_params);
@@ -1701,8 +2003,8 @@ static void _init_slurm_conf(const char *file_name)
 
 	/* init hash to 0 */
 	conf_ptr->hash_val = 0;
-	if(s_p_parse_file(conf_hashtbl, &conf_ptr->hash_val, name)
-	   == SLURM_ERROR)
+	if (s_p_parse_file(conf_hashtbl, &conf_ptr->hash_val, name, false)
+	    == SLURM_ERROR)
 		fatal("something wrong with opening/reading conf file");
 	/* s_p_dump_values(conf_hashtbl, slurm_conf_options); */
 	_validate_and_set_defaults(conf_ptr, conf_hashtbl);
@@ -1714,6 +2016,10 @@ static void
 _destroy_slurm_conf(void)
 {
 	s_p_hashtbl_destroy(conf_hashtbl);
+	if (default_frontend_tbl != NULL) {
+		s_p_hashtbl_destroy(default_frontend_tbl);
+		default_frontend_tbl = NULL;
+	}
 	if (default_nodename_tbl != NULL) {
 		s_p_hashtbl_destroy(default_nodename_tbl);
 		default_nodename_tbl = NULL;
@@ -1916,7 +2222,7 @@ _validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 		int i;
 		for (i = 0; conf->cluster_name[i] != '\0'; i++)
 			conf->cluster_name[i] =
-				(char)tolower(conf->cluster_name[i]);
+				(char)tolower((int)conf->cluster_name[i]);
 	}
 
 	if (!s_p_get_uint16(&conf->complete_wait, "CompleteWait", hashtbl))
@@ -1961,6 +2267,11 @@ _validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 	if (s_p_get_uint32(&conf->max_job_cnt, "MaxJobCount", hashtbl) &&
 	    (conf->max_job_cnt < 1))
 		fatal("MaxJobCount=%u, No jobs permitted", conf->max_job_cnt);
+	if (s_p_get_uint32(&conf->max_step_cnt, "MaxStepCount", hashtbl) &&
+	    (conf->max_step_cnt < 1)) {
+		fatal("MaxStepCount=%u, No steps permitted",
+		      conf->max_step_cnt);
+	}
 
 	if (!s_p_get_string(&conf->authtype, "AuthType", hashtbl))
 		conf->authtype = xstrdup(DEFAULT_AUTH_TYPE);
@@ -2026,6 +2337,8 @@ _validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 
 	if (!s_p_get_uint32(&conf->first_job_id, "FirstJobId", hashtbl))
 		conf->first_job_id = DEFAULT_FIRST_JOB_ID;
+	if (!s_p_get_uint32(&conf->max_job_id, "MaxJobId", hashtbl))
+		conf->max_job_id = DEFAULT_MAX_JOB_ID;
 
 	s_p_get_string(&conf->gres_plugins, "GresTypes", hashtbl);
 
@@ -2149,6 +2462,9 @@ _validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 	if (!s_p_get_uint32(&conf->max_job_cnt, "MaxJobCount", hashtbl))
 		conf->max_job_cnt = DEFAULT_MAX_JOB_COUNT;
 
+	if (!s_p_get_uint32(&conf->max_job_id, "MaxJobId", hashtbl))
+		conf->max_job_id = DEFAULT_MAX_JOB_ID;
+
 	if (s_p_get_uint32(&conf->max_mem_per_cpu,
 			   "MaxMemPerCPU", hashtbl)) {
 		conf->max_mem_per_cpu |= MEM_PER_CPU;
@@ -2157,6 +2473,9 @@ _validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 		conf->max_mem_per_cpu = DEFAULT_MAX_MEM_PER_CPU;
 	}
 
+	if (!s_p_get_uint32(&conf->max_step_cnt, "MaxStepCount", hashtbl))
+		conf->max_step_cnt = DEFAULT_MAX_STEP_COUNT;
+
 	if (!s_p_get_uint16(&conf->max_tasks_per_node, "MaxTasksPerNode",
 			    hashtbl)) {
 		conf->max_tasks_per_node = DEFAULT_MAX_TASKS_PER_NODE;
@@ -2272,6 +2591,12 @@ _validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 			conf->accounting_storage_pass =
 				xstrdup(default_storage_pass);
 	}
+	if (s_p_get_boolean(&truth, "AccountingStoreJobComment", hashtbl)
+	    && !truth)
+		conf->acctng_store_job_comment = 0;
+	else
+		conf->acctng_store_job_comment = 1;
+
 	if (!s_p_get_uint32(&conf->accounting_storage_port,
 			    "AccountingStoragePort", hashtbl)) {
 		if(default_storage_port)
@@ -2445,6 +2770,11 @@ _validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 			conf->proctrack_type =
 				xstrdup(DEFAULT_PROCTRACK_TYPE);
 	}
+#ifdef HAVE_REAL_CRAY
+	if (strcmp(conf->proctrack_type, "proctrack/sgi_job"))
+		fatal("On Cray ProctrackType=proctrack/sgi_job is required to "
+		      "ensure collision-free tracking of ALPS reservations");
+#endif
 	if ((!strcmp(conf->switch_type, "switch/elan"))
 	    && (!strcmp(conf->proctrack_type,"proctrack/linuxproc")))
 		fatal("proctrack/linuxproc is incompatible with switch/elan");
@@ -2498,6 +2828,10 @@ _validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 
 	if (!s_p_get_uint16(&conf->ret2service, "ReturnToService", hashtbl))
 		conf->ret2service = DEFAULT_RETURN_TO_SERVICE;
+#ifdef HAVE_CRAY
+	if (conf->ret2service > 1)
+		fatal("ReturnToService > 1 is not supported on Cray");
+#endif
 
 	s_p_get_uint16(&conf->resv_over_run, "ResvOverRun", hashtbl);
 
@@ -2528,6 +2862,10 @@ _validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 	if (!s_p_get_uint16(&conf->sched_time_slice, "SchedulerTimeSlice",
 	    hashtbl))
 		conf->sched_time_slice = DEFAULT_SCHED_TIME_SLICE;
+	else if (conf->sched_time_slice < 5) {
+		error("SchedulerTimeSlice must be at least 5 seconds");
+		conf->sched_time_slice = DEFAULT_SCHED_TIME_SLICE;
+	}
 
 	if (!s_p_get_string(&conf->schedtype, "SchedulerType", hashtbl))
 		conf->schedtype = xstrdup(DEFAULT_SCHEDTYPE);
@@ -2581,6 +2919,17 @@ _validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 			conf->slurm_user_id = my_uid;
 		}
 	}
+#ifdef HAVE_REAL_CRAY
+	/*
+	 * This requirement derives from Cray ALPS:
+	 * - ALPS reservations can only be created by the job owner or root
+	 *   (confirmation may be done by other non-privileged users);
+	 * - freeing a reservation always requires root privileges.
+	 */
+	if (conf->slurm_user_id != 0)
+		fatal("Cray requires SlurmUser=root (default), but have '%s'.",
+			conf->slurm_user_name);
+#endif
 
 	if (!s_p_get_string( &conf->slurmd_user_name, "SlurmdUser", hashtbl)) {
 		conf->slurmd_user_name = xstrdup("root");
@@ -2758,7 +3107,7 @@ _validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 	if (s_p_get_uint16(&conf->tree_width, "TreeWidth", hashtbl)) {
 		if (conf->tree_width == 0) {
 			error("TreeWidth=0 is invalid");
-			conf->tree_width = DEFAULT_TREE_WIDTH; /* default? */
+			conf->tree_width = DEFAULT_TREE_WIDTH;
 		}
 	} else {
 		conf->tree_width = DEFAULT_TREE_WIDTH;
@@ -2851,6 +3200,11 @@ extern char * debug_flags2str(uint32_t debug_flags)
 			xstrcat(rc, ",");
 		xstrcat(rc, "CPU_Bind");
 	}
+	if (debug_flags & DEBUG_FLAG_FRONT_END) {
+		if (rc)
+			xstrcat(rc, ",");
+		xstrcat(rc, "FrontEnd");
+	}
 	if (debug_flags & DEBUG_FLAG_GANG) {
 		if (rc)
 			xstrcat(rc, ",");
@@ -2928,6 +3282,8 @@ extern uint32_t debug_str2flags(char *debug_flags)
 			rc |= DEBUG_FLAG_BG_WIRES;
 		else if (strcasecmp(tok, "CPU_Bind") == 0)
 			rc |= DEBUG_FLAG_CPU_BIND;
+		else if (strcasecmp(tok, "FrontEnd") == 0)
+			rc |= DEBUG_FLAG_FRONT_END;
 		else if (strcasecmp(tok, "Gang") == 0)
 			rc |= DEBUG_FLAG_GANG;
 		else if (strcasecmp(tok, "Gres") == 0)
diff --git a/src/common/read_config.h b/src/common/read_config.h
index aff22aa52..05fc4fc03 100644
--- a/src/common/read_config.h
+++ b/src/common/read_config.h
@@ -11,7 +11,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -43,6 +43,7 @@
 #ifndef _READ_CONFIG_H
 #define _READ_CONFIG_H
 
+#include "src/common/list.h"
 #include "src/common/slurm_protocol_defs.h"
 #include "src/common/slurm_protocol_socket_common.h"
 #include "src/common/parse_config.h"
@@ -86,6 +87,8 @@ extern char *default_plugstack;
 #define DEFAULT_KILL_WAIT           30
 #define DEFAULT_MAIL_PROG           "/bin/mail"
 #define DEFAULT_MAX_JOB_COUNT       10000
+#define DEFAULT_MAX_JOB_ID          0xffff0000
+#define DEFAULT_MAX_STEP_COUNT      40000
 #define DEFAULT_MEM_PER_CPU         0
 #define DEFAULT_MAX_MEM_PER_CPU     0
 #define DEFAULT_MIN_JOB_AGE         300
@@ -96,7 +99,11 @@ extern char *default_plugstack;
 #  define DEFAULT_PROCTRACK_TYPE    "proctrack/aix"
 #else
 #  define DEFAULT_CHECKPOINT_TYPE   "checkpoint/none"
-#  define DEFAULT_PROCTRACK_TYPE    "proctrack/pgid"
+#  if defined HAVE_REAL_CRAY/* ALPS requires cluster-unique job container IDs */
+#    define DEFAULT_PROCTRACK_TYPE    "proctrack/sgi_job"
+#  else
+#    define DEFAULT_PROCTRACK_TYPE    "proctrack/pgid"
+#  endif
 #endif
 #define DEFAULT_PREEMPT_TYPE        "preempt/none"
 #define DEFAULT_PRIORITY_DECAY      604800 /* 7 days */
@@ -112,17 +119,11 @@ extern char *default_plugstack;
 #define DEFAULT_SCHED_TIME_SLICE    30
 #define DEFAULT_SCHEDTYPE           "sched/builtin"
 #ifdef HAVE_BG	/* Blue Gene specific default configuration parameters */
-#  ifdef HAVE_BGQ
-#     define DEFAULT_SELECT_TYPE       "select/bgq"
-#  else
-#     define DEFAULT_SELECT_TYPE       "select/bluegene"
-#  endif
+#  define DEFAULT_SELECT_TYPE       "select/bluegene"
+#elif defined HAVE_CRAY
+#  define DEFAULT_SELECT_TYPE       "select/cray"
 #else
-#  ifdef HAVE_CRAY /* Cray specific default configuration parameters */
-#     define DEFAULT_SELECT_TYPE       "select/cray"
-#  else
-#     define DEFAULT_SELECT_TYPE       "select/linear"
-#  endif
+#  define DEFAULT_SELECT_TYPE       "select/linear"
 #endif
 #define DEFAULT_SLURMCTLD_PIDFILE   "/var/run/slurmctld.pid"
 #define DEFAULT_SLURMCTLD_TIMEOUT   120
@@ -141,16 +142,26 @@ extern char *default_plugstack;
 #define DEFAULT_SWITCH_TYPE         "switch/none"
 #define DEFAULT_TASK_PLUGIN         "task/none"
 #define DEFAULT_TMP_FS              "/tmp"
-#ifdef HAVE_3D
+#if defined HAVE_3D && !defined HAVE_CRAY
 #  define DEFAULT_TOPOLOGY_PLUGIN     "topology/3d_torus"
 #else
 #  define DEFAULT_TOPOLOGY_PLUGIN     "topology/none"
 #endif
 #define DEFAULT_WAIT_TIME           0
-#define DEFAULT_TREE_WIDTH          50
+#  define DEFAULT_TREE_WIDTH        50
 #define DEFAULT_UNKILLABLE_TIMEOUT  60 /* seconds */
 #define DEFAULT_MAX_TASKS_PER_NODE  128
 
+typedef struct slurm_conf_frontend {
+	char *frontends;		/* frontend node name */
+	char *addresses;		/* frontend node address */
+	uint16_t port;			/* frontend specific port */
+	char *reason;			/* reason for down frontend node */
+	uint16_t node_state;		/* enum node_states, ORed with
+					 * NODE_STATE_NO_RESPOND if not
+					 * responding */
+} slurm_conf_frontend_t;
+
 typedef struct slurm_conf_node {
 	char *nodenames;
 	char *hostnames;
@@ -177,15 +188,17 @@ typedef struct slurm_conf_partition {
 	char *allow_groups;	/* comma delimited list of groups,
 				 * NULL indicates all */
 	char *alternate;	/* name of alternate partition */
+	uint32_t def_mem_per_cpu; /* default MB memory per allocated CPU */
 	bool default_flag;	/* Set if default partition */
 	uint32_t default_time;	/* minutes or INFINITE */
 	uint16_t disable_root_jobs; /* if set then user root can't run
 				     * jobs if NO_VAL use global
 				     * default */
-
+	uint32_t grace_time;	/* default grace time for partition */
 	bool     hidden_flag;	/* 1 if hidden by default */
 	uint16_t max_share;	/* number of jobs to gang schedule */
 	uint32_t max_time;	/* minutes or INFINITE */
+	uint32_t max_mem_per_cpu; /* maximum MB memory per allocated CPU */
 	uint32_t max_nodes;	/* per job or INFINITE */
 	uint32_t min_nodes;	/* per job */
 	char	*name;		/* name of the partition */
@@ -210,6 +223,17 @@ typedef struct {
 	char *value;
 } config_key_pair_t;
 
+/* Destroy a front_end record built by slurm_conf_frontend_array() */
+extern void destroy_frontend(void *ptr);
+
+/*
+ * list_find_frontend - find an entry in the front_end list, see list.h for
+ *	documentation
+ * IN key - is feature name or NULL for all features
+ * RET 1 if found, 0 otherwise
+ */
+extern int list_find_frontend (void *front_end_entry, void *key);
+
 /*
  * slurm_conf_init - load the slurm configuration from the a file.
  * IN file_name - name of the slurm configuration file to be read
@@ -257,6 +281,15 @@ extern slurm_ctl_conf_t *slurm_conf_lock(void);
 
 extern void slurm_conf_unlock(void);
 
+
+/*
+ * Set "ptr_array" with the pointer to an array of pointers to
+ * slurm_conf_frontend_t structures.
+ *
+ * Return value is the length of the array.
+ */
+extern int slurm_conf_frontend_array(slurm_conf_frontend_t **ptr_array[]);
+
 /*
  * Set "ptr_array" with the pointer to an array of pointers to
  * slurm_conf_node_t structures.
diff --git a/src/common/safeopen.c b/src/common/safeopen.c
index a6c06373d..8a18c3504 100644
--- a/src/common/safeopen.c
+++ b/src/common/safeopen.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/common/safeopen.h b/src/common/safeopen.h
index 81e70eba2..5c93cd1b2 100644
--- a/src/common/safeopen.h
+++ b/src/common/safeopen.h
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/common/slurm_accounting_storage.c b/src/common/slurm_accounting_storage.c
index 37cc143ba..937a9fcd7 100644
--- a/src/common/slurm_accounting_storage.c
+++ b/src/common/slurm_accounting_storage.c
@@ -10,7 +10,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -166,6 +166,9 @@ typedef struct slurm_acct_storage_ops {
 	int  (*cluster_cpus)      (void *db_conn, char *cluster_nodes,
 				   uint32_t cpus, time_t event_time);
 	int  (*register_ctld)      (void *db_conn, uint16_t port);
+	int  (*register_disconn_ctld)(void *db_conn, char *control_host);
+	int  (*fini_ctld)          (void *db_conn,
+				    slurmdb_cluster_rec_t *cluster_rec);
 	int  (*job_start)          (void *db_conn, struct job_record *job_ptr);
 	int  (*job_complete)       (void *db_conn,
 				    struct job_record *job_ptr);
@@ -263,6 +266,8 @@ static slurm_acct_storage_ops_t * _acct_storage_get_ops(
 		"clusteracct_storage_p_node_up",
 		"clusteracct_storage_p_cluster_cpus",
 		"clusteracct_storage_p_register_ctld",
+		"clusteracct_storage_p_register_disconn_ctld",
+		"clusteracct_storage_p_fini_ctld",
 		"jobacct_storage_p_job_start",
 		"jobacct_storage_p_job_complete",
 		"jobacct_storage_p_step_start",
@@ -817,19 +822,39 @@ extern int clusteracct_storage_g_node_up(void *db_conn,
 
 	/* on some systems we need to make sure we don't say something
 	   is completely up if there are cpus in an error state */
-	if(node_ptr->select_nodeinfo) {
+	if (node_ptr->select_nodeinfo) {
 		uint16_t err_cpus = 0;
+		static uint32_t node_scaling = 0;
+		static uint16_t cpu_cnt = 1;
+
+		if (!node_scaling) {
+			select_g_alter_node_cnt(SELECT_GET_NODE_SCALING,
+						&node_scaling);
+			select_g_alter_node_cnt(SELECT_GET_NODE_CPU_CNT,
+						&cpu_cnt);
+			if (!node_scaling)
+				node_scaling = 1;
+		}
+
 		select_g_select_nodeinfo_get(node_ptr->select_nodeinfo,
 					     SELECT_NODEDATA_SUBCNT,
 					     NODE_STATE_ERROR,
 					     &err_cpus);
-		if(err_cpus) {
+		if (err_cpus) {
 			char *reason = "Setting partial node down.";
 			struct node_record send_node;
 			struct config_record config_rec;
-			uint16_t cpu_cnt = 0;
-			select_g_alter_node_cnt(SELECT_GET_NODE_CPU_CNT,
-						&cpu_cnt);
+
+			if (!node_ptr->reason) {
+				if (err_cpus == node_scaling)
+					reason = "Setting node down.";
+				node_ptr->reason = xstrdup(reason);
+				node_ptr->reason_time = event_time;
+				node_ptr->reason_uid =
+					slurm_get_slurm_user_id();
+			} else
+				reason = node_ptr->reason;
+
 			err_cpus *= cpu_cnt;
 			memset(&send_node, 0, sizeof(struct node_record));
 			memset(&config_rec, 0, sizeof(struct config_record));
@@ -843,7 +868,15 @@ extern int clusteracct_storage_g_node_up(void *db_conn,
 			return (*(g_acct_storage_context->ops.node_down))
 				(db_conn, &send_node,
 				 event_time, reason, slurm_get_slurm_user_id());
+		} else {
+			xfree(node_ptr->reason);
+			node_ptr->reason_time = 0;
+			node_ptr->reason_uid = NO_VAL;
 		}
+	} else {
+		xfree(node_ptr->reason);
+		node_ptr->reason_time = 0;
+		node_ptr->reason_uid = NO_VAL;
 	}
 
  	return (*(g_acct_storage_context->ops.node_up))
@@ -867,16 +900,32 @@ extern int clusteracct_storage_g_register_ctld(void *db_conn, uint16_t port)
 {
 	if (slurm_acct_storage_init(NULL) < 0)
 		return SLURM_ERROR;
- 	return (*(g_acct_storage_context->ops.register_ctld))
-		(db_conn, port);
+ 	return (*(g_acct_storage_context->ops.register_ctld))(db_conn, port);
+}
+
+extern int clusteracct_storage_g_register_disconn_ctld(
+	void *db_conn, char *control_host)
+{
+	if (slurm_acct_storage_init(NULL) < 0)
+		return SLURM_ERROR;
+	return (*(g_acct_storage_context->ops.register_disconn_ctld))
+		(db_conn, control_host);
+}
+
+extern int clusteracct_storage_g_fini_ctld(void *db_conn,
+					   slurmdb_cluster_rec_t *cluster_rec)
+{
+	if (slurm_acct_storage_init(NULL) < 0)
+		return SLURM_ERROR;
+ 	return (*(g_acct_storage_context->ops.fini_ctld))(db_conn, cluster_rec);
 }
 
 /*
  * load into the storage information about a job,
  * typically when it begins execution, but possibly earlier
  */
-extern int jobacct_storage_g_job_start (void *db_conn,
-					struct job_record *job_ptr)
+extern int jobacct_storage_g_job_start(void *db_conn,
+				       struct job_record *job_ptr)
 {
 	if (slurm_acct_storage_init(NULL) < 0)
 		return SLURM_ERROR;
@@ -901,8 +950,8 @@ extern int jobacct_storage_g_job_start (void *db_conn,
 /*
  * load into the storage the end of a job
  */
-extern int jobacct_storage_g_job_complete  (void *db_conn,
-					    struct job_record *job_ptr)
+extern int jobacct_storage_g_job_complete(void *db_conn,
+					  struct job_record *job_ptr)
 {
 	if (slurm_acct_storage_init(NULL) < 0)
 		return SLURM_ERROR;
@@ -912,8 +961,8 @@ extern int jobacct_storage_g_job_complete  (void *db_conn,
 /*
  * load into the storage the start of a job step
  */
-extern int jobacct_storage_g_step_start (void *db_conn,
-					 struct step_record *step_ptr)
+extern int jobacct_storage_g_step_start(void *db_conn,
+					struct step_record *step_ptr)
 {
 	if (slurm_acct_storage_init(NULL) < 0)
 		return SLURM_ERROR;
@@ -923,8 +972,8 @@ extern int jobacct_storage_g_step_start (void *db_conn,
 /*
  * load into the storage the end of a job step
  */
-extern int jobacct_storage_g_step_complete (void *db_conn,
-					    struct step_record *step_ptr)
+extern int jobacct_storage_g_step_complete(void *db_conn,
+					   struct step_record *step_ptr)
 {
 	if (slurm_acct_storage_init(NULL) < 0)
 		return SLURM_ERROR;
@@ -935,8 +984,8 @@ extern int jobacct_storage_g_step_complete (void *db_conn,
 /*
  * load into the storage a suspention of a job
  */
-extern int jobacct_storage_g_job_suspend (void *db_conn,
-					  struct job_record *job_ptr)
+extern int jobacct_storage_g_job_suspend(void *db_conn,
+					 struct job_record *job_ptr)
 {
 	if (slurm_acct_storage_init(NULL) < 0)
 		return SLURM_ERROR;
diff --git a/src/common/slurm_accounting_storage.h b/src/common/slurm_accounting_storage.h
index ca573b4e1..2b9bd7916 100644
--- a/src/common/slurm_accounting_storage.h
+++ b/src/common/slurm_accounting_storage.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -45,8 +45,8 @@
 #include "src/common/slurmdb_defs.h"
 #include "src/common/slurmdb_pack.h"
 
-#include <slurm/slurm.h>
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm.h"
+#include "slurm/slurm_errno.h"
 #include <sys/types.h>
 #include <pwd.h>
 
@@ -451,36 +451,40 @@ extern int clusteracct_storage_g_cluster_cpus(void *db_conn,
 					      time_t event_time);
 
 extern int clusteracct_storage_g_register_ctld(void *db_conn, uint16_t port);
+extern int clusteracct_storage_g_register_disconn_ctld(
+	void *db_conn, char *control_host);
+extern int clusteracct_storage_g_fini_ctld(void *db_conn,
+					   slurmdb_cluster_rec_t *cluster_rec);
 
 /*
  * load into the storage the start of a job
  */
-extern int jobacct_storage_g_job_start (void *db_conn,
-					struct job_record *job_ptr);
+extern int jobacct_storage_g_job_start(void *db_conn,
+				       struct job_record *job_ptr);
 
 /*
  * load into the storage the end of a job
  */
-extern int jobacct_storage_g_job_complete (void *db_conn,
-					   struct job_record *job_ptr);
+extern int jobacct_storage_g_job_complete(void *db_conn,
+					  struct job_record *job_ptr);
 
 /*
  * load into the storage the start of a job step
  */
-extern int jobacct_storage_g_step_start (void *db_conn,
-					 struct step_record *step_ptr);
+extern int jobacct_storage_g_step_start(void *db_conn,
+					struct step_record *step_ptr);
 
 /*
  * load into the storage the end of a job step
  */
-extern int jobacct_storage_g_step_complete (void *db_conn,
-					    struct step_record *step_ptr);
+extern int jobacct_storage_g_step_complete(void *db_conn,
+					   struct step_record *step_ptr);
 
 /*
  * load into the storage a suspention of a job
  */
-extern int jobacct_storage_g_job_suspend (void *db_conn,
-					  struct job_record *job_ptr);
+extern int jobacct_storage_g_job_suspend(void *db_conn,
+					 struct job_record *job_ptr);
 
 /*
  * get info from the storage
diff --git a/src/common/slurm_auth.c b/src/common/slurm_auth.c
index a24f26da8..264c605a7 100644
--- a/src/common/slurm_auth.c
+++ b/src/common/slurm_auth.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -310,8 +310,7 @@ _slurm_auth_context_destroy( slurm_auth_context_t c )
         return rc;
 }
 
-inline int
-slurm_auth_init( char *auth_type )
+extern int slurm_auth_init( char *auth_type )
 {
         int retval = SLURM_SUCCESS;
 	char *auth_type_local = NULL;
diff --git a/src/common/slurm_auth.h b/src/common/slurm_auth.h
index 55c024dff..2f5fea3d5 100644
--- a/src/common/slurm_auth.h
+++ b/src/common/slurm_auth.h
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/common/slurm_cred.c b/src/common/slurm_cred.c
index 50ed08c8d..79c6dcb00 100644
--- a/src/common/slurm_cred.c
+++ b/src/common/slurm_cred.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -41,8 +41,6 @@
 #  include "config.h"
 #endif
 
-#include <slurm/slurm_errno.h>
-
 #include <fcntl.h>
 #include <stdarg.h>
 #include <stdlib.h>
@@ -52,6 +50,7 @@
 #  include <pthread.h>
 #endif /* WITH_PTHREADS */
 
+#include "slurm/slurm_errno.h"
 #include "src/common/bitstring.h"
 #include "src/common/gres.h"
 #include "src/common/io_hdr.h"
@@ -820,8 +819,7 @@ void slurm_cred_free_args(slurm_cred_arg_t *arg)
 	xfree(arg->sockets_per_node);
 }
 
-int
-slurm_cred_get_args(slurm_cred_t *cred, slurm_cred_arg_t *arg)
+int slurm_cred_get_args(slurm_cred_t *cred, slurm_cred_arg_t *arg)
 {
 	xassert(cred != NULL);
 	xassert(arg  != NULL);
diff --git a/src/common/slurm_cred.h b/src/common/slurm_cred.h
index 9d175e69a..e3469d923 100644
--- a/src/common/slurm_cred.h
+++ b/src/common/slurm_cred.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/common/slurm_errno.c b/src/common/slurm_errno.c
index 1ffe20847..df158b94b 100644
--- a/src/common/slurm_errno.c
+++ b/src/common/slurm_errno.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -53,7 +53,7 @@
 #include <stdio.h>
 #include <string.h>
 
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm_errno.h"
 
 #include "src/common/slurm_jobcomp.h"
 #include "src/common/switch.h"
@@ -245,7 +245,7 @@ static slurm_errtab_t slurm_errtab[] = {
 	{ ESLURM_RESERVATION_OVERLAP,
 	  "Requested reservation overlaps with another reservation"	},
 	{ ESLURM_PORTS_BUSY,
-	  "Requires ports are in use"				},
+	  "Required ports are in use"				},
 	{ ESLURM_PORTS_INVALID,
 	  "Requires more ports than can be reserved"		},
 	{ ESLURM_PROLOG_RUNNING,
@@ -274,6 +274,12 @@ static slurm_errtab_t slurm_errtab[] = {
 	  "Job is no longer pending execution"			},
 	{ ESLURM_QOS_THRES,
 	  "Requested account has breached requested QOS usage threshold"},
+	{ ESLURM_PARTITION_IN_USE,
+	  "Partition is in use"					},
+	{ ESLURM_STEP_LIMIT,
+	  "Step limit reached for this job"			},
+	{ ESLURM_JOB_SUSPENDED,
+	  "Job is currently suspended, requested operation disabled"	},
 
 	/* slurmd error codes */
 
@@ -328,7 +334,7 @@ static slurm_errtab_t slurm_errtab[] = {
 	{ ESLURMD_TOOMANYSTEPS,
 	  "Too many job steps on node"		        	},
 	{ ESLURMD_STEP_EXISTS,
-	  "Job step already in shared memory"	        	},
+	  "Job step already exists"		        	},
 	{ ESLURMD_JOB_NOTRUNNING,
 	  "Job step not running"	        	        },
  	{ ESLURMD_STEP_SUSPENDED,
diff --git a/src/common/slurm_jobacct_gather.c b/src/common/slurm_jobacct_gather.c
index 2cf9d23a5..44615ce88 100644
--- a/src/common/slurm_jobacct_gather.c
+++ b/src/common/slurm_jobacct_gather.c
@@ -10,7 +10,7 @@
  *  Copyright (C) 2005 Hewlett-Packard Development Company, L.P.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -84,7 +84,7 @@ typedef struct slurm_jobacct_gather_ops {
 	void (*jobacct_gather_change_poll)   (uint16_t frequency);
 	void (*jobacct_gather_suspend_poll)  ();
 	void (*jobacct_gather_resume_poll)   ();
-	int (*jobacct_gather_set_proctrack_container_id)(uint32_t id);
+	int (*jobacct_gather_set_proctrack_container_id)(uint64_t id);
 	int (*jobacct_gather_add_task) (pid_t pid, jobacct_id_t *jobacct_id);
 	jobacctinfo_t *(*jobacct_gather_stat_task)(pid_t pid);
 	jobacctinfo_t *(*jobacct_gather_remove_task)(pid_t pid);
@@ -478,7 +478,7 @@ extern void jobacct_gather_g_resume_poll()
 	return;
 }
 
-extern int jobacct_gather_g_set_proctrack_container_id(uint32_t id)
+extern int jobacct_gather_g_set_proctrack_container_id(uint64_t id)
 {
 	int retval = SLURM_SUCCESS;
 	if (_slurm_jobacct_gather_init() < 0)
diff --git a/src/common/slurm_jobacct_gather.h b/src/common/slurm_jobacct_gather.h
index bc072a334..62a3ed578 100644
--- a/src/common/slurm_jobacct_gather.h
+++ b/src/common/slurm_jobacct_gather.h
@@ -10,7 +10,7 @@
  *  Copyright (C) 2005 Hewlett-Packard Development Company, L.P.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -69,8 +69,8 @@
 #include <time.h>
 #include <unistd.h>
 
-#include <slurm/slurm.h>
-#include <slurm/slurmdb.h>
+#include "slurm/slurm.h"
+#include "slurm/slurmdb.h"
 
 #include "src/common/macros.h"
 #include "src/common/pack.h"
@@ -101,7 +101,7 @@ extern int  jobacct_gather_g_endpoll();
 extern void jobacct_gather_g_suspend_poll();
 extern void jobacct_gather_g_resume_poll();
 
-extern int jobacct_gather_g_set_proctrack_container_id(uint32_t id);
+extern int jobacct_gather_g_set_proctrack_container_id(uint64_t id);
 extern int jobacct_gather_g_add_task(pid_t pid, jobacct_id_t *jobacct_id);
 /* must free jobacctinfo_t if not NULL */
 extern jobacctinfo_t *jobacct_gather_g_stat_task(pid_t pid);
diff --git a/src/common/slurm_jobcomp.c b/src/common/slurm_jobcomp.c
index df0da5f48..4abd0a447 100644
--- a/src/common/slurm_jobcomp.c
+++ b/src/common/slurm_jobcomp.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/common/slurm_jobcomp.h b/src/common/slurm_jobcomp.h
index 5ac68fc15..4e667be14 100644
--- a/src/common/slurm_jobcomp.h
+++ b/src/common/slurm_jobcomp.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/common/slurm_priority.c b/src/common/slurm_priority.c
index 4eb53d2d4..1011723a1 100644
--- a/src/common/slurm_priority.c
+++ b/src/common/slurm_priority.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/common/slurm_priority.h b/src/common/slurm_priority.h
index 10c66815f..65da5b306 100644
--- a/src/common/slurm_priority.h
+++ b/src/common/slurm_priority.h
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/common/slurm_protocol_api.c b/src/common/slurm_protocol_api.c
index 6bc9a12d8..3593cfee5 100644
--- a/src/common/slurm_protocol_api.c
+++ b/src/common/slurm_protocol_api.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -331,6 +331,20 @@ uint32_t slurm_get_debug_flags(void)
 	return debug_flags;
 }
 
+/* slurm_set_debug_flags
+ */
+void slurm_set_debug_flags(uint32_t debug_flags)
+{
+	slurm_ctl_conf_t *conf;
+
+	if (slurmdbd_conf) {
+	} else {
+		conf = slurm_conf_lock();
+		conf->debug_flags = debug_flags;
+		slurm_conf_unlock();
+	}
+}
+
 /* slurm_get_max_mem_per_cpu
  * RET MaxMemPerCPU/Node value from slurm.conf
  */
@@ -368,7 +382,7 @@ uint32_t slurm_get_epilog_msg_time(void)
 /* slurm_get_env_timeout
  * return default timeout for srun/sbatch --get-user-env option
  */
-inline int slurm_get_env_timeout(void)
+extern int slurm_get_env_timeout(void)
 {
 	int timeout = 0;
 	slurm_ctl_conf_t *conf;
@@ -1665,6 +1679,24 @@ char *slurm_get_select_type(void)
 	return select_type;
 }
 
+/* slurm_get_select_type_param
+ * get select_type_param from slurmctld_conf object
+ * RET uint16_t   - select_type_param
+ */
+uint16_t slurm_get_select_type_param(void)
+{
+	uint16_t select_type_param = 0;
+	slurm_ctl_conf_t *conf;
+
+	if (slurmdbd_conf) {
+	} else {
+		conf = slurm_conf_lock();
+		select_type_param = conf->select_type_param;
+		slurm_conf_unlock();
+	}
+	return select_type_param;
+}
+
 /** Return true if (remote) system runs Cray XT/XE */
 bool is_cray_select_type(void)
 {
@@ -2642,15 +2674,13 @@ int slurm_send_node_msg(slurm_fd_t fd, slurm_msg_t * msg)
 	Buf      buffer;
 	int      rc;
 	void *   auth_cred;
-	uint16_t auth_flags = SLURM_PROTOCOL_NO_FLAGS;
 
 	/*
 	 * Initialize header with Auth credential and message type.
 	 */
-	if (msg->flags & SLURM_GLOBAL_AUTH_KEY) {
-		auth_flags = SLURM_GLOBAL_AUTH_KEY;
+	if (msg->flags & SLURM_GLOBAL_AUTH_KEY)
 		auth_cred = g_slurm_auth_create(NULL, 2, _global_auth_key());
-	} else
+	else
 		auth_cred = g_slurm_auth_create(NULL, 2, NULL);
 	if (auth_cred == NULL) {
 		error("authentication: %s",
@@ -2699,10 +2729,15 @@ int slurm_send_node_msg(slurm_fd_t fd, slurm_msg_t * msg)
 				get_buf_offset(buffer),
 				SLURM_PROTOCOL_NO_SEND_RECV_FLAGS );
 
-	if (rc < 0) {
+	if ((rc < 0) && (errno == ENOTCONN)) {
+		debug3("slurm_msg_sendto: peer has disappeared for msg_type=%u",
+		       msg->msg_type);
+	} else if (rc < 0) {
+		slurm_addr_t peer_addr;
 		char addr_str[32];
-		slurm_print_slurm_addr(&msg->address, addr_str,
-				       sizeof(addr_str));
+
+		slurm_get_peer_addr(fd, &peer_addr);
+		slurm_print_slurm_addr(&peer_addr, addr_str, sizeof(addr_str));
 		error("slurm_msg_sendto: address:port=%s msg_type=%u: %m",
 		      addr_str, msg->msg_type);
 	}
@@ -2944,23 +2979,6 @@ void slurm_print_slurm_addr(slurm_addr_t * address, char *buf, size_t n)
  * slurm_addr_t pack routines
 \**********************************************************************/
 
-/*
- *  Pack just the message with no header and send back the buffer.
- */
-Buf slurm_pack_msg_no_header(slurm_msg_t * msg)
-{
-	Buf      buffer = NULL;
-
-	buffer = init_buf(0);
-
-	/*
-	 * Pack message into buffer
-	 */
-	pack_msg(msg, buffer);
-
-	return buffer;
-}
-
 /* slurm_pack_slurm_addr
  * packs a slurm_addr_t into a buffer to serialization transport
  * IN slurm_address	- slurm_addr_t to pack
@@ -3367,26 +3385,7 @@ List slurm_send_recv_msgs(const char *nodelist, slurm_msg_t *msg,
 		return NULL;
 	}
 
-#ifdef HAVE_FRONT_END
-	{
-		char *name = NULL;
-		/* only send to the front end node */
-		name = nodelist_nth_host(nodelist, 0);
-		if (!name) {
-			error("slurm_send_recv_msgs: "
-			      "can't get the first name out of %s",
-			      nodelist);
-			return NULL;
-		}
-/* 	info("got %s and %s", nodelist, name); */
-		hl = hostlist_create(name);
-		free(name);
-	}
-#else
-/* 	info("total sending to %s",nodelist); */
 	hl = hostlist_create(nodelist);
-#endif
-
 	if (!hl) {
 		error("slurm_send_recv_msgs: problem creating hostlist");
 		return NULL;
@@ -3441,7 +3440,7 @@ List slurm_send_addr_recv_msgs(slurm_msg_t *msg, char *name, int timeout)
 
 
 /*
- *  Open a connection to the "address" specified in the the slurm msg "req"
+ *  Open a connection to the "address" specified in the slurm msg "req".
  *    Then read back an "rc" message returning the "return_code" specified
  *    in the response in the "rc" parameter.
  * IN req	- a slurm_msg struct to be sent by the function
diff --git a/src/common/slurm_protocol_api.h b/src/common/slurm_protocol_api.h
index 86268ce54..7968bf939 100644
--- a/src/common/slurm_protocol_api.h
+++ b/src/common/slurm_protocol_api.h
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -57,7 +57,7 @@
 #include <sys/types.h>
 #include <stdarg.h>
 
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm_errno.h"
 
 #include "src/common/pack.h"
 #include "src/common/slurm_protocol_common.h"
@@ -90,13 +90,13 @@ enum {
  * sets the slurm_protocol_config object
  * IN protocol_conf		-  slurm_protocol_config object
  */
-inline int slurm_set_api_config(slurm_protocol_config_t * protocol_conf);
+extern int slurm_set_api_config(slurm_protocol_config_t * protocol_conf);
 
 /* slurm_get_api_config
  * returns a pointer to the current slurm_protocol_config object
  * RET slurm_protocol_config_t	- current slurm_protocol_config object
  */
-inline slurm_protocol_config_t *slurm_get_api_config(void);
+extern slurm_protocol_config_t *slurm_get_api_config(void);
 
 /* slurm_get_batch_start_timeout
  * RET BatchStartTimeout value from slurm.conf
@@ -128,6 +128,10 @@ uint16_t slurm_get_complete_wait(void);
  */
 uint32_t slurm_get_debug_flags(void);
 
+/* slurm_set_debug_flags
+ */
+void slurm_set_debug_flags(uint32_t debug_flags);
+
 /* slurm_get_def_mem_per_cpu
  * RET DefMemPerCPU/Node value from slurm.conf
  */
@@ -151,7 +155,7 @@ uint32_t slurm_get_epilog_msg_time(void);
 /* slurm_get_env_timeout
  * return default timeout for srun/sbatch --get-user-env option
  */
-inline int slurm_get_env_timeout(void);
+extern int slurm_get_env_timeout(void);
 
 /* slurm_get_mpi_default
  * get default mpi value from slurmctld_conf object
@@ -168,7 +172,7 @@ char *slurm_get_mpi_params(void);
 /* slurm_get_msg_timeout
  * get default message timeout value from slurmctld_conf object
  */
-inline uint16_t slurm_get_msg_timeout(void);
+extern uint16_t slurm_get_msg_timeout(void);
 
 /* slurm_api_set_conf_file
  *      set slurm configuration file to a non-default value
@@ -181,11 +185,11 @@ extern void slurm_api_set_conf_file(char *pathname);
  *	the compiled in default slurm_protocol_config object is initialized
  * RET int 		- return code
  */
-inline int slurm_api_set_default_config();
+extern int slurm_api_set_default_config();
 
 /* slurm_api_clear_config
  * execute this only at program termination to free all memory */
-inline void slurm_api_clear_config(void);
+extern void slurm_api_clear_config(void);
 
 /* slurm_get_hash_val
  * get hash val of the slurm.conf from slurmctld_conf object from
@@ -531,7 +535,7 @@ extern uint16_t slurm_get_sched_port(void);
  * returns slurmd port from slurmctld_conf object
  * RET uint16_t	- slurmd port
  */
-inline uint16_t slurm_get_slurmd_port(void);
+extern uint16_t slurm_get_slurmd_port(void);
 
 /* slurm_get_slurm_user_id
  * returns slurm uid from slurmctld_conf object
@@ -557,6 +561,12 @@ char *slurm_get_sched_type(void);
  */
 char *slurm_get_select_type(void);
 
+/* slurm_get_select_type_param
+ * get select_type_param from slurmctld_conf object
+ * RET uint16_t   - select_type_param
+ */
+uint16_t slurm_get_select_type_param(void);
+
 /** Return true if (remote) system runs Cray XT/XE */
 bool is_cray_select_type(void);
 
@@ -609,7 +619,7 @@ uint16_t slurm_get_task_plugin_param(void);
  * IN port		- port to bind the msg server to
  * RET slurm_fd		- file descriptor of the connection created
  */
-inline slurm_fd_t slurm_init_msg_engine_port(uint16_t port);
+extern slurm_fd_t slurm_init_msg_engine_port(uint16_t port);
 
 /* In the socket implementation it creates a socket, binds to it, and
  *	listens for connections.
@@ -618,7 +628,7 @@ inline slurm_fd_t slurm_init_msg_engine_port(uint16_t port);
  * IN port		- port to bind the msg server to
  * RET slurm_fd		- file descriptor of the connection created
  */
-inline slurm_fd_t slurm_init_msg_engine_addrname_port(char *addr_name,
+extern slurm_fd_t slurm_init_msg_engine_addrname_port(char *addr_name,
 						    uint16_t port);
 
 /* In the socket implementation it creates a socket, binds to it, and
@@ -626,14 +636,14 @@ inline slurm_fd_t slurm_init_msg_engine_addrname_port(char *addr_name,
  * IN slurm_address 	- slurm_addr_t to bind the msg server to
  * RET slurm_fd		- file descriptor of the connection created
  */
-inline slurm_fd_t slurm_init_msg_engine(slurm_addr_t * slurm_address);
+extern slurm_fd_t slurm_init_msg_engine(slurm_addr_t * slurm_address);
 
 /* In the bsd implmentation maps directly to a accept call
  * IN open_fd		- file descriptor to accept connection on
  * OUT slurm_address 	- slurm_addr_t of the accepted connection
  * RET slurm_fd		- file descriptor of the connection created
  */
-inline slurm_fd_t slurm_accept_msg_conn(slurm_fd_t open_fd,
+extern slurm_fd_t slurm_accept_msg_conn(slurm_fd_t open_fd,
 				      slurm_addr_t * slurm_address);
 
 /* In the bsd implmentation maps directly to a close call, to close
@@ -641,13 +651,13 @@ inline slurm_fd_t slurm_accept_msg_conn(slurm_fd_t open_fd,
  * IN open_fd		- an open file descriptor to close
  * RET int		- the return code
  */
-inline int slurm_close_accepted_conn(slurm_fd_t open_fd);
+extern int slurm_close_accepted_conn(slurm_fd_t open_fd);
 
 /* just calls close on an established msg connection
  * IN open_fd	- an open file descriptor to close
  * RET int	- the return code
  */
-inline int slurm_shutdown_msg_engine(slurm_fd_t open_fd);
+extern int slurm_shutdown_msg_engine(slurm_fd_t open_fd);
 
 /**********************************************************************\
  * receive message functions
@@ -727,8 +737,8 @@ int slurm_send_node_msg(slurm_fd_t open_fd, slurm_msg_t *msg);
  * IN/OUT addr     - address of controller contacted
  * RET slurm_fd	- file descriptor of the connection created
  */
-inline slurm_fd_t slurm_open_controller_conn(slurm_addr_t *addr);
-inline slurm_fd_t slurm_open_controller_conn_spec(enum controller_id dest);
+extern slurm_fd_t slurm_open_controller_conn(slurm_addr_t *addr);
+extern slurm_fd_t slurm_open_controller_conn_spec(enum controller_id dest);
 /* gets the slurm_addr_t of the specified controller
  *	primary or secondary slurmctld message engine
  * IN dest      - controller to contact, primary or secondary
@@ -745,13 +755,13 @@ void slurm_get_controller_addr_spec(enum controller_id dest,
  * IN slurm_address 	- slurm_addr_t of the connection destination
  * RET slurm_fd		- file descriptor of the connection created
  */
-inline slurm_fd_t slurm_open_msg_conn(slurm_addr_t * slurm_address);
+extern slurm_fd_t slurm_open_msg_conn(slurm_addr_t * slurm_address);
 
 /* just calls close on an established msg connection to close
  * IN open_fd	- an open file descriptor to close
  * RET int	- the return code
  */
-inline int slurm_shutdown_msg_conn(slurm_fd_t open_fd);
+extern int slurm_shutdown_msg_conn(slurm_fd_t open_fd);
 
 
 /**********************************************************************\
@@ -763,7 +773,7 @@ inline int slurm_shutdown_msg_conn(slurm_fd_t open_fd);
  * IN slurm_address 	- slurm_addr_t to bind the server stream to
  * RET slurm_fd		- file descriptor of the stream created
  */
-inline slurm_fd_t slurm_listen_stream(slurm_addr_t * slurm_address);
+extern slurm_fd_t slurm_listen_stream(slurm_addr_t * slurm_address);
 
 /* slurm_accept_stream
  * accepts a incoming stream connection on a stream server slurm_fd
@@ -771,7 +781,7 @@ inline slurm_fd_t slurm_listen_stream(slurm_addr_t * slurm_address);
  * OUT slurm_address 	- slurm_addr_t of the accepted connection
  * RET slurm_fd		- file descriptor of the accepted connection
  */
-inline slurm_fd_t slurm_accept_stream(slurm_fd_t open_fd,
+extern slurm_fd_t slurm_accept_stream(slurm_fd_t open_fd,
 				    slurm_addr_t * slurm_address);
 
 /* slurm_open_stream
@@ -779,14 +789,14 @@ inline slurm_fd_t slurm_accept_stream(slurm_fd_t open_fd,
  * IN slurm_address 	- slurm_addr_t of the connection destination
  * RET slurm_fd_t         - file descriptor of the connection created
  */
-inline slurm_fd_t slurm_open_stream(slurm_addr_t * slurm_address);
+extern slurm_fd_t slurm_open_stream(slurm_addr_t * slurm_address);
 
 /* slurm_close_stream
  * closes either a server or client stream file_descriptor
  * IN open_fd	- an open file descriptor to close
  * RET int	- the return code
  */
-inline int slurm_close_stream(slurm_fd_t open_fd);
+extern int slurm_close_stream(slurm_fd_t open_fd);
 
 /* slurm_write_stream
  * writes a buffer out a stream file descriptor
@@ -796,9 +806,9 @@ inline int slurm_close_stream(slurm_fd_t open_fd);
  * IN timeout		- how long to wait in milliseconds
  * RET size_t		- bytes sent , or -1 on errror
  */
-inline size_t slurm_write_stream(slurm_fd_t open_fd, char *buffer,
+extern size_t slurm_write_stream(slurm_fd_t open_fd, char *buffer,
 				 size_t size);
-inline size_t slurm_write_stream_timeout(slurm_fd_t open_fd,
+extern size_t slurm_write_stream_timeout(slurm_fd_t open_fd,
 					 char *buffer, size_t size,
 					 int timeout);
 
@@ -810,9 +820,9 @@ inline size_t slurm_write_stream_timeout(slurm_fd_t open_fd,
  * IN timeout		- how long to wait in milliseconds
  * RET size_t		- bytes read , or -1 on errror
  */
-inline size_t slurm_read_stream(slurm_fd_t open_fd, char *buffer,
+extern size_t slurm_read_stream(slurm_fd_t open_fd, char *buffer,
 				size_t size);
-inline size_t slurm_read_stream_timeout(slurm_fd_t open_fd,
+extern size_t slurm_read_stream_timeout(slurm_fd_t open_fd,
 					char *buffer, size_t size,
 					int timeout);
 
@@ -821,15 +831,15 @@ inline size_t slurm_read_stream_timeout(slurm_fd_t open_fd,
  * IN open_fd 		- file descriptor to retreive slurm_addr_t for
  * OUT address		- address that open_fd to bound to
  */
-inline int slurm_get_stream_addr(slurm_fd_t open_fd, slurm_addr_t * address);
+extern int slurm_get_stream_addr(slurm_fd_t open_fd, slurm_addr_t * address);
 
 /* make an open slurm connection blocking or non-blocking
  *	(i.e. wait or do not wait for i/o completion )
  * IN open_fd	- an open file descriptor to change the effect
  * RET int	- the return code
  */
-inline int slurm_set_stream_non_blocking(slurm_fd_t open_fd);
-inline int slurm_set_stream_blocking(slurm_fd_t open_fd);
+extern int slurm_set_stream_non_blocking(slurm_fd_t open_fd);
+extern int slurm_set_stream_blocking(slurm_fd_t open_fd);
 
 /**********************************************************************\
  * address conversion and management functions
@@ -841,7 +851,7 @@ inline int slurm_set_stream_blocking(slurm_fd_t open_fd);
  * IN port		- port in host order
  * IN ip_address	- ipv4 address in uint32 host order form
  */
-inline void slurm_set_addr_uint(slurm_addr_t * slurm_address,
+extern void slurm_set_addr_uint(slurm_addr_t * slurm_address,
 				uint16_t port, uint32_t ip_address);
 
 /* reset_slurm_addr
@@ -857,7 +867,7 @@ void reset_slurm_addr(slurm_addr_t * slurm_address, slurm_addr_t new_address);
  * IN port		- port in host order
  * IN host		- hostname or dns name
  */
-inline void slurm_set_addr(slurm_addr_t * slurm_address,
+extern void slurm_set_addr(slurm_addr_t * slurm_address,
 			   uint16_t port, char *host);
 
 /* slurm_set_addr_any
@@ -865,7 +875,7 @@ inline void slurm_set_addr(slurm_addr_t * slurm_address,
  * OUT slurm_address	- slurm_addr_t to be filled in
  * IN port		- port in host order
  */
-inline void slurm_set_addr_any(slurm_addr_t * slurm_address, uint16_t port);
+extern void slurm_set_addr_any(slurm_addr_t * slurm_address, uint16_t port);
 
 /* slurm_set_addr_char
  * initializes the slurm_address with the supplied port and host
@@ -873,7 +883,7 @@ inline void slurm_set_addr_any(slurm_addr_t * slurm_address, uint16_t port);
  * IN port		- port in host order
  * IN host		- hostname or dns name
  */
-inline void slurm_set_addr_char(slurm_addr_t * slurm_address,
+extern void slurm_set_addr_char(slurm_addr_t * slurm_address,
 				uint16_t port, char *host);
 
 /* slurm_get_addr
@@ -883,7 +893,7 @@ inline void slurm_set_addr_char(slurm_addr_t * slurm_address,
  * OUT host		- hostname
  * IN buf_len		- length of hostname buffer
  */
-inline void slurm_get_addr(slurm_addr_t * slurm_address,
+extern void slurm_get_addr(slurm_addr_t * slurm_address,
 			   uint16_t * port, char *host, uint32_t buf_len);
 
 /* slurm_get_ip_str
@@ -893,7 +903,7 @@ inline void slurm_get_addr(slurm_addr_t * slurm_address,
  * OUT ip		- ip address in dotted-quad string form
  * IN buf_len		- length of ip buffer
  */
-inline void slurm_get_ip_str(slurm_addr_t * slurm_address, uint16_t * port,
+extern void slurm_get_ip_str(slurm_addr_t * slurm_address, uint16_t * port,
 			     char *ip, unsigned int buf_len);
 
 /* slurm_get_peer_addr
@@ -901,7 +911,7 @@ inline void slurm_get_ip_str(slurm_addr_t * slurm_address, uint16_t * port,
  * IN fd		- an open connection
  * OUT slurm_address	- place to park the peer's slurm_addr
  */
-inline int slurm_get_peer_addr(slurm_fd_t fd, slurm_addr_t * slurm_address);
+extern int slurm_get_peer_addr(slurm_fd_t fd, slurm_addr_t * slurm_address);
 
 /* slurm_print_slurm_addr
  * prints a slurm_addr_t into a buf
@@ -909,21 +919,19 @@ inline int slurm_get_peer_addr(slurm_fd_t fd, slurm_addr_t * slurm_address);
  * IN buf		- space for string representation of slurm_addr
  * IN n			- max number of bytes to write (including NUL)
  */
-inline void slurm_print_slurm_addr(slurm_addr_t * address,
+extern void slurm_print_slurm_addr(slurm_addr_t * address,
 				   char *buf, size_t n);
 
 /**********************************************************************\
  * slurm_addr_t pack routines
 \**********************************************************************/
 
-Buf slurm_pack_msg_no_header(slurm_msg_t * msg);
-
 /* slurm_pack_slurm_addr
  * packs a slurm_addr_t into a buffer to serialization transport
  * IN slurm_address	- slurm_addr_t to pack
  * IN/OUT buffer	- buffer to pack the slurm_addr_t into
  */
-inline void slurm_pack_slurm_addr(slurm_addr_t * slurm_address, Buf buffer);
+extern void slurm_pack_slurm_addr(slurm_addr_t * slurm_address, Buf buffer);
 
 /* slurm_pack_slurm_addr
  * unpacks a buffer into a slurm_addr_t after serialization transport
@@ -931,7 +939,7 @@ inline void slurm_pack_slurm_addr(slurm_addr_t * slurm_address, Buf buffer);
  * IN/OUT buffer	- buffer to upack the slurm_addr_t from
  * returns 		- SLURM error code
  */
-inline int slurm_unpack_slurm_addr_no_alloc(slurm_addr_t * slurm_address,
+extern int slurm_unpack_slurm_addr_no_alloc(slurm_addr_t * slurm_address,
 					    Buf buffer);
 
 /* slurm_pack_slurm_addr_array
@@ -941,7 +949,7 @@ inline int slurm_unpack_slurm_addr_no_alloc(slurm_addr_t * slurm_address,
  * IN/OUT buffer	- buffer to pack the slurm_addr_t from
  * returns		- SLURM error code
  */
-inline void slurm_pack_slurm_addr_array(slurm_addr_t * slurm_address,
+extern void slurm_pack_slurm_addr_array(slurm_addr_t * slurm_address,
 					uint32_t size_val, Buf buffer);
 /* slurm_unpack_slurm_addr_array
  * unpacks an array of slurm_addrs from a buffer
@@ -950,7 +958,7 @@ inline void slurm_pack_slurm_addr_array(slurm_addr_t * slurm_address,
  * IN/OUT buffer	- buffer to upack the slurm_addr_t from
  * returns		- SLURM error code
  */
-inline int slurm_unpack_slurm_addr_array(slurm_addr_t ** slurm_address,
+extern int slurm_unpack_slurm_addr_array(slurm_addr_t ** slurm_address,
 					 uint32_t * size_val, Buf buffer);
 
 /**********************************************************************\
@@ -995,7 +1003,7 @@ int slurm_send_recv_node_msg(slurm_msg_t * request_msg,
  * IN nodelist	    - list of nodes to send to.
  * IN msg           - a slurm_msg struct to be sent by the function
  * IN timeout	    - how long to wait in milliseconds
- * IN quiet       - if set, reduce logging details
+ * IN quiet         - if set, reduce logging details
  * RET List	    - List containing the responses of the childern
  *                    (if any) we forwarded the message to. List
  *                    containing type (ret_types_t).
diff --git a/src/common/slurm_protocol_common.h b/src/common/slurm_protocol_common.h
index 6fe15a415..06d80fd93 100644
--- a/src/common/slurm_protocol_common.h
+++ b/src/common/slurm_protocol_common.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -47,7 +47,7 @@
 #include <sys/time.h>
 #include <time.h>
 
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm_errno.h"
 
 /* for sendto and recvfrom commands */
 #define SLURM_PROTOCOL_NO_SEND_RECV_FLAGS 0
@@ -70,7 +70,8 @@
  * In slurm_protocol_util.c check_header_version(), and init_header()
  * need to be updated also when changes are added */
 #define SLURM_PROTOCOL_VERSION ((SLURM_API_MAJOR << 8) | SLURM_API_AGE)
-#define SLURM_2_2_PROTOCOL_VERSION SLURM_PROTOCOL_VERSION
+#define SLURM_2_3_PROTOCOL_VERSION SLURM_PROTOCOL_VERSION
+#define SLURM_2_2_PROTOCOL_VERSION ((22 << 8) | 0)
 #define SLURM_2_1_PROTOCOL_VERSION ((21 << 8) | 0)
 #define SLURM_2_0_PROTOCOL_VERSION ((20 << 8) | 0)
 #define SLURM_1_3_PROTOCOL_VERSION ((13 << 8) | 0)
@@ -80,9 +81,9 @@
 #define SLURM_GLOBAL_AUTH_KEY   0x0001
 
 #if MONGO_IMPLEMENTATION
-#  include <src/common/slurm_protocol_mongo_common.h>
+#  include "src/common/slurm_protocol_mongo_common.h"
 #else
-#  include <src/common/slurm_protocol_socket_common.h>
+#  include "src/common/slurm_protocol_socket_common.h"
 #endif
 
 #endif
diff --git a/src/common/slurm_protocol_defs.c b/src/common/slurm_protocol_defs.c
index cae951cb3..428e38ac7 100644
--- a/src/common/slurm_protocol_defs.c
+++ b/src/common/slurm_protocol_defs.c
@@ -11,7 +11,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -60,7 +60,7 @@
 #include "src/common/job_options.h"
 #include "src/common/forward.h"
 #include "src/common/slurm_jobacct_gather.h"
-#include "src/plugins/select/bluegene/wrap_rm_api.h"
+#include "src/plugins/select/bluegene/bg_enums.h"
 
 /*
 ** Define slurm-specific aliases for use by plugins, see slurm_xlator.h
@@ -77,11 +77,14 @@ strong_alias(node_state_string_compact, slurm_node_state_string_compact);
 strong_alias(private_data_string, slurm_private_data_string);
 strong_alias(accounting_enforce_string, slurm_accounting_enforce_string);
 strong_alias(conn_type_string,	slurm_conn_type_string);
+strong_alias(conn_type_string_full, slurm_conn_type_string_full);
 strong_alias(node_use_string, slurm_node_use_string);
 strong_alias(bg_block_state_string, slurm_bg_block_state_string);
 strong_alias(reservation_flags_string, slurm_reservation_flags_string);
 
 
+static void _free_all_front_end_info(front_end_info_msg_t *msg);
+
 static void _free_all_job_info (job_info_msg_t *msg);
 
 static void _free_all_node_info (node_info_msg_t *msg);
@@ -273,80 +276,83 @@ extern int slurm_sort_char_list_desc(char *name_a, char *name_b)
 	return 0;
 }
 
-void slurm_free_last_update_msg(last_update_msg_t * msg)
+extern void slurm_free_last_update_msg(last_update_msg_t * msg)
+{
+	xfree(msg);
+}
+
+extern void slurm_free_shutdown_msg(shutdown_msg_t * msg)
 {
 	xfree(msg);
 }
 
-void slurm_free_shutdown_msg(shutdown_msg_t * msg)
+extern void slurm_free_job_alloc_info_msg(job_alloc_info_msg_t * msg)
 {
 	xfree(msg);
 }
 
-void slurm_free_job_alloc_info_msg(job_alloc_info_msg_t * msg)
+extern void slurm_free_return_code_msg(return_code_msg_t * msg)
 {
 	xfree(msg);
 }
 
-void slurm_free_return_code_msg(return_code_msg_t * msg)
+extern void slurm_free_job_id_msg(job_id_msg_t * msg)
 {
 	xfree(msg);
 }
 
-void slurm_free_job_id_msg(job_id_msg_t * msg)
+extern void slurm_free_job_step_id_msg(job_step_id_msg_t * msg)
 {
 	xfree(msg);
 }
 
-void slurm_free_job_step_id_msg(job_step_id_msg_t * msg)
+extern void slurm_free_job_id_request_msg(job_id_request_msg_t * msg)
 {
 	xfree(msg);
 }
 
-void slurm_free_job_id_request_msg(job_id_request_msg_t * msg)
+extern void slurm_free_update_step_msg(step_update_request_msg_t * msg)
 {
 	xfree(msg);
 }
 
-void slurm_free_update_step_msg(step_update_request_msg_t * msg)
+extern void slurm_free_job_id_response_msg(job_id_response_msg_t * msg)
 {
 	xfree(msg);
 }
 
-void slurm_free_job_id_response_msg(job_id_response_msg_t * msg)
+extern void slurm_free_job_step_kill_msg(job_step_kill_msg_t * msg)
 {
 	xfree(msg);
 }
 
-void slurm_free_job_step_kill_msg(job_step_kill_msg_t * msg)
+extern void slurm_free_job_info_request_msg(job_info_request_msg_t *msg)
 {
 	xfree(msg);
 }
 
-void slurm_free_job_info_request_msg(job_info_request_msg_t *msg)
+extern void slurm_free_job_step_info_request_msg(job_step_info_request_msg_t *msg)
 {
 	xfree(msg);
 }
 
-void slurm_free_job_step_info_request_msg(
-	job_step_info_request_msg_t *msg)
+extern void slurm_free_front_end_info_request_msg
+		(front_end_info_request_msg_t *msg)
 {
 	xfree(msg);
 }
 
-inline void slurm_free_node_info_request_msg(
-	node_info_request_msg_t *msg)
+extern void slurm_free_node_info_request_msg(node_info_request_msg_t *msg)
 {
 	xfree(msg);
 }
 
-inline void slurm_free_part_info_request_msg(
-	part_info_request_msg_t *msg)
+extern void slurm_free_part_info_request_msg(part_info_request_msg_t *msg)
 {
 	xfree(msg);
 }
 
-void slurm_free_job_desc_msg(job_desc_msg_t * msg)
+extern void slurm_free_job_desc_msg(job_desc_msg_t * msg)
 {
 	int i;
 
@@ -395,7 +401,7 @@ void slurm_free_job_desc_msg(job_desc_msg_t * msg)
 	}
 }
 
-void slurm_free_job_launch_msg(batch_job_launch_msg_t * msg)
+extern void slurm_free_job_launch_msg(batch_job_launch_msg_t * msg)
 {
 	int i;
 
@@ -434,7 +440,7 @@ void slurm_free_job_launch_msg(batch_job_launch_msg_t * msg)
 	}
 }
 
-void slurm_free_job_info(job_info_t * job)
+extern void slurm_free_job_info(job_info_t * job)
 {
 	if (job) {
 		slurm_free_job_info_members(job);
@@ -442,11 +448,13 @@ void slurm_free_job_info(job_info_t * job)
 	}
 }
 
-void slurm_free_job_info_members(job_info_t * job)
+extern void slurm_free_job_info_members(job_info_t * job)
 {
 	if (job) {
 		xfree(job->account);
 		xfree(job->alloc_node);
+		xfree(job->batch_host);
+		xfree(job->batch_script);
 		xfree(job->command);
 		xfree(job->comment);
 		xfree(job->dependency);
@@ -473,7 +481,7 @@ void slurm_free_job_info_members(job_info_t * job)
 	}
 }
 
-void slurm_free_node_registration_status_msg(
+extern void slurm_free_node_registration_status_msg(
 	slurm_node_registration_status_msg_t * msg)
 {
 	if (msg) {
@@ -490,8 +498,16 @@ void slurm_free_node_registration_status_msg(
 	}
 }
 
+extern void slurm_free_update_front_end_msg(update_front_end_msg_t * msg)
+{
+	if (msg) {
+		xfree(msg->name);
+		xfree(msg->reason);
+		xfree(msg);
+	}
+}
 
-void slurm_free_update_node_msg(update_node_msg_t * msg)
+extern void slurm_free_update_node_msg(update_node_msg_t * msg)
 {
 	if (msg) {
 		xfree(msg->features);
@@ -502,7 +518,7 @@ void slurm_free_update_node_msg(update_node_msg_t * msg)
 	}
 }
 
-void slurm_free_update_part_msg(update_part_msg_t * msg)
+extern void slurm_free_update_part_msg(update_part_msg_t * msg)
 {
 	if (msg) {
 		slurm_free_partition_info_members((partition_info_t *)msg);
@@ -510,7 +526,7 @@ void slurm_free_update_part_msg(update_part_msg_t * msg)
 	}
 }
 
-void slurm_free_delete_part_msg(delete_part_msg_t * msg)
+extern void slurm_free_delete_part_msg(delete_part_msg_t * msg)
 {
 	if (msg) {
 		xfree(msg->name);
@@ -518,7 +534,7 @@ void slurm_free_delete_part_msg(delete_part_msg_t * msg)
 	}
 }
 
-void slurm_free_resv_desc_msg(resv_desc_msg_t * msg)
+extern void slurm_free_resv_desc_msg(resv_desc_msg_t * msg)
 {
 	if (msg) {
 		xfree(msg->accounts);
@@ -532,7 +548,7 @@ void slurm_free_resv_desc_msg(resv_desc_msg_t * msg)
 	}
 }
 
-void slurm_free_resv_name_msg(reservation_name_msg_t * msg)
+extern void slurm_free_resv_name_msg(reservation_name_msg_t * msg)
 {
 	if (msg) {
 		xfree(msg->name);
@@ -540,13 +556,13 @@ void slurm_free_resv_name_msg(reservation_name_msg_t * msg)
 	}
 }
 
-void slurm_free_resv_info_request_msg(resv_info_request_msg_t * msg)
+extern void slurm_free_resv_info_request_msg(resv_info_request_msg_t * msg)
 {
 	xfree(msg);
 }
 
-void slurm_free_job_step_create_request_msg(job_step_create_request_msg_t *
-					    msg)
+extern void slurm_free_job_step_create_request_msg(
+		job_step_create_request_msg_t *msg)
 {
 	if (msg) {
 		xfree(msg->features);
@@ -560,13 +576,14 @@ void slurm_free_job_step_create_request_msg(job_step_create_request_msg_t *
 	}
 }
 
-void slurm_free_complete_job_allocation_msg(
+extern void slurm_free_complete_job_allocation_msg(
 	complete_job_allocation_msg_t * msg)
 {
 	xfree(msg);
 }
 
-void slurm_free_complete_batch_script_msg(complete_batch_script_msg_t * msg)
+extern void slurm_free_complete_batch_script_msg(
+		complete_batch_script_msg_t * msg)
 {
 	if (msg) {
 		jobacct_gather_g_destroy(msg->jobacct);
@@ -576,7 +593,8 @@ void slurm_free_complete_batch_script_msg(complete_batch_script_msg_t * msg)
 }
 
 
-void slurm_free_launch_tasks_response_msg(launch_tasks_response_msg_t *msg)
+extern void slurm_free_launch_tasks_response_msg(
+		launch_tasks_response_msg_t *msg)
 {
 	if (msg) {
 		xfree(msg->node_name);
@@ -586,7 +604,7 @@ void slurm_free_launch_tasks_response_msg(launch_tasks_response_msg_t *msg)
 	}
 }
 
-void slurm_free_kill_job_msg(kill_job_msg_t * msg)
+extern void slurm_free_kill_job_msg(kill_job_msg_t * msg)
 {
 	if (msg) {
 		int i;
@@ -601,17 +619,17 @@ void slurm_free_kill_job_msg(kill_job_msg_t * msg)
 	}
 }
 
-void slurm_free_signal_job_msg(signal_job_msg_t * msg)
+extern void slurm_free_signal_job_msg(signal_job_msg_t * msg)
 {
 	xfree(msg);
 }
 
-void slurm_free_update_job_time_msg(job_time_msg_t * msg)
+extern void slurm_free_update_job_time_msg(job_time_msg_t * msg)
 {
 	xfree(msg);
 }
 
-void slurm_free_task_exit_msg(task_exit_msg_t * msg)
+extern void slurm_free_task_exit_msg(task_exit_msg_t * msg)
 {
 	if (msg) {
 		xfree(msg->task_id_list);
@@ -619,7 +637,7 @@ void slurm_free_task_exit_msg(task_exit_msg_t * msg)
 	}
 }
 
-void slurm_free_launch_tasks_request_msg(launch_tasks_request_msg_t * msg)
+extern void slurm_free_launch_tasks_request_msg(launch_tasks_request_msg_t * msg)
 {
 	int i;
 
@@ -673,15 +691,20 @@ void slurm_free_launch_tasks_request_msg(launch_tasks_request_msg_t * msg)
 	if (msg->options)
 		job_options_destroy(msg->options);
 
+	if (msg->select_jobinfo)
+		select_g_select_jobinfo_free(msg->select_jobinfo);
+
 	xfree(msg);
 }
 
-void slurm_free_task_user_managed_io_stream_msg(task_user_managed_io_msg_t *msg)
+extern void slurm_free_task_user_managed_io_stream_msg(
+		task_user_managed_io_msg_t *msg)
 {
 	xfree(msg);
 }
 
-void slurm_free_reattach_tasks_request_msg(reattach_tasks_request_msg_t *msg)
+extern void slurm_free_reattach_tasks_request_msg(
+		reattach_tasks_request_msg_t *msg)
 {
 	if (msg) {
 		xfree(msg->resp_port);
@@ -691,7 +714,8 @@ void slurm_free_reattach_tasks_request_msg(reattach_tasks_request_msg_t *msg)
 	}
 }
 
-void slurm_free_reattach_tasks_response_msg(reattach_tasks_response_msg_t *msg)
+extern void slurm_free_reattach_tasks_response_msg(
+		reattach_tasks_response_msg_t *msg)
 {
 	int i;
 
@@ -707,12 +731,12 @@ void slurm_free_reattach_tasks_response_msg(reattach_tasks_response_msg_t *msg)
 	}
 }
 
-void slurm_free_kill_tasks_msg(kill_tasks_msg_t * msg)
+extern void slurm_free_kill_tasks_msg(kill_tasks_msg_t * msg)
 {
 	xfree(msg);
 }
 
-void slurm_free_checkpoint_tasks_msg(checkpoint_tasks_msg_t * msg)
+extern void slurm_free_checkpoint_tasks_msg(checkpoint_tasks_msg_t * msg)
 {
 	if (msg) {
 		xfree(msg->image_dir);
@@ -720,7 +744,7 @@ void slurm_free_checkpoint_tasks_msg(checkpoint_tasks_msg_t * msg)
 	}
 }
 
-void slurm_free_epilog_complete_msg(epilog_complete_msg_t * msg)
+extern void slurm_free_epilog_complete_msg(epilog_complete_msg_t * msg)
 {
 	if (msg) {
 		xfree(msg->node_name);
@@ -729,12 +753,13 @@ void slurm_free_epilog_complete_msg(epilog_complete_msg_t * msg)
 	}
 }
 
-inline void slurm_free_srun_job_complete_msg(srun_job_complete_msg_t * msg)
+extern void slurm_free_srun_job_complete_msg(
+		srun_job_complete_msg_t * msg)
 {
 	xfree(msg);
 }
 
-inline void slurm_free_srun_exec_msg(srun_exec_msg_t *msg)
+extern void slurm_free_srun_exec_msg(srun_exec_msg_t *msg)
 {
 	int i;
 
@@ -746,12 +771,12 @@ inline void slurm_free_srun_exec_msg(srun_exec_msg_t *msg)
 	}
 }
 
-inline void slurm_free_srun_ping_msg(srun_ping_msg_t * msg)
+extern void slurm_free_srun_ping_msg(srun_ping_msg_t * msg)
 {
 	xfree(msg);
 }
 
-inline void slurm_free_srun_node_fail_msg(srun_node_fail_msg_t * msg)
+extern void slurm_free_srun_node_fail_msg(srun_node_fail_msg_t * msg)
 {
 	if (msg) {
 		xfree(msg->nodelist);
@@ -759,7 +784,7 @@ inline void slurm_free_srun_node_fail_msg(srun_node_fail_msg_t * msg)
 	}
 }
 
-inline void slurm_free_srun_step_missing_msg(srun_step_missing_msg_t * msg)
+extern void slurm_free_srun_step_missing_msg(srun_step_missing_msg_t * msg)
 {
 	if (msg) {
 		xfree(msg->nodelist);
@@ -767,12 +792,12 @@ inline void slurm_free_srun_step_missing_msg(srun_step_missing_msg_t * msg)
 	}
 }
 
-inline void slurm_free_srun_timeout_msg(srun_timeout_msg_t * msg)
+extern void slurm_free_srun_timeout_msg(srun_timeout_msg_t * msg)
 {
 	xfree(msg);
 }
 
-inline void slurm_free_srun_user_msg(srun_user_msg_t * user_msg)
+extern void slurm_free_srun_user_msg(srun_user_msg_t * user_msg)
 {
 	if (user_msg) {
 		xfree(user_msg->msg);
@@ -780,7 +805,7 @@ inline void slurm_free_srun_user_msg(srun_user_msg_t * user_msg)
 	}
 }
 
-inline void slurm_free_checkpoint_msg(checkpoint_msg_t *msg)
+extern void slurm_free_checkpoint_msg(checkpoint_msg_t *msg)
 {
 	if (msg) {
 		xfree(msg->image_dir);
@@ -788,7 +813,7 @@ inline void slurm_free_checkpoint_msg(checkpoint_msg_t *msg)
 	}
 }
 
-inline void slurm_free_checkpoint_comp_msg(checkpoint_comp_msg_t *msg)
+extern void slurm_free_checkpoint_comp_msg(checkpoint_comp_msg_t *msg)
 {
 	if (msg) {
 		xfree(msg->error_msg);
@@ -796,7 +821,7 @@ inline void slurm_free_checkpoint_comp_msg(checkpoint_comp_msg_t *msg)
 	}
 }
 
-inline void slurm_free_checkpoint_task_comp_msg(checkpoint_task_comp_msg_t *msg)
+extern void slurm_free_checkpoint_task_comp_msg(checkpoint_task_comp_msg_t *msg)
 {
 	if (msg) {
 		xfree(msg->error_msg);
@@ -804,18 +829,33 @@ inline void slurm_free_checkpoint_task_comp_msg(checkpoint_task_comp_msg_t *msg)
 	}
 }
 
-inline void slurm_free_checkpoint_resp_msg(checkpoint_resp_msg_t *msg)
+extern void slurm_free_checkpoint_resp_msg(checkpoint_resp_msg_t *msg)
 {
 	if (msg) {
 		xfree(msg->error_msg);
 		xfree(msg);
 	}
 }
-inline void slurm_free_suspend_msg(suspend_msg_t *msg)
+extern void slurm_free_suspend_msg(suspend_msg_t *msg)
+{
+	xfree(msg);
+}
+
+extern void slurm_free_spank_env_request_msg(spank_env_request_msg_t *msg)
 {
 	xfree(msg);
 }
 
+extern void slurm_free_spank_env_responce_msg(spank_env_responce_msg_t *msg)
+{
+	uint32_t i;
+
+	for (i = 0; i < msg->spank_job_env_size; i++)
+		xfree(msg->spank_job_env[i]);
+	xfree(msg->spank_job_env);
+	xfree(msg);
+}
+
 /* Given a job's reason for waiting, return a descriptive string */
 extern char *job_reason_string(enum job_state_reason inx)
 {
@@ -876,12 +916,18 @@ extern char *job_reason_string(enum job_state_reason inx)
 		return "InvalidQOS";
 	case WAIT_QOS_THRES:
 		return "QOSUsageThreshold";
+	case WAIT_QOS_JOB_LIMIT:
+		return "QOSJobLimit";
+	case WAIT_QOS_RESOURCE_LIMIT:
+		return "QOSResourceLimit";
+	case WAIT_QOS_TIME_LIMIT:
+		return "QOSTimeLimit";
 	default:
 		return "?";
 	}
 }
 
-inline void slurm_free_get_kvs_msg(kvs_get_msg_t *msg)
+extern void slurm_free_get_kvs_msg(kvs_get_msg_t *msg)
 {
 	if (msg) {
 		xfree(msg->hostname);
@@ -889,7 +935,7 @@ inline void slurm_free_get_kvs_msg(kvs_get_msg_t *msg)
 	}
 }
 
-inline void slurm_free_will_run_response_msg(will_run_response_msg_t *msg)
+extern void slurm_free_will_run_response_msg(will_run_response_msg_t *msg)
 {
         if (msg) {
                 xfree(msg->node_list);
@@ -975,7 +1021,49 @@ extern uint16_t preempt_mode_num(const char *preempt_mode)
 	return mode_num;
 }
 
-char *job_state_string(uint16_t inx)
+/* Convert SelectTypeParameter to equivalent string
+ * NOTE: Not reentrant */
+extern char *sched_param_type_string(uint16_t select_type_param)
+{
+	static char select_str[64];
+
+	select_str[0] = '\0';
+	if ((select_type_param & CR_CPU) &&
+	    (select_type_param & CR_MEMORY))
+		strcat(select_str, "CR_CPU_MEMORY");
+	else if ((select_type_param & CR_CORE) &&
+		 (select_type_param & CR_MEMORY))
+		strcat(select_str, "CR_CORE_MEMORY");
+	else if ((select_type_param & CR_SOCKET) &&
+		 (select_type_param & CR_MEMORY))
+		strcat(select_str, "CR_SOCKET_MEMORY");
+	else if (select_type_param & CR_CPU)
+		strcat(select_str, "CR_CPU");
+	else if (select_type_param & CR_CORE)
+		strcat(select_str, "CR_CORE");
+	else if (select_type_param & CR_SOCKET)
+		strcat(select_str, "CR_SOCKET");
+	else if (select_type_param & CR_MEMORY)
+		strcat(select_str, "CR_MEMORY");
+
+	if (select_type_param & CR_ONE_TASK_PER_CORE) {
+		if (select_str[0])
+			strcat(select_str, ",");
+		strcat(select_str, "CR_ONE_TASK_PER_CORE");
+	}
+	if (select_type_param & CR_CORE_DEFAULT_DIST_BLOCK) {
+		if (select_str[0])
+			strcat(select_str, ",");
+		strcat(select_str, "CR_CORE_DEFAULT_DIST_BLOCK");
+	}
+
+	if (select_str[0] == '\0')
+		strcat(select_str, "NONE");
+
+	return select_str;
+}
+
+extern char *job_state_string(uint16_t inx)
 {
 	/* Process JOB_STATE_FLAGS */
 	if (inx & JOB_COMPLETING)
@@ -1003,12 +1091,14 @@ char *job_state_string(uint16_t inx)
 		return "TIMEOUT";
 	case JOB_NODE_FAIL:
 		return "NODE_FAIL";
+	case JOB_PREEMPTED:
+		return "PREEMPTED";
 	default:
 		return "?";
 	}
 }
 
-char *job_state_string_compact(uint16_t inx)
+extern char *job_state_string_compact(uint16_t inx)
 {
 	/* Process JOB_STATE_FLAGS */
 	if (inx & JOB_COMPLETING)
@@ -1036,12 +1126,14 @@ char *job_state_string_compact(uint16_t inx)
 		return "TO";
 	case JOB_NODE_FAIL:
 		return "NF";
+	case JOB_PREEMPTED:
+		return "PR";
 	default:
 		return "?";
 	}
 }
 
-inline static bool _job_name_test(int state_num, const char *state_name)
+static bool _job_name_test(int state_num, const char *state_name)
 {
 	if (!strcasecmp(state_name, job_state_string(state_num)) ||
 	    !strcasecmp(state_name, job_state_string_compact(state_num))) {
@@ -1050,7 +1142,7 @@ inline static bool _job_name_test(int state_num, const char *state_name)
 	return false;
 }
 
-int job_state_num(const char *state_name)
+extern int job_state_num(const char *state_name)
 {
 	int i;
 
@@ -1061,15 +1153,15 @@ int job_state_num(const char *state_name)
 
 	if (_job_name_test(JOB_COMPLETING, state_name))
 		return JOB_COMPLETING;
-	if (_job_name_test(JOB_COMPLETING, state_name))
-		return JOB_COMPLETING;
+	if (_job_name_test(JOB_CONFIGURING, state_name))
+		return JOB_CONFIGURING;
 	if (_job_name_test(JOB_RESIZING, state_name))
 		return JOB_RESIZING;
 
 	return -1;
 }
 
-char *trigger_res_type(uint16_t res_type)
+extern char *trigger_res_type(uint16_t res_type)
 {
 	if      (res_type == TRIGGER_RES_TYPE_JOB)
 		return "job";
@@ -1081,11 +1173,13 @@ char *trigger_res_type(uint16_t res_type)
 		return "slurmdbd";
 	else if (res_type == TRIGGER_RES_TYPE_DATABASE)
 		return "database";
+	else if (res_type == TRIGGER_RES_TYPE_FRONT_END)
+		return "front_end";
 	else
 		return "unknown";
 }
 
-char *trigger_type(uint32_t trig_type)
+extern char *trigger_type(uint32_t trig_type)
 {
 	if      (trig_type == TRIGGER_TYPE_UP)
 		return "up";
@@ -1177,16 +1271,27 @@ extern char *reservation_flags_string(uint16_t flags)
 			xstrcat(flag_str, ",");
 		xstrcat(flag_str, "SPEC_NODES");
 	}
+	if (flags & RESERVE_FLAG_LIC_ONLY) {
+		if (flag_str[0])
+			xstrcat(flag_str, ",");
+		xstrcat(flag_str, "LICENSE_ONLY");
+	}
+	if (flags & RESERVE_FLAG_NO_LIC_ONLY) {
+		if (flag_str[0])
+			xstrcat(flag_str, ",");
+		xstrcat(flag_str, "NO_LICENSE_ONLY");
+	}
 	return flag_str;
 }
 
-char *node_state_string(uint16_t inx)
+extern char *node_state_string(uint16_t inx)
 {
 	int  base            = (inx & NODE_STATE_BASE);
 	bool comp_flag       = (inx & NODE_STATE_COMPLETING);
 	bool drain_flag      = (inx & NODE_STATE_DRAIN);
 	bool fail_flag       = (inx & NODE_STATE_FAIL);
 	bool maint_flag      = (inx & NODE_STATE_MAINT);
+	bool resume_flag     = (inx & NODE_RESUME);
 	bool no_resp_flag    = (inx & NODE_STATE_NO_RESPOND);
 	bool power_down_flag = (inx & NODE_STATE_POWER_SAVE);
 	bool power_up_flag   = (inx & NODE_STATE_POWER_UP);
@@ -1285,6 +1390,8 @@ char *node_state_string(uint16_t inx)
 			return "FUTURE*";
 		return "FUTURE";
 	}
+	if (resume_flag)
+		return "RESUME";
 	if (base == NODE_STATE_UNKNOWN) {
 		if (no_resp_flag)
 			return "UNKNOWN*";
@@ -1293,12 +1400,13 @@ char *node_state_string(uint16_t inx)
 	return "?";
 }
 
-char *node_state_string_compact(uint16_t inx)
+extern char *node_state_string_compact(uint16_t inx)
 {
 	bool comp_flag       = (inx & NODE_STATE_COMPLETING);
 	bool drain_flag      = (inx & NODE_STATE_DRAIN);
 	bool fail_flag       = (inx & NODE_STATE_FAIL);
 	bool maint_flag      = (inx & NODE_STATE_MAINT);
+	bool resume_flag     = (inx & NODE_RESUME);
 	bool no_resp_flag    = (inx & NODE_STATE_NO_RESPOND);
 	bool power_down_flag = (inx & NODE_STATE_POWER_SAVE);
 	bool power_up_flag   = (inx & NODE_STATE_POWER_UP);
@@ -1399,6 +1507,8 @@ char *node_state_string_compact(uint16_t inx)
 			return "FUTR*";
 		return "FUTR";
 	}
+	if (resume_flag)
+		return "RESM";
 	if (inx == NODE_STATE_UNKNOWN) {
 		if (no_resp_flag)
 			return "UNK*";
@@ -1408,8 +1518,7 @@ char *node_state_string_compact(uint16_t inx)
 }
 
 
-extern void
-private_data_string(uint16_t private_data, char *str, int str_len)
+extern void private_data_string(uint16_t private_data, char *str, int str_len)
 {
 	if (str_len > 0)
 		str[0] = '\0';
@@ -1451,8 +1560,7 @@ private_data_string(uint16_t private_data, char *str, int str_len)
 		strcat(str, "none");
 }
 
-extern void
-accounting_enforce_string(uint16_t enforce, char *str, int str_len)
+extern void accounting_enforce_string(uint16_t enforce, char *str, int str_len)
 {
 	if (str_len > 0)
 		str[0] = '\0';
@@ -1509,6 +1617,29 @@ extern char *conn_type_string(enum connection_type conn_type)
 	return "n/a";
 }
 
+/* caller must xfree after call */
+extern char *conn_type_string_full(uint16_t *conn_type)
+{
+	uint32_t cluster_flags = slurmdb_setup_cluster_flags();
+
+	if ((cluster_flags & CLUSTER_FLAG_BGQ)
+	    && (conn_type[0] < SELECT_SMALL)) {
+		int dim, pos = 0;
+		uint16_t cluster_dims = slurmdb_setup_cluster_dims();
+		char conn_type_part[cluster_dims*2], *tmp_char;
+
+		for (dim = 0; dim < cluster_dims; dim++) {
+			if (pos)
+				conn_type_part[pos++] = ',';
+			tmp_char = conn_type_string(conn_type[dim]);
+			conn_type_part[pos++] = tmp_char[0];
+		}
+		conn_type_part[pos] = '\0';
+		return xstrdup(conn_type_part);
+	} else
+		return xstrdup(conn_type_string(conn_type[0]));
+}
+
 extern char* node_use_string(enum node_use_type node_use)
 {
 	switch (node_use) {
@@ -1524,50 +1655,53 @@ extern char* node_use_string(enum node_use_type node_use)
 
 extern char *bg_block_state_string(uint16_t state)
 {
-	static char tmp[16];
-	/* This is needs to happen cross cluster.  Since the enums
-	 * changed.  We don't handle BUSY or REBOOTING though, these
-	 * states are extremely rare so it isn't that big of a deal.
-	 */
-#ifdef HAVE_BGL
-	if(working_cluster_rec) {
-		if(!(working_cluster_rec->flags & CLUSTER_FLAG_BGL)) {
-			if(state == RM_PARTITION_BUSY)
-				state = RM_PARTITION_READY;
+	static char tmp[25];
+	char *state_str = NULL;
+	char *err_str = NULL;
+	if (state & BG_BLOCK_ERROR_FLAG) {
+		err_str = "Error";
+		state &= (~BG_BLOCK_ERROR_FLAG);
+	}
+
+	switch (state) {
+	case BG_BLOCK_NAV:
+		if (!err_str)
+			state_str = "NAV";
+		else {
+			err_str = NULL;
+			state_str = "Error";
 		}
-	}
-#else
-	if(working_cluster_rec) {
-		if(working_cluster_rec->flags & CLUSTER_FLAG_BGL) {
-			if(state == RM_PARTITION_REBOOTING)
-				state = RM_PARTITION_READY;
-		}
-	}
-#endif
-
-	switch ((rm_partition_state_t)state) {
-#ifdef HAVE_BGL
-	case RM_PARTITION_BUSY:
-		return "BUSY";
-#else
-	case RM_PARTITION_REBOOTING:
-		return "REBOOTING";
-#endif
-	case RM_PARTITION_CONFIGURING:
-		return "CONFIG";
-	case RM_PARTITION_DEALLOCATING:
-		return "DEALLOC";
-	case RM_PARTITION_ERROR:
-		return "ERROR";
-	case RM_PARTITION_FREE:
-		return "FREE";
-	case RM_PARTITION_NAV:
-		return "NAV";
-	case RM_PARTITION_READY:
-		return "READY";
+		break;
+	case BG_BLOCK_FREE:
+		state_str = "Free";
+		break;
+	case BG_BLOCK_BUSY:
+		state_str = "Busy";
+		break;
+	case BG_BLOCK_BOOTING:
+		state_str = "Boot";
+		break;
+	case BG_BLOCK_REBOOTING:
+		state_str = "Reboot";
+		break;
+	case BG_BLOCK_INITED:
+		state_str = "Ready";
+		break;
+	case BG_BLOCK_ALLOCATED:
+		state_str = "Alloc";
+		break;
+	case BG_BLOCK_TERM:
+		state_str = "Term";
+		break;
+	default:
+		state_str = "Unknown";
+		break;
 	}
 
-	snprintf(tmp, sizeof(tmp), "%d", state);
+	if (err_str)
+		snprintf(tmp, sizeof(tmp), "%s(%s)", err_str, state_str);
+	else
+		return state_str;
 	return tmp;
 }
 
@@ -1577,7 +1711,7 @@ extern char *bg_block_state_string(uint16_t state)
  * IN msg - pointer to allocation response message
  * NOTE: buffer is loaded by slurm_allocate_resources
  */
-void slurm_free_resource_allocation_response_msg (
+extern void slurm_free_resource_allocation_response_msg (
 	resource_allocation_response_msg_t * msg)
 {
 	if (msg) {
@@ -1596,7 +1730,7 @@ void slurm_free_resource_allocation_response_msg (
  * IN msg - pointer to response message from slurm_sbcast_lookup()
  * NOTE: buffer is loaded by slurm_allocate_resources
  */
-void slurm_free_sbcast_cred_msg(job_sbcast_cred_msg_t * msg)
+extern void slurm_free_sbcast_cred_msg(job_sbcast_cred_msg_t * msg)
 {
 	if (msg) {
 		xfree(msg->node_addr);
@@ -1612,11 +1746,12 @@ void slurm_free_sbcast_cred_msg(job_sbcast_cred_msg_t * msg)
  * IN msg - pointer to job allocation info response message
  * NOTE: buffer is loaded by slurm_allocate_resources
  */
-void slurm_free_job_alloc_info_response_msg(job_alloc_info_response_msg_t *msg)
+extern void slurm_free_job_alloc_info_response_msg(
+		job_alloc_info_response_msg_t *msg)
 {
 	if (msg) {
-		select_g_select_jobinfo_free(msg->select_jobinfo);
-		msg->select_jobinfo = NULL;
+		if (msg->select_jobinfo)
+			select_g_select_jobinfo_free(msg->select_jobinfo);
 		xfree(msg->node_list);
 		xfree(msg->cpus_per_node);
 		xfree(msg->cpu_count_reps);
@@ -1632,13 +1767,15 @@ void slurm_free_job_alloc_info_response_msg(job_alloc_info_response_msg_t *msg)
  * IN msg - pointer to job step create response message
  * NOTE: buffer is loaded by slurm_job_step_create
  */
-void slurm_free_job_step_create_response_msg(
+extern void slurm_free_job_step_create_response_msg(
 	job_step_create_response_msg_t * msg)
 {
 	if (msg) {
 		xfree(msg->resv_ports);
 		slurm_step_layout_destroy(msg->step_layout);
 		slurm_cred_destroy(msg->cred);
+		if (msg->select_jobinfo)
+			select_g_select_jobinfo_free(msg->select_jobinfo);
 		if (msg->switch_job)
 			switch_free_jobinfo(msg->switch_job);
 
@@ -1654,10 +1791,9 @@ void slurm_free_job_step_create_response_msg(
  * IN msg - pointer to job submit response message
  * NOTE: buffer is loaded by slurm_submit_batch_job
  */
-void slurm_free_submit_response_response_msg(submit_response_msg_t * msg)
+extern void slurm_free_submit_response_response_msg(submit_response_msg_t * msg)
 {
-	if (msg)
-		xfree(msg);
+	xfree(msg);
 }
 
 
@@ -1666,7 +1802,7 @@ void slurm_free_submit_response_response_msg(submit_response_msg_t * msg)
  * IN msg - pointer to slurm control information response message
  * NOTE: buffer is loaded by slurm_load_jobs
  */
-void slurm_free_ctl_conf(slurm_ctl_conf_info_msg_t * config_ptr)
+extern void slurm_free_ctl_conf(slurm_ctl_conf_info_msg_t * config_ptr)
 {
 	if (config_ptr) {
 		free_slurm_conf(config_ptr, 0);
@@ -1695,7 +1831,7 @@ extern void slurm_free_slurmd_status(slurmd_status_t* slurmd_status_ptr)
  * IN msg - pointer to job information response message
  * NOTE: buffer is loaded by slurm_load_job.
  */
-void slurm_free_job_info_msg(job_info_msg_t * job_buffer_ptr)
+extern void slurm_free_job_info_msg(job_info_msg_t * job_buffer_ptr)
 {
 	if (job_buffer_ptr) {
 		if (job_buffer_ptr->job_array) {
@@ -1724,7 +1860,7 @@ static void _free_all_job_info(job_info_msg_t *msg)
  * IN msg - pointer to job step information response message
  * NOTE: buffer is loaded by slurm_get_job_steps.
  */
-void slurm_free_job_step_info_response_msg(job_step_info_response_msg_t *
+extern void slurm_free_job_step_info_response_msg(job_step_info_response_msg_t *
 					   msg)
 {
 	if (msg != NULL) {
@@ -1748,26 +1884,62 @@ static void _free_all_step_info (job_step_info_response_msg_t *msg)
 		slurm_free_job_step_info_members (&msg->job_steps[i]);
 }
 
-void slurm_free_job_step_info_members (job_step_info_t * msg)
+extern void slurm_free_job_step_info_members (job_step_info_t * msg)
 {
 	if (msg != NULL) {
-		xfree(msg->partition);
-		xfree(msg->resv_ports);
-		xfree(msg->nodes);
+		xfree(msg->ckpt_dir);
 		xfree(msg->name);
 		xfree(msg->network);
+		xfree(msg->nodes);
 		xfree(msg->node_inx);
-		xfree(msg->ckpt_dir);
+		xfree(msg->partition);
+		xfree(msg->resv_ports);
+		select_g_select_jobinfo_free(msg->select_jobinfo);
+		msg->select_jobinfo = NULL;
 	}
 }
 
+/*
+ * slurm_free_front_end_info - free the front_end information response message
+ * IN msg - pointer to front_end information response message
+ * NOTE: buffer is loaded by slurm_load_front_end.
+ */
+extern void slurm_free_front_end_info_msg(front_end_info_msg_t * msg)
+{
+	if (msg) {
+		if (msg->front_end_array) {
+			_free_all_front_end_info(msg);
+			xfree(msg->front_end_array);
+		}
+		xfree(msg);
+	}
+}
+
+static void _free_all_front_end_info(front_end_info_msg_t *msg)
+{
+	int i;
+
+	if ((msg == NULL) || (msg->front_end_array == NULL))
+		return;
+
+	for (i = 0; i < msg->record_count; i++)
+		slurm_free_front_end_info_members(&msg->front_end_array[i]);
+}
+
+extern void slurm_free_front_end_info_members(front_end_info_t * front_end)
+{
+	if (front_end) {
+		xfree(front_end->name);
+		xfree(front_end->reason);
+	}
+}
 
 /*
  * slurm_free_node_info - free the node information response message
  * IN msg - pointer to node information response message
  * NOTE: buffer is loaded by slurm_load_node.
  */
-void slurm_free_node_info_msg(node_info_msg_t * msg)
+extern void slurm_free_node_info_msg(node_info_msg_t * msg)
 {
 	if (msg) {
 		if (msg->node_array) {
@@ -1782,20 +1954,21 @@ static void _free_all_node_info(node_info_msg_t *msg)
 {
 	int i;
 
-	if ((msg == NULL) ||
-	    (msg->node_array == NULL))
+	if ((msg == NULL) || (msg->node_array == NULL))
 		return;
 
 	for (i = 0; i < msg->record_count; i++)
 		slurm_free_node_info_members(&msg->node_array[i]);
 }
 
-void slurm_free_node_info_members(node_info_t * node)
+extern void slurm_free_node_info_members(node_info_t * node)
 {
 	if (node) {
-		xfree(node->name);
 		xfree(node->arch);
 		xfree(node->features);
+		xfree(node->name);
+		xfree(node->node_hostname);
+		xfree(node->node_addr);
 		xfree(node->os);
 		xfree(node->reason);
 		select_g_select_nodeinfo_free(node->select_nodeinfo);
@@ -1810,7 +1983,7 @@ void slurm_free_node_info_members(node_info_t * node)
  * IN msg - pointer to partition information response message
  * NOTE: buffer is loaded by slurm_load_partitions
  */
-void slurm_free_partition_info_msg(partition_info_msg_t * msg)
+extern void slurm_free_partition_info_msg(partition_info_msg_t * msg)
 {
 	if (msg) {
 		if (msg->partition_array) {
@@ -1835,7 +2008,7 @@ static void  _free_all_partitions(partition_info_msg_t *msg)
 
 }
 
-void slurm_free_partition_info_members(partition_info_t * part)
+extern void slurm_free_partition_info_members(partition_info_t * part)
 {
 	if (part) {
 		xfree(part->allow_alloc_nodes);
@@ -1853,7 +2026,7 @@ void slurm_free_partition_info_members(partition_info_t * part)
  * IN msg - pointer to reservation information response message
  * NOTE: buffer is loaded by slurm_load_reservation
  */
-void slurm_free_reservation_info_msg(reserve_info_msg_t * msg)
+extern void slurm_free_reservation_info_msg(reserve_info_msg_t * msg)
 {
 	if (msg) {
 		if (msg->reservation_array) {
@@ -1878,7 +2051,7 @@ static void  _free_all_reservations(reserve_info_msg_t *msg)
 
 }
 
-void slurm_free_reserve_info_members(reserve_info_t * resv)
+extern void slurm_free_reserve_info_members(reserve_info_t * resv)
 {
 	if (resv) {
 		xfree(resv->accounts);
@@ -1908,6 +2081,7 @@ extern void slurm_free_topo_info_msg(topo_info_response_msg_t *msg)
 			xfree(msg->topo_array[i].nodes);
 			xfree(msg->topo_array[i].switches);
 		}
+		xfree(msg->topo_array);
 		xfree(msg);
 	}
 }
@@ -1952,17 +2126,30 @@ extern void slurm_free_job_step_pids(void *object)
 }
 
 
+extern void slurm_free_block_job_info(void *object)
+{
+	block_job_info_t *block_job_info = (block_job_info_t *)object;
+	if (block_job_info) {
+		xfree(block_job_info->cnodes);
+		xfree(block_job_info->cnode_inx);
+		xfree(block_job_info->user_name);
+		xfree(block_job_info);
+	}
+}
+
 extern void slurm_free_block_info_members(block_info_t *block_info)
 {
 	if(block_info) {
 		xfree(block_info->bg_block_id);
 		xfree(block_info->blrtsimage);
-		xfree(block_info->bp_inx);
-		xfree(block_info->ionodes);
 		xfree(block_info->ionode_inx);
+		xfree(block_info->ionode_str);
 		xfree(block_info->linuximage);
 		xfree(block_info->mloaderimage);
-		xfree(block_info->nodes);
+		xfree(block_info->mp_inx);
+		xfree(block_info->mp_str);
+		xfree(block_info->mp_used_inx);
+		xfree(block_info->mp_used_str);
 		xfree(block_info->owner_name);
 		xfree(block_info->ramdiskimage);
 		xfree(block_info->reason);
@@ -1991,12 +2178,12 @@ extern void slurm_free_block_info_msg(block_info_msg_t *block_info_msg)
 	}
 }
 
-inline void slurm_free_block_info_request_msg(
+extern void slurm_free_block_info_request_msg(
 	block_info_request_msg_t *msg)
 {
 	xfree(msg);
 }
-inline void slurm_free_trigger_msg(trigger_info_msg_t *msg)
+extern void slurm_free_trigger_msg(trigger_info_msg_t *msg)
 {
 	int i;
 
@@ -2008,7 +2195,12 @@ inline void slurm_free_trigger_msg(trigger_info_msg_t *msg)
 	xfree(msg);
 }
 
-void slurm_free_set_debug_level_msg(set_debug_level_msg_t *msg)
+extern void slurm_free_set_debug_flags_msg(set_debug_flags_msg_t *msg)
+{
+	xfree(msg);
+}
+
+extern void slurm_free_set_debug_level_msg(set_debug_level_msg_t *msg)
 {
 	xfree(msg);
 }
@@ -2026,7 +2218,7 @@ extern void slurm_destroy_association_shares_object(void *object)
 	}
 }
 
-inline void slurm_free_shares_request_msg(shares_request_msg_t *msg)
+extern void slurm_free_shares_request_msg(shares_request_msg_t *msg)
 {
 	if(msg) {
 		if(msg->acct_list)
@@ -2037,7 +2229,7 @@ inline void slurm_free_shares_request_msg(shares_request_msg_t *msg)
 	}
 }
 
-inline void slurm_free_shares_response_msg(shares_response_msg_t *msg)
+extern void slurm_free_shares_response_msg(shares_response_msg_t *msg)
 {
 	if(msg) {
 		if(msg->assoc_shares_list)
@@ -2053,7 +2245,7 @@ extern void slurm_destroy_priority_factors_object(void *object)
 	xfree(obj_ptr);
 }
 
-inline void slurm_free_priority_factors_request_msg(
+extern void slurm_free_priority_factors_request_msg(
 	priority_factors_request_msg_t *msg)
 {
 	if(msg) {
@@ -2065,7 +2257,7 @@ inline void slurm_free_priority_factors_request_msg(
 	}
 }
 
-inline void slurm_free_priority_factors_response_msg(
+extern void slurm_free_priority_factors_response_msg(
 	priority_factors_response_msg_t *msg)
 {
 	if(msg) {
@@ -2076,7 +2268,7 @@ inline void slurm_free_priority_factors_response_msg(
 }
 
 
-inline void slurm_free_accounting_update_msg(accounting_update_msg_t *msg)
+extern void slurm_free_accounting_update_msg(accounting_update_msg_t *msg)
 {
 	if(msg) {
 		if(msg->update_list)
@@ -2137,6 +2329,9 @@ extern int slurm_free_msg_data(slurm_msg_type_t type, void *data)
 	case REQUEST_SHUTDOWN:
 		slurm_free_shutdown_msg(data);
 		break;
+	case REQUEST_UPDATE_FRONT_END:
+		slurm_free_update_front_end_msg(data);
+		break;
 	case REQUEST_UPDATE_NODE:
 		slurm_free_update_node_msg(data);
 		break;
@@ -2170,7 +2365,11 @@ extern int slurm_free_msg_data(slurm_msg_type_t type, void *data)
 	case REQUEST_CHECKPOINT_TASK_COMP:
 		slurm_free_checkpoint_task_comp_msg(data);
 		break;
+	case REQUEST_FRONT_END_INFO:
+		slurm_free_front_end_info_request_msg(data);
+		break;
 	case REQUEST_SUSPEND:
+	case SRUN_REQUEST_SUSPEND:
 		slurm_free_suspend_msg(data);
 		break;
 	case REQUEST_JOB_READY:
@@ -2215,6 +2414,7 @@ extern int slurm_free_msg_data(slurm_msg_type_t type, void *data)
 	case REQUEST_CHECKPOINT_TASKS:
 		slurm_free_checkpoint_tasks_msg(data);
 		break;
+	case REQUEST_KILL_PREEMPTED:
 	case REQUEST_KILL_TIMELIMIT:
 		slurm_free_timelimit_msg(data);
 		break;
@@ -2243,6 +2443,9 @@ extern int slurm_free_msg_data(slurm_msg_type_t type, void *data)
 	case RESPONSE_SLURM_RC:
 		slurm_free_return_code_msg(data);
 		break;
+	case REQUEST_SET_DEBUG_FLAGS:
+		slurm_free_set_debug_flags_msg(data);
+		break;
 	case REQUEST_SET_DEBUG_LEVEL:
 	case REQUEST_SET_SCHEDLOG_LEVEL:
 		slurm_free_set_debug_level_msg(data);
@@ -2256,6 +2459,7 @@ extern int slurm_free_msg_data(slurm_msg_type_t type, void *data)
 	case REQUEST_DAEMON_STATUS:
 	case REQUEST_HEALTH_CHECK:
 	case ACCOUNTING_FIRST_REG:
+	case ACCOUNTING_REGISTER_CTLD:
 	case REQUEST_TOPO_INFO:
 		/* No body to free */
 		break;
@@ -2267,6 +2471,13 @@ extern int slurm_free_msg_data(slurm_msg_type_t type, void *data)
 		break;
 	case REQUEST_UPDATE_JOB_STEP:
 		slurm_free_update_step_msg(data);
+		break;
+	case REQUEST_SPANK_ENVIRONMENT:
+		slurm_free_spank_env_request_msg(data);
+		break;
+	case RESPONCE_SPANK_ENVIRONMENT:
+		slurm_free_spank_env_responce_msg(data);
+		break;
 	default:
 		error("invalid type trying to be freed %u", type);
 		break;
@@ -2307,7 +2518,7 @@ extern uint32_t slurm_get_return_code(slurm_msg_type_t type, void *data)
 	return rc;
 }
 
-inline void slurm_free_job_notify_msg(job_notify_msg_t * msg)
+extern void slurm_free_job_notify_msg(job_notify_msg_t * msg)
 {
 	if (msg) {
 		xfree(msg->message);
diff --git a/src/common/slurm_protocol_defs.h b/src/common/slurm_protocol_defs.h
index d6d2b2220..a2404347a 100644
--- a/src/common/slurm_protocol_defs.h
+++ b/src/common/slurm_protocol_defs.h
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -54,10 +54,10 @@
 #  include <inttypes.h>
 #endif				/*  HAVE_CONFIG_H */
 
-#include <slurm/slurm.h>
-#include <slurm/slurmdb.h>
 #include <sys/wait.h>
 
+#include "slurm/slurm.h"
+#include "slurm/slurmdb.h"
 #include "src/common/bitstring.h"
 #include "src/common/job_options.h"
 #include "src/common/list.h"
@@ -153,11 +153,6 @@ enum task_flag_vals {
 	TASK_UNUSED2 = 0x4
 };
 
-enum suspend_opts {
-	SUSPEND_JOB,		/* Suspend a job now */
-	RESUME_JOB		/* Resume a job now */
-};
-
 /*
  * SLURM Message types
  *
@@ -179,6 +174,7 @@ typedef enum {
 	REQUEST_HEALTH_CHECK,
 	REQUEST_TAKEOVER,
 	REQUEST_SET_SCHEDLOG_LEVEL,
+	REQUEST_SET_DEBUG_FLAGS,
 
 	REQUEST_BUILD_INFO = 2001,
 	RESPONSE_BUILD_INFO,
@@ -210,6 +206,10 @@ typedef enum {
 	REQUEST_TOPO_INFO,
 	RESPONSE_TOPO_INFO,
 	REQUEST_TRIGGER_PULL,
+	REQUEST_FRONT_END_INFO,
+	RESPONSE_FRONT_END_INFO,
+	REQUEST_SPANK_ENVIRONMENT,
+	RESPONCE_SPANK_ENVIRONMENT,
 
 	REQUEST_UPDATE_JOB = 3001,
 	REQUEST_UPDATE_NODE,
@@ -221,6 +221,7 @@ typedef enum {
 	REQUEST_DELETE_RESERVATION,
 	REQUEST_UPDATE_RESERVATION,
 	REQUEST_UPDATE_BLOCK,
+	REQUEST_UPDATE_FRONT_END,
 
 	REQUEST_RESOURCE_ALLOCATION = 4001,
 	RESPONSE_RESOURCE_ALLOCATION,
@@ -292,6 +293,7 @@ typedef enum {
 				 * job/step/task complete responses */
 	REQUEST_FILE_BCAST,
 	TASK_USER_MANAGED_IO_STREAM,
+	REQUEST_KILL_PREEMPTED,
 
 	SRUN_PING = 7001,
 	SRUN_TIMEOUT,
@@ -300,6 +302,7 @@ typedef enum {
 	SRUN_USER_MSG,
 	SRUN_EXEC,
 	SRUN_STEP_MISSING,
+	SRUN_REQUEST_SUSPEND,
 
 	PMI_KVS_PUT_REQ = 7201,
 	PMI_KVS_PUT_RESP,
@@ -312,7 +315,7 @@ typedef enum {
 
 	ACCOUNTING_UPDATE_MSG = 10001,
 	ACCOUNTING_FIRST_REG,
-
+	ACCOUNTING_REGISTER_CTLD,
 } slurm_msg_type_t;
 
 typedef enum {
@@ -492,6 +495,10 @@ typedef struct node_info_request_msg {
 	uint16_t show_flags;
 } node_info_request_msg_t;
 
+typedef struct front_end_info_request_msg {
+	time_t last_update;
+} front_end_info_request_msg_t;
+
 typedef struct block_info_request_msg {
 	time_t last_update;
 	uint16_t show_flags;
@@ -556,6 +563,11 @@ typedef struct last_update_msg {
 	time_t last_update;
 } last_update_msg_t;
 
+typedef struct set_debug_flags_msg {
+	uint32_t debug_flags_minus;
+	uint32_t debug_flags_plus;
+} set_debug_flags_msg_t;
+
 typedef struct set_debug_level_msg {
 	uint32_t debug_level;
 } set_debug_level_msg_t;
@@ -600,6 +612,7 @@ typedef struct job_step_create_response_msg {
 	slurm_step_layout_t *step_layout; /* information about how the
                                            * step is laid out */
 	slurm_cred_t *cred;    	  /* slurm job credential */
+	dynamic_plugin_data_t *select_jobinfo;	/* select opaque data type */
 	switch_jobinfo_t *switch_job;	/* switch context, opaque
                                          * data structure */
 } job_step_create_response_msg_t;
@@ -667,6 +680,7 @@ typedef struct launch_tasks_request_msg {
 	char *restart_dir;	/* restart from checkpoint if set */
 	char **spank_job_env;
 	uint32_t spank_job_env_size;
+	dynamic_plugin_data_t *select_jobinfo; /* select context, opaque data */
 } launch_tasks_request_msg_t;
 
 typedef struct task_user_managed_io_msg {
@@ -687,6 +701,7 @@ typedef struct return_code_msg {
  * from getting the MPIRUN_PARTITION at that time. It is needed for
  * the job epilog. */
 
+#define SIG_PREEMPTED	994	/* Dummy signal value for job preemption */
 #define SIG_DEBUG_WAKE	995	/* Dummy signal value to wake procs stopped 
 				 * for debugger */
 #define SIG_TIME_LIMIT	996	/* Dummy signal value for time limit reached */
@@ -766,6 +781,9 @@ typedef struct batch_job_launch_msg {
 	slurm_cred_t *cred;
 	uint8_t open_mode;	/* stdout/err append or truncate */
 	uint8_t overcommit;	/* if resources being over subscribed */
+	uint32_t pn_min_memory;  /* minimum real memory per node OR
+				  * real memory per CPU | MEM_PER_CPU,
+				  * default=0 (no limit) */
 	uint16_t acctg_freq;	/* accounting polling interval	*/
 	uint32_t job_mem;	/* memory limit for job		*/
 	uint16_t restart_cnt;	/* batch job restart count	*/
@@ -821,14 +839,9 @@ typedef struct checkpoint_resp_msg {
 	char   * error_msg;	/* error message on failure */
 } checkpoint_resp_msg_t;
 
-typedef struct suspend_msg {
-	uint16_t op;            /* suspend operation, see enum suspend_opts */
-	uint32_t job_id;        /* slurm job_id */
-} suspend_msg_t;
-
 typedef struct kvs_get_msg {
-	uint16_t task_id;	/* job step's task id */
-	uint16_t size;		/* count of tasks in job */
+	uint32_t task_id;	/* job step's task id */
+	uint32_t size;		/* count of tasks in job */
 	uint16_t port;		/* port to be sent the kvs data */
 	char * hostname;	/* hostname to be sent the kvs data */
 } kvs_get_msg_t;
@@ -908,6 +921,15 @@ typedef struct {
 	uint16_t rpc_version;
 } accounting_update_msg_t;
 
+typedef struct {
+	uint32_t job_id;	/* ID of job of request */
+} spank_env_request_msg_t;
+
+typedef struct {
+	uint32_t spank_job_env_size;
+	char **spank_job_env;	/* spank environment */
+} spank_env_responce_msg_t;
+
 typedef struct slurm_ctl_conf slurm_ctl_conf_info_msg_t;
 /*****************************************************************************\
  *	SLURM MESSAGE INITIALIZATION
@@ -938,125 +960,135 @@ extern int slurm_sort_char_list_asc(char *name_a, char *name_b);
 extern int slurm_sort_char_list_desc(char *name_a, char *name_b);
 
 /* free message functions */
-inline void slurm_free_checkpoint_tasks_msg(checkpoint_tasks_msg_t * msg);
-inline void slurm_free_last_update_msg(last_update_msg_t * msg);
-inline void slurm_free_return_code_msg(return_code_msg_t * msg);
-inline void slurm_free_job_alloc_info_msg(job_alloc_info_msg_t * msg);
-inline void slurm_free_job_info_request_msg(job_info_request_msg_t *msg);
-inline void slurm_free_job_step_info_request_msg(
+extern void slurm_free_checkpoint_tasks_msg(checkpoint_tasks_msg_t * msg);
+extern void slurm_free_last_update_msg(last_update_msg_t * msg);
+extern void slurm_free_return_code_msg(return_code_msg_t * msg);
+extern void slurm_free_job_alloc_info_msg(job_alloc_info_msg_t * msg);
+extern void slurm_free_job_info_request_msg(job_info_request_msg_t *msg);
+extern void slurm_free_job_step_info_request_msg(
 		job_step_info_request_msg_t *msg);
-inline void slurm_free_node_info_request_msg(node_info_request_msg_t *msg);
-inline void slurm_free_part_info_request_msg(part_info_request_msg_t *msg);
-inline void slurm_free_resv_info_request_msg(resv_info_request_msg_t *msg);
-inline void slurm_free_set_debug_level_msg(set_debug_level_msg_t *msg);
+extern void slurm_free_front_end_info_request_msg(
+		front_end_info_request_msg_t *msg);
+extern void slurm_free_node_info_request_msg(node_info_request_msg_t *msg);
+extern void slurm_free_part_info_request_msg(part_info_request_msg_t *msg);
+extern void slurm_free_resv_info_request_msg(resv_info_request_msg_t *msg);
+extern void slurm_free_set_debug_flags_msg(set_debug_flags_msg_t *msg);
+extern void slurm_free_set_debug_level_msg(set_debug_level_msg_t *msg);
 extern void slurm_destroy_association_shares_object(void *object);
-inline void slurm_free_shares_request_msg(shares_request_msg_t *msg);
-inline void slurm_free_shares_response_msg(shares_response_msg_t *msg);
+extern void slurm_free_shares_request_msg(shares_request_msg_t *msg);
+extern void slurm_free_shares_response_msg(shares_response_msg_t *msg);
 extern void slurm_destroy_priority_factors_object(void *object);
-inline void slurm_free_priority_factors_request_msg(
+extern void slurm_free_priority_factors_request_msg(
 	priority_factors_request_msg_t *msg);
-inline void slurm_free_priority_factors_response_msg(
+extern void slurm_free_priority_factors_response_msg(
 	priority_factors_response_msg_t *msg);
 
 #define	slurm_free_timelimit_msg(msg) \
 	slurm_free_kill_job_msg(msg)
 
-inline void slurm_free_shutdown_msg(shutdown_msg_t * msg);
+extern void slurm_free_shutdown_msg(shutdown_msg_t * msg);
 
-inline void slurm_free_job_desc_msg(job_desc_msg_t * msg);
+extern void slurm_free_job_desc_msg(job_desc_msg_t * msg);
 
-inline void
+extern void
 slurm_free_node_registration_status_msg(slurm_node_registration_status_msg_t *
 					msg);
 
-inline void slurm_free_job_info(job_info_t * job);
-inline void slurm_free_job_info_members(job_info_t * job);
-
-inline void slurm_free_job_id_msg(job_id_msg_t * msg);
-inline void slurm_free_job_id_request_msg(job_id_request_msg_t * msg);
-inline void slurm_free_job_id_response_msg(job_id_response_msg_t * msg);
-
-inline void slurm_free_job_step_id_msg(job_step_id_msg_t *msg);
-
-inline void slurm_free_job_launch_msg(batch_job_launch_msg_t * msg);
-
-inline void slurm_free_update_node_msg(update_node_msg_t * msg);
-inline void slurm_free_update_part_msg(update_part_msg_t * msg);
-inline void slurm_free_delete_part_msg(delete_part_msg_t * msg);
-inline void slurm_free_resv_desc_msg(resv_desc_msg_t * msg);
-inline void slurm_free_resv_name_msg(reservation_name_msg_t * msg);
-inline void slurm_free_resv_info_request_msg(resv_info_request_msg_t * msg);
-inline void
-slurm_free_job_step_create_request_msg(job_step_create_request_msg_t * msg);
-inline void
-slurm_free_job_step_create_response_msg(job_step_create_response_msg_t *msg);
-inline void
-slurm_free_complete_job_allocation_msg(complete_job_allocation_msg_t * msg);
-inline void
-slurm_free_complete_batch_script_msg(complete_batch_script_msg_t * msg);
-inline void
-slurm_free_launch_tasks_request_msg(launch_tasks_request_msg_t * msg);
-inline void
-slurm_free_launch_tasks_response_msg(launch_tasks_response_msg_t * msg);
-inline void slurm_free_task_user_managed_io_stream_msg(
-	task_user_managed_io_msg_t *msg);
-inline void slurm_free_task_exit_msg(task_exit_msg_t * msg);
-inline void slurm_free_kill_tasks_msg(kill_tasks_msg_t * msg);
-inline void
-slurm_free_reattach_tasks_request_msg(reattach_tasks_request_msg_t * msg);
-inline void
-slurm_free_reattach_tasks_response_msg(reattach_tasks_response_msg_t * msg);
-inline void slurm_free_kill_job_msg(kill_job_msg_t * msg);
-inline void slurm_free_signal_job_msg(signal_job_msg_t * msg);
-inline void slurm_free_update_job_time_msg(job_time_msg_t * msg);
-inline void slurm_free_job_step_kill_msg(job_step_kill_msg_t * msg);
-inline void slurm_free_epilog_complete_msg(epilog_complete_msg_t * msg);
-inline void slurm_free_srun_job_complete_msg(srun_job_complete_msg_t * msg);
-inline void slurm_free_srun_exec_msg(srun_exec_msg_t *msg);
-inline void slurm_free_srun_ping_msg(srun_ping_msg_t * msg);
-inline void slurm_free_srun_node_fail_msg(srun_node_fail_msg_t * msg);
-inline void slurm_free_srun_step_missing_msg(srun_step_missing_msg_t * msg);
-inline void slurm_free_srun_timeout_msg(srun_timeout_msg_t * msg);
-inline void slurm_free_srun_user_msg(srun_user_msg_t * msg);
-inline void slurm_free_checkpoint_msg(checkpoint_msg_t *msg);
-inline void slurm_free_checkpoint_comp_msg(checkpoint_comp_msg_t *msg);
-inline void slurm_free_checkpoint_task_comp_msg(checkpoint_task_comp_msg_t *msg);
-inline void slurm_free_checkpoint_resp_msg(checkpoint_resp_msg_t *msg);
-inline void slurm_free_suspend_msg(suspend_msg_t *msg);
-void slurm_free_update_step_msg(step_update_request_msg_t * msg);
-void slurm_free_resource_allocation_response_msg (
+extern void slurm_free_job_info(job_info_t * job);
+extern void slurm_free_job_info_members(job_info_t * job);
+
+extern void slurm_free_job_id_msg(job_id_msg_t * msg);
+extern void slurm_free_job_id_request_msg(job_id_request_msg_t * msg);
+extern void slurm_free_job_id_response_msg(job_id_response_msg_t * msg);
+
+extern void slurm_free_job_step_id_msg(job_step_id_msg_t *msg);
+
+extern void slurm_free_job_launch_msg(batch_job_launch_msg_t * msg);
+
+extern void slurm_free_update_front_end_msg(update_front_end_msg_t * msg);
+extern void slurm_free_update_node_msg(update_node_msg_t * msg);
+extern void slurm_free_update_part_msg(update_part_msg_t * msg);
+extern void slurm_free_delete_part_msg(delete_part_msg_t * msg);
+extern void slurm_free_resv_desc_msg(resv_desc_msg_t * msg);
+extern void slurm_free_resv_name_msg(reservation_name_msg_t * msg);
+extern void slurm_free_resv_info_request_msg(resv_info_request_msg_t * msg);
+extern void slurm_free_job_step_create_request_msg(
+		job_step_create_request_msg_t * msg);
+extern void slurm_free_job_step_create_response_msg(
+		job_step_create_response_msg_t *msg);
+extern void slurm_free_complete_job_allocation_msg(
+		complete_job_allocation_msg_t * msg);
+extern void slurm_free_complete_batch_script_msg(
+		complete_batch_script_msg_t * msg);
+extern void slurm_free_launch_tasks_request_msg(
+		launch_tasks_request_msg_t * msg);
+extern void slurm_free_launch_tasks_response_msg(
+		launch_tasks_response_msg_t * msg);
+extern void slurm_free_task_user_managed_io_stream_msg(
+		task_user_managed_io_msg_t *msg);
+extern void slurm_free_task_exit_msg(task_exit_msg_t * msg);
+extern void slurm_free_kill_tasks_msg(kill_tasks_msg_t * msg);
+extern void slurm_free_reattach_tasks_request_msg(
+		reattach_tasks_request_msg_t * msg);
+extern void slurm_free_reattach_tasks_response_msg(
+		reattach_tasks_response_msg_t * msg);
+extern void slurm_free_kill_job_msg(kill_job_msg_t * msg);
+extern void slurm_free_signal_job_msg(signal_job_msg_t * msg);
+extern void slurm_free_update_job_time_msg(job_time_msg_t * msg);
+extern void slurm_free_job_step_kill_msg(job_step_kill_msg_t * msg);
+extern void slurm_free_epilog_complete_msg(epilog_complete_msg_t * msg);
+extern void slurm_free_srun_job_complete_msg(srun_job_complete_msg_t * msg);
+extern void slurm_free_srun_exec_msg(srun_exec_msg_t *msg);
+extern void slurm_free_srun_ping_msg(srun_ping_msg_t * msg);
+extern void slurm_free_srun_node_fail_msg(srun_node_fail_msg_t * msg);
+extern void slurm_free_srun_step_missing_msg(srun_step_missing_msg_t * msg);
+extern void slurm_free_srun_timeout_msg(srun_timeout_msg_t * msg);
+extern void slurm_free_srun_user_msg(srun_user_msg_t * msg);
+extern void slurm_free_checkpoint_msg(checkpoint_msg_t *msg);
+extern void slurm_free_checkpoint_comp_msg(checkpoint_comp_msg_t *msg);
+extern void slurm_free_checkpoint_task_comp_msg(checkpoint_task_comp_msg_t *msg);
+extern void slurm_free_checkpoint_resp_msg(checkpoint_resp_msg_t *msg);
+extern void slurm_free_suspend_msg(suspend_msg_t *msg);
+extern void slurm_free_update_step_msg(step_update_request_msg_t * msg);
+extern void slurm_free_resource_allocation_response_msg (
 		resource_allocation_response_msg_t * msg);
-void slurm_free_job_alloc_info_response_msg (
+extern void slurm_free_job_alloc_info_response_msg (
 		job_alloc_info_response_msg_t * msg);
-void slurm_free_job_step_create_response_msg(
+extern void slurm_free_job_step_create_response_msg(
 		job_step_create_response_msg_t * msg);
-void slurm_free_submit_response_response_msg(submit_response_msg_t * msg);
-void slurm_free_ctl_conf(slurm_ctl_conf_info_msg_t * config_ptr);
-void slurm_free_job_info_msg(job_info_msg_t * job_buffer_ptr);
-void slurm_free_job_step_info_response_msg(
+extern void slurm_free_submit_response_response_msg(
+		submit_response_msg_t * msg);
+extern void slurm_free_ctl_conf(slurm_ctl_conf_info_msg_t * config_ptr);
+extern void slurm_free_job_info_msg(job_info_msg_t * job_buffer_ptr);
+extern void slurm_free_job_step_info_response_msg(
 		job_step_info_response_msg_t * msg);
-void slurm_free_job_step_info_members (job_step_info_t * msg);
-void slurm_free_node_info_msg(node_info_msg_t * msg);
-void slurm_free_node_info_members(node_info_t * node);
-void slurm_free_partition_info_msg(partition_info_msg_t * msg);
-void slurm_free_partition_info_members(partition_info_t * part);
-void slurm_free_reservation_info_msg(reserve_info_msg_t * msg);
-void slurm_free_get_kvs_msg(kvs_get_msg_t *msg);
-void slurm_free_will_run_response_msg(will_run_response_msg_t *msg);
-void slurm_free_reserve_info_members(reserve_info_t * resv);
+extern void slurm_free_job_step_info_members (job_step_info_t * msg);
+extern void slurm_free_front_end_info_msg (front_end_info_msg_t * msg);
+extern void slurm_free_front_end_info_members(front_end_info_t * front_end);
+extern void slurm_free_node_info_msg(node_info_msg_t * msg);
+extern void slurm_free_node_info_members(node_info_t * node);
+extern void slurm_free_partition_info_msg(partition_info_msg_t * msg);
+extern void slurm_free_partition_info_members(partition_info_t * part);
+extern void slurm_free_reservation_info_msg(reserve_info_msg_t * msg);
+extern void slurm_free_get_kvs_msg(kvs_get_msg_t *msg);
+extern void slurm_free_will_run_response_msg(will_run_response_msg_t *msg);
+extern void slurm_free_reserve_info_members(reserve_info_t * resv);
 extern void slurm_free_topo_info_msg(topo_info_response_msg_t *msg);
 extern void slurm_free_file_bcast_msg(file_bcast_msg_t *msg);
 extern void slurm_free_step_complete_msg(step_complete_msg_t *msg);
 extern void slurm_free_job_step_stat(void *object);
 extern void slurm_free_job_step_pids(void *object);
+extern void slurm_free_block_job_info(void *object);
 extern void slurm_free_block_info_members(block_info_t *block_info);
 extern void slurm_free_block_info(block_info_t *block_info);
-void slurm_free_block_info_msg(block_info_msg_t *block_info_msg);
-inline void slurm_free_block_info_request_msg(
+extern void slurm_free_block_info_msg(block_info_msg_t *block_info_msg);
+extern void slurm_free_block_info_request_msg(
 		block_info_request_msg_t *msg);
-inline void slurm_free_job_notify_msg(job_notify_msg_t * msg);
+extern void slurm_free_job_notify_msg(job_notify_msg_t * msg);
 
-inline void slurm_free_accounting_update_msg(accounting_update_msg_t *msg);
+extern void slurm_free_accounting_update_msg(accounting_update_msg_t *msg);
+extern void slurm_free_spank_env_request_msg(spank_env_request_msg_t *msg);
+extern void slurm_free_spank_env_responce_msg(spank_env_responce_msg_t *msg);
 
 extern int slurm_free_msg_data(slurm_msg_type_t type, void *data);
 extern uint32_t slurm_get_return_code(slurm_msg_type_t type, void *data);
@@ -1064,6 +1096,7 @@ extern uint32_t slurm_get_return_code(slurm_msg_type_t type, void *data);
 extern char *preempt_mode_string(uint16_t preempt_mode);
 extern uint16_t preempt_mode_num(const char *preempt_mode);
 
+extern char *sched_param_type_string(uint16_t select_type_param);
 extern char *job_reason_string(enum job_state_reason inx);
 extern char *job_state_string(uint16_t inx);
 extern char *job_state_string_compact(uint16_t inx);
@@ -1074,6 +1107,7 @@ extern void  private_data_string(uint16_t private_data, char *str, int str_len);
 extern void  accounting_enforce_string(uint16_t enforce,
 				       char *str, int str_len);
 extern char *conn_type_string(enum connection_type conn_type);
+extern char *conn_type_string_full(uint16_t *conn_type);
 extern char *node_use_string(enum node_use_type node_use);
 /* Translate a state enum to a readable string */
 extern char *bg_block_state_string(uint16_t state);
@@ -1084,8 +1118,8 @@ extern char *bg_block_state_string(uint16_t state);
 extern bool valid_spank_job_env(char **spank_job_env,
 			        uint32_t spank_job_env_size, uid_t uid);
 
-char *trigger_res_type(uint16_t res_type);
-char *trigger_type(uint32_t trig_type);
+extern char *trigger_res_type(uint16_t res_type);
+extern char *trigger_type(uint32_t trig_type);
 
 /* user needs to xfree after */
 extern char *reservation_flags_string(uint16_t flags);
diff --git a/src/common/slurm_protocol_interface.h b/src/common/slurm_protocol_interface.h
index 39b07c99d..683d29658 100644
--- a/src/common/slurm_protocol_interface.h
+++ b/src/common/slurm_protocol_interface.h
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/common/slurm_protocol_mongo_common.h b/src/common/slurm_protocol_mongo_common.h
index 3bbe27742..699014312 100644
--- a/src/common/slurm_protocol_mongo_common.h
+++ b/src/common/slurm_protocol_mongo_common.h
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/common/slurm_protocol_pack.c b/src/common/slurm_protocol_pack.c
index a30c8a20e..a95c5280f 100644
--- a/src/common/slurm_protocol_pack.c
+++ b/src/common/slurm_protocol_pack.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -54,6 +54,7 @@
 #include "src/common/slurm_accounting_storage.h"
 #include "src/common/slurm_jobacct_gather.h"
 #include "src/common/pack.h"
+#include "src/common/read_config.h"
 #include "src/common/slurm_auth.h"
 #include "src/common/slurm_cred.h"
 #include "src/common/slurm_protocol_api.h"
@@ -69,6 +70,7 @@
 #define _pack_job_info_msg(msg,buf)		_pack_buffer_msg(msg,buf)
 #define _pack_job_step_info_msg(msg,buf)	_pack_buffer_msg(msg,buf)
 #define _pack_block_info_resp_msg(msg,buf)	_pack_buffer_msg(msg,buf)
+#define _pack_front_end_info_msg(msg,buf)	_pack_buffer_msg(msg,buf)
 #define _pack_node_info_msg(msg,buf)		_pack_buffer_msg(msg,buf)
 #define _pack_partition_info_msg(msg,buf)	_pack_buffer_msg(msg,buf)
 #define _pack_reserve_info_msg(msg,buf)		_pack_buffer_msg(msg,buf)
@@ -160,6 +162,22 @@ static int _unpack_node_info_msg(node_info_msg_t ** msg, Buf buffer,
 static int _unpack_node_info_members(node_info_t * node, Buf buffer,
 				     uint16_t protocol_version);
 
+static void _pack_front_end_info_request_msg(
+				front_end_info_request_msg_t * msg,
+				Buf buffer, uint16_t protocol_version);
+static int _unpack_front_end_info_request_msg(
+				front_end_info_request_msg_t ** msg,
+				Buf buffer, uint16_t protocol_version);
+static int _unpack_front_end_info_msg(front_end_info_msg_t ** msg, Buf buffer,
+				      uint16_t protocol_version);
+static int _unpack_front_end_info_members(front_end_info_t *front_end,
+					  Buf buffer,
+					  uint16_t protocol_version);
+static void _pack_update_front_end_msg(update_front_end_msg_t * msg,
+				       Buf buffer, uint16_t protocol_version);
+static int _unpack_update_front_end_msg(update_front_end_msg_t ** msg,
+					Buf buffer, uint16_t protocol_version);
+
 static void _pack_update_partition_msg(update_part_msg_t * msg, Buf buffer,
 				       uint16_t protocol_version);
 static int _unpack_update_partition_msg(update_part_msg_t ** msg, Buf buffer,
@@ -313,7 +331,7 @@ static int _unpack_block_info_req_msg(block_info_request_msg_t **
 				      uint16_t protocol_version);
 static void _pack_block_info_msg(block_info_t *block_info, Buf buffer,
 				 uint16_t protocol_version);
-extern int _unpack_block_info(block_info_t **block_info, Buf buffer,
+static int _unpack_block_info(block_info_t **block_info, Buf buffer,
 			      uint16_t protocol_version);
 
 static void _pack_job_step_info_req_msg(job_step_info_request_msg_t * msg,
@@ -520,6 +538,12 @@ static void _pack_job_notify(job_notify_msg_t *msg, Buf buffer,
 static int  _unpack_job_notify(job_notify_msg_t **msg_ptr, Buf buffer,
 			       uint16_t protocol_version);
 
+static void _pack_set_debug_flags_msg(set_debug_flags_msg_t * msg, Buf buffer,
+				      uint16_t protocol_version);
+static int _unpack_set_debug_flags_msg(set_debug_flags_msg_t ** msg_ptr,
+				       Buf buffer,
+				       uint16_t protocol_version);
+
 static void _pack_set_debug_level_msg(set_debug_level_msg_t * msg, Buf buffer,
 				      uint16_t protocol_version);
 static int _unpack_set_debug_level_msg(set_debug_level_msg_t ** msg_ptr,
@@ -565,8 +589,18 @@ static void _pack_update_job_step_msg(step_update_request_msg_t * msg,
 static int _unpack_update_job_step_msg(step_update_request_msg_t ** msg_ptr,
 				       Buf buffer, uint16_t protocol_version);
 
+static void _pack_spank_env_request_msg(spank_env_request_msg_t * msg,
+					Buf buffer, uint16_t protocol_version);
+static int _unpack_spank_env_request_msg(spank_env_request_msg_t ** msg_ptr,
+					 Buf buffer, uint16_t protocol_version);
+
+static void _pack_spank_env_responce_msg(spank_env_responce_msg_t * msg,
+					 Buf buffer, uint16_t protocol_version);
+static int _unpack_spank_env_responce_msg(spank_env_responce_msg_t ** msg_ptr,
+					  Buf buffer, uint16_t protocol_version);
+
 /* pack_header
- * packs a slurm protocol header that proceeds every slurm message
+ * packs a slurm protocol header that precedes every slurm message
  * IN header - the header structure to pack
  * IN/OUT buffer - destination of the pack, contains pointers that are
  *			automatically updated
@@ -574,7 +608,6 @@ static int _unpack_update_job_step_msg(step_update_request_msg_t ** msg_ptr,
 void
 pack_header(header_t * header, Buf buffer)
 {
-
 	pack16((uint16_t)header->version, buffer);
 	pack16((uint16_t)header->flags, buffer);
 	pack16((uint16_t)header->msg_type, buffer);
@@ -585,7 +618,7 @@ pack_header(header_t * header, Buf buffer)
 		pack32((uint32_t)header->forward.timeout, buffer);
 	}
 	pack16((uint16_t)header->ret_cnt, buffer);
-	if(header->ret_cnt > 0) {
+	if (header->ret_cnt > 0) {
 		_pack_ret_list(header->ret_list,
 			       header->ret_cnt, buffer, header->version);
 	}
@@ -593,7 +626,7 @@ pack_header(header_t * header, Buf buffer)
 }
 
 /* unpack_header
- * unpacks a slurm protocol header that proceeds every slurm message
+ * unpacks a slurm protocol header that precedes every slurm message
  * OUT header - the header structure to unpack
  * IN/OUT buffer - source of the unpack data, contains pointers that are
  *			automatically updated
@@ -619,9 +652,9 @@ unpack_header(header_t * header, Buf buffer)
 	}
 
 	safe_unpack16(&header->ret_cnt, buffer);
-	if(header->ret_cnt > 0) {
-		if(_unpack_ret_list(&(header->ret_list),
-				    header->ret_cnt, buffer, header->version))
+	if (header->ret_cnt > 0) {
+		if (_unpack_ret_list(&(header->ret_list),
+				     header->ret_cnt, buffer, header->version))
 			goto unpack_error;
 	} else {
 		header->ret_list = NULL;
@@ -633,7 +666,7 @@ unpack_header(header_t * header, Buf buffer)
 unpack_error:
 	error("unpacking header");
 	destroy_forward(&header->forward);
-	if(header->ret_list)
+	if (header->ret_list)
 		list_destroy(header->ret_list);
 	return SLURM_ERROR;
 }
@@ -720,6 +753,7 @@ pack_msg(slurm_msg_t const *msg, Buf buffer)
 	case REQUEST_DAEMON_STATUS:
 	case REQUEST_HEALTH_CHECK:
 	case ACCOUNTING_FIRST_REG:
+	case ACCOUNTING_REGISTER_CTLD:
 	case REQUEST_TOPO_INFO:
 		/* Message contains no body/information */
 		break;
@@ -750,6 +784,10 @@ pack_msg(slurm_msg_t const *msg, Buf buffer)
 			msg->data, buffer,
 			msg->protocol_version);
 		break;
+	case REQUEST_UPDATE_FRONT_END:
+		_pack_update_front_end_msg((update_front_end_msg_t *) msg->data,
+					   buffer, msg->protocol_version);
+		break;
 	case REQUEST_UPDATE_NODE:
 		_pack_update_node_msg((update_node_msg_t *) msg->data,
 				      buffer,
@@ -878,6 +916,7 @@ pack_msg(slurm_msg_t const *msg, Buf buffer)
 				     msg->protocol_version);
 		break;
 	case REQUEST_ABORT_JOB:
+	case REQUEST_KILL_PREEMPTED:
 	case REQUEST_KILL_TIMELIMIT:
 	case REQUEST_TERMINATE_JOB:
 		_pack_kill_job_msg((kill_job_msg_t *) msg->data, buffer,
@@ -1000,6 +1039,7 @@ pack_msg(slurm_msg_t const *msg, Buf buffer)
 					  msg->protocol_version);
 		break;
 	case REQUEST_SUSPEND:
+	case SRUN_REQUEST_SUSPEND:
 		_pack_suspend_msg((suspend_msg_t *)msg->data, buffer,
 				  msg->protocol_version);
 		break;
@@ -1074,6 +1114,11 @@ pack_msg(slurm_msg_t const *msg, Buf buffer)
 		_pack_job_notify((job_notify_msg_t *) msg->data, buffer,
 				 msg->protocol_version);
 		break;
+	case REQUEST_SET_DEBUG_FLAGS:
+		_pack_set_debug_flags_msg(
+			(set_debug_flags_msg_t *)msg->data, buffer,
+			msg->protocol_version);
+		break;
 	case REQUEST_SET_DEBUG_LEVEL:
 	case REQUEST_SET_SCHEDLOG_LEVEL:
 		_pack_set_debug_level_msg(
@@ -1096,6 +1141,24 @@ pack_msg(slurm_msg_t const *msg, Buf buffer)
 			(job_sbcast_cred_msg_t *)msg->data, buffer,
 			msg->protocol_version);
 		break;
+	case REQUEST_FRONT_END_INFO:
+		_pack_front_end_info_request_msg(
+			(front_end_info_request_msg_t *)msg->data, buffer,
+			msg->protocol_version);
+		break;
+	case RESPONSE_FRONT_END_INFO:
+		_pack_front_end_info_msg((slurm_msg_t *) msg, buffer);
+		break;
+	case REQUEST_SPANK_ENVIRONMENT:
+		_pack_spank_env_request_msg(
+			(spank_env_request_msg_t *)msg->data, buffer,
+			msg->protocol_version);
+		break;
+	case RESPONCE_SPANK_ENVIRONMENT:
+		_pack_spank_env_responce_msg(
+			(spank_env_responce_msg_t *)msg->data, buffer,
+			msg->protocol_version);
+		break;
 	default:
 		debug("No pack method for msg type %u", msg->msg_type);
 		return EINVAL;
@@ -1197,6 +1260,7 @@ unpack_msg(slurm_msg_t * msg, Buf buffer)
 	case REQUEST_DAEMON_STATUS:
 	case REQUEST_HEALTH_CHECK:
 	case ACCOUNTING_FIRST_REG:
+	case ACCOUNTING_REGISTER_CTLD:
 	case REQUEST_TOPO_INFO:
 		/* Message contains no body/information */
 		break;
@@ -1228,6 +1292,11 @@ unpack_msg(slurm_msg_t * msg, Buf buffer)
 			& (msg->data), buffer,
 			msg->protocol_version);
 		break;
+	case REQUEST_UPDATE_FRONT_END:
+		rc = _unpack_update_front_end_msg((update_front_end_msg_t **) &
+						  (msg->data), buffer,
+						  msg->protocol_version);
+		break;
 	case REQUEST_UPDATE_NODE:
 		rc = _unpack_update_node_msg((update_node_msg_t **) &
 					     (msg->data), buffer,
@@ -1367,6 +1436,7 @@ unpack_msg(slurm_msg_t * msg, Buf buffer)
 					    msg->protocol_version);
 		break;
 	case REQUEST_ABORT_JOB:
+	case REQUEST_KILL_PREEMPTED:
 	case REQUEST_KILL_TIMELIMIT:
 	case REQUEST_TERMINATE_JOB:
 		rc = _unpack_kill_job_msg((kill_job_msg_t **) & (msg->data),
@@ -1500,6 +1570,7 @@ unpack_msg(slurm_msg_t * msg, Buf buffer)
 						 msg->protocol_version);
 		break;
 	case REQUEST_SUSPEND:
+	case SRUN_REQUEST_SUSPEND:
 		rc = _unpack_suspend_msg((suspend_msg_t **) &msg->data,
 					 buffer,
 					 msg->protocol_version);
@@ -1585,6 +1656,11 @@ unpack_msg(slurm_msg_t * msg, Buf buffer)
 					 &msg->data, buffer,
 					 msg->protocol_version);
 		break;
+	case REQUEST_SET_DEBUG_FLAGS:
+		rc = _unpack_set_debug_flags_msg(
+			(set_debug_flags_msg_t **)&(msg->data), buffer,
+			msg->protocol_version);
+		break;
 	case REQUEST_SET_DEBUG_LEVEL:
 	case REQUEST_SET_SCHEDLOG_LEVEL:
 		rc = _unpack_set_debug_level_msg(
@@ -1607,6 +1683,26 @@ unpack_msg(slurm_msg_t * msg, Buf buffer)
 			(job_sbcast_cred_msg_t **)&msg->data, buffer,
 			msg->protocol_version);
 		break;
+	case REQUEST_FRONT_END_INFO:
+		rc = _unpack_front_end_info_request_msg(
+			(front_end_info_request_msg_t **)&msg->data, buffer,
+			msg->protocol_version);
+		break;
+	case RESPONSE_FRONT_END_INFO:
+		rc = _unpack_front_end_info_msg(
+			(front_end_info_msg_t **)&msg->data, buffer,
+			msg->protocol_version);
+		break;
+	case REQUEST_SPANK_ENVIRONMENT:
+		rc = _unpack_spank_env_request_msg(
+			(spank_env_request_msg_t **)&msg->data, buffer,
+			msg->protocol_version);
+		break;
+	case RESPONCE_SPANK_ENVIRONMENT:
+		rc = _unpack_spank_env_responce_msg(
+			(spank_env_responce_msg_t **)&msg->data, buffer,
+			msg->protocol_version);
+		break;
 	default:
 		debug("No unpack method for msg type %u", msg->msg_type);
 		return EINVAL;
@@ -2023,12 +2119,52 @@ unpack_error:
 
 }
 
+static void
+_pack_update_front_end_msg(update_front_end_msg_t * msg, Buf buffer,
+			   uint16_t protocol_version)
+{
+	xassert(msg != NULL);
+	if (protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
+		packstr(msg->name, buffer);
+		pack16(msg->node_state, buffer);
+		packstr(msg->reason, buffer);
+		pack32(msg->reason_uid, buffer);
+	}
+}
+
+static int
+_unpack_update_front_end_msg(update_front_end_msg_t ** msg, Buf buffer,
+			     uint16_t protocol_version)
+{
+	uint32_t uint32_tmp;
+	update_front_end_msg_t *tmp_ptr;
+
+	/* alloc memory for structure */
+	xassert(msg != NULL);
+	tmp_ptr = xmalloc(sizeof(update_front_end_msg_t));
+	*msg = tmp_ptr;
+
+	if (protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
+		safe_unpackstr_xmalloc(&tmp_ptr->name,
+				       &uint32_tmp, buffer);
+		safe_unpack16(&tmp_ptr->node_state, buffer);
+		safe_unpackstr_xmalloc(&tmp_ptr->reason, &uint32_tmp, buffer);
+		safe_unpack32(&tmp_ptr->reason_uid, buffer);
+	}
+	return SLURM_SUCCESS;
+
+unpack_error:
+	slurm_free_update_front_end_msg(tmp_ptr);
+	*msg = NULL;
+	return SLURM_ERROR;
+}
+
 static void
 _pack_update_node_msg(update_node_msg_t * msg, Buf buffer,
 		      uint16_t protocol_version)
 {
 	xassert(msg != NULL);
-	if(protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
 		packstr(msg->node_names, buffer);
 		pack16(msg->node_state, buffer);
 		packstr(msg->features, buffer);
@@ -2057,7 +2193,7 @@ _unpack_update_node_msg(update_node_msg_t ** msg, Buf buffer,
 	tmp_ptr = xmalloc(sizeof(update_node_msg_t));
 	*msg = tmp_ptr;
 
-	if(protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
 		safe_unpackstr_xmalloc(&tmp_ptr->node_names,
 				       &uint32_tmp, buffer);
 		safe_unpack16(&tmp_ptr->node_state, buffer);
@@ -2279,6 +2415,7 @@ _pack_resource_allocation_response_msg(resource_allocation_response_msg_t *msg,
 
 	pack32(msg->error_code, buffer);
 	pack32(msg->job_id, buffer);
+	pack32(msg->pn_min_memory, buffer);
 	packstr(msg->node_list, buffer);
 
 	pack32(msg->num_cpu_groups, buffer);
@@ -2309,6 +2446,7 @@ _unpack_resource_allocation_response_msg(
 	/* load the data values */
 	safe_unpack32(&tmp_ptr->error_code, buffer);
 	safe_unpack32(&tmp_ptr->job_id, buffer);
+	safe_unpack32(&tmp_ptr->pn_min_memory, buffer);
 	safe_unpackstr_xmalloc(&tmp_ptr->node_list, &uint32_tmp, buffer);
 
 	safe_unpack32(&tmp_ptr->num_cpu_groups, buffer);
@@ -2524,7 +2662,7 @@ _unpack_node_info_msg(node_info_msg_t ** msg, Buf buffer,
 	*msg = xmalloc(sizeof(node_info_msg_t));
 
 	/* load buffer's header (data structure version and time) */
-	if(protocol_version >= SLURM_2_1_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_2_1_PROTOCOL_VERSION) {
 		safe_unpack32(&((*msg)->record_count), buffer);
 		safe_unpack32(&((*msg)->node_scaling), buffer);
 		safe_unpack_time(&((*msg)->last_update), buffer);
@@ -2555,7 +2693,35 @@ _unpack_node_info_members(node_info_t * node, Buf buffer,
 
 	xassert(node != NULL);
 
-	if(protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_2_3_PROTOCOL_VERSION) {
+		safe_unpackstr_xmalloc(&node->name, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&node->node_hostname, &uint32_tmp,
+				       buffer);
+		safe_unpackstr_xmalloc(&node->node_addr, &uint32_tmp, buffer);
+		safe_unpack16(&node->node_state, buffer);
+		safe_unpack16(&node->cpus, buffer);
+		safe_unpack16(&node->sockets, buffer);
+		safe_unpack16(&node->cores, buffer);
+		safe_unpack16(&node->threads, buffer);
+
+		safe_unpack32(&node->real_memory, buffer);
+		safe_unpack32(&node->tmp_disk, buffer);
+		safe_unpack32(&node->weight, buffer);
+		safe_unpack32(&node->reason_uid, buffer);
+
+		safe_unpack_time(&node->boot_time, buffer);
+		safe_unpack_time(&node->reason_time, buffer);
+		safe_unpack_time(&node->slurmd_start_time, buffer);
+
+		select_g_select_nodeinfo_unpack(&node->select_nodeinfo, buffer,
+						protocol_version);
+
+		safe_unpackstr_xmalloc(&node->arch, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&node->features, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&node->gres, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&node->os, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&node->reason, &uint32_tmp, buffer);
+	} else if (protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
 		safe_unpackstr_xmalloc(&node->name, &uint32_tmp, buffer);
 		safe_unpack16(&node->node_state, buffer);
 		safe_unpack16(&node->cpus, buffer);
@@ -2613,7 +2779,26 @@ _pack_update_partition_msg(update_part_msg_t * msg, Buf buffer,
 {
 	xassert(msg != NULL);
 
-	if(protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_2_3_PROTOCOL_VERSION) {
+		packstr(msg->allow_groups, buffer);
+		packstr(msg->alternate,    buffer);
+		pack32(msg-> grace_time,   buffer);
+		pack32(msg-> max_time,     buffer);
+		pack32(msg-> default_time, buffer);
+		pack32(msg-> max_nodes,    buffer);
+		pack32(msg-> min_nodes,    buffer);
+		pack32(msg-> def_mem_per_cpu, buffer);
+		pack32(msg-> max_mem_per_cpu, buffer);
+		packstr(msg->name,         buffer);
+		packstr(msg->nodes,        buffer);
+		pack16(msg-> flags,        buffer);
+		pack16(msg-> max_share,    buffer);
+		pack16(msg-> preempt_mode, buffer);
+		pack16(msg-> priority,     buffer);
+		pack16(msg-> state_up,     buffer);
+
+		packstr(msg->allow_alloc_nodes, buffer);
+	} else if (protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
 		packstr(msg->allow_groups, buffer);
 		packstr(msg->alternate,    buffer);
 		pack32(msg-> max_time,     buffer);
@@ -2689,7 +2874,29 @@ _unpack_update_partition_msg(update_part_msg_t ** msg, Buf buffer,
 	tmp_ptr = xmalloc(sizeof(update_part_msg_t));
 	*msg = tmp_ptr;
 
-	if(protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_2_3_PROTOCOL_VERSION) {
+		safe_unpackstr_xmalloc(&tmp_ptr->allow_groups,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&tmp_ptr->alternate, &uint32_tmp, buffer);
+		safe_unpack32(&tmp_ptr->grace_time, buffer);
+		safe_unpack32(&tmp_ptr->max_time, buffer);
+		safe_unpack32(&tmp_ptr->default_time, buffer);
+		safe_unpack32(&tmp_ptr->max_nodes, buffer);
+		safe_unpack32(&tmp_ptr->min_nodes, buffer);
+		safe_unpack32(&tmp_ptr->def_mem_per_cpu, buffer);
+		safe_unpack32(&tmp_ptr->max_mem_per_cpu, buffer);
+		safe_unpackstr_xmalloc(&tmp_ptr->name, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&tmp_ptr->nodes, &uint32_tmp, buffer);
+
+		safe_unpack16(&tmp_ptr->flags,     buffer);
+		safe_unpack16(&tmp_ptr->max_share, buffer);
+		safe_unpack16(&tmp_ptr->preempt_mode, buffer);
+		safe_unpack16(&tmp_ptr->priority,  buffer);
+		safe_unpack16(&tmp_ptr->state_up,  buffer);
+
+		safe_unpackstr_xmalloc(&tmp_ptr->allow_alloc_nodes,
+				       &uint32_tmp, buffer);
+	} else if (protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
 		safe_unpackstr_xmalloc(&tmp_ptr->allow_groups,
 				       &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&tmp_ptr->alternate, &uint32_tmp, buffer);
@@ -3242,12 +3449,23 @@ _pack_job_step_create_response_msg(job_step_create_response_msg_t * msg,
 {
 	xassert(msg != NULL);
 
-	packstr(msg->resv_ports, buffer);
-	pack32(msg->job_step_id, buffer);
-	pack_slurm_step_layout(msg->step_layout, buffer, protocol_version);
-	slurm_cred_pack(msg->cred, buffer);
-	switch_pack_jobinfo(msg->switch_job, buffer);
-
+	if (protocol_version >= SLURM_2_3_PROTOCOL_VERSION) {
+		packstr(msg->resv_ports, buffer);
+		pack32(msg->job_step_id, buffer);
+		pack_slurm_step_layout(
+			msg->step_layout, buffer, protocol_version);
+		slurm_cred_pack(msg->cred, buffer);
+		select_g_select_jobinfo_pack(
+			msg->select_jobinfo, buffer, protocol_version);
+		switch_pack_jobinfo(msg->switch_job, buffer);
+	} else {
+		packstr(msg->resv_ports, buffer);
+		pack32(msg->job_step_id, buffer);
+		pack_slurm_step_layout(
+			msg->step_layout, buffer, protocol_version);
+		slurm_cred_pack(msg->cred, buffer);
+		switch_pack_jobinfo(msg->switch_job, buffer);
+	}
 }
 
 static int
@@ -3263,20 +3481,45 @@ _unpack_job_step_create_response_msg(job_step_create_response_msg_t ** msg,
 	tmp_ptr = xmalloc(sizeof(job_step_create_response_msg_t));
 	*msg = tmp_ptr;
 
-	safe_unpackstr_xmalloc(&tmp_ptr->resv_ports, &uint32_tmp, buffer);
-	safe_unpack32(&tmp_ptr->job_step_id, buffer);
-	if (unpack_slurm_step_layout(&tmp_ptr->step_layout, buffer,
-				     protocol_version))
-		goto unpack_error;
+	if (protocol_version >= SLURM_2_3_PROTOCOL_VERSION) {
+		safe_unpackstr_xmalloc(
+			&tmp_ptr->resv_ports, &uint32_tmp, buffer);
+		safe_unpack32(&tmp_ptr->job_step_id, buffer);
+		if (unpack_slurm_step_layout(&tmp_ptr->step_layout, buffer,
+					     protocol_version))
+			goto unpack_error;
 
-	if (!(tmp_ptr->cred = slurm_cred_unpack(buffer, protocol_version)))
-		goto unpack_error;
+		if (!(tmp_ptr->cred = slurm_cred_unpack(
+			      buffer, protocol_version)))
+			goto unpack_error;
 
-	switch_alloc_jobinfo(&tmp_ptr->switch_job);
-	if (switch_unpack_jobinfo(tmp_ptr->switch_job, buffer)) {
-		error("switch_unpack_jobinfo: %m");
-		switch_free_jobinfo(tmp_ptr->switch_job);
-		goto unpack_error;
+		if (select_g_select_jobinfo_unpack(
+			    &tmp_ptr->select_jobinfo, buffer, protocol_version))
+			goto unpack_error;
+		switch_alloc_jobinfo(&tmp_ptr->switch_job);
+		if (switch_unpack_jobinfo(tmp_ptr->switch_job, buffer)) {
+			error("switch_unpack_jobinfo: %m");
+			switch_free_jobinfo(tmp_ptr->switch_job);
+			goto unpack_error;
+		}
+	} else {
+		safe_unpackstr_xmalloc(
+			&tmp_ptr->resv_ports, &uint32_tmp, buffer);
+		safe_unpack32(&tmp_ptr->job_step_id, buffer);
+		if (unpack_slurm_step_layout(&tmp_ptr->step_layout, buffer,
+					     protocol_version))
+			goto unpack_error;
+
+		if (!(tmp_ptr->cred = slurm_cred_unpack(
+			      buffer, protocol_version)))
+			goto unpack_error;
+
+		switch_alloc_jobinfo(&tmp_ptr->switch_job);
+		if (switch_unpack_jobinfo(tmp_ptr->switch_job, buffer)) {
+			error("switch_unpack_jobinfo: %m");
+			switch_free_jobinfo(tmp_ptr->switch_job);
+			goto unpack_error;
+		}
 	}
 	return SLURM_SUCCESS;
 
@@ -3329,16 +3572,48 @@ _unpack_partition_info_members(partition_info_t * part, Buf buffer,
 	uint32_t uint32_tmp;
 	char *node_inx_str = NULL;
 
-	if(protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_2_3_PROTOCOL_VERSION) {
 		safe_unpackstr_xmalloc(&part->name, &uint32_tmp, buffer);
 		if (part->name == NULL)
 			part->name = xmalloc(1);/* part->name = "" implicit */
+		safe_unpack32(&part->grace_time,   buffer);
 		safe_unpack32(&part->max_time,     buffer);
 		safe_unpack32(&part->default_time, buffer);
 		safe_unpack32(&part->max_nodes,    buffer);
 		safe_unpack32(&part->min_nodes,    buffer);
 		safe_unpack32(&part->total_nodes,  buffer);
+		safe_unpack32(&part->total_cpus,   buffer);
+		safe_unpack32(&part->def_mem_per_cpu, buffer);
+		safe_unpack32(&part->max_mem_per_cpu, buffer);
+		safe_unpack16(&part->flags,        buffer);
+		safe_unpack16(&part->max_share,    buffer);
+		safe_unpack16(&part->preempt_mode, buffer);
+		safe_unpack16(&part->priority,     buffer);
 
+		safe_unpack16(&part->state_up, buffer);
+		safe_unpackstr_xmalloc(&part->allow_groups, &uint32_tmp,
+				       buffer);
+		safe_unpackstr_xmalloc(&part->allow_alloc_nodes, &uint32_tmp,
+				       buffer);
+		safe_unpackstr_xmalloc(&part->alternate, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&part->nodes, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&node_inx_str, &uint32_tmp, buffer);
+		if (node_inx_str == NULL)
+			part->node_inx = bitfmt2int("");
+		else {
+			part->node_inx = bitfmt2int(node_inx_str);
+			xfree(node_inx_str);
+			node_inx_str = NULL;
+		}
+	} else if (protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
+		safe_unpackstr_xmalloc(&part->name, &uint32_tmp, buffer);
+		if (part->name == NULL)
+			part->name = xmalloc(1);/* part->name = "" implicit */
+		safe_unpack32(&part->max_time,     buffer);
+		safe_unpack32(&part->default_time, buffer);
+		safe_unpack32(&part->max_nodes,    buffer);
+		safe_unpack32(&part->min_nodes,    buffer);
+		safe_unpack32(&part->total_nodes,  buffer);
 		safe_unpack32(&part->total_cpus,   buffer);
 		safe_unpack16(&part->flags,        buffer);
 		safe_unpack16(&part->max_share,    buffer);
@@ -3370,7 +3645,6 @@ _unpack_partition_info_members(partition_info_t * part, Buf buffer,
 		safe_unpack32(&part->max_nodes,    buffer);
 		safe_unpack32(&part->min_nodes,    buffer);
 		safe_unpack32(&part->total_nodes,  buffer);
-
 		safe_unpack32(&part->total_cpus,   buffer);
 		safe_unpack16(&default_part,       buffer);
 		safe_unpack16(&disable_root_jobs,  buffer);
@@ -3496,7 +3770,36 @@ _unpack_job_step_info_members(job_step_info_t * step, Buf buffer,
 	uint32_t uint32_tmp = 0;
 	char *node_inx_str;
 
-	if(protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_2_3_PROTOCOL_VERSION) {
+		safe_unpack32(&step->job_id, buffer);
+		safe_unpack32(&step->step_id, buffer);
+		safe_unpack16(&step->ckpt_interval, buffer);
+		safe_unpack32(&step->user_id, buffer);
+		safe_unpack32(&step->num_cpus, buffer);
+		safe_unpack32(&step->num_tasks, buffer);
+		safe_unpack32(&step->time_limit, buffer);
+
+		safe_unpack_time(&step->start_time, buffer);
+		safe_unpack_time(&step->run_time, buffer);
+
+		safe_unpackstr_xmalloc(&step->partition, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&step->resv_ports, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&step->nodes, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&step->name, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&step->network, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&node_inx_str, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&step->ckpt_dir, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&step->gres, &uint32_tmp, buffer);
+		if (node_inx_str == NULL)
+			step->node_inx = bitfmt2int("");
+		else {
+			step->node_inx = bitfmt2int(node_inx_str);
+			xfree(node_inx_str);
+		}
+		if (select_g_select_jobinfo_unpack(&step->select_jobinfo,
+						   buffer, protocol_version))
+			goto unpack_error;
+	} else if (protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
 		safe_unpack32(&step->job_id, buffer);
 		safe_unpack32(&step->step_id, buffer);
 		safe_unpack16(&step->ckpt_interval, buffer);
@@ -3548,10 +3851,14 @@ _unpack_job_step_info_members(job_step_info_t * step, Buf buffer,
 			xfree(node_inx_str);
 		}
 	}
+
 	return SLURM_SUCCESS;
 
 unpack_error:
-	slurm_free_job_step_info_members(step);
+	/* no need to free here.  (we will just be freeing it 2 times
+	   since this is freed in _unpack_job_step_info_response_msg
+	*/
+	//slurm_free_job_step_info_members(step);
 	return SLURM_ERROR;
 }
 
@@ -3639,7 +3946,7 @@ _unpack_job_info_members(job_info_t * job, Buf buffer,
 	char *node_inx_str;
 	multi_core_data_t *mc_ptr;
 
-	if(protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_2_3_PROTOCOL_VERSION) {
 		safe_unpack32(&job->assoc_id, buffer);
 		safe_unpack32(&job->job_id, buffer);
 		safe_unpack32(&job->user_id, buffer);
@@ -3664,15 +3971,17 @@ _unpack_job_info_members(job_info_t * job, Buf buffer,
 		safe_unpack_time(&job->suspend_time, buffer);
 		safe_unpack_time(&job->pre_sus_time, buffer);
 		safe_unpack_time(&job->resize_time, buffer);
-
+		safe_unpack_time(&job->preempt_time, buffer);
 		safe_unpack32(&job->priority, buffer);
-
 		safe_unpackstr_xmalloc(&job->nodes, &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&job->partition, &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&job->account, &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&job->network, &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&job->comment, &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&job->gres, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&job->batch_host, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&job->batch_script, &uint32_tmp, buffer);
+
 		safe_unpackstr_xmalloc(&job->qos, &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&job->licenses, &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&job->state_desc, &uint32_tmp, buffer);
@@ -3685,6 +3994,9 @@ _unpack_job_info_members(job_info_t * job, Buf buffer,
 
 		safe_unpackstr_xmalloc(&job->name, &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&job->wckey, &uint32_tmp, buffer);
+		safe_unpack32(&job->req_switch, buffer);
+		safe_unpack32(&job->wait4switch, buffer);
+
 		safe_unpackstr_xmalloc(&job->alloc_node, &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&node_inx_str, &uint32_tmp, buffer);
 		if (node_inx_str == NULL)
@@ -3695,10 +4007,9 @@ _unpack_job_info_members(job_info_t * job, Buf buffer,
 		}
 
 		if (select_g_select_jobinfo_unpack(&job->select_jobinfo,
-						   buffer, protocol_version)) {
-			info("here");
+						   buffer, protocol_version))
 			goto unpack_error;
-		}
+
 		/*** unpack default job details ***/
 		safe_unpackstr_xmalloc(&job->features,   &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&job->work_dir,   &uint32_tmp, buffer);
@@ -3747,7 +4058,7 @@ _unpack_job_info_members(job_info_t * job, Buf buffer,
 			job->ntasks_per_core   = mc_ptr->ntasks_per_core;
 			xfree(mc_ptr);
 		}
-	} else if(protocol_version >= SLURM_2_1_PROTOCOL_VERSION) {
+	} else if (protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
 		safe_unpack32(&job->assoc_id, buffer);
 		safe_unpack32(&job->job_id, buffer);
 		safe_unpack32(&job->user_id, buffer);
@@ -3757,10 +4068,11 @@ _unpack_job_info_members(job_info_t * job, Buf buffer,
 		safe_unpack16(&job->batch_flag,   buffer);
 		safe_unpack16(&job->state_reason, buffer);
 		safe_unpack16(&job->restart_cnt, buffer);
+		safe_unpack16(&job->show_flags, buffer);
 
 		safe_unpack32(&job->alloc_sid,    buffer);
 		safe_unpack32(&job->time_limit,   buffer);
-		job->time_min = job->time_limit;
+		safe_unpack32(&job->time_min,   buffer);
 
 		safe_unpack16(&job->nice, buffer);
 
@@ -3770,6 +4082,7 @@ _unpack_job_info_members(job_info_t * job, Buf buffer,
 		safe_unpack_time(&job->end_time, buffer);
 		safe_unpack_time(&job->suspend_time, buffer);
 		safe_unpack_time(&job->pre_sus_time, buffer);
+		safe_unpack_time(&job->resize_time, buffer);
 
 		safe_unpack32(&job->priority, buffer);
 
@@ -3778,16 +4091,16 @@ _unpack_job_info_members(job_info_t * job, Buf buffer,
 		safe_unpackstr_xmalloc(&job->account, &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&job->network, &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&job->comment, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&job->gres, &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&job->qos, &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&job->licenses, &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&job->state_desc, &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&job->resv_name,  &uint32_tmp, buffer);
 
 		safe_unpack32(&job->exit_code, buffer);
+		safe_unpack32(&job->derived_ec, buffer);
 		unpack_job_resources(&job->job_resrcs, buffer,
 				     protocol_version);
-		/* Kludge for lack of resource node list in SLURM version 2.1 */
-		job->job_resrcs->nodes = xstrdup(job->nodes);
 
 		safe_unpackstr_xmalloc(&job->name, &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&job->wckey, &uint32_tmp, buffer);
@@ -3799,12 +4112,118 @@ _unpack_job_info_members(job_info_t * job, Buf buffer,
 			job->node_inx = bitfmt2int(node_inx_str);
 			xfree(node_inx_str);
 		}
-		safe_unpack32(&job->num_cpus, buffer);
 
 		if (select_g_select_jobinfo_unpack(&job->select_jobinfo,
-						   buffer, protocol_version))
+						   buffer, protocol_version)) {
+			info("here");
 			goto unpack_error;
-
+		}
+		/*** unpack default job details ***/
+		safe_unpackstr_xmalloc(&job->features,   &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&job->work_dir,   &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&job->dependency, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&job->command,    &uint32_tmp, buffer);
+
+		safe_unpack32(&job->num_cpus, buffer);
+		safe_unpack32(&job->max_cpus, buffer);
+		safe_unpack32(&job->num_nodes,   buffer);
+		safe_unpack32(&job->max_nodes,   buffer);
+		safe_unpack16(&job->requeue,     buffer);
+
+		/*** unpack pending job details ***/
+		safe_unpack16(&job->shared,        buffer);
+		safe_unpack16(&job->contiguous,    buffer);
+		safe_unpack16(&job->cpus_per_task, buffer);
+		safe_unpack16(&job->pn_min_cpus, buffer);
+
+		safe_unpack32(&job->pn_min_memory, buffer);
+		safe_unpack32(&job->pn_min_tmp_disk, buffer);
+
+		safe_unpackstr_xmalloc(&job->req_nodes, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&node_inx_str, &uint32_tmp, buffer);
+		if (node_inx_str == NULL)
+			job->req_node_inx = bitfmt2int("");
+		else {
+			job->req_node_inx = bitfmt2int(node_inx_str);
+			xfree(node_inx_str);
+		}
+		safe_unpackstr_xmalloc(&job->exc_nodes, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&node_inx_str, &uint32_tmp, buffer);
+		if (node_inx_str == NULL)
+			job->exc_node_inx = bitfmt2int("");
+		else {
+			job->exc_node_inx = bitfmt2int(node_inx_str);
+			xfree(node_inx_str);
+		}
+
+		if (unpack_multi_core_data(&mc_ptr, buffer, protocol_version))
+			goto unpack_error;
+		if (mc_ptr) {
+			job->sockets_per_node  = mc_ptr->sockets_per_node;
+			job->cores_per_socket  = mc_ptr->cores_per_socket;
+			job->threads_per_core  = mc_ptr->threads_per_core;
+			job->ntasks_per_socket = mc_ptr->ntasks_per_socket;
+			job->ntasks_per_core   = mc_ptr->ntasks_per_core;
+			xfree(mc_ptr);
+		}
+	} else if (protocol_version >= SLURM_2_1_PROTOCOL_VERSION) {
+		safe_unpack32(&job->assoc_id, buffer);
+		safe_unpack32(&job->job_id, buffer);
+		safe_unpack32(&job->user_id, buffer);
+		safe_unpack32(&job->group_id, buffer);
+
+		safe_unpack16(&job->job_state,    buffer);
+		safe_unpack16(&job->batch_flag,   buffer);
+		safe_unpack16(&job->state_reason, buffer);
+		safe_unpack16(&job->restart_cnt, buffer);
+
+		safe_unpack32(&job->alloc_sid,    buffer);
+		safe_unpack32(&job->time_limit,   buffer);
+		job->time_min = job->time_limit;
+
+		safe_unpack16(&job->nice, buffer);
+
+		safe_unpack_time(&job->submit_time, buffer);
+		safe_unpack_time(&job->eligible_time, buffer);
+		safe_unpack_time(&job->start_time, buffer);
+		safe_unpack_time(&job->end_time, buffer);
+		safe_unpack_time(&job->suspend_time, buffer);
+		safe_unpack_time(&job->pre_sus_time, buffer);
+
+		safe_unpack32(&job->priority, buffer);
+
+		safe_unpackstr_xmalloc(&job->nodes, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&job->partition, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&job->account, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&job->network, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&job->comment, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&job->qos, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&job->licenses, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&job->state_desc, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&job->resv_name,  &uint32_tmp, buffer);
+
+		safe_unpack32(&job->exit_code, buffer);
+		unpack_job_resources(&job->job_resrcs, buffer,
+				     protocol_version);
+		/* Kludge for lack of resource node list in SLURM version 2.1 */
+		job->job_resrcs->nodes = xstrdup(job->nodes);
+
+		safe_unpackstr_xmalloc(&job->name, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&job->wckey, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&job->alloc_node, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&node_inx_str, &uint32_tmp, buffer);
+		if (node_inx_str == NULL)
+			job->node_inx = bitfmt2int("");
+		else {
+			job->node_inx = bitfmt2int(node_inx_str);
+			xfree(node_inx_str);
+		}
+		safe_unpack32(&job->num_cpus, buffer);
+
+		if (select_g_select_jobinfo_unpack(&job->select_jobinfo,
+						   buffer, protocol_version))
+			goto unpack_error;
+
 		/*** unpack default job details ***/
 		safe_unpackstr_xmalloc(&job->features,   &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&job->work_dir,   &uint32_tmp, buffer);
@@ -3867,7 +4286,7 @@ _pack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t * build_ptr, Buf buffer,
 	uint16_t uint16_tmp;
 	uint32_t cluster_flags = slurmdb_setup_cluster_flags();
 
-	if(protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_2_3_PROTOCOL_VERSION) {
 		pack_time(build_ptr->last_update, buffer);
 
 		pack16(build_ptr->accounting_storage_enforce, buffer);
@@ -3877,6 +4296,7 @@ _pack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t * build_ptr, Buf buffer,
 		pack32(build_ptr->accounting_storage_port, buffer);
 		packstr(build_ptr->accounting_storage_type, buffer);
 		packstr(build_ptr->accounting_storage_user, buffer);
+		pack16(build_ptr->acctng_store_job_comment, buffer);
 
 		packstr(build_ptr->authtype, buffer);
 
@@ -3939,7 +4359,9 @@ _pack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t * build_ptr, Buf buffer,
 
 		packstr(build_ptr->mail_prog, buffer);
 		pack32(build_ptr->max_job_cnt, buffer);
+		pack32(build_ptr->max_job_id, buffer);
 		pack32(build_ptr->max_mem_per_cpu, buffer);
+		pack32(build_ptr->max_step_cnt, buffer);
 		pack16(build_ptr->max_tasks_per_node, buffer);
 		pack16(build_ptr->min_job_age, buffer);
 		packstr(build_ptr->mpi_default, buffer);
@@ -4061,14 +4483,13 @@ _pack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t * build_ptr, Buf buffer,
 		pack16(build_ptr->z_16, buffer);
 		pack32(build_ptr->z_32, buffer);
 		packstr(build_ptr->z_char, buffer);
-	} else if(protocol_version >= SLURM_2_1_PROTOCOL_VERSION) {
+	} else if (protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
 		pack_time(build_ptr->last_update, buffer);
 
 		pack16(build_ptr->accounting_storage_enforce, buffer);
 		packstr(build_ptr->accounting_storage_backup_host, buffer);
 		packstr(build_ptr->accounting_storage_host, buffer);
 		packstr(build_ptr->accounting_storage_loc, buffer);
-		packstr("", buffer);
 		pack32(build_ptr->accounting_storage_port, buffer);
 		packstr(build_ptr->accounting_storage_type, buffer);
 		packstr(build_ptr->accounting_storage_user, buffer);
@@ -4080,11 +4501,6 @@ _pack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t * build_ptr, Buf buffer,
 		pack16(build_ptr->batch_start_timeout, buffer);
 		pack_time(build_ptr->boot_time, buffer);
 
-		if (build_ptr->group_info & GROUP_CACHE)
-			uint16_tmp = 1;
-		else
-			uint16_tmp = 0;
-		pack16(uint16_tmp, buffer);	/* cache_groups equivalent */
 		packstr(build_ptr->checkpoint_type, buffer);
 		packstr(build_ptr->cluster_name, buffer);
 		pack16(build_ptr->complete_wait, buffer);
@@ -4096,6 +4512,7 @@ _pack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t * build_ptr, Buf buffer,
 		pack32(build_ptr->debug_flags, buffer);
 		pack16(build_ptr->disable_root_jobs, buffer);
 
+		pack16(build_ptr->enforce_part_limits, buffer);
 		packstr(build_ptr->epilog, buffer);
 		pack32(build_ptr->epilog_msg_time, buffer);
 		packstr(build_ptr->epilog_slurmctld, buffer);
@@ -4104,6 +4521,10 @@ _pack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t * build_ptr, Buf buffer,
 		pack32(build_ptr->first_job_id, buffer);
 
 		pack16(build_ptr->get_env_timeout, buffer);
+		packstr(build_ptr->gres_plugins, buffer);
+		pack16(build_ptr->group_info, buffer);
+
+		pack32(build_ptr->hash_val, buffer);
 
 		pack16(build_ptr->health_check_interval, buffer);
 		packstr(build_ptr->health_check_program, buffer);
@@ -4117,7 +4538,6 @@ _pack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t * build_ptr, Buf buffer,
 
 		packstr(build_ptr->job_comp_host, buffer);
 		packstr(build_ptr->job_comp_loc, buffer);
-		packstr("", buffer);
 		pack32((uint32_t)build_ptr->job_comp_port, buffer);
 		packstr(build_ptr->job_comp_type, buffer);
 		packstr(build_ptr->job_comp_user, buffer);
@@ -4126,6 +4546,7 @@ _pack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t * build_ptr, Buf buffer,
 		packstr(build_ptr->job_credential_public_certificate, buffer);
 		pack16(build_ptr->job_file_append, buffer);
 		pack16(build_ptr->job_requeue, buffer);
+		packstr(build_ptr->job_submit_plugins, buffer);
 
 		pack16(build_ptr->kill_on_bad_exit, buffer);
 		pack16(build_ptr->kill_wait, buffer);
@@ -4133,7 +4554,7 @@ _pack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t * build_ptr, Buf buffer,
 		packstr(build_ptr->licenses, buffer);
 
 		packstr(build_ptr->mail_prog, buffer);
-		pack16(build_ptr->max_job_cnt, buffer);
+		pack32(build_ptr->max_job_cnt, buffer);
 		pack32(build_ptr->max_mem_per_cpu, buffer);
 		pack16(build_ptr->max_tasks_per_node, buffer);
 		pack16(build_ptr->min_job_age, buffer);
@@ -4181,100 +4602,565 @@ _pack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t * build_ptr, Buf buffer,
 		packstr(build_ptr->sched_params, buffer);
 		pack16(build_ptr->schedport, buffer);
 		pack16(build_ptr->schedrootfltr, buffer);
+		packstr(build_ptr->sched_logfile, buffer);
+		pack16(build_ptr->sched_log_level, buffer);
 		pack16(build_ptr->sched_time_slice, buffer);
 		packstr(build_ptr->schedtype, buffer);
 		packstr(build_ptr->select_type, buffer);
 		if(build_ptr->select_conf_key_pairs)
 			count = list_count(build_ptr->select_conf_key_pairs);
 
-		pack32(count, buffer);
-		if(count && count != NO_VAL) {
-			ListIterator itr = list_iterator_create(
-				(List)build_ptr->select_conf_key_pairs);
-			config_key_pair_t *key_pair = NULL;
-			while((key_pair = list_next(itr))) {
-				pack_config_key_pair(key_pair,
-						     protocol_version, buffer);
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			ListIterator itr = list_iterator_create(
+				(List)build_ptr->select_conf_key_pairs);
+			config_key_pair_t *key_pair = NULL;
+			while((key_pair = list_next(itr))) {
+				pack_config_key_pair(key_pair,
+						     protocol_version, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		pack16(build_ptr->select_type_param, buffer);
+
+		packstr(build_ptr->slurm_conf, buffer);
+		pack32(build_ptr->slurm_user_id, buffer);
+		packstr(build_ptr->slurm_user_name, buffer);
+		pack32(build_ptr->slurmd_user_id, buffer);
+		packstr(build_ptr->slurmd_user_name, buffer);
+
+		pack16(build_ptr->slurmctld_debug, buffer);
+		packstr(build_ptr->slurmctld_logfile, buffer);
+		packstr(build_ptr->slurmctld_pidfile, buffer);
+		pack32(build_ptr->slurmctld_port, buffer);
+		pack16(build_ptr->slurmctld_port_count, buffer);
+		pack16(build_ptr->slurmctld_timeout, buffer);
+
+		pack16(build_ptr->slurmd_debug, buffer);
+		packstr(build_ptr->slurmd_logfile, buffer);
+		packstr(build_ptr->slurmd_pidfile, buffer);
+		if(!(cluster_flags & CLUSTER_FLAG_MULTSD))
+			pack32(build_ptr->slurmd_port, buffer);
+
+		packstr(build_ptr->slurmd_spooldir, buffer);
+		pack16(build_ptr->slurmd_timeout, buffer);
+		packstr(build_ptr->srun_epilog, buffer);
+		packstr(build_ptr->srun_prolog, buffer);
+		packstr(build_ptr->state_save_location, buffer);
+		packstr(build_ptr->suspend_exc_nodes, buffer);
+		packstr(build_ptr->suspend_exc_parts, buffer);
+		packstr(build_ptr->suspend_program, buffer);
+		pack16(build_ptr->suspend_rate, buffer);
+		pack32(build_ptr->suspend_time, buffer);
+		pack16(build_ptr->suspend_timeout, buffer);
+		packstr(build_ptr->switch_type, buffer);
+
+		packstr(build_ptr->task_epilog, buffer);
+		packstr(build_ptr->task_prolog, buffer);
+		packstr(build_ptr->task_plugin, buffer);
+		pack16(build_ptr->task_plugin_param, buffer);
+		packstr(build_ptr->tmp_fs, buffer);
+		packstr(build_ptr->topology_plugin, buffer);
+		pack16(build_ptr->track_wckey, buffer);
+		pack16(build_ptr->tree_width, buffer);
+
+		pack16(build_ptr->use_pam, buffer);
+		packstr(build_ptr->unkillable_program, buffer);
+		pack16(build_ptr->unkillable_timeout, buffer);
+		packstr(build_ptr->version, buffer);
+		pack16(build_ptr->vsize_factor, buffer);
+
+		pack16(build_ptr->wait_time, buffer);
+
+		pack16(build_ptr->z_16, buffer);
+		pack32(build_ptr->z_32, buffer);
+		packstr(build_ptr->z_char, buffer);
+	} else if (protocol_version >= SLURM_2_1_PROTOCOL_VERSION) {
+		pack_time(build_ptr->last_update, buffer);
+
+		pack16(build_ptr->accounting_storage_enforce, buffer);
+		packstr(build_ptr->accounting_storage_backup_host, buffer);
+		packstr(build_ptr->accounting_storage_host, buffer);
+		packstr(build_ptr->accounting_storage_loc, buffer);
+		packstr("", buffer);
+		pack32(build_ptr->accounting_storage_port, buffer);
+		packstr(build_ptr->accounting_storage_type, buffer);
+		packstr(build_ptr->accounting_storage_user, buffer);
+
+		packstr(build_ptr->authtype, buffer);
+
+		packstr(build_ptr->backup_addr, buffer);
+		packstr(build_ptr->backup_controller, buffer);
+		pack16(build_ptr->batch_start_timeout, buffer);
+		pack_time(build_ptr->boot_time, buffer);
+
+		if (build_ptr->group_info & GROUP_CACHE)
+			uint16_tmp = 1;
+		else
+			uint16_tmp = 0;
+		pack16(uint16_tmp, buffer);	/* cache_groups equivalent */
+		packstr(build_ptr->checkpoint_type, buffer);
+		packstr(build_ptr->cluster_name, buffer);
+		pack16(build_ptr->complete_wait, buffer);
+		packstr(build_ptr->control_addr, buffer);
+		packstr(build_ptr->control_machine, buffer);
+		packstr(build_ptr->crypto_type, buffer);
+
+		pack32(build_ptr->def_mem_per_cpu, buffer);
+		pack32(build_ptr->debug_flags, buffer);
+		pack16(build_ptr->disable_root_jobs, buffer);
+
+		packstr(build_ptr->epilog, buffer);
+		pack32(build_ptr->epilog_msg_time, buffer);
+		packstr(build_ptr->epilog_slurmctld, buffer);
+
+		pack16(build_ptr->fast_schedule, buffer);
+		pack32(build_ptr->first_job_id, buffer);
+
+		pack16(build_ptr->get_env_timeout, buffer);
+
+		pack16(build_ptr->health_check_interval, buffer);
+		packstr(build_ptr->health_check_program, buffer);
+
+		pack16(build_ptr->inactive_limit, buffer);
+
+		pack16(build_ptr->job_acct_gather_freq, buffer);
+		packstr(build_ptr->job_acct_gather_type, buffer);
+
+		packstr(build_ptr->job_ckpt_dir, buffer);
+
+		packstr(build_ptr->job_comp_host, buffer);
+		packstr(build_ptr->job_comp_loc, buffer);
+		packstr("", buffer);
+		pack32((uint32_t)build_ptr->job_comp_port, buffer);
+		packstr(build_ptr->job_comp_type, buffer);
+		packstr(build_ptr->job_comp_user, buffer);
+
+		packstr(build_ptr->job_credential_private_key, buffer);
+		packstr(build_ptr->job_credential_public_certificate, buffer);
+		pack16(build_ptr->job_file_append, buffer);
+		pack16(build_ptr->job_requeue, buffer);
+
+		pack16(build_ptr->kill_on_bad_exit, buffer);
+		pack16(build_ptr->kill_wait, buffer);
+
+		packstr(build_ptr->licenses, buffer);
+
+		packstr(build_ptr->mail_prog, buffer);
+		pack16(build_ptr->max_job_cnt, buffer);
+		pack32(build_ptr->max_mem_per_cpu, buffer);
+		pack16(build_ptr->max_tasks_per_node, buffer);
+		pack16(build_ptr->min_job_age, buffer);
+		packstr(build_ptr->mpi_default, buffer);
+		packstr(build_ptr->mpi_params, buffer);
+		pack16(build_ptr->msg_timeout, buffer);
+
+		pack32(build_ptr->next_job_id, buffer);
+		packstr(build_ptr->node_prefix, buffer);
+
+		pack16(build_ptr->over_time_limit, buffer);
+
+		packstr(build_ptr->plugindir, buffer);
+		packstr(build_ptr->plugstack, buffer);
+		pack16(build_ptr->preempt_mode, buffer);
+		packstr(build_ptr->preempt_type, buffer);
+
+		pack32(build_ptr->priority_decay_hl, buffer);
+		pack32(build_ptr->priority_calc_period, buffer);
+		pack16(build_ptr->priority_favor_small, buffer);
+		pack32(build_ptr->priority_max_age, buffer);
+		pack16(build_ptr->priority_reset_period, buffer);
+		packstr(build_ptr->priority_type, buffer);
+		pack32(build_ptr->priority_weight_age, buffer);
+		pack32(build_ptr->priority_weight_fs, buffer);
+		pack32(build_ptr->priority_weight_js, buffer);
+		pack32(build_ptr->priority_weight_part, buffer);
+		pack32(build_ptr->priority_weight_qos, buffer);
+
+		pack16(build_ptr->private_data, buffer);
+		packstr(build_ptr->proctrack_type, buffer);
+		packstr(build_ptr->prolog, buffer);
+		packstr(build_ptr->prolog_slurmctld, buffer);
+		pack16(build_ptr->propagate_prio_process, buffer);
+		packstr(build_ptr->propagate_rlimits, buffer);
+		packstr(build_ptr->propagate_rlimits_except, buffer);
+
+		packstr(build_ptr->resume_program, buffer);
+		pack16(build_ptr->resume_rate, buffer);
+		pack16(build_ptr->resume_timeout, buffer);
+		pack16(build_ptr->resv_over_run, buffer);
+		pack16(build_ptr->ret2service, buffer);
+
+		packstr(build_ptr->salloc_default_command, buffer);
+		packstr(build_ptr->sched_params, buffer);
+		pack16(build_ptr->schedport, buffer);
+		pack16(build_ptr->schedrootfltr, buffer);
+		pack16(build_ptr->sched_time_slice, buffer);
+		packstr(build_ptr->schedtype, buffer);
+		packstr(build_ptr->select_type, buffer);
+		if(build_ptr->select_conf_key_pairs)
+			count = list_count(build_ptr->select_conf_key_pairs);
+
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			ListIterator itr = list_iterator_create(
+				(List)build_ptr->select_conf_key_pairs);
+			config_key_pair_t *key_pair = NULL;
+			while((key_pair = list_next(itr))) {
+				pack_config_key_pair(key_pair,
+						     protocol_version, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		pack16(build_ptr->select_type_param, buffer);
+
+		packstr(build_ptr->slurm_conf, buffer);
+		pack32(build_ptr->slurm_user_id, buffer);
+		packstr(build_ptr->slurm_user_name, buffer);
+		pack32(build_ptr->slurmd_user_id, buffer);
+		packstr(build_ptr->slurmd_user_name, buffer);
+
+		pack16(build_ptr->slurmctld_debug, buffer);
+		packstr(build_ptr->slurmctld_logfile, buffer);
+		packstr(build_ptr->slurmctld_pidfile, buffer);
+		pack32(build_ptr->slurmctld_port, buffer);
+		pack16(build_ptr->slurmctld_timeout, buffer);
+
+		pack16(build_ptr->slurmd_debug, buffer);
+		packstr(build_ptr->slurmd_logfile, buffer);
+		packstr(build_ptr->slurmd_pidfile, buffer);
+#ifndef MULTIPLE_SLURMD
+		pack32(build_ptr->slurmd_port, buffer);
+#endif
+		packstr(build_ptr->slurmd_spooldir, buffer);
+		pack16(build_ptr->slurmd_timeout, buffer);
+
+		packstr(build_ptr->srun_epilog, buffer);
+		packstr(build_ptr->srun_prolog, buffer);
+		packstr(build_ptr->state_save_location, buffer);
+		packstr(build_ptr->suspend_exc_nodes, buffer);
+		packstr(build_ptr->suspend_exc_parts, buffer);
+		packstr(build_ptr->suspend_program, buffer);
+		pack16(build_ptr->suspend_rate, buffer);
+		pack32(build_ptr->suspend_time, buffer);
+		pack16(build_ptr->suspend_timeout, buffer);
+		packstr(build_ptr->switch_type, buffer);
+
+		packstr(build_ptr->task_epilog, buffer);
+		packstr(build_ptr->task_prolog, buffer);
+		packstr(build_ptr->task_plugin, buffer);
+		pack16(build_ptr->task_plugin_param, buffer);
+		packstr(build_ptr->tmp_fs, buffer);
+		packstr(build_ptr->topology_plugin, buffer);
+		pack16(build_ptr->track_wckey, buffer);
+		pack16(build_ptr->tree_width, buffer);
+
+		pack16(build_ptr->use_pam, buffer);
+		packstr(build_ptr->unkillable_program, buffer);
+		pack16(build_ptr->unkillable_timeout, buffer);
+		packstr(build_ptr->version, buffer);
+
+		pack16(build_ptr->wait_time, buffer);
+
+		pack16(build_ptr->z_16, buffer);
+		pack32(build_ptr->z_32, buffer);
+		packstr(build_ptr->z_char, buffer);
+	}
+}
+
+static int
+_unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **build_buffer_ptr,
+			   Buf buffer, uint16_t protocol_version)
+{
+	uint32_t count = NO_VAL;
+	uint32_t uint32_tmp;
+	uint16_t uint16_tmp;
+	slurm_ctl_conf_info_msg_t *build_ptr;
+	uint32_t cluster_flags = slurmdb_setup_cluster_flags();
+
+	/* alloc memory for structure */
+	build_ptr = xmalloc(sizeof(slurm_ctl_conf_t));
+	*build_buffer_ptr = build_ptr;
+
+	/* initialize this so we don't check for those not sending it */
+	build_ptr->hash_val = NO_VAL;
+
+	/* load the data values */
+	if (protocol_version >= SLURM_2_3_PROTOCOL_VERSION) {
+		/* unpack timestamp of snapshot */
+		safe_unpack_time(&build_ptr->last_update, buffer);
+
+		safe_unpack16(&build_ptr->accounting_storage_enforce, buffer);
+		safe_unpackstr_xmalloc(
+			&build_ptr->accounting_storage_backup_host,
+			&uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->accounting_storage_host,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->accounting_storage_loc,
+				       &uint32_tmp, buffer);
+		safe_unpack32(&build_ptr->accounting_storage_port, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->accounting_storage_type,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->accounting_storage_user,
+				       &uint32_tmp, buffer);
+		safe_unpack16(&build_ptr->acctng_store_job_comment, buffer);
+
+		safe_unpackstr_xmalloc(&build_ptr->authtype,
+				       &uint32_tmp, buffer);
+
+		safe_unpackstr_xmalloc(&build_ptr->backup_addr,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->backup_controller,
+				       &uint32_tmp, buffer);
+		safe_unpack16(&build_ptr->batch_start_timeout, buffer);
+		safe_unpack_time(&build_ptr->boot_time, buffer);
+
+		safe_unpackstr_xmalloc(&build_ptr->checkpoint_type,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->cluster_name,
+				       &uint32_tmp, buffer);
+		safe_unpack16(&build_ptr->complete_wait, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->control_addr,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->control_machine,
+				       &uint32_tmp,buffer);
+		safe_unpackstr_xmalloc(&build_ptr->crypto_type, &uint32_tmp,
+				       buffer);
+
+		safe_unpack32(&build_ptr->def_mem_per_cpu, buffer);
+		safe_unpack32(&build_ptr->debug_flags, buffer);
+		safe_unpack16(&build_ptr->disable_root_jobs, buffer);
+
+		safe_unpack16(&build_ptr->enforce_part_limits, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->epilog, &uint32_tmp,
+				       buffer);
+		safe_unpack32(&build_ptr->epilog_msg_time, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->epilog_slurmctld,
+				       &uint32_tmp, buffer);
+
+		safe_unpack16(&build_ptr->fast_schedule, buffer);
+		safe_unpack32(&build_ptr->first_job_id, buffer);
+
+		safe_unpack16(&build_ptr->get_env_timeout, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->gres_plugins,
+				       &uint32_tmp, buffer);
+		safe_unpack16(&build_ptr->group_info, buffer);
+
+		safe_unpack32(&build_ptr->hash_val, buffer);
+
+		safe_unpack16(&build_ptr->health_check_interval, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->health_check_program,
+				       &uint32_tmp, buffer);
+
+		safe_unpack16(&build_ptr->inactive_limit, buffer);
+
+		safe_unpack16(&build_ptr->job_acct_gather_freq, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->job_acct_gather_type,
+				       &uint32_tmp, buffer);
+
+		safe_unpackstr_xmalloc(&build_ptr->job_ckpt_dir,
+				       &uint32_tmp, buffer);
+
+		safe_unpackstr_xmalloc(&build_ptr->job_comp_host,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->job_comp_loc,
+				       &uint32_tmp, buffer);
+		safe_unpack32(&build_ptr->job_comp_port, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->job_comp_type,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->job_comp_user,
+				       &uint32_tmp, buffer);
+
+		safe_unpackstr_xmalloc(&build_ptr->job_credential_private_key,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->
+				       job_credential_public_certificate,
+				       &uint32_tmp, buffer);
+		safe_unpack16(&build_ptr->job_file_append, buffer);
+		safe_unpack16(&build_ptr->job_requeue, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->job_submit_plugins,
+				       &uint32_tmp, buffer);
+
+		safe_unpack16(&build_ptr->kill_on_bad_exit, buffer);
+		safe_unpack16(&build_ptr->kill_wait, buffer);
+
+		safe_unpackstr_xmalloc(&build_ptr->licenses,
+				       &uint32_tmp, buffer);
+
+		safe_unpackstr_xmalloc(&build_ptr->mail_prog,
+				       &uint32_tmp, buffer);
+		safe_unpack32(&build_ptr->max_job_cnt, buffer);
+		safe_unpack32(&build_ptr->max_job_id, buffer);
+		safe_unpack32(&build_ptr->max_mem_per_cpu, buffer);
+		safe_unpack32(&build_ptr->max_step_cnt, buffer);
+		safe_unpack16(&build_ptr->max_tasks_per_node, buffer);
+		safe_unpack16(&build_ptr->min_job_age, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->mpi_default,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->mpi_params,
+				       &uint32_tmp, buffer);
+		safe_unpack16(&build_ptr->msg_timeout, buffer);
+
+		safe_unpack32(&build_ptr->next_job_id, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->node_prefix,
+				       &uint32_tmp, buffer);
+
+		safe_unpack16(&build_ptr->over_time_limit, buffer);
+
+		safe_unpackstr_xmalloc(&build_ptr->plugindir,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->plugstack,
+				       &uint32_tmp, buffer);
+		safe_unpack16(&build_ptr->preempt_mode, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->preempt_type,
+				       &uint32_tmp, buffer);
+
+		safe_unpack32(&build_ptr->priority_decay_hl, buffer);
+		safe_unpack32(&build_ptr->priority_calc_period, buffer);
+		safe_unpack16(&build_ptr->priority_favor_small, buffer);
+		safe_unpack32(&build_ptr->priority_max_age, buffer);
+		safe_unpack16(&build_ptr->priority_reset_period, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->priority_type, &uint32_tmp,
+				       buffer);
+		safe_unpack32(&build_ptr->priority_weight_age, buffer);
+		safe_unpack32(&build_ptr->priority_weight_fs, buffer);
+		safe_unpack32(&build_ptr->priority_weight_js, buffer);
+		safe_unpack32(&build_ptr->priority_weight_part, buffer);
+		safe_unpack32(&build_ptr->priority_weight_qos, buffer);
+
+		safe_unpack16(&build_ptr->private_data, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->proctrack_type, &uint32_tmp,
+				       buffer);
+		safe_unpackstr_xmalloc(&build_ptr->prolog, &uint32_tmp,
+				       buffer);
+		safe_unpackstr_xmalloc(&build_ptr->prolog_slurmctld,
+				       &uint32_tmp, buffer);
+		safe_unpack16(&build_ptr->propagate_prio_process, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->propagate_rlimits,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->propagate_rlimits_except,
+				       &uint32_tmp, buffer);
+
+		safe_unpackstr_xmalloc(&build_ptr->resume_program,
+				       &uint32_tmp, buffer);
+		safe_unpack16(&build_ptr->resume_rate, buffer);
+		safe_unpack16(&build_ptr->resume_timeout, buffer);
+		safe_unpack16(&build_ptr->resv_over_run, buffer);
+		safe_unpack16(&build_ptr->ret2service, buffer);
+
+		safe_unpackstr_xmalloc(&build_ptr->salloc_default_command,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->sched_params,
+				       &uint32_tmp, buffer);
+		safe_unpack16(&build_ptr->schedport, buffer);
+		safe_unpack16(&build_ptr->schedrootfltr, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->sched_logfile,
+				       &uint32_tmp, buffer);
+		safe_unpack16(&build_ptr->sched_log_level, buffer);
+		safe_unpack16(&build_ptr->sched_time_slice, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->schedtype,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->select_type,
+				       &uint32_tmp, buffer);
+		safe_unpack32(&count, buffer);
+		if (count != NO_VAL) {
+			List tmp_list = list_create(destroy_config_key_pair);
+			config_key_pair_t *object = NULL;
+			int i;
+			for(i=0; i<count; i++) {
+				if(unpack_config_key_pair(
+					   (void *)&object, protocol_version,
+					   buffer)
+				   == SLURM_ERROR)
+					goto unpack_error;
+				list_append(tmp_list, object);
 			}
-			list_iterator_destroy(itr);
+			build_ptr->select_conf_key_pairs = (void *)tmp_list;
 		}
-		count = NO_VAL;
-
-		pack16(build_ptr->select_type_param, buffer);
-
-		packstr(build_ptr->slurm_conf, buffer);
-		pack32(build_ptr->slurm_user_id, buffer);
-		packstr(build_ptr->slurm_user_name, buffer);
-		pack32(build_ptr->slurmd_user_id, buffer);
-		packstr(build_ptr->slurmd_user_name, buffer);
-
-		pack16(build_ptr->slurmctld_debug, buffer);
-		packstr(build_ptr->slurmctld_logfile, buffer);
-		packstr(build_ptr->slurmctld_pidfile, buffer);
-		pack32(build_ptr->slurmctld_port, buffer);
-		pack16(build_ptr->slurmctld_timeout, buffer);
 
-		pack16(build_ptr->slurmd_debug, buffer);
-		packstr(build_ptr->slurmd_logfile, buffer);
-		packstr(build_ptr->slurmd_pidfile, buffer);
-#ifndef MULTIPLE_SLURMD
-		pack32(build_ptr->slurmd_port, buffer);
-#endif
-		packstr(build_ptr->slurmd_spooldir, buffer);
-		pack16(build_ptr->slurmd_timeout, buffer);
+		safe_unpack16(&build_ptr->select_type_param, buffer);
 
-		packstr(build_ptr->srun_epilog, buffer);
-		packstr(build_ptr->srun_prolog, buffer);
-		packstr(build_ptr->state_save_location, buffer);
-		packstr(build_ptr->suspend_exc_nodes, buffer);
-		packstr(build_ptr->suspend_exc_parts, buffer);
-		packstr(build_ptr->suspend_program, buffer);
-		pack16(build_ptr->suspend_rate, buffer);
-		pack32(build_ptr->suspend_time, buffer);
-		pack16(build_ptr->suspend_timeout, buffer);
-		packstr(build_ptr->switch_type, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->slurm_conf,
+				       &uint32_tmp, buffer);
+		safe_unpack32(&build_ptr->slurm_user_id, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->slurm_user_name,
+				       &uint32_tmp, buffer);
+		safe_unpack32(&build_ptr->slurmd_user_id, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->slurmd_user_name,
+				       &uint32_tmp, buffer);
 
-		packstr(build_ptr->task_epilog, buffer);
-		packstr(build_ptr->task_prolog, buffer);
-		packstr(build_ptr->task_plugin, buffer);
-		pack16(build_ptr->task_plugin_param, buffer);
-		packstr(build_ptr->tmp_fs, buffer);
-		packstr(build_ptr->topology_plugin, buffer);
-		pack16(build_ptr->track_wckey, buffer);
-		pack16(build_ptr->tree_width, buffer);
+		safe_unpack16(&build_ptr->slurmctld_debug, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->slurmctld_logfile,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->slurmctld_pidfile,
+				       &uint32_tmp, buffer);
+		safe_unpack32(&build_ptr->slurmctld_port, buffer);
+		safe_unpack16(&build_ptr->slurmctld_port_count, buffer);
+		safe_unpack16(&build_ptr->slurmctld_timeout, buffer);
 
-		pack16(build_ptr->use_pam, buffer);
-		packstr(build_ptr->unkillable_program, buffer);
-		pack16(build_ptr->unkillable_timeout, buffer);
-		packstr(build_ptr->version, buffer);
+		safe_unpack16(&build_ptr->slurmd_debug, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->slurmd_logfile, &uint32_tmp,
+				       buffer);
+		safe_unpackstr_xmalloc(&build_ptr->slurmd_pidfile, &uint32_tmp,
+				       buffer);
+		if (!(cluster_flags & CLUSTER_FLAG_MULTSD))
+			safe_unpack32(&build_ptr->slurmd_port, buffer);
 
-		pack16(build_ptr->wait_time, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->slurmd_spooldir,
+				       &uint32_tmp, buffer);
+		safe_unpack16(&build_ptr->slurmd_timeout, buffer);
 
-		pack16(build_ptr->z_16, buffer);
-		pack32(build_ptr->z_32, buffer);
-		packstr(build_ptr->z_char, buffer);
-	}
-}
+		safe_unpackstr_xmalloc(&build_ptr->srun_epilog,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->srun_prolog,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->state_save_location,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->suspend_exc_nodes,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->suspend_exc_parts,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->suspend_program,
+				       &uint32_tmp, buffer);
+		safe_unpack16(&build_ptr->suspend_rate, buffer);
+		safe_unpack32(&build_ptr->suspend_time, buffer);
+		safe_unpack16(&build_ptr->suspend_timeout, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->switch_type,
+				       &uint32_tmp, buffer);
 
-static int
-_unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **build_buffer_ptr,
-			   Buf buffer, uint16_t protocol_version)
-{
-	uint32_t count = NO_VAL;
-	uint32_t uint32_tmp;
-	uint16_t uint16_tmp;
-	slurm_ctl_conf_info_msg_t *build_ptr;
-	uint32_t cluster_flags = slurmdb_setup_cluster_flags();
+		safe_unpackstr_xmalloc(&build_ptr->task_epilog,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->task_prolog,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->task_plugin,
+				       &uint32_tmp, buffer);
+		safe_unpack16(&build_ptr->task_plugin_param, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->tmp_fs, &uint32_tmp,
+				       buffer);
+		safe_unpackstr_xmalloc(&build_ptr->topology_plugin,
+				       &uint32_tmp, buffer);
+		safe_unpack16(&build_ptr->track_wckey, buffer);
+		safe_unpack16(&build_ptr->tree_width, buffer);
 
-	/* alloc memory for structure */
-	build_ptr = xmalloc(sizeof(slurm_ctl_conf_t));
-	*build_buffer_ptr = build_ptr;
+		safe_unpack16(&build_ptr->use_pam, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->unkillable_program,
+				       &uint32_tmp, buffer);
+		safe_unpack16(&build_ptr->unkillable_timeout, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->version,
+				       &uint32_tmp, buffer);
+		safe_unpack16(&build_ptr->vsize_factor, buffer);
 
-	/* initialize this so we don't check for those not sending it */
-	build_ptr->hash_val = NO_VAL;
+		safe_unpack16(&build_ptr->wait_time, buffer);
 
-	/* load the data values */
-	if(protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
+		safe_unpack16(&build_ptr->z_16, buffer);
+		safe_unpack32(&build_ptr->z_32, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->z_char, &uint32_tmp,
+				       buffer);
+	} else if (protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
 		/* unpack timestamp of snapshot */
 		safe_unpack_time(&build_ptr->last_update, buffer);
 
@@ -4377,7 +5263,9 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **build_buffer_ptr,
 		safe_unpackstr_xmalloc(&build_ptr->mail_prog,
 				       &uint32_tmp, buffer);
 		safe_unpack32(&build_ptr->max_job_cnt, buffer);
+		build_ptr->max_job_id = DEFAULT_MAX_JOB_ID;
 		safe_unpack32(&build_ptr->max_mem_per_cpu, buffer);
+		build_ptr->max_step_cnt = DEFAULT_MAX_STEP_COUNT;
 		safe_unpack16(&build_ptr->max_tasks_per_node, buffer);
 		safe_unpack16(&build_ptr->min_job_age, buffer);
 		safe_unpackstr_xmalloc(&build_ptr->mpi_default,
@@ -4448,7 +5336,7 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **build_buffer_ptr,
 		safe_unpackstr_xmalloc(&build_ptr->select_type,
 				       &uint32_tmp, buffer);
 		safe_unpack32(&count, buffer);
-		if(count != NO_VAL) {
+		if (count != NO_VAL) {
 			List tmp_list = list_create(destroy_config_key_pair);
 			config_key_pair_t *object = NULL;
 			int i;
@@ -4488,7 +5376,7 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **build_buffer_ptr,
 				       buffer);
 		safe_unpackstr_xmalloc(&build_ptr->slurmd_pidfile, &uint32_tmp,
 				       buffer);
-		if(!(cluster_flags & CLUSTER_FLAG_MULTSD))
+		if (!(cluster_flags & CLUSTER_FLAG_MULTSD))
 			safe_unpack32(&build_ptr->slurmd_port, buffer);
 
 		safe_unpackstr_xmalloc(&build_ptr->slurmd_spooldir,
@@ -4541,7 +5429,7 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **build_buffer_ptr,
 		safe_unpack32(&build_ptr->z_32, buffer);
 		safe_unpackstr_xmalloc(&build_ptr->z_char, &uint32_tmp,
 				       buffer);
-	} else if(protocol_version >= SLURM_2_1_PROTOCOL_VERSION) {
+	} else if (protocol_version >= SLURM_2_1_PROTOCOL_VERSION) {
 		uint16_t max_job_cnt;
 		char *tmp_str = NULL;
 		/* unpack timestamp of snapshot */
@@ -4647,8 +5535,10 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **build_buffer_ptr,
 		safe_unpackstr_xmalloc(&build_ptr->mail_prog,
 				       &uint32_tmp, buffer);
 		max_job_cnt = MIN(build_ptr->max_job_cnt, 0xfffe);
+		build_ptr->max_job_id = DEFAULT_MAX_JOB_ID;
 		safe_unpack16(&max_job_cnt, buffer);
 		safe_unpack32(&build_ptr->max_mem_per_cpu, buffer);
+		build_ptr->max_step_cnt = DEFAULT_MAX_STEP_COUNT;
 		safe_unpack16(&build_ptr->max_tasks_per_node, buffer);
 		safe_unpack16(&build_ptr->min_job_age, buffer);
 		safe_unpackstr_xmalloc(&build_ptr->mpi_default,
@@ -4802,33 +5692,176 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **build_buffer_ptr,
 		safe_unpackstr_xmalloc(&build_ptr->version,
 				       &uint32_tmp, buffer);
 
-		safe_unpack16(&build_ptr->wait_time, buffer);
+		safe_unpack16(&build_ptr->wait_time, buffer);
+
+		safe_unpack16(&build_ptr->z_16, buffer);
+		safe_unpack32(&build_ptr->z_32, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->z_char, &uint32_tmp,
+				       buffer);
+	}
+	return SLURM_SUCCESS;
+
+unpack_error:
+	slurm_free_ctl_conf(build_ptr);
+	*build_buffer_ptr = NULL;
+	return SLURM_ERROR;
+}
+
+/* _pack_job_desc_msg
+ * packs a job_desc struct
+ * IN job_desc_ptr - pointer to the job descriptor to pack
+ * IN/OUT buffer - destination of the pack, contains pointers that are
+ *			automatically updated
+ */
+static void
+_pack_job_desc_msg(job_desc_msg_t * job_desc_ptr, Buf buffer,
+		   uint16_t protocol_version)
+{
+	/* load the data values */
+	if (protocol_version >= SLURM_2_3_PROTOCOL_VERSION) {
+		pack16(job_desc_ptr->contiguous, buffer);
+		pack16(job_desc_ptr->task_dist, buffer);
+		pack16(job_desc_ptr->kill_on_node_fail, buffer);
+		packstr(job_desc_ptr->features, buffer);
+		packstr(job_desc_ptr->gres, buffer);
+		pack32(job_desc_ptr->job_id, buffer);
+		packstr(job_desc_ptr->name, buffer);
+
+		packstr(job_desc_ptr->alloc_node, buffer);
+		pack32(job_desc_ptr->alloc_sid, buffer);
+		pack16(job_desc_ptr->pn_min_cpus, buffer);
+		pack32(job_desc_ptr->pn_min_memory, buffer);
+		pack32(job_desc_ptr->pn_min_tmp_disk, buffer);
+
+		packstr(job_desc_ptr->partition, buffer);
+		pack32(job_desc_ptr->priority, buffer);
+		packstr(job_desc_ptr->dependency, buffer);
+		packstr(job_desc_ptr->account, buffer);
+		packstr(job_desc_ptr->comment, buffer);
+		pack16(job_desc_ptr->nice, buffer);
+		packstr(job_desc_ptr->qos, buffer);
+
+		pack8(job_desc_ptr->open_mode,   buffer);
+		pack8(job_desc_ptr->overcommit,  buffer);
+		pack16(job_desc_ptr->acctg_freq, buffer);
+		pack32(job_desc_ptr->num_tasks,  buffer);
+		pack16(job_desc_ptr->ckpt_interval, buffer);
+
+		packstr(job_desc_ptr->req_nodes, buffer);
+		packstr(job_desc_ptr->exc_nodes, buffer);
+		packstr_array(job_desc_ptr->environment,
+			      job_desc_ptr->env_size, buffer);
+		packstr_array(job_desc_ptr->spank_job_env,
+			      job_desc_ptr->spank_job_env_size, buffer);
+		packstr(job_desc_ptr->script, buffer);
+		packstr_array(job_desc_ptr->argv, job_desc_ptr->argc, buffer);
+
+		packstr(job_desc_ptr->std_err, buffer);
+		packstr(job_desc_ptr->std_in, buffer);
+		packstr(job_desc_ptr->std_out, buffer);
+		packstr(job_desc_ptr->work_dir, buffer);
+		packstr(job_desc_ptr->ckpt_dir, buffer);
+
+		pack16(job_desc_ptr->immediate, buffer);
+		pack16(job_desc_ptr->requeue, buffer);
+		pack16(job_desc_ptr->shared, buffer);
+		pack16(job_desc_ptr->cpus_per_task, buffer);
+		pack16(job_desc_ptr->ntasks_per_node, buffer);
+		pack16(job_desc_ptr->ntasks_per_socket, buffer);
+		pack16(job_desc_ptr->ntasks_per_core, buffer);
+
+		pack16(job_desc_ptr->plane_size, buffer);
+		pack16(job_desc_ptr->cpu_bind_type, buffer);
+		pack16(job_desc_ptr->mem_bind_type, buffer);
+		packstr(job_desc_ptr->cpu_bind, buffer);
+		packstr(job_desc_ptr->mem_bind, buffer);
+
+		pack32(job_desc_ptr->time_limit, buffer);
+		pack32(job_desc_ptr->time_min, buffer);
+		pack32(job_desc_ptr->min_cpus, buffer);
+		pack32(job_desc_ptr->max_cpus, buffer);
+		pack32(job_desc_ptr->min_nodes, buffer);
+		pack32(job_desc_ptr->max_nodes, buffer);
+		pack16(job_desc_ptr->sockets_per_node, buffer);
+		pack16(job_desc_ptr->cores_per_socket, buffer);
+		pack16(job_desc_ptr->threads_per_core, buffer);
+		pack32(job_desc_ptr->user_id, buffer);
+		pack32(job_desc_ptr->group_id, buffer);
 
-		safe_unpack16(&build_ptr->z_16, buffer);
-		safe_unpack32(&build_ptr->z_32, buffer);
-		safe_unpackstr_xmalloc(&build_ptr->z_char, &uint32_tmp,
-				       buffer);
-	}
-	return SLURM_SUCCESS;
+		pack16(job_desc_ptr->alloc_resp_port, buffer);
+		pack16(job_desc_ptr->other_port, buffer);
+		packstr(job_desc_ptr->network, buffer);
+		pack_time(job_desc_ptr->begin_time, buffer);
+		pack_time(job_desc_ptr->end_time, buffer);
 
-unpack_error:
-	slurm_free_ctl_conf(build_ptr);
-	*build_buffer_ptr = NULL;
-	return SLURM_ERROR;
-}
+		packstr(job_desc_ptr->licenses, buffer);
+		pack16(job_desc_ptr->mail_type, buffer);
+		packstr(job_desc_ptr->mail_user, buffer);
+		packstr(job_desc_ptr->reservation, buffer);
+		pack16(job_desc_ptr->warn_signal, buffer);
+		pack16(job_desc_ptr->warn_time, buffer);
+		packstr(job_desc_ptr->wckey, buffer);
+		pack32(job_desc_ptr->req_switch, buffer);
+		pack32(job_desc_ptr->wait4switch, buffer);
 
-/* _pack_job_desc_msg
- * packs a job_desc struct
- * IN job_desc_ptr - pointer to the job descriptor to pack
- * IN/OUT buffer - destination of the pack, contains pointers that are
- *			automatically updated
- */
-static void
-_pack_job_desc_msg(job_desc_msg_t * job_desc_ptr, Buf buffer,
-		   uint16_t protocol_version)
-{
-	/* load the data values */
-	if(protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
+		if (job_desc_ptr->select_jobinfo) {
+			select_g_select_jobinfo_pack(
+				job_desc_ptr->select_jobinfo,
+				buffer, protocol_version);
+		} else {
+			job_desc_ptr->select_jobinfo =
+				select_g_select_jobinfo_alloc();
+			if(job_desc_ptr->geometry[0] != (uint16_t) NO_VAL)
+				select_g_select_jobinfo_set(
+					job_desc_ptr->select_jobinfo,
+					SELECT_JOBDATA_GEOMETRY,
+					job_desc_ptr->geometry);
+
+			if (job_desc_ptr->conn_type[0] != (uint16_t) NO_VAL)
+				select_g_select_jobinfo_set(
+					job_desc_ptr->select_jobinfo,
+					SELECT_JOBDATA_CONN_TYPE,
+					&(job_desc_ptr->conn_type));
+			if (job_desc_ptr->reboot != (uint16_t) NO_VAL)
+				select_g_select_jobinfo_set(
+					job_desc_ptr->select_jobinfo,
+					SELECT_JOBDATA_REBOOT,
+					&(job_desc_ptr->reboot));
+			if (job_desc_ptr->rotate != (uint16_t) NO_VAL)
+				select_g_select_jobinfo_set(
+					job_desc_ptr->select_jobinfo,
+					SELECT_JOBDATA_ROTATE,
+					&(job_desc_ptr->rotate));
+			if (job_desc_ptr->blrtsimage) {
+				select_g_select_jobinfo_set(
+					job_desc_ptr->select_jobinfo,
+					SELECT_JOBDATA_BLRTS_IMAGE,
+					job_desc_ptr->blrtsimage);
+			}
+			if (job_desc_ptr->linuximage)
+				select_g_select_jobinfo_set(
+					job_desc_ptr->select_jobinfo,
+					SELECT_JOBDATA_LINUX_IMAGE,
+					job_desc_ptr->linuximage);
+			if (job_desc_ptr->mloaderimage)
+				select_g_select_jobinfo_set(
+					job_desc_ptr->select_jobinfo,
+					SELECT_JOBDATA_MLOADER_IMAGE,
+					job_desc_ptr->mloaderimage);
+			if (job_desc_ptr->ramdiskimage)
+				select_g_select_jobinfo_set(
+					job_desc_ptr->select_jobinfo,
+					SELECT_JOBDATA_RAMDISK_IMAGE,
+					job_desc_ptr->ramdiskimage);
+			select_g_select_jobinfo_pack(
+				job_desc_ptr->select_jobinfo,
+				buffer, protocol_version);
+			select_g_select_jobinfo_free(
+				job_desc_ptr->select_jobinfo);
+			job_desc_ptr->select_jobinfo = NULL;
+		}
+		pack16(job_desc_ptr->wait_all_nodes, buffer);
+	} else if (protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
 		pack16(job_desc_ptr->contiguous, buffer);
 		pack16(job_desc_ptr->task_dist, buffer);
 		pack16(job_desc_ptr->kill_on_node_fail, buffer);
@@ -4912,7 +5945,7 @@ _pack_job_desc_msg(job_desc_msg_t * job_desc_ptr, Buf buffer,
 		pack16(job_desc_ptr->warn_time, buffer);
 		packstr(job_desc_ptr->wckey, buffer);
 
-		if(job_desc_ptr->select_jobinfo) {
+		if (job_desc_ptr->select_jobinfo) {
 			select_g_select_jobinfo_pack(
 				job_desc_ptr->select_jobinfo,
 				buffer, protocol_version);
@@ -4925,7 +5958,7 @@ _pack_job_desc_msg(job_desc_msg_t * job_desc_ptr, Buf buffer,
 					SELECT_JOBDATA_GEOMETRY,
 					job_desc_ptr->geometry);
 
-			if (job_desc_ptr->conn_type != (uint16_t) NO_VAL)
+			if (job_desc_ptr->conn_type[0] != (uint16_t) NO_VAL)
 				select_g_select_jobinfo_set(
 					job_desc_ptr->select_jobinfo,
 					SELECT_JOBDATA_CONN_TYPE,
@@ -4969,7 +6002,7 @@ _pack_job_desc_msg(job_desc_msg_t * job_desc_ptr, Buf buffer,
 			job_desc_ptr->select_jobinfo = NULL;
 		}
 		pack16(job_desc_ptr->wait_all_nodes, buffer);
-	} else if(protocol_version >= SLURM_2_1_PROTOCOL_VERSION) {
+	} else if (protocol_version >= SLURM_2_1_PROTOCOL_VERSION) {
 		pack16(job_desc_ptr->contiguous, buffer);
 		pack16(job_desc_ptr->task_dist, buffer);
 		pack16(job_desc_ptr->kill_on_node_fail, buffer);
@@ -5064,7 +6097,7 @@ _pack_job_desc_msg(job_desc_msg_t * job_desc_ptr, Buf buffer,
 					SELECT_JOBDATA_GEOMETRY,
 					job_desc_ptr->geometry);
 
-			if (job_desc_ptr->conn_type != (uint16_t) NO_VAL)
+			if (job_desc_ptr->conn_type[0] != (uint16_t) NO_VAL)
 				select_g_select_jobinfo_set(
 					job_desc_ptr->select_jobinfo,
 					SELECT_JOBDATA_CONN_TYPE,
@@ -5124,7 +6157,7 @@ _unpack_job_desc_msg(job_desc_msg_t ** job_desc_buffer_ptr, Buf buffer,
 	job_desc_msg_t *job_desc_ptr;
 
 	/* alloc memory for structure */
-	if(protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_2_3_PROTOCOL_VERSION) {
 		job_desc_ptr = xmalloc(sizeof(job_desc_msg_t));
 		*job_desc_buffer_ptr = job_desc_ptr;
 
@@ -5236,6 +6269,8 @@ _unpack_job_desc_msg(job_desc_msg_t ** job_desc_buffer_ptr, Buf buffer,
 		safe_unpack16(&job_desc_ptr->warn_time, buffer);
 		safe_unpackstr_xmalloc(&job_desc_ptr->wckey,
 				       &uint32_tmp, buffer);
+		safe_unpack32(&job_desc_ptr->req_switch, buffer);
+		safe_unpack32(&job_desc_ptr->wait4switch, buffer);
 
 		if (select_g_select_jobinfo_unpack(
 			    &job_desc_ptr->select_jobinfo,
@@ -5246,7 +6281,7 @@ _unpack_job_desc_msg(job_desc_msg_t ** job_desc_buffer_ptr, Buf buffer,
 		 * set in the select_jobinfo structure.
 		 */
 		job_desc_ptr->geometry[0] = (uint16_t)NO_VAL;
-		job_desc_ptr->conn_type = (uint16_t)NO_VAL;
+		job_desc_ptr->conn_type[0] = (uint16_t)NO_VAL;
 		job_desc_ptr->reboot = (uint16_t)NO_VAL;
 		job_desc_ptr->rotate = (uint16_t)NO_VAL;
 		job_desc_ptr->blrtsimage = NULL;
@@ -5254,7 +6289,137 @@ _unpack_job_desc_msg(job_desc_msg_t ** job_desc_buffer_ptr, Buf buffer,
 		job_desc_ptr->mloaderimage = NULL;
 		job_desc_ptr->ramdiskimage = NULL;
 		safe_unpack16(&job_desc_ptr->wait_all_nodes, buffer);
-	} else if(protocol_version >= SLURM_2_1_PROTOCOL_VERSION) {
+	} else if (protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
+		job_desc_ptr = xmalloc(sizeof(job_desc_msg_t));
+		*job_desc_buffer_ptr = job_desc_ptr;
+
+		/* load the data values */
+		safe_unpack16(&job_desc_ptr->contiguous, buffer);
+		safe_unpack16(&job_desc_ptr->task_dist, buffer);
+		safe_unpack16(&job_desc_ptr->kill_on_node_fail, buffer);
+		safe_unpackstr_xmalloc(&job_desc_ptr->features,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&job_desc_ptr->gres, &uint32_tmp,buffer);
+		safe_unpack32(&job_desc_ptr->job_id, buffer);
+		safe_unpackstr_xmalloc(&job_desc_ptr->name,
+				       &uint32_tmp, buffer);
+
+		safe_unpackstr_xmalloc(&job_desc_ptr->alloc_node,
+				       &uint32_tmp, buffer);
+		safe_unpack32(&job_desc_ptr->alloc_sid, buffer);
+		safe_unpack16(&job_desc_ptr->pn_min_cpus, buffer);
+		safe_unpack32(&job_desc_ptr->pn_min_memory, buffer);
+		safe_unpack32(&job_desc_ptr->pn_min_tmp_disk, buffer);
+
+		safe_unpackstr_xmalloc(&job_desc_ptr->partition,
+				       &uint32_tmp, buffer);
+		safe_unpack32(&job_desc_ptr->priority, buffer);
+		safe_unpackstr_xmalloc(&job_desc_ptr->dependency,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&job_desc_ptr->account,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&job_desc_ptr->comment,
+				       &uint32_tmp, buffer);
+		safe_unpack16(&job_desc_ptr->nice, buffer);
+		safe_unpackstr_xmalloc(&job_desc_ptr->qos, &uint32_tmp,
+				       buffer);
+
+		safe_unpack8(&job_desc_ptr->open_mode,   buffer);
+		safe_unpack8(&job_desc_ptr->overcommit,  buffer);
+		safe_unpack16(&job_desc_ptr->acctg_freq, buffer);
+		safe_unpack32(&job_desc_ptr->num_tasks,  buffer);
+		safe_unpack16(&job_desc_ptr->ckpt_interval, buffer);
+
+		safe_unpackstr_xmalloc(&job_desc_ptr->req_nodes,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&job_desc_ptr->exc_nodes,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_array(&job_desc_ptr->environment,
+				     &job_desc_ptr->env_size, buffer);
+		safe_unpackstr_array(&job_desc_ptr->spank_job_env,
+				     &job_desc_ptr->spank_job_env_size,
+				     buffer);
+		safe_unpackstr_xmalloc(&job_desc_ptr->script,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_array(&job_desc_ptr->argv,
+				     &job_desc_ptr->argc, buffer);
+
+		safe_unpackstr_xmalloc(&job_desc_ptr->std_err,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&job_desc_ptr->std_in,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&job_desc_ptr->std_out,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&job_desc_ptr->work_dir,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&job_desc_ptr->ckpt_dir,
+				       &uint32_tmp, buffer);
+
+		safe_unpack16(&job_desc_ptr->immediate, buffer);
+		safe_unpack16(&job_desc_ptr->requeue, buffer);
+		safe_unpack16(&job_desc_ptr->shared, buffer);
+		safe_unpack16(&job_desc_ptr->cpus_per_task, buffer);
+		safe_unpack16(&job_desc_ptr->ntasks_per_node, buffer);
+		safe_unpack16(&job_desc_ptr->ntasks_per_socket, buffer);
+		safe_unpack16(&job_desc_ptr->ntasks_per_core, buffer);
+
+		safe_unpack16(&job_desc_ptr->plane_size, buffer);
+		safe_unpack16(&job_desc_ptr->cpu_bind_type, buffer);
+		safe_unpack16(&job_desc_ptr->mem_bind_type, buffer);
+		safe_unpackstr_xmalloc(&job_desc_ptr->cpu_bind,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&job_desc_ptr->mem_bind,
+				       &uint32_tmp, buffer);
+
+		safe_unpack32(&job_desc_ptr->time_limit, buffer);
+		safe_unpack32(&job_desc_ptr->time_min, buffer);
+		safe_unpack32(&job_desc_ptr->min_cpus, buffer);
+		safe_unpack32(&job_desc_ptr->max_cpus, buffer);
+		safe_unpack32(&job_desc_ptr->min_nodes, buffer);
+		safe_unpack32(&job_desc_ptr->max_nodes, buffer);
+		safe_unpack16(&job_desc_ptr->sockets_per_node, buffer);
+		safe_unpack16(&job_desc_ptr->cores_per_socket, buffer);
+		safe_unpack16(&job_desc_ptr->threads_per_core, buffer);
+		safe_unpack32(&job_desc_ptr->user_id, buffer);
+		safe_unpack32(&job_desc_ptr->group_id, buffer);
+
+		safe_unpack16(&job_desc_ptr->alloc_resp_port, buffer);
+		safe_unpack16(&job_desc_ptr->other_port, buffer);
+		safe_unpackstr_xmalloc(&job_desc_ptr->network,
+				       &uint32_tmp, buffer);
+		safe_unpack_time(&job_desc_ptr->begin_time, buffer);
+		safe_unpack_time(&job_desc_ptr->end_time, buffer);
+
+		safe_unpackstr_xmalloc(&job_desc_ptr->licenses,
+				       &uint32_tmp, buffer);
+		safe_unpack16(&job_desc_ptr->mail_type, buffer);
+		safe_unpackstr_xmalloc(&job_desc_ptr->mail_user,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&job_desc_ptr->reservation,
+				       &uint32_tmp, buffer);
+		safe_unpack16(&job_desc_ptr->warn_signal, buffer);
+		safe_unpack16(&job_desc_ptr->warn_time, buffer);
+		safe_unpackstr_xmalloc(&job_desc_ptr->wckey,
+				       &uint32_tmp, buffer);
+
+		if (select_g_select_jobinfo_unpack(
+			    &job_desc_ptr->select_jobinfo,
+			    buffer, protocol_version))
+			goto unpack_error;
+
+		/* These are set so we don't confuse them later for what is
+		 * set in the select_jobinfo structure.
+		 */
+		job_desc_ptr->geometry[0] = (uint16_t)NO_VAL;
+		job_desc_ptr->conn_type[0] = (uint16_t)NO_VAL;
+		job_desc_ptr->reboot = (uint16_t)NO_VAL;
+		job_desc_ptr->rotate = (uint16_t)NO_VAL;
+		job_desc_ptr->blrtsimage = NULL;
+		job_desc_ptr->linuximage = NULL;
+		job_desc_ptr->mloaderimage = NULL;
+		job_desc_ptr->ramdiskimage = NULL;
+		safe_unpack16(&job_desc_ptr->wait_all_nodes, buffer);
+	} else if (protocol_version >= SLURM_2_1_PROTOCOL_VERSION) {
 		job_desc_ptr = xmalloc(sizeof(job_desc_msg_t));
 		*job_desc_buffer_ptr = job_desc_ptr;
 
@@ -5374,7 +6539,7 @@ _unpack_job_desc_msg(job_desc_msg_t ** job_desc_buffer_ptr, Buf buffer,
 		 * set in the select_jobinfo structure.
 		 */
 		job_desc_ptr->geometry[0] = (uint16_t)NO_VAL;
-		job_desc_ptr->conn_type = (uint16_t)NO_VAL;
+		job_desc_ptr->conn_type[0] = (uint16_t)NO_VAL;
 		job_desc_ptr->reboot = (uint16_t)NO_VAL;
 		job_desc_ptr->rotate = (uint16_t)NO_VAL;
 		job_desc_ptr->blrtsimage = NULL;
@@ -5670,13 +6835,85 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
-static void
-_pack_launch_tasks_request_msg(launch_tasks_request_msg_t * msg, Buf buffer,
-			       uint16_t protocol_version)
-{
-	int i=0;
-	xassert(msg != NULL);
-	if (protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
+static void
+_pack_launch_tasks_request_msg(launch_tasks_request_msg_t * msg, Buf buffer,
+			       uint16_t protocol_version)
+{
+	uint32_t cluster_flags = slurmdb_setup_cluster_flags();
+	int i=0;
+	xassert(msg != NULL);
+	if (protocol_version >= SLURM_2_3_PROTOCOL_VERSION) {
+		pack32(msg->job_id, buffer);
+		pack32(msg->job_step_id, buffer);
+		pack32(msg->ntasks, buffer);
+		pack32(msg->uid, buffer);
+		pack32(msg->gid, buffer);
+		pack32(msg->job_mem_lim, buffer);
+		pack32(msg->step_mem_lim, buffer);
+
+		pack32(msg->nnodes, buffer);
+		pack16(msg->cpus_per_task, buffer);
+		pack16(msg->task_dist, buffer);
+
+		slurm_cred_pack(msg->cred, buffer);
+		for(i=0; i<msg->nnodes; i++) {
+			pack16(msg->tasks_to_launch[i], buffer);
+			pack16(msg->cpus_allocated[i], buffer);
+			pack32_array(msg->global_task_ids[i],
+				     (uint32_t) msg->tasks_to_launch[i],
+				     buffer);
+		}
+		pack16(msg->num_resp_port, buffer);
+		for(i = 0; i < msg->num_resp_port; i++)
+			pack16(msg->resp_port[i], buffer);
+		slurm_pack_slurm_addr(&msg->orig_addr, buffer);
+		packstr_array(msg->env, msg->envc, buffer);
+		packstr_array(msg->spank_job_env, msg->spank_job_env_size,
+			      buffer);
+		packstr(msg->cwd, buffer);
+		pack16(msg->cpu_bind_type, buffer);
+		packstr(msg->cpu_bind, buffer);
+		pack16(msg->mem_bind_type, buffer);
+		packstr(msg->mem_bind, buffer);
+		packstr_array(msg->argv, msg->argc, buffer);
+		pack16(msg->task_flags, buffer);
+		pack16(msg->multi_prog, buffer);
+		pack16(msg->user_managed_io, buffer);
+		if (msg->user_managed_io == 0) {
+			packstr(msg->ofname, buffer);
+			packstr(msg->efname, buffer);
+			packstr(msg->ifname, buffer);
+			pack8(msg->buffered_stdio, buffer);
+			pack8(msg->labelio, buffer);
+			pack16(msg->num_io_port, buffer);
+			for(i = 0; i < msg->num_io_port; i++)
+				pack16(msg->io_port[i], buffer);
+		}
+		packstr(msg->task_prolog, buffer);
+		packstr(msg->task_epilog, buffer);
+		pack16(msg->slurmd_debug, buffer);
+		switch_pack_jobinfo(msg->switch_job, buffer);
+		job_options_pack(msg->options, buffer);
+		packstr(msg->complete_nodelist, buffer);
+
+		pack8(msg->open_mode, buffer);
+		pack8(msg->pty, buffer);
+		pack16(msg->acctg_freq, buffer);
+		packstr(msg->ckpt_dir, buffer);
+		packstr(msg->restart_dir, buffer);
+		if (!(cluster_flags & CLUSTER_FLAG_BG)) {
+			/* If on a Blue Gene cluster do not send this to the
+			   slurmstepd, it will overwrite the environment that is
+			   already set up correctly for both the job
+			   and the step.  The slurmstep treats this
+			   select_jobinfo as if it were for the job
+			   instead of for the step.
+			*/
+			select_g_select_jobinfo_pack(msg->select_jobinfo,
+						     buffer,
+						     protocol_version);
+		}
+	} else if (protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
 		pack32(msg->job_id, buffer);
 		pack32(msg->job_step_id, buffer);
 		pack32(msg->ntasks, buffer);
@@ -5801,6 +7038,7 @@ _unpack_launch_tasks_request_msg(launch_tasks_request_msg_t **
 				 msg_ptr, Buf buffer,
 				 uint16_t protocol_version)
 {
+	uint32_t cluster_flags = slurmdb_setup_cluster_flags();
 	uint32_t uint32_tmp;
 	launch_tasks_request_msg_t *msg;
 	int i=0;
@@ -5809,7 +7047,101 @@ _unpack_launch_tasks_request_msg(launch_tasks_request_msg_t **
 	msg = xmalloc(sizeof(launch_tasks_request_msg_t));
 	*msg_ptr = msg;
 
-	if (protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_2_3_PROTOCOL_VERSION) {
+		safe_unpack32(&msg->job_id, buffer);
+		safe_unpack32(&msg->job_step_id, buffer);
+		safe_unpack32(&msg->ntasks, buffer);
+		safe_unpack32(&msg->uid, buffer);
+		safe_unpack32(&msg->gid, buffer);
+		safe_unpack32(&msg->job_mem_lim, buffer);
+		safe_unpack32(&msg->step_mem_lim, buffer);
+
+		safe_unpack32(&msg->nnodes, buffer);
+		safe_unpack16(&msg->cpus_per_task, buffer);
+		safe_unpack16(&msg->task_dist, buffer);
+
+		if (!(msg->cred = slurm_cred_unpack(buffer, protocol_version)))
+			goto unpack_error;
+		msg->tasks_to_launch = xmalloc(sizeof(uint16_t) * msg->nnodes);
+		msg->cpus_allocated = xmalloc(sizeof(uint16_t) * msg->nnodes);
+		msg->global_task_ids = xmalloc(sizeof(uint32_t *) *
+					       msg->nnodes);
+		for(i=0; i<msg->nnodes; i++) {
+			safe_unpack16(&msg->tasks_to_launch[i], buffer);
+			safe_unpack16(&msg->cpus_allocated[i], buffer);
+			safe_unpack32_array(&msg->global_task_ids[i],
+					    &uint32_tmp,
+					    buffer);
+			if (msg->tasks_to_launch[i] != (uint16_t) uint32_tmp)
+				goto unpack_error;
+		}
+		safe_unpack16(&msg->num_resp_port, buffer);
+		if (msg->num_resp_port > 0) {
+			msg->resp_port = xmalloc(sizeof(uint16_t) *
+						 msg->num_resp_port);
+			for (i = 0; i < msg->num_resp_port; i++)
+				safe_unpack16(&msg->resp_port[i], buffer);
+		}
+		slurm_unpack_slurm_addr_no_alloc(&msg->orig_addr, buffer);
+		safe_unpackstr_array(&msg->env, &msg->envc, buffer);
+		safe_unpackstr_array(&msg->spank_job_env,
+				     &msg->spank_job_env_size, buffer);
+		safe_unpackstr_xmalloc(&msg->cwd, &uint32_tmp, buffer);
+		safe_unpack16(&msg->cpu_bind_type, buffer);
+		safe_unpackstr_xmalloc(&msg->cpu_bind, &uint32_tmp, buffer);
+		safe_unpack16(&msg->mem_bind_type, buffer);
+		safe_unpackstr_xmalloc(&msg->mem_bind, &uint32_tmp, buffer);
+		safe_unpackstr_array(&msg->argv, &msg->argc, buffer);
+		safe_unpack16(&msg->task_flags, buffer);
+		safe_unpack16(&msg->multi_prog, buffer);
+		safe_unpack16(&msg->user_managed_io, buffer);
+		if (msg->user_managed_io == 0) {
+			safe_unpackstr_xmalloc(&msg->ofname, &uint32_tmp,
+					       buffer);
+			safe_unpackstr_xmalloc(&msg->efname, &uint32_tmp,
+					       buffer);
+			safe_unpackstr_xmalloc(&msg->ifname, &uint32_tmp,
+					       buffer);
+			safe_unpack8(&msg->buffered_stdio, buffer);
+			safe_unpack8(&msg->labelio, buffer);
+			safe_unpack16(&msg->num_io_port, buffer);
+			if (msg->num_io_port > 0) {
+				msg->io_port = xmalloc(sizeof(uint16_t) *
+						       msg->num_io_port);
+				for (i = 0; i < msg->num_io_port; i++)
+					safe_unpack16(&msg->io_port[i],
+						      buffer);
+			}
+		}
+		safe_unpackstr_xmalloc(&msg->task_prolog, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&msg->task_epilog, &uint32_tmp, buffer);
+		safe_unpack16(&msg->slurmd_debug, buffer);
+
+		switch_alloc_jobinfo(&msg->switch_job);
+		if (switch_unpack_jobinfo(msg->switch_job, buffer) < 0) {
+			error("switch_unpack_jobinfo: %m");
+			switch_free_jobinfo(msg->switch_job);
+			goto unpack_error;
+		}
+		msg->options = job_options_create();
+		if (job_options_unpack(msg->options, buffer) < 0) {
+			error("Unable to unpack extra job options: %m");
+			goto unpack_error;
+		}
+		safe_unpackstr_xmalloc(&msg->complete_nodelist, &uint32_tmp,
+				       buffer);
+
+		safe_unpack8(&msg->open_mode, buffer);
+		safe_unpack8(&msg->pty, buffer);
+		safe_unpack16(&msg->acctg_freq, buffer);
+		safe_unpackstr_xmalloc(&msg->ckpt_dir, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&msg->restart_dir, &uint32_tmp, buffer);
+		if (!(cluster_flags & CLUSTER_FLAG_BG)) {
+			select_g_select_jobinfo_unpack(&msg->select_jobinfo,
+						       buffer,
+						       protocol_version);
+		}
+	} else if (protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
 		safe_unpack32(&msg->job_id, buffer);
 		safe_unpack32(&msg->job_step_id, buffer);
 		safe_unpack32(&msg->ntasks, buffer);
@@ -6474,113 +7806,140 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
-/* NOTE: The matching pack functions are directly in the select/bluegene
- * plugin. The unpack functions can not be there since the plugin is
- * dependent upon libraries which do not exist on the BlueGene front-end
- * nodes. */
-static int _unpack_block_info_members(block_info_t *block_info, Buf buffer,
-				      uint16_t protocol_version)
+static int _unpack_block_job_info(block_job_info_t **job_info, Buf buffer,
+				  uint16_t protocol_version)
 {
+	block_job_info_t *job;
 	uint32_t uint32_tmp;
-	char *bp_inx_str = NULL;
-	uint32_t cluster_flags = slurmdb_setup_cluster_flags();
+	char *cnode_inx_str = NULL;
 
-	if(protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
-		safe_unpackstr_xmalloc(&block_info->bg_block_id,
-				       &uint32_tmp, buffer);
-		if(cluster_flags & CLUSTER_FLAG_BGL)
-			safe_unpackstr_xmalloc(&block_info->blrtsimage,
-					       &uint32_tmp, buffer);
-		safe_unpackstr_xmalloc(&bp_inx_str, &uint32_tmp, buffer);
-		if (bp_inx_str == NULL) {
-			block_info->bp_inx = bitfmt2int("");
-		} else {
-			block_info->bp_inx = bitfmt2int(bp_inx_str);
-			xfree(bp_inx_str);
-		}
-		safe_unpack16(&block_info->conn_type, buffer);
-		safe_unpackstr_xmalloc(&(block_info->ionodes),
-				       &uint32_tmp, buffer);
-		safe_unpackstr_xmalloc(&bp_inx_str, &uint32_tmp, buffer);
-		if (bp_inx_str == NULL) {
-			block_info->ionode_inx = bitfmt2int("");
-		} else {
-			block_info->ionode_inx = bitfmt2int(bp_inx_str);
-			xfree(bp_inx_str);
-		}
-		safe_unpack32(&block_info->job_running, buffer);
-		safe_unpackstr_xmalloc(&block_info->linuximage,
-				       &uint32_tmp, buffer);
-		safe_unpackstr_xmalloc(&block_info->mloaderimage,
-				       &uint32_tmp, buffer);
-		safe_unpackstr_xmalloc(&(block_info->nodes), &uint32_tmp,
-				       buffer);
-		safe_unpack32(&block_info->node_cnt, buffer);
-		if(cluster_flags & CLUSTER_FLAG_BGL)
-			safe_unpack16(&block_info->node_use, buffer);
-		safe_unpackstr_xmalloc(&block_info->owner_name,
-				       &uint32_tmp, buffer);
-		safe_unpackstr_xmalloc(&block_info->ramdiskimage,
-				       &uint32_tmp, buffer);
-		safe_unpackstr_xmalloc(&block_info->reason,
-				       &uint32_tmp, buffer);
-		safe_unpack16(&block_info->state, buffer);
-	} else if(protocol_version >= SLURM_2_1_PROTOCOL_VERSION) {
-		safe_unpackstr_xmalloc(&block_info->bg_block_id,
-				       &uint32_tmp, buffer);
-		if(cluster_flags & CLUSTER_FLAG_BGL)
-			safe_unpackstr_xmalloc(&block_info->blrtsimage,
-					       &uint32_tmp, buffer);
-		safe_unpackstr_xmalloc(&bp_inx_str, &uint32_tmp, buffer);
-		if (bp_inx_str == NULL) {
-			block_info->bp_inx = bitfmt2int("");
-		} else {
-			block_info->bp_inx = bitfmt2int(bp_inx_str);
-			xfree(bp_inx_str);
-		}
-		safe_unpack16(&block_info->conn_type, buffer);
-		safe_unpackstr_xmalloc(&(block_info->ionodes),
-				       &uint32_tmp, buffer);
-		safe_unpackstr_xmalloc(&bp_inx_str, &uint32_tmp, buffer);
-		if (bp_inx_str == NULL) {
-			block_info->ionode_inx = bitfmt2int("");
-		} else {
-			block_info->ionode_inx = bitfmt2int(bp_inx_str);
-			xfree(bp_inx_str);
-		}
-		safe_unpack32(&block_info->job_running, buffer);
-		safe_unpackstr_xmalloc(&block_info->linuximage,
-				       &uint32_tmp, buffer);
-		safe_unpackstr_xmalloc(&block_info->mloaderimage,
-				       &uint32_tmp, buffer);
-		safe_unpackstr_xmalloc(&(block_info->nodes), &uint32_tmp,
-				       buffer);
-		safe_unpack32(&block_info->node_cnt, buffer);
-		if(cluster_flags & CLUSTER_FLAG_BGL)
-			safe_unpack16(&block_info->node_use, buffer);
-		safe_unpackstr_xmalloc(&block_info->owner_name,
-				       &uint32_tmp, buffer);
-		safe_unpackstr_xmalloc(&block_info->ramdiskimage,
-				       &uint32_tmp, buffer);
-		safe_unpack16(&block_info->state, buffer);
+	job = xmalloc(sizeof(block_job_info_t));
+	*job_info = job;
+
+	safe_unpackstr_xmalloc(&job->cnodes, &uint32_tmp, buffer);
+	safe_unpackstr_xmalloc(&cnode_inx_str, &uint32_tmp, buffer);
+	if (cnode_inx_str == NULL) {
+		job->cnode_inx = bitfmt2int("");
+	} else {
+		job->cnode_inx = bitfmt2int(cnode_inx_str);
+		xfree(cnode_inx_str);
 	}
+	safe_unpack32(&job->job_id, buffer);
+	safe_unpack32(&job->user_id, buffer);
+	safe_unpackstr_xmalloc(&job->user_name, &uint32_tmp, buffer);
+
 	return SLURM_SUCCESS;
 
-unpack_error:
-	error("_unpack_node_info: error unpacking here");
-	slurm_free_block_info_members(block_info);
-	return SLURM_ERROR;
-}
+unpack_error:
+	slurm_free_block_job_info(job);
+	*job_info = NULL;
+	return SLURM_ERROR;
+}
+
+/* NOTE: There is a matching pack function directly in the select/bluegene
+ * plugin dealing with the bg_record_t structure there.  If anything
+ * changes here please update that as well.
+ */
+static void _pack_block_info_msg(block_info_t *block_info, Buf buffer,
+				 uint16_t protocol_version)
+{
+	uint32_t cluster_flags = slurmdb_setup_cluster_flags();
+	uint32_t cluster_dims = (uint32_t)slurmdb_setup_cluster_dims();
+	int dim, count = NO_VAL;
+	ListIterator itr;
+	block_job_info_t *job;
+
+	if (protocol_version >= SLURM_2_3_PROTOCOL_VERSION) {
+		if (!block_info) {
+			packnull(buffer);
+			packnull(buffer);
+			packnull(buffer);
+
+			pack32(1, buffer);
+			pack16((uint16_t)NO_VAL, buffer);
+
+			packnull(buffer);
+			packnull(buffer);
+
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+
+			packnull(buffer);
+			packnull(buffer);
+			packnull(buffer);
+			packnull(buffer);
+			pack32(NO_VAL, buffer);
+			pack16((uint16_t)NO_VAL, buffer);
+			packnull(buffer);
+			packnull(buffer);
+			packnull(buffer);
+			pack16((uint16_t)NO_VAL, buffer);
+			packnull(buffer);
+			return;
+		}
+
+		packstr(block_info->bg_block_id, buffer);
+		packstr(block_info->blrtsimage, buffer);
+
+		if (block_info->mp_inx) {
+			char *bitfmt = inx2bitfmt(block_info->mp_inx);
+			packstr(bitfmt, buffer);
+			xfree(bitfmt);
+		} else
+			packnull(buffer);
+
+		pack32(cluster_dims, buffer);
+		for (dim = 0; dim < cluster_dims; dim++)
+			pack16(block_info->conn_type[dim], buffer);
+
+		packstr(block_info->ionode_str, buffer);
+
+		if (block_info->ionode_inx) {
+			char *bitfmt =
+				inx2bitfmt(block_info->ionode_inx);
+			packstr(bitfmt, buffer);
+			xfree(bitfmt);
+		} else
+			packnull(buffer);
+
+		if (block_info->job_list)
+			count = list_count(block_info->job_list);
 
-static void _pack_block_info_msg(block_info_t *block_info, Buf buffer,
-				 uint16_t protocol_version)
-{
-	uint32_t cluster_flags = slurmdb_setup_cluster_flags();
-	if(protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
-		if(!block_info) {
+		pack32(count, buffer);
+		if (count && count != NO_VAL) {
+			itr = list_iterator_create(block_info->job_list);
+			while ((job = list_next(itr))) {
+				slurm_pack_block_job_info(job, buffer,
+							  protocol_version);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		pack32(block_info->job_running, buffer);
+
+		packstr(block_info->linuximage, buffer);
+		packstr(block_info->mloaderimage, buffer);
+		packstr(block_info->mp_str, buffer);
+		packstr(block_info->mp_used_str, buffer);
+		pack32(block_info->cnode_cnt, buffer);
+		pack16(block_info->node_use, buffer);
+		packstr(block_info->owner_name, buffer);
+		packstr(block_info->ramdiskimage, buffer);
+		packstr(block_info->reason, buffer);
+		pack16(block_info->state, buffer);
+		if (block_info->mp_used_inx) {
+			char *bitfmt = inx2bitfmt(block_info->mp_used_inx);
+			packstr(bitfmt, buffer);
+			xfree(bitfmt);
+		} else
 			packnull(buffer);
-			if(cluster_flags & CLUSTER_FLAG_BGL)
+	} else if (protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
+		if (!block_info) {
+			packnull(buffer);
+			if (cluster_flags & CLUSTER_FLAG_BGL)
 				packnull(buffer);
+			packnull(buffer);
 			pack16((uint16_t)NO_VAL, buffer);
 			packnull(buffer);
 
@@ -6606,16 +7965,16 @@ static void _pack_block_info_msg(block_info_t *block_info, Buf buffer,
 		if(cluster_flags & CLUSTER_FLAG_BGL)
 			packstr(block_info->blrtsimage, buffer);
 
-		if(block_info->bp_inx) {
-			char *bitfmt = inx2bitfmt(block_info->bp_inx);
+		if(block_info->mp_inx) {
+			char *bitfmt = inx2bitfmt(block_info->mp_inx);
 			packstr(bitfmt, buffer);
 			xfree(bitfmt);
 		} else
 			packnull(buffer);
 
-		pack16(block_info->conn_type, buffer);
+		pack16(block_info->conn_type[0], buffer);
 
-		packstr(block_info->ionodes, buffer);
+		packstr(block_info->ionode_str, buffer);
 
 		if(block_info->ionode_inx) {
 			char *bitfmt =
@@ -6629,8 +7988,8 @@ static void _pack_block_info_msg(block_info_t *block_info, Buf buffer,
 
 		packstr(block_info->linuximage, buffer);
 		packstr(block_info->mloaderimage, buffer);
-		packstr(block_info->nodes, buffer);
-		pack32(block_info->node_cnt, buffer);
+		packstr(block_info->mp_str, buffer);
+		pack32(block_info->cnode_cnt, buffer);
 		if(cluster_flags & CLUSTER_FLAG_BGL)
 			pack16(block_info->node_use, buffer);
 		packstr(block_info->owner_name, buffer);
@@ -6666,16 +8025,16 @@ static void _pack_block_info_msg(block_info_t *block_info, Buf buffer,
 		if(cluster_flags & CLUSTER_FLAG_BGL)
 			packstr(block_info->blrtsimage, buffer);
 
-		if(block_info->bp_inx) {
-			char *bitfmt = inx2bitfmt(block_info->bp_inx);
+		if(block_info->mp_inx) {
+			char *bitfmt = inx2bitfmt(block_info->mp_inx);
 			packstr(bitfmt, buffer);
 			xfree(bitfmt);
 		} else
 			packnull(buffer);
 
-		pack16(block_info->conn_type, buffer);
+		pack16(block_info->conn_type[0], buffer);
 
-		packstr(block_info->ionodes, buffer);
+		packstr(block_info->ionode_str, buffer);
 
 		if(block_info->ionode_inx) {
 			char *bitfmt =
@@ -6689,8 +8048,8 @@ static void _pack_block_info_msg(block_info_t *block_info, Buf buffer,
 
 		packstr(block_info->linuximage, buffer);
 		packstr(block_info->mloaderimage, buffer);
-		packstr(block_info->nodes, buffer);
-		pack32(block_info->node_cnt, buffer);
+		packstr(block_info->mp_str, buffer);
+		pack32(block_info->cnode_cnt, buffer);
 		if(cluster_flags & CLUSTER_FLAG_BGL)
 			pack16(block_info->node_use, buffer);
 		packstr(block_info->owner_name, buffer);
@@ -6699,6 +8058,198 @@ static void _pack_block_info_msg(block_info_t *block_info, Buf buffer,
 	}
 }
 
+extern void slurm_pack_block_job_info(block_job_info_t *block_job_info,
+				     Buf buffer, uint16_t protocol_version)
+{
+	if (!block_job_info) {
+		packnull(buffer);
+		packnull(buffer);
+		pack32(0, buffer);
+		pack32(0, buffer);
+		packnull(buffer);
+		return;
+	}
+
+	packstr(block_job_info->cnodes, buffer);
+	if (block_job_info->cnode_inx) {
+		char *bitfmt = inx2bitfmt(block_job_info->cnode_inx);
+		packstr(bitfmt, buffer);
+		xfree(bitfmt);
+	} else
+		packnull(buffer);
+	pack32(block_job_info->job_id, buffer);
+	pack32(block_job_info->user_id, buffer);
+	packstr(block_job_info->user_name, buffer);
+}
+
+extern int slurm_unpack_block_info_members(block_info_t *block_info, Buf buffer,
+					   uint16_t protocol_version)
+{
+	uint32_t uint32_tmp;
+	char *mp_inx_str = NULL;
+	uint32_t cluster_flags = slurmdb_setup_cluster_flags();
+	int i;
+	uint32_t count;
+	block_job_info_t *job = NULL;
+
+	memset(block_info, 0, sizeof(block_info_t));
+
+	if (protocol_version >= SLURM_2_3_PROTOCOL_VERSION) {
+		safe_unpackstr_xmalloc(&block_info->bg_block_id,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&block_info->blrtsimage,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&mp_inx_str, &uint32_tmp, buffer);
+		if (mp_inx_str == NULL) {
+			block_info->mp_inx = bitfmt2int("");
+		} else {
+			block_info->mp_inx = bitfmt2int(mp_inx_str);
+			xfree(mp_inx_str);
+		}
+
+		safe_unpack32(&count, buffer);
+		if (count > HIGHEST_DIMENSIONS) {
+			error("slurm_unpack_block_info_members: count of "
+			      "system is %d but we can only handle %d",
+			      count, HIGHEST_DIMENSIONS);
+			goto unpack_error;
+		}
+		for (i=0; i<count; i++)
+			safe_unpack16(&block_info->conn_type[i], buffer);
+		safe_unpackstr_xmalloc(&(block_info->ionode_str),
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&mp_inx_str, &uint32_tmp, buffer);
+		if (mp_inx_str == NULL) {
+			block_info->ionode_inx = bitfmt2int("");
+		} else {
+			block_info->ionode_inx = bitfmt2int(mp_inx_str);
+			xfree(mp_inx_str);
+		}
+		safe_unpack32(&count, buffer);
+		if (count != NO_VAL) {
+			block_info->job_list =
+				list_create(slurm_free_block_job_info);
+			for (i=0; i<count; i++) {
+				if (_unpack_block_job_info(&job, buffer,
+							   protocol_version)
+				    == SLURM_ERROR)
+					goto unpack_error;
+				list_append(block_info->job_list, job);
+			}
+		}
+
+		safe_unpack32(&block_info->job_running, buffer);
+		safe_unpackstr_xmalloc(&block_info->linuximage,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&block_info->mloaderimage,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&(block_info->mp_str), &uint32_tmp,
+				       buffer);
+		safe_unpackstr_xmalloc(&(block_info->mp_used_str), &uint32_tmp,
+				       buffer);
+		safe_unpack32(&block_info->cnode_cnt, buffer);
+		safe_unpack16(&block_info->node_use, buffer);
+		safe_unpackstr_xmalloc(&block_info->owner_name,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&block_info->ramdiskimage,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&block_info->reason,
+				       &uint32_tmp, buffer);
+		safe_unpack16(&block_info->state, buffer);
+		safe_unpackstr_xmalloc(&mp_inx_str, &uint32_tmp, buffer);
+		if (mp_inx_str == NULL) {
+			block_info->mp_used_inx = bitfmt2int("");
+		} else {
+			block_info->mp_used_inx = bitfmt2int(mp_inx_str);
+			xfree(mp_inx_str);
+		}
+	} else if (protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
+		safe_unpackstr_xmalloc(&block_info->bg_block_id,
+				       &uint32_tmp, buffer);
+		if (cluster_flags & CLUSTER_FLAG_BGL)
+			safe_unpackstr_xmalloc(&block_info->blrtsimage,
+					       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&mp_inx_str, &uint32_tmp, buffer);
+		if (mp_inx_str == NULL) {
+			block_info->mp_inx = bitfmt2int("");
+		} else {
+			block_info->mp_inx = bitfmt2int(mp_inx_str);
+			xfree(mp_inx_str);
+		}
+		safe_unpack16(&block_info->conn_type[0], buffer);
+		safe_unpackstr_xmalloc(&(block_info->ionode_str),
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&mp_inx_str, &uint32_tmp, buffer);
+		if (mp_inx_str == NULL) {
+			block_info->ionode_inx = bitfmt2int("");
+		} else {
+			block_info->ionode_inx = bitfmt2int(mp_inx_str);
+			xfree(mp_inx_str);
+		}
+		safe_unpack32(&block_info->job_running, buffer);
+		safe_unpackstr_xmalloc(&block_info->linuximage,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&block_info->mloaderimage,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&(block_info->mp_str), &uint32_tmp,
+				       buffer);
+		safe_unpack32(&block_info->cnode_cnt, buffer);
+		if (cluster_flags & CLUSTER_FLAG_BGL)
+			safe_unpack16(&block_info->node_use, buffer);
+		safe_unpackstr_xmalloc(&block_info->owner_name,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&block_info->ramdiskimage,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&block_info->reason,
+				       &uint32_tmp, buffer);
+		safe_unpack16(&block_info->state, buffer);
+	} else if (protocol_version >= SLURM_2_1_PROTOCOL_VERSION) {
+		safe_unpackstr_xmalloc(&block_info->bg_block_id,
+				       &uint32_tmp, buffer);
+		if(cluster_flags & CLUSTER_FLAG_BGL)
+			safe_unpackstr_xmalloc(&block_info->blrtsimage,
+					       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&mp_inx_str, &uint32_tmp, buffer);
+		if (mp_inx_str == NULL) {
+			block_info->mp_inx = bitfmt2int("");
+		} else {
+			block_info->mp_inx = bitfmt2int(mp_inx_str);
+			xfree(mp_inx_str);
+		}
+		safe_unpack16(&block_info->conn_type[0], buffer);
+		safe_unpackstr_xmalloc(&(block_info->ionode_str),
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&mp_inx_str, &uint32_tmp, buffer);
+		if (mp_inx_str == NULL) {
+			block_info->ionode_inx = bitfmt2int("");
+		} else {
+			block_info->ionode_inx = bitfmt2int(mp_inx_str);
+			xfree(mp_inx_str);
+		}
+		safe_unpack32(&block_info->job_running, buffer);
+		safe_unpackstr_xmalloc(&block_info->linuximage,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&block_info->mloaderimage,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&(block_info->mp_str), &uint32_tmp,
+				       buffer);
+		safe_unpack32(&block_info->cnode_cnt, buffer);
+		if(cluster_flags & CLUSTER_FLAG_BGL)
+			safe_unpack16(&block_info->node_use, buffer);
+		safe_unpackstr_xmalloc(&block_info->owner_name,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&block_info->ramdiskimage,
+				       &uint32_tmp, buffer);
+		safe_unpack16(&block_info->state, buffer);
+	}
+	return SLURM_SUCCESS;
+
+unpack_error:
+	error("slurm_unpack_block_info_members: error unpacking here");
+	slurm_free_block_info_members(block_info);
+	return SLURM_ERROR;
+}
+
 extern int slurm_unpack_block_info_msg(
 	block_info_msg_t **block_info_msg_pptr, Buf buffer,
 	uint16_t protocol_version)
@@ -6713,7 +8264,7 @@ extern int slurm_unpack_block_info_msg(
 		buf->block_array = xmalloc(sizeof(block_info_t) *
 					   buf->record_count);
 		for(i=0; i<buf->record_count; i++) {
-			if (_unpack_block_info_members(
+			if (slurm_unpack_block_info_members(
 				    &(buf->block_array[i]), buffer,
 				    protocol_version))
 				goto unpack_error;
@@ -6728,13 +8279,14 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
-extern int _unpack_block_info(block_info_t **block_info, Buf buffer,
+static int _unpack_block_info(block_info_t **block_info, Buf buffer,
 			      uint16_t protocol_version)
 {
         int rc = SLURM_SUCCESS;
 	block_info_t *bg_rec = xmalloc(sizeof(block_info_t));
 
-	if((rc = _unpack_block_info_members(bg_rec, buffer, protocol_version))
+	if((rc = slurm_unpack_block_info_members(
+		    bg_rec, buffer, protocol_version))
 	   != SLURM_SUCCESS)
 		xfree(bg_rec);
 	else
@@ -6800,6 +8352,91 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
+static void
+_pack_front_end_info_request_msg(front_end_info_request_msg_t * msg,
+				 Buf buffer, uint16_t protocol_version)
+{
+	pack_time(msg->last_update, buffer);
+}
+
+static int
+_unpack_front_end_info_request_msg(front_end_info_request_msg_t ** msg,
+				   Buf buffer, uint16_t protocol_version)
+{
+	front_end_info_request_msg_t* front_end_info;
+
+	front_end_info = xmalloc(sizeof(front_end_info_request_msg_t));
+	*msg = front_end_info;
+
+	safe_unpack_time(&front_end_info->last_update, buffer);
+	return SLURM_SUCCESS;
+
+unpack_error:
+	slurm_free_front_end_info_request_msg(front_end_info);
+	*msg = NULL;
+	return SLURM_ERROR;
+}
+
+static int
+_unpack_front_end_info_msg(front_end_info_msg_t ** msg, Buf buffer,
+			   uint16_t protocol_version)
+{
+	int i;
+	front_end_info_t *front_end = NULL;
+
+	xassert(msg != NULL);
+	*msg = xmalloc(sizeof(front_end_info_msg_t));
+
+	/* load buffer's header (data structure version and time) */
+	if (protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
+		safe_unpack32(&((*msg)->record_count), buffer);
+		safe_unpack_time(&((*msg)->last_update), buffer);
+		front_end = xmalloc(sizeof(front_end_info_t) *
+				    (*msg)->record_count);
+		(*msg)->front_end_array = front_end;
+
+		/* load individual front_end info */
+		for (i = 0; i < (*msg)->record_count; i++) {
+			if (_unpack_front_end_info_members(&front_end[i],
+							   buffer,
+							   protocol_version))
+				goto unpack_error;
+		}
+	}
+	return SLURM_SUCCESS;
+
+unpack_error:
+	slurm_free_front_end_info_msg(*msg);
+	*msg = NULL;
+	return SLURM_ERROR;
+}
+
+static int
+_unpack_front_end_info_members(front_end_info_t *front_end, Buf buffer,
+			       uint16_t protocol_version)
+{
+	uint32_t uint32_tmp;
+
+	xassert(front_end != NULL);
+
+	if (protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
+		safe_unpack_time(&front_end->boot_time, buffer);
+		safe_unpackstr_xmalloc(&front_end->name, &uint32_tmp, buffer);
+		safe_unpack16(&front_end->node_state, buffer);
+
+		safe_unpackstr_xmalloc(&front_end->reason, &uint32_tmp, buffer);
+		safe_unpack_time(&front_end->reason_time, buffer);
+		safe_unpack32(&front_end->reason_uid, buffer);
+
+		safe_unpack_time(&front_end->slurmd_start_time, buffer);
+	}
+	return SLURM_SUCCESS;
+
+unpack_error:
+	slurm_free_front_end_info_members(front_end);
+	return SLURM_ERROR;
+}
+
 static void
 _pack_part_info_request_msg(part_info_request_msg_t * msg, Buf buffer,
 			    uint16_t protocol_version)
@@ -6945,6 +8582,7 @@ _pack_batch_job_launch_msg(batch_job_launch_msg_t * msg, Buf buffer,
 	pack32(msg->uid, buffer);
 	pack32(msg->gid, buffer);
 	pack32(msg->ntasks, buffer);
+	pack32(msg->pn_min_memory, buffer);
 
 	pack8(msg->open_mode, buffer);
 	pack8(msg->overcommit, buffer);
@@ -7002,6 +8640,7 @@ _unpack_batch_job_launch_msg(batch_job_launch_msg_t ** msg, Buf buffer,
 	safe_unpack32(&launch_msg_ptr->uid, buffer);
 	safe_unpack32(&launch_msg_ptr->gid, buffer);
 	safe_unpack32(&launch_msg_ptr->ntasks, buffer);
+	safe_unpack32(&launch_msg_ptr->pn_min_memory, buffer);
 
 	safe_unpack8(&launch_msg_ptr->open_mode, buffer);
 	safe_unpack8(&launch_msg_ptr->overcommit, buffer);
@@ -7671,7 +9310,7 @@ unpack_error:
 static void _pack_kvs_host_rec(struct kvs_hosts *msg_ptr, Buf buffer,
 			       uint16_t protocol_version)
 {
-	pack16(msg_ptr->task_id, buffer);
+	pack32(msg_ptr->task_id, buffer);
 	pack16(msg_ptr->port, buffer);
 	packstr(msg_ptr->hostname, buffer);
 }
@@ -7681,7 +9320,7 @@ static int _unpack_kvs_host_rec(struct kvs_hosts *msg_ptr, Buf buffer,
 {
 	uint32_t uint32_tmp;
 
-	safe_unpack16(&msg_ptr->task_id, buffer);
+	safe_unpack32(&msg_ptr->task_id, buffer);
 	safe_unpack16(&msg_ptr->port, buffer);
 	safe_unpackstr_xmalloc(&msg_ptr->hostname, &uint32_tmp, buffer);
 	return SLURM_SUCCESS;
@@ -7818,8 +9457,8 @@ unpack_error:
 static void _pack_kvs_get(kvs_get_msg_t *msg_ptr, Buf buffer,
 			  uint16_t protocol_version)
 {
-	pack16((uint16_t)msg_ptr->task_id, buffer);
-	pack16((uint16_t)msg_ptr->size, buffer);
+	pack32((uint32_t)msg_ptr->task_id, buffer);
+	pack32((uint32_t)msg_ptr->size, buffer);
 	pack16((uint16_t)msg_ptr->port, buffer);
 	packstr(msg_ptr->hostname, buffer);
 }
@@ -7832,8 +9471,8 @@ static int  _unpack_kvs_get(kvs_get_msg_t **msg_ptr, Buf buffer,
 
 	msg = xmalloc(sizeof(struct kvs_get_msg));
 	*msg_ptr = msg;
-	safe_unpack16(&msg->task_id, buffer);
-	safe_unpack16(&msg->size, buffer);
+	safe_unpack32(&msg->task_id, buffer);
+	safe_unpack32(&msg->size, buffer);
 	safe_unpack16(&msg->port, buffer);
 	safe_unpackstr_xmalloc(&msg->hostname, &uint32_tmp, buffer);
 	return SLURM_SUCCESS;
@@ -7990,6 +9629,33 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
+static void
+_pack_set_debug_flags_msg(set_debug_flags_msg_t * msg, Buf buffer,
+			  uint16_t protocol_version)
+{
+	pack32(msg->debug_flags_minus, buffer);
+	pack32(msg->debug_flags_plus,  buffer);
+}
+
+static int
+_unpack_set_debug_flags_msg(set_debug_flags_msg_t ** msg_ptr, Buf buffer,
+			    uint16_t protocol_version)
+{
+	set_debug_flags_msg_t *msg;
+
+	msg = xmalloc(sizeof(set_debug_flags_msg_t));
+	*msg_ptr = msg;
+
+	safe_unpack32(&msg->debug_flags_minus, buffer);
+	safe_unpack32(&msg->debug_flags_plus,  buffer);
+	return SLURM_SUCCESS;
+
+unpack_error:
+	slurm_free_set_debug_flags_msg(msg);
+	*msg_ptr = NULL;
+	return SLURM_ERROR;
+}
+
 static void
 _pack_set_debug_level_msg(set_debug_level_msg_t * msg, Buf buffer,
 			  uint16_t protocol_version)
@@ -8159,7 +9825,7 @@ static int _unpack_accounting_update_msg(accounting_update_msg_t **msg,
 			list_append(msg_ptr->update_list, rec);
 		}
 	} else {
-		/* Before 2.2 the only happened in the slurmctld, now
+		/* Before 2.2 this only happened in the slurmctld, now
 		   it can happen else where, so this should work for <
 		   2.2 and above will catch everything from now on.
 		*/
@@ -8230,6 +9896,60 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
+static void _pack_spank_env_request_msg(spank_env_request_msg_t * msg,
+					Buf buffer, uint16_t protocol_version)
+{
+	xassert(msg != NULL);
+
+	pack32(msg->job_id, buffer);
+}
+
+static int _unpack_spank_env_request_msg(spank_env_request_msg_t ** msg_ptr,
+					 Buf buffer, uint16_t protocol_version)
+{
+	spank_env_request_msg_t *msg;
+
+	xassert(msg_ptr != NULL);
+	msg = xmalloc(sizeof(spank_env_request_msg_t));
+	*msg_ptr = msg;
+
+	safe_unpack32(&msg->job_id, buffer);
+	return SLURM_SUCCESS;
+
+unpack_error:
+	slurm_free_spank_env_request_msg(msg);
+	*msg_ptr = NULL;
+	return SLURM_ERROR;
+}
+
+static void _pack_spank_env_responce_msg(spank_env_responce_msg_t * msg,
+					 Buf buffer, uint16_t protocol_version)
+{
+	xassert(msg != NULL);
+
+	packstr_array(msg->spank_job_env, msg->spank_job_env_size, buffer);
+}
+
+static int _unpack_spank_env_responce_msg(spank_env_responce_msg_t ** msg_ptr,
+					  Buf buffer, uint16_t protocol_version)
+{
+	spank_env_responce_msg_t *msg;
+
+	xassert(msg_ptr != NULL);
+	msg = xmalloc(sizeof(spank_env_responce_msg_t));
+	*msg_ptr = msg;
+
+	safe_unpackstr_array(&msg->spank_job_env, &msg->spank_job_env_size,
+			     buffer);
+	return SLURM_SUCCESS;
+
+unpack_error:
+	slurm_free_spank_env_responce_msg(msg);
+	*msg_ptr = NULL;
+	return SLURM_ERROR;
+}
+
+
 /* template
    void pack_ ( * msg , Buf buffer )
    {
diff --git a/src/common/slurm_protocol_pack.h b/src/common/slurm_protocol_pack.h
index 9aa576ee6..f45c237bc 100644
--- a/src/common/slurm_protocol_pack.h
+++ b/src/common/slurm_protocol_pack.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -61,7 +61,7 @@
 /****************************/
 
 /* pack_header
- * packs a slurm protocol header that proceeds every slurm message
+ * packs a slurm protocol header that precedes every slurm message
  * IN header - the header structure to pack
  * IN/OUT buffer - destination of the pack, contains pointers that are
  *			automatically updated
@@ -69,7 +69,7 @@
 extern void pack_header ( header_t  * header , Buf buffer );
 
 /* unpack_header
- * unpacks a slurm protocol header that proceeds every slurm message
+ * unpacks a slurm protocol header that precedes every slurm message
  * OUT header - the header structure to unpack
  * IN/OUT buffer - source of the unpack data, contains pointers that are
  *			automatically updated
@@ -144,6 +144,10 @@ extern void pack_multi_core_data (multi_core_data_t *multi_core, Buf buffer,
 				  uint16_t protocol_version);
 extern int unpack_multi_core_data (multi_core_data_t **multi_core, Buf buffer,
 				   uint16_t protocol_version);
+extern void slurm_pack_block_job_info(block_job_info_t *block_job_info,
+				      Buf buffer, uint16_t protocol_version);
+extern int slurm_unpack_block_info_members(block_info_t *block_info, Buf buffer,
+					   uint16_t protocol_version);
 extern int slurm_unpack_block_info_msg(
 	block_info_msg_t **block_info_msg_pptr, Buf buffer,
 	uint16_t protocol_version);
diff --git a/src/common/slurm_protocol_socket_common.h b/src/common/slurm_protocol_socket_common.h
index 03815ade4..15435293c 100644
--- a/src/common/slurm_protocol_socket_common.h
+++ b/src/common/slurm_protocol_socket_common.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/common/slurm_protocol_socket_implementation.c b/src/common/slurm_protocol_socket_implementation.c
index af66def65..3a48370f7 100644
--- a/src/common/slurm_protocol_socket_implementation.c
+++ b/src/common/slurm_protocol_socket_implementation.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -58,7 +58,6 @@
 #include <stdlib.h>
 #include <arpa/inet.h>
 #include <sys/param.h>
-#include <slurm/slurm_errno.h>
 #include <stdlib.h>
 
 #if HAVE_SYS_SOCKET_H
@@ -69,6 +68,7 @@
 #  endif
 #endif
 
+#include "slurm/slurm_errno.h"
 #include "src/common/slurm_protocol_api.h"
 #include "src/common/slurm_protocol_interface.h"
 #include "src/common/slurm_protocol_defs.h"
@@ -88,7 +88,7 @@
  *  Maximum message size. Messages larger than this value (in bytes)
  *  will not be received.
  */
-#define MAX_MSG_SIZE     (16*1024*1024)
+#define MAX_MSG_SIZE     (128*1024*1024)
 
 /****************************************************************
  * MIDDLE LAYER MSG FUNCTIONS
diff --git a/src/common/slurm_protocol_util.c b/src/common/slurm_protocol_util.c
index 7d75224c8..2d0361770 100644
--- a/src/common/slurm_protocol_util.c
+++ b/src/common/slurm_protocol_util.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -48,6 +48,24 @@
 #include "src/common/xmalloc.h"
 #include "src/slurmdbd/read_config.h"
 
+uint16_t _get_slurm_version(uint32_t rpc_version)
+{
+	uint16_t version;
+
+	if (rpc_version >= 9)
+		version = SLURM_PROTOCOL_VERSION;
+	else if (rpc_version >= 8)
+		version = SLURM_2_2_PROTOCOL_VERSION;
+	else if (rpc_version >= 6)
+		version = SLURM_2_1_PROTOCOL_VERSION;
+	else if (rpc_version >= 5)
+		version = SLURM_2_0_PROTOCOL_VERSION;
+	else
+		version = SLURM_1_3_PROTOCOL_VERSION;
+
+	return version;
+}
+
 /*
  * check_header_version checks to see that the specified header was sent
  * from a node running the same version of the protocol as the current node
@@ -56,13 +74,18 @@
  */
 int check_header_version(header_t * header)
 {
+	uint16_t check_version = SLURM_PROTOCOL_VERSION;
+
+	if (working_cluster_rec)
+		check_version = _get_slurm_version(
+			working_cluster_rec->rpc_version);
+
 	if (slurmdbd_conf) {
-		if (header->version != SLURM_PROTOCOL_VERSION
-		    && header->version != SLURM_2_1_PROTOCOL_VERSION
-		    && header->version != SLURM_2_0_PROTOCOL_VERSION
-		    && header->version != SLURM_1_3_PROTOCOL_VERSION)
+		if ((header->version != SLURM_PROTOCOL_VERSION)     &&
+		    (header->version != SLURM_2_2_PROTOCOL_VERSION) &&
+		    (header->version != SLURM_2_1_PROTOCOL_VERSION))
 			slurm_seterrno_ret(SLURM_PROTOCOL_VERSION_ERROR);
-	} else if (header->version != SLURM_PROTOCOL_VERSION) {
+	} else if (header->version != check_version) {
 		/* Starting with 2.2 we will handle previous versions
 		 * of SLURM for some calls */
 		switch(header->msg_type) {
@@ -76,31 +99,52 @@ int check_header_version(header_t * header)
 		case REQUEST_COMPLETE_JOB_ALLOCATION:
 		case REQUEST_CREATE_PARTITION:
 		case REQUEST_CREATE_RESERVATION:
+		case REQUEST_DELETE_PARTITION:
+		case REQUEST_DELETE_RESERVATION:
+		case REQUEST_FRONT_END_INFO:
+		case REQUEST_JOB_ALLOCATION_INFO:
+		case REQUEST_JOB_ALLOCATION_INFO_LITE:
 		case REQUEST_JOB_END_TIME:
 		case REQUEST_JOB_INFO:
 		case REQUEST_JOB_INFO_SINGLE:
+		case REQUEST_JOB_NOTIFY:
 		case REQUEST_JOB_READY:
 		case REQUEST_JOB_REQUEUE:
 		case REQUEST_JOB_STEP_INFO:
 		case REQUEST_JOB_WILL_RUN:
 		case REQUEST_NODE_INFO:
 		case REQUEST_PARTITION_INFO:
+		case REQUEST_PING:
 		case REQUEST_PRIORITY_FACTORS:
 		case REQUEST_RECONFIGURE:
 		case REQUEST_RESERVATION_INFO:
+		case REQUEST_SET_DEBUG_FLAGS:
 		case REQUEST_SET_DEBUG_LEVEL:
+		case REQUEST_SET_SCHEDLOG_LEVEL:
 		case REQUEST_SHARE_INFO:
 		case REQUEST_SHUTDOWN:
 		case REQUEST_SHUTDOWN_IMMEDIATE:
+		case REQUEST_SPANK_ENVIRONMENT:
 		case REQUEST_STEP_COMPLETE:		/* From slurmstepd */
 		case REQUEST_STEP_LAYOUT:
 		case REQUEST_SUBMIT_BATCH_JOB:
 		case REQUEST_SUSPEND:
+		case REQUEST_TERMINATE_JOB:
+		case REQUEST_TERMINATE_TASKS:
 		case REQUEST_TOPO_INFO:
+		case REQUEST_TRIGGER_CLEAR:
+		case REQUEST_TRIGGER_GET:
+		case REQUEST_TRIGGER_PULL:
+		case REQUEST_TRIGGER_SET:
 		case REQUEST_UPDATE_BLOCK:
+		case REQUEST_UPDATE_FRONT_END:
 		case REQUEST_UPDATE_JOB:
+		case REQUEST_UPDATE_JOB_STEP:
+		case REQUEST_UPDATE_NODE:
 		case REQUEST_UPDATE_PARTITION:
-			if (header->version == SLURM_2_1_PROTOCOL_VERSION)
+		case REQUEST_UPDATE_RESERVATION:
+			if ((header->version == SLURM_2_2_PROTOCOL_VERSION)
+			    || (header->version == SLURM_2_1_PROTOCOL_VERSION))
 				break;
 		default:
 			slurm_seterrno_ret(SLURM_PROTOCOL_VERSION_ERROR);
@@ -125,18 +169,14 @@ void init_header(header_t *header, slurm_msg_t *msg, uint16_t flags)
 	   protocol version changes. */
 	if (msg->protocol_version != (uint16_t)NO_VAL)
 		header->version = msg->protocol_version;
+	else if (working_cluster_rec)
+		header->version = _get_slurm_version(
+			working_cluster_rec->rpc_version);
 	else if ((msg->msg_type == ACCOUNTING_UPDATE_MSG) ||
 	         (msg->msg_type == ACCOUNTING_FIRST_REG)) {
 		uint32_t rpc_version =
 			((accounting_update_msg_t *)msg->data)->rpc_version;
-		if (rpc_version >= 8)
-			header->version = SLURM_PROTOCOL_VERSION;
-		else if (rpc_version >= 6)
-			header->version = SLURM_2_1_PROTOCOL_VERSION;
-		else if (rpc_version >= 5)
-			header->version = SLURM_2_0_PROTOCOL_VERSION;
-		else
-			header->version = SLURM_1_3_PROTOCOL_VERSION;
+		header->version = _get_slurm_version(rpc_version);
 	} else
 		header->version = SLURM_PROTOCOL_VERSION;
 
diff --git a/src/common/slurm_protocol_util.h b/src/common/slurm_protocol_util.h
index 6027fc00a..b535cc132 100644
--- a/src/common/slurm_protocol_util.h
+++ b/src/common/slurm_protocol_util.h
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/common/slurm_resource_info.c b/src/common/slurm_resource_info.c
index 6029c2b8b..0c74c66ea 100644
--- a/src/common/slurm_resource_info.c
+++ b/src/common/slurm_resource_info.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -45,7 +45,8 @@
 
 #include <ctype.h>
 #include <sys/types.h>
-#include <slurm/slurm.h>
+
+#include "slurm/slurm.h"
 
 #include "src/common/log.h"
 #include "src/common/slurm_protocol_api.h"
@@ -72,11 +73,11 @@ static void _clear_then_set(int *data, int clear_mask, int set_mask)
  * returns 1 is the argument appears to be a value, 0 otherwise
  */
 static int _isvalue(char *arg) {
-    	if (isdigit(*arg)) {	 /* decimal values and 0x... hex values */
+    	if (isdigit((int)*arg)) { /* decimal values and 0x... hex values */
 	    	return 1;
 	}
 
-	while (isxdigit(*arg)) { /* hex values not preceded by 0x */
+	while (isxdigit((int)*arg)) { /* hex values not preceded by 0x */
 		arg++;
 	}
 	if (*arg == ',' || *arg == '\0') { /* end of field or string */
@@ -139,7 +140,7 @@ int slurm_get_avail_procs(const uint16_t socket_cnt,
 	uint16_t max_avail_cpus = 0xffff;	/* for alloc_* accounting */
 	uint16_t min_sockets = 1, max_sockets = 0xffff;
 	uint16_t min_cores   = 1, max_cores   = 0xffff;
-	uint16_t min_threads = 1, max_threads = 0xffff;
+	uint16_t                  max_threads = 0xffff;
 	int i;
 
         /* pick defaults for any unspecified items */
@@ -148,7 +149,7 @@ int slurm_get_avail_procs(const uint16_t socket_cnt,
 	if (core_cnt != (uint16_t) NO_VAL)
 		min_cores = max_cores = core_cnt;	
 	if (thread_cnt != (uint16_t) NO_VAL)
-		min_threads = max_threads = thread_cnt;
+		max_threads = thread_cnt;
 	if (cpus_per_task <= 0)
 		cpus_per_task = 1;
 	if (*threads <= 0)
diff --git a/src/common/slurm_resource_info.h b/src/common/slurm_resource_info.h
index b3605396b..d043e5c54 100644
--- a/src/common/slurm_resource_info.h
+++ b/src/common/slurm_resource_info.h
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/common/slurm_rlimits_info.c b/src/common/slurm_rlimits_info.c
index 98dd2596a..e2b109115 100644
--- a/src/common/slurm_rlimits_info.c
+++ b/src/common/slurm_rlimits_info.c
@@ -6,7 +6,7 @@
  *  Copyright (C) 2005 Hewlett-Packard Development Company, L.P.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/common/slurm_rlimits_info.h b/src/common/slurm_rlimits_info.h
index 342706100..b102a0b4c 100644
--- a/src/common/slurm_rlimits_info.h
+++ b/src/common/slurm_rlimits_info.h
@@ -6,7 +6,7 @@
  *  Copyright (C) 2005 Hewlett-Packard Development Company, L.P.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/common/slurm_selecttype_info.c b/src/common/slurm_selecttype_info.c
index 42dc747d4..274c9e822 100644
--- a/src/common/slurm_selecttype_info.c
+++ b/src/common/slurm_selecttype_info.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/common/slurm_selecttype_info.h b/src/common/slurm_selecttype_info.h
index 5849a1c03..4cee33cd2 100644
--- a/src/common/slurm_selecttype_info.h
+++ b/src/common/slurm_selecttype_info.h
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -42,7 +42,7 @@
 
 #include <stdio.h>
 #include <string.h>
-#include <slurm/slurm.h>
+#include "slurm/slurm.h"
 
 int parse_select_type_param(char *select_type_parameters, uint16_t *param);
 
diff --git a/src/common/slurm_step_layout.c b/src/common/slurm_step_layout.c
index a5c3211a5..ffc0e2a9c 100644
--- a/src/common/slurm_step_layout.c
+++ b/src/common/slurm_step_layout.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -39,12 +39,10 @@
 #  include <string.h>
 #endif                /* HAVE_CONFIG_H */
 
-
-#include <slurm/slurm.h>
-
 #include <stdlib.h>
 
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm.h"
+#include "slurm/slurm_errno.h"
 
 #include "src/common/slurm_step_layout.h"
 #include "src/common/log.h"
@@ -286,21 +284,38 @@ extern void pack_slurm_step_layout(slurm_step_layout_t *step_layout,
 				   Buf buffer, uint16_t protocol_version)
 {
 	uint16_t i = 0;
-	if(protocol_version >= SLURM_2_1_PROTOCOL_VERSION) {
-		if(step_layout)
+
+	if (protocol_version >= SLURM_2_3_PROTOCOL_VERSION) {
+		if (step_layout)
+			i=1;
+
+		pack16(i, buffer);
+		if (!i)
+			return;
+		packstr(step_layout->front_end, buffer);
+		packstr(step_layout->node_list, buffer);
+		pack32(step_layout->node_cnt, buffer);
+		pack32(step_layout->task_cnt, buffer);
+		pack16(step_layout->task_dist, buffer);
+
+		for (i=0; i<step_layout->node_cnt; i++) {
+			pack32_array(step_layout->tids[i],
+				     step_layout->tasks[i],
+				     buffer);
+		}
+	} else if (protocol_version >= SLURM_2_1_PROTOCOL_VERSION) {
+		if (step_layout)
 			i=1;
 
 		pack16(i, buffer);
-		if(!i)
+		if (!i)
 			return;
 		packstr(step_layout->node_list, buffer);
 		pack32(step_layout->node_cnt, buffer);
 		pack32(step_layout->task_cnt, buffer);
 		pack16(step_layout->task_dist, buffer);
-/* 	slurm_pack_slurm_addr_array(step_layout->node_addr,  */
-/* 				    step_layout->node_cnt, buffer); */
 
-		for(i=0; i<step_layout->node_cnt; i++) {
+		for (i=0; i<step_layout->node_cnt; i++) {
 			pack32_array(step_layout->tids[i],
 				     step_layout->tasks[i],
 				     buffer);
@@ -316,35 +331,51 @@ extern int unpack_slurm_step_layout(slurm_step_layout_t **layout, Buf buffer,
 	slurm_step_layout_t *step_layout = NULL;
 	int i;
 
-	if(protocol_version >= SLURM_2_1_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_2_3_PROTOCOL_VERSION) {
 		safe_unpack16(&uint16_tmp, buffer);
-		if(!uint16_tmp)
+		if (!uint16_tmp)
 			return SLURM_SUCCESS;
 
 		step_layout = xmalloc(sizeof(slurm_step_layout_t));
 		*layout = step_layout;
 
-		step_layout->node_list = NULL;
-		step_layout->node_cnt = 0;
-		step_layout->tids = NULL;
-		step_layout->tasks = NULL;
+		safe_unpackstr_xmalloc(&step_layout->front_end,
+				       &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&step_layout->node_list,
 				       &uint32_tmp, buffer);
 		safe_unpack32(&step_layout->node_cnt, buffer);
 		safe_unpack32(&step_layout->task_cnt, buffer);
 		safe_unpack16(&step_layout->task_dist, buffer);
 
-/* 	if (slurm_unpack_slurm_addr_array(&(step_layout->node_addr),  */
-/* 					  &uint32_tmp, buffer)) */
-/* 		goto unpack_error; */
-/* 	if (uint32_tmp != step_layout->node_cnt) */
-/* 		goto unpack_error; */
+		step_layout->tasks =
+			xmalloc(sizeof(uint32_t) * step_layout->node_cnt);
+		step_layout->tids = xmalloc(sizeof(uint32_t *)
+					    * step_layout->node_cnt);
+		for (i = 0; i < step_layout->node_cnt; i++) {
+			safe_unpack32_array(&(step_layout->tids[i]),
+					    &num_tids,
+					    buffer);
+			step_layout->tasks[i] = num_tids;
+		}
+	} else if (protocol_version >= SLURM_2_1_PROTOCOL_VERSION) {
+		safe_unpack16(&uint16_tmp, buffer);
+		if (!uint16_tmp)
+			return SLURM_SUCCESS;
+
+		step_layout = xmalloc(sizeof(slurm_step_layout_t));
+		*layout = step_layout;
+
+		safe_unpackstr_xmalloc(&step_layout->node_list,
+				       &uint32_tmp, buffer);
+		safe_unpack32(&step_layout->node_cnt, buffer);
+		safe_unpack32(&step_layout->task_cnt, buffer);
+		safe_unpack16(&step_layout->task_dist, buffer);
 
 		step_layout->tasks =
 			xmalloc(sizeof(uint32_t) * step_layout->node_cnt);
 		step_layout->tids = xmalloc(sizeof(uint32_t *)
 					    * step_layout->node_cnt);
-		for(i = 0; i < step_layout->node_cnt; i++) {
+		for (i = 0; i < step_layout->node_cnt; i++) {
 			safe_unpack32_array(&(step_layout->tids[i]),
 					    &num_tids,
 					    buffer);
@@ -363,9 +394,9 @@ unpack_error:
 extern int slurm_step_layout_destroy(slurm_step_layout_t *step_layout)
 {
 	int i=0;
-	if(step_layout) {
+	if (step_layout) {
+		xfree(step_layout->front_end);
 		xfree(step_layout->node_list);
-/* 		xfree(step_layout->node_addr); */
 		xfree(step_layout->tasks);
 		for (i = 0; i < step_layout->node_cnt; i++) {
 			xfree(step_layout->tids[i]);
@@ -419,7 +450,7 @@ static int _init_task_layout(slurm_step_layout_t *step_layout,
 	if (step_layout->tasks)	/* layout already completed */
 		return SLURM_SUCCESS;
 
-	if((int)cpus_per_task < 1 || cpus_per_task == (uint16_t)NO_VAL)
+	if ((int)cpus_per_task < 1 || cpus_per_task == (uint16_t)NO_VAL)
 		cpus_per_task = 1;
 
 	step_layout->plane_size = plane_size;
@@ -428,26 +459,26 @@ static int _init_task_layout(slurm_step_layout_t *step_layout,
 				     * step_layout->node_cnt);
 	step_layout->tids  = xmalloc(sizeof(uint32_t *)
 				     * step_layout->node_cnt);
-	if(!(cluster_flags & CLUSTER_FLAG_BG)) {
+	if (!(cluster_flags & CLUSTER_FLAG_BG)) {
 		hostlist_t hl = hostlist_create(step_layout->node_list);
 		/* make sure the number of nodes we think we have
 		 * is the correct number */
 		i = hostlist_count(hl);
-		if(step_layout->node_cnt > i)
+		if (step_layout->node_cnt > i)
 			step_layout->node_cnt = i;
 		hostlist_destroy(hl);
 	}
-	debug("laying out the %u tasks on %u hosts %s",
+	debug("laying out the %u tasks on %u hosts %s dist %u",
 	      step_layout->task_cnt, step_layout->node_cnt,
-	      step_layout->node_list);
-	if(step_layout->node_cnt < 1) {
+	      step_layout->node_list, task_dist);
+	if (step_layout->node_cnt < 1) {
 		error("no hostlist given can't layout tasks");
 		return SLURM_ERROR;
 	}
 
 	for (i=0; i<step_layout->node_cnt; i++) {
 /* 		name = hostlist_shift(hl); */
-/* 		if(!name) { */
+/* 		if (!name) { */
 /* 			error("hostlist incomplete for this job request"); */
 /* 			hostlist_destroy(hl); */
 /* 			return SLURM_ERROR; */
diff --git a/src/common/slurm_step_layout.h b/src/common/slurm_step_layout.h
index e08c1bd1b..5ad537663 100644
--- a/src/common/slurm_step_layout.h
+++ b/src/common/slurm_step_layout.h
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/common/slurm_strcasestr.c b/src/common/slurm_strcasestr.c
index 14794a1ad..a44679a11 100644
--- a/src/common/slurm_strcasestr.c
+++ b/src/common/slurm_strcasestr.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/common/slurm_strcasestr.h b/src/common/slurm_strcasestr.h
index 79e730909..6a8e50098 100644
--- a/src/common/slurm_strcasestr.h
+++ b/src/common/slurm_strcasestr.h
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/common/slurm_topology.c b/src/common/slurm_topology.c
index a3a54ec49..a6ea794f6 100644
--- a/src/common/slurm_topology.c
+++ b/src/common/slurm_topology.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -55,6 +55,7 @@ int switch_record_cnt = 0;
 /* ************************************************************************ */
 typedef struct slurm_topo_ops {
 	int		(*build_config)		( void );
+	bool		(*node_ranking)		( void );
 	int		(*get_node_addr)	( char* node_name,
 						  char** addr,
 						  char** pattern );
@@ -87,6 +88,7 @@ slurm_topo_get_ops( slurm_topo_context_t *c )
 	 */
 	static const char *syms[] = {
 		"topo_build_config",
+		"topo_generate_node_ranking",
 		"topo_get_node_addr",
 	};
 	int n_syms = sizeof( syms ) / sizeof( char * );
@@ -268,6 +270,19 @@ slurm_topo_build_config( void )
 	return rc;
 }
 
+/* *********************************************************************** */
+/*  TAG(                      slurm_topo_generate_node_ranking          )  */
+/* NOTE: This operation is only supported by those topology plugins for    */
+/*       which the node ordering between slurmd and slurmctld is invariant */
+/* *********************************************************************** */
+extern bool
+slurm_topo_generate_node_ranking( void )
+{
+	if ( slurm_topo_init() < 0 )
+		return SLURM_ERROR;
+
+	return (*(g_topo_context->ops.node_ranking))();
+}
 
 /* *********************************************************************** */
 /*  TAG(                      slurm_topo_get_node_addr                  )  */
diff --git a/src/common/slurm_topology.h b/src/common/slurm_topology.h
index 7b144c8f5..ecde1e772 100644
--- a/src/common/slurm_topology.h
+++ b/src/common/slurm_topology.h
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -39,8 +39,8 @@
 #ifndef __SLURM_CONTROLLER_TOPO_PLUGIN_API_H__
 #define __SLURM_CONTROLLER_TOPO_PLUGIN_API_H__
 
-#include <slurm/slurm.h>
-#include <src/slurmctld/slurmctld.h>
+#include "slurm/slurm.h"
+#include "src/slurmctld/slurmctld.h"
 
 /*****************************************************************************\
  *  SWITCH topology data structures
@@ -89,6 +89,13 @@ extern int slurm_topo_fini(void);
  */
 extern int slurm_topo_build_config( void );
 
+/*
+ * slurm_topo_generate_node_ranking  -  populate node_rank fields
+ * NOTE: This operation is only supported by those topology plugins for
+ *       which the node ordering between slurmd and slurmctld is invariant.
+ */
+extern bool slurm_topo_generate_node_ranking( void );
+
 /*
  * slurm_topo_get_node_addr - build node address and the associated pattern
  *      based on the topology information
diff --git a/src/common/slurm_xlator.h b/src/common/slurm_xlator.h
index a43b3b946..55e0df9bd 100644
--- a/src/common/slurm_xlator.h
+++ b/src/common/slurm_xlator.h
@@ -34,7 +34,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -234,6 +234,10 @@
 #define	xfer_buf_data		slurm_xfer_buf_data
 #define	pack_time		slurm_pack_time
 #define	unpack_time		slurm_unpack_time
+#define	packdouble		slurm_packdouble
+#define	unpackdouble		slurm_unpackdouble
+#define	pack64			slurm_pack64
+#define	unpack64		slurm_unpack64
 #define	pack32			slurm_pack32
 #define	unpack32		slurm_unpack32
 #define	pack16			slurm_pack16
@@ -299,6 +303,7 @@
 #define	_xstrfmtcat		slurm_xstrfmtcat
 #define	_xmemcat		slurm_xmemcat
 #define	xstrdup			slurm_xstrdup
+#define	xstrdup_printf		slurm_xstrdup_printf
 #define	xbasename		slurm_xbasename
 
 /* slurm_protocol_defs.[ch] functions */
@@ -342,6 +347,16 @@
 #define jobacct_common_alloc_jobacct slurm_jobacct_common_alloc_jobacct
 #define jobacct_common_free_jobacct slurm_jobacct_common_free_jobacct
 
+/* node_select.[ch] functions */
+#define destroy_select_ba_request	slurm_destroy_select_ba_request
+
+/* parse_config.[ch] functions */
+#define s_p_get_string			slurm_s_p_get_string
+#define s_p_get_uint32			slurm_s_p_get_uint32
+#define s_p_hashtbl_create		slurm_s_p_hashtbl_create
+#define s_p_hashtbl_destroy		slurm_s_p_hashtbl_destroy
+#define s_p_parse_file			slurm_s_p_parse_file
+
 #endif /* USE_ALIAS */
 
 /* Include the function definitions after redefining their names. */
@@ -352,11 +367,14 @@
 #include "src/common/list.h"
 #include "src/common/log.h"
 #include "src/common/macros.h"
+#include "src/common/node_select.h"
 #include "src/common/pack.h"
+#include "src/common/parse_config.h"
 #include "src/common/env.h"
 #include "src/common/slurm_auth.h"
 #include "src/common/strlcpy.h"
 #include "src/common/switch.h"
+#include "src/common/working_cluster.h"
 #include "src/common/xassert.h"
 #include "src/common/xmalloc.h"
 #include "src/common/xsignal.h"
diff --git a/src/common/slurmdb_defs.c b/src/common/slurmdb_defs.c
index 28c423823..89a0d990e 100644
--- a/src/common/slurmdb_defs.c
+++ b/src/common/slurmdb_defs.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -461,6 +461,7 @@ extern void slurmdb_destroy_job_rec(void *object)
 		xfree(job->account);
 		xfree(job->blockid);
 		xfree(job->cluster);
+		xfree(job->derived_es);
 		xfree(job->jobname);
 		xfree(job->partition);
 		xfree(job->nodes);
@@ -945,7 +946,6 @@ extern List slurmdb_get_info_cluster(char *cluster_names)
 	char *cluster_name = NULL;
 	void *db_conn = NULL;
 	ListIterator itr, itr2;
-	int err = 0;
 	bool all_clusters = 0;
 
 	if (cluster_names && !strcmp(cluster_names, "all"))
@@ -970,7 +970,6 @@ extern List slurmdb_get_info_cluster(char *cluster_names)
 	if (!cluster_names || all_clusters) {
 		while ((cluster_rec = list_next(itr))) {
 			if (_setup_cluster_rec(cluster_rec) != SLURM_SUCCESS) {
-				err = 1;
 				list_delete_item(itr);
 			}
 		}
@@ -986,12 +985,10 @@ extern List slurmdb_get_info_cluster(char *cluster_names)
 			if (!cluster_rec) {
 				error("No cluster '%s' known by database.",
 				      cluster_name);
-				err = 1;
 				goto next;
 			}
 
 			if (_setup_cluster_rec(cluster_rec) != SLURM_SUCCESS) {
-				err = 1;
 				list_delete_item(itr);
 			}
 		next:
@@ -1058,7 +1055,7 @@ extern void slurmdb_init_association_rec(slurmdb_association_rec_t *assoc,
 extern void slurmdb_init_cluster_rec(slurmdb_cluster_rec_t *cluster,
 				     bool free_it)
 {
-	if(!cluster)
+	if (!cluster)
 		return;
 
 	if (free_it)
@@ -1069,7 +1066,7 @@ extern void slurmdb_init_cluster_rec(slurmdb_cluster_rec_t *cluster,
 
 extern void slurmdb_init_qos_rec(slurmdb_qos_rec_t *qos, bool free_it)
 {
-	if(!qos)
+	if (!qos)
 		return;
 
 	if (free_it)
@@ -1078,6 +1075,7 @@ extern void slurmdb_init_qos_rec(slurmdb_qos_rec_t *qos, bool free_it)
 
 	qos->flags = QOS_FLAG_NOTSET;
 
+	qos->grace_time = NO_VAL;
 	qos->preempt_mode = (uint16_t)NO_VAL;
 	qos->priority = NO_VAL;
 
@@ -1092,8 +1090,10 @@ extern void slurmdb_init_qos_rec(slurmdb_qos_rec_t *qos, bool free_it)
 	qos->max_cpu_mins_pj = (uint64_t)NO_VAL;
 	qos->max_cpu_run_mins_pu = (uint64_t)NO_VAL;
 	qos->max_cpus_pj = NO_VAL;
+	qos->max_cpus_pu = NO_VAL;
 	qos->max_jobs_pu = NO_VAL;
 	qos->max_nodes_pj = NO_VAL;
+	qos->max_nodes_pu = NO_VAL;
 	qos->max_submit_jobs_pu = NO_VAL;
 	qos->max_wall_pj = NO_VAL;
 
@@ -1103,7 +1103,7 @@ extern void slurmdb_init_qos_rec(slurmdb_qos_rec_t *qos, bool free_it)
 
 extern void slurmdb_init_wckey_rec(slurmdb_wckey_rec_t *wckey, bool free_it)
 {
-	if(!wckey)
+	if (!wckey)
 		return;
 
 	if (free_it)
@@ -1792,20 +1792,20 @@ extern void log_assoc_rec(slurmdb_association_rec_t *assoc_ptr,
 		debug2("  Qos              : %s", "Normal");
 	}
 
-	if(assoc_ptr->parent_acct)
+	if (assoc_ptr->parent_acct)
 		debug2("  ParentAccount    : %s", assoc_ptr->parent_acct);
-	if(assoc_ptr->partition)
+	if (assoc_ptr->partition)
 		debug2("  Partition        : %s", assoc_ptr->partition);
-	if(assoc_ptr->user)
+	if (assoc_ptr->user)
 		debug2("  User             : %s(%u)",
 		       assoc_ptr->user, assoc_ptr->uid);
 
-	if(assoc_ptr->usage) {
-		if(assoc_ptr->usage->shares_norm != (double)NO_VAL)
+	if (assoc_ptr->usage) {
+		if (!fuzzy_equal(assoc_ptr->usage->shares_norm, NO_VAL))
 			debug2("  NormalizedShares : %f",
 			       assoc_ptr->usage->shares_norm);
 
-		if(assoc_ptr->usage->level_shares != NO_VAL)
+		if (assoc_ptr->usage->level_shares != NO_VAL)
 			debug2("  LevelShares      : %u",
 			       assoc_ptr->usage->level_shares);
 
diff --git a/src/common/slurmdb_defs.h b/src/common/slurmdb_defs.h
index 96828ed80..244929b3a 100644
--- a/src/common/slurmdb_defs.h
+++ b/src/common/slurmdb_defs.h
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -38,7 +38,7 @@
 #ifndef _SLURMDB_DEFS_H
 #define _SLURMDB_DEFS_H
 
-#include <slurm/slurmdb.h>
+#include "slurm/slurmdb.h"
 
 /* Defined purge macros */
 #define SLURMDB_PURGE_GET_UNITS(_X) \
diff --git a/src/common/slurmdb_pack.c b/src/common/slurmdb_pack.c
index c34c4cc37..e714aa74e 100644
--- a/src/common/slurmdb_pack.c
+++ b/src/common/slurmdb_pack.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -371,7 +371,24 @@ extern void slurmdb_pack_used_limits(void *in, uint16_t rpc_version, Buf buffer)
 {
 	slurmdb_used_limits_t *object = (slurmdb_used_limits_t *)in;
 
-	if(rpc_version >= 8) {
+	if(rpc_version >= 9) {
+		if(!object) {
+			pack64(0, buffer);
+			pack32(0, buffer);
+			pack32(0, buffer);
+			pack32(0, buffer);
+			pack32(0, buffer);
+			pack32(0, buffer);
+			return;
+		}
+
+		pack64(object->cpu_run_mins, buffer);
+		pack32(object->cpus, buffer);
+		pack32(object->jobs, buffer);
+		pack32(object->nodes, buffer);
+		pack32(object->submit_jobs, buffer);
+		pack32(object->uid, buffer);
+	} else if (rpc_version >= 8) {
 		if(!object) {
 			pack64(0, buffer);
 			pack32(0, buffer);
@@ -406,7 +423,14 @@ extern int slurmdb_unpack_used_limits(void **object,
 
 	*object = (void *)object_ptr;
 
-	if(rpc_version >= 8) {
+	if (rpc_version >= 9) {
+		safe_unpack64(&object_ptr->cpu_run_mins, buffer);
+		safe_unpack32(&object_ptr->cpus, buffer);
+		safe_unpack32(&object_ptr->jobs, buffer);
+		safe_unpack32(&object_ptr->nodes, buffer);
+		safe_unpack32(&object_ptr->submit_jobs, buffer);
+		safe_unpack32(&object_ptr->uid, buffer);
+	} else if (rpc_version >= 8) {
 		safe_unpack64(&object_ptr->cpu_run_mins, buffer);
 		safe_unpack32(&object_ptr->jobs, buffer);
 		safe_unpack32(&object_ptr->submit_jobs, buffer);
@@ -743,9 +767,10 @@ extern int slurmdb_unpack_cluster_rec(void **object, uint16_t rpc_version,
 			object_ptr->accounting_list = list_create(
 				slurmdb_destroy_cluster_accounting_rec);
 			for(i=0; i<count; i++) {
-				slurmdb_unpack_cluster_accounting_rec(
-					(void *)&slurmdb_info,
-					rpc_version, buffer);
+				if (slurmdb_unpack_cluster_accounting_rec(
+					    (void *)&slurmdb_info,
+					    rpc_version, buffer) == SLURM_ERROR)
+					goto unpack_error;
 				list_append(object_ptr->accounting_list,
 					    slurmdb_info);
 			}
@@ -777,9 +802,10 @@ extern int slurmdb_unpack_cluster_rec(void **object, uint16_t rpc_version,
 			object_ptr->accounting_list = list_create(
 				slurmdb_destroy_cluster_accounting_rec);
 			for(i=0; i<count; i++) {
-				slurmdb_unpack_cluster_accounting_rec(
-					(void *)&slurmdb_info,
-					rpc_version, buffer);
+				if (slurmdb_unpack_cluster_accounting_rec(
+					    (void *)&slurmdb_info,
+					    rpc_version, buffer) == SLURM_ERROR)
+					goto unpack_error;
 				list_append(object_ptr->accounting_list,
 					    slurmdb_info);
 			}
@@ -1288,14 +1314,14 @@ extern void slurmdb_pack_qos_rec(void *in, uint16_t rpc_version, Buf buffer)
 	uint32_t count = NO_VAL;
 	char *tmp_info = NULL;
 
-	if(rpc_version >= 8) {
-		if(!object) {
+	if (rpc_version >= 9) {
+		if (!object) {
 			packnull(buffer);
 			pack32(0, buffer);
-			packnull(buffer);
 
 			pack32(QOS_FLAG_NOTSET, buffer);
 
+			pack32(NO_VAL, buffer);
 			pack64(NO_VAL, buffer);
 			pack64(NO_VAL, buffer);
 			pack32(NO_VAL, buffer);
@@ -1311,6 +1337,8 @@ extern void slurmdb_pack_qos_rec(void *in, uint16_t rpc_version, Buf buffer)
 			pack32(NO_VAL, buffer);
 			pack32(NO_VAL, buffer);
 			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
 
 			packnull(buffer);
 
@@ -1329,6 +1357,7 @@ extern void slurmdb_pack_qos_rec(void *in, uint16_t rpc_version, Buf buffer)
 
 		pack32(object->flags, buffer);
 
+		pack32(object->grace_time, buffer);
 		pack64(object->grp_cpu_mins, buffer);
 		pack64(object->grp_cpu_run_mins, buffer);
 		pack32(object->grp_cpus, buffer);
@@ -1340,8 +1369,10 @@ extern void slurmdb_pack_qos_rec(void *in, uint16_t rpc_version, Buf buffer)
 		pack64(object->max_cpu_mins_pj, buffer);
 		pack64(object->max_cpu_run_mins_pu, buffer);
 		pack32(object->max_cpus_pj, buffer);
+		pack32(object->max_cpus_pu, buffer);
 		pack32(object->max_jobs_pu, buffer);
 		pack32(object->max_nodes_pj, buffer);
+		pack32(object->max_nodes_pu, buffer);
 		pack32(object->max_submit_jobs_pu, buffer);
 		pack32(object->max_wall_pj, buffer);
 
@@ -1349,14 +1380,14 @@ extern void slurmdb_pack_qos_rec(void *in, uint16_t rpc_version, Buf buffer)
 
 		pack_bit_str(object->preempt_bitstr, buffer);
 
-		if(object->preempt_list)
+		if (object->preempt_list)
 			count = list_count(object->preempt_list);
 
 		pack32(count, buffer);
 
-		if(count && count != NO_VAL) {
+		if (count && count != NO_VAL) {
 			itr = list_iterator_create(object->preempt_list);
-			while((tmp_info = list_next(itr))) {
+			while ((tmp_info = list_next(itr))) {
 				packstr(tmp_info, buffer);
 			}
 			list_iterator_destroy(itr);
@@ -1368,12 +1399,90 @@ extern void slurmdb_pack_qos_rec(void *in, uint16_t rpc_version, Buf buffer)
 
 		packdouble(object->usage_factor, buffer);
 		packdouble(object->usage_thres, buffer);
-	} else if(rpc_version >= 6) {
-		if(!object) {
+	} else if (rpc_version >= 8) {
+		if (!object) {
 			packnull(buffer);
 			pack32(0, buffer);
+
+			pack32(QOS_FLAG_NOTSET, buffer);
+
+			pack64(NO_VAL, buffer);
+			pack64(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+
+			pack64(NO_VAL, buffer);
+			pack64(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+
 			packnull(buffer);
 
+			pack_bit_str(NULL, buffer);
+			pack32(NO_VAL, buffer);
+
+			pack16(0, buffer);
+			pack32(0, buffer);
+
+			packdouble((double)NO_VAL, buffer);
+			packdouble((double)NO_VAL, buffer);
+			return;
+		}
+		packstr(object->description, buffer);
+		pack32(object->id, buffer);
+
+		pack32(object->flags, buffer);
+
+		pack64(object->grp_cpu_mins, buffer);
+		pack64(object->grp_cpu_run_mins, buffer);
+		pack32(object->grp_cpus, buffer);
+		pack32(object->grp_jobs, buffer);
+		pack32(object->grp_nodes, buffer);
+		pack32(object->grp_submit_jobs, buffer);
+		pack32(object->grp_wall, buffer);
+
+		pack64(object->max_cpu_mins_pj, buffer);
+		pack64(object->max_cpu_run_mins_pu, buffer);
+		pack32(object->max_cpus_pj, buffer);
+		pack32(object->max_jobs_pu, buffer);
+		pack32(object->max_nodes_pj, buffer);
+		pack32(object->max_submit_jobs_pu, buffer);
+		pack32(object->max_wall_pj, buffer);
+
+		packstr(object->name, buffer);
+
+		pack_bit_str(object->preempt_bitstr, buffer);
+
+		if (object->preempt_list)
+			count = list_count(object->preempt_list);
+
+		pack32(count, buffer);
+
+		if (count && count != NO_VAL) {
+			itr = list_iterator_create(object->preempt_list);
+			while ((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		pack16(object->preempt_mode, buffer);
+		pack32(object->priority, buffer);
+
+		packdouble(object->usage_factor, buffer);
+		packdouble(object->usage_thres, buffer);
+	} else if (rpc_version >= 6) {
+		if (!object) {
+			packnull(buffer);
+			pack32(0, buffer);
+
 			pack64(NO_VAL, buffer);
 			pack32(NO_VAL, buffer);
 			pack32(NO_VAL, buffer);
@@ -1452,13 +1561,14 @@ extern int slurmdb_unpack_qos_rec(void **object, uint16_t rpc_version,
 
 	slurmdb_init_qos_rec(object_ptr, 0);
 
-	if(rpc_version >= 8) {
+	if (rpc_version >= 9) {
 		safe_unpackstr_xmalloc(&object_ptr->description,
 				       &uint32_tmp, buffer);
 		safe_unpack32(&object_ptr->id, buffer);
 
 		safe_unpack32(&object_ptr->flags, buffer);
 
+		safe_unpack32(&object_ptr->grace_time, buffer);
 		safe_unpack64(&object_ptr->grp_cpu_mins, buffer);
 		safe_unpack64(&object_ptr->grp_cpu_run_mins, buffer);
 		safe_unpack32(&object_ptr->grp_cpus, buffer);
@@ -1470,8 +1580,10 @@ extern int slurmdb_unpack_qos_rec(void **object, uint16_t rpc_version,
 		safe_unpack64(&object_ptr->max_cpu_mins_pj, buffer);
 		safe_unpack64(&object_ptr->max_cpu_run_mins_pu, buffer);
 		safe_unpack32(&object_ptr->max_cpus_pj, buffer);
+		safe_unpack32(&object_ptr->max_cpus_pu, buffer);
 		safe_unpack32(&object_ptr->max_jobs_pu, buffer);
 		safe_unpack32(&object_ptr->max_nodes_pj, buffer);
+		safe_unpack32(&object_ptr->max_nodes_pu, buffer);
 		safe_unpack32(&object_ptr->max_submit_jobs_pu, buffer);
 		safe_unpack32(&object_ptr->max_wall_pj, buffer);
 
@@ -1480,10 +1592,10 @@ extern int slurmdb_unpack_qos_rec(void **object, uint16_t rpc_version,
 		unpack_bit_str(&object_ptr->preempt_bitstr, buffer);
 
 		safe_unpack32(&count, buffer);
-		if(count != NO_VAL) {
+		if (count != NO_VAL) {
 			object_ptr->preempt_list =
 				list_create(slurm_destroy_char);
-			for(i=0; i<count; i++) {
+			for (i=0; i<count; i++) {
 				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
 						       buffer);
 				list_append(object_ptr->preempt_list,
@@ -1496,7 +1608,51 @@ extern int slurmdb_unpack_qos_rec(void **object, uint16_t rpc_version,
 
 		safe_unpackdouble(&object_ptr->usage_factor, buffer);
 		safe_unpackdouble(&object_ptr->usage_thres, buffer);
-	} else if(rpc_version >= 6) {
+	} else if (rpc_version >= 8) {
+		safe_unpackstr_xmalloc(&object_ptr->description,
+				       &uint32_tmp, buffer);
+		safe_unpack32(&object_ptr->id, buffer);
+
+		safe_unpack32(&object_ptr->flags, buffer);
+
+		safe_unpack64(&object_ptr->grp_cpu_mins, buffer);
+		safe_unpack64(&object_ptr->grp_cpu_run_mins, buffer);
+		safe_unpack32(&object_ptr->grp_cpus, buffer);
+		safe_unpack32(&object_ptr->grp_jobs, buffer);
+		safe_unpack32(&object_ptr->grp_nodes, buffer);
+		safe_unpack32(&object_ptr->grp_submit_jobs, buffer);
+		safe_unpack32(&object_ptr->grp_wall, buffer);
+
+		safe_unpack64(&object_ptr->max_cpu_mins_pj, buffer);
+		safe_unpack64(&object_ptr->max_cpu_run_mins_pu, buffer);
+		safe_unpack32(&object_ptr->max_cpus_pj, buffer);
+		safe_unpack32(&object_ptr->max_jobs_pu, buffer);
+		safe_unpack32(&object_ptr->max_nodes_pj, buffer);
+		safe_unpack32(&object_ptr->max_submit_jobs_pu, buffer);
+		safe_unpack32(&object_ptr->max_wall_pj, buffer);
+
+		safe_unpackstr_xmalloc(&object_ptr->name, &uint32_tmp, buffer);
+
+		unpack_bit_str(&object_ptr->preempt_bitstr, buffer);
+
+		safe_unpack32(&count, buffer);
+		if (count != NO_VAL) {
+			object_ptr->preempt_list =
+				list_create(slurm_destroy_char);
+			for (i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->preempt_list,
+					    tmp_info);
+			}
+		}
+
+		safe_unpack16(&object_ptr->preempt_mode, buffer);
+		safe_unpack32(&object_ptr->priority, buffer);
+
+		safe_unpackdouble(&object_ptr->usage_factor, buffer);
+		safe_unpackdouble(&object_ptr->usage_thres, buffer);
+	} else if (rpc_version >= 6) {
 		safe_unpackstr_xmalloc(&object_ptr->description,
 				       &uint32_tmp, buffer);
 		safe_unpack32(&object_ptr->id, buffer);
@@ -4327,8 +4483,10 @@ extern int slurmdb_unpack_job_cond(void **object, uint16_t rpc_version,
 			object_ptr->step_list =
 				list_create(slurmdb_destroy_selected_step);
 			for(i=0; i<count; i++) {
-				slurmdb_unpack_selected_step(
-					&job, rpc_version, buffer);
+				if (slurmdb_unpack_selected_step(
+					    &job, rpc_version, buffer)
+				    == SLURM_ERROR)
+					goto unpack_error;
 				list_append(object_ptr->step_list, job);
 			}
 		}
@@ -4592,13 +4750,14 @@ extern int slurmdb_unpack_job_rec(void **job, uint16_t rpc_version, Buf buffer)
 		safe_unpack32(&count, buffer);
 		job_ptr->steps = list_create(slurmdb_destroy_step_rec);
 		for(i=0; i<count; i++) {
-			slurmdb_unpack_step_rec(&step, rpc_version, buffer);
-			if(step) {
-				step->job_ptr = job_ptr;
-				if(!job_ptr->first_step_ptr)
-					job_ptr->first_step_ptr = step;
-				list_append(job_ptr->steps, step);
-			}
+			if (slurmdb_unpack_step_rec(&step, rpc_version, buffer)
+			    == SLURM_ERROR)
+				goto unpack_error;
+
+			step->job_ptr = job_ptr;
+			if(!job_ptr->first_step_ptr)
+				job_ptr->first_step_ptr = step;
+			list_append(job_ptr->steps, step);
 		}
 
 		safe_unpack_time(&job_ptr->submit, buffer);
@@ -4650,13 +4809,14 @@ extern int slurmdb_unpack_job_rec(void **job, uint16_t rpc_version, Buf buffer)
 
 		job_ptr->steps = list_create(slurmdb_destroy_step_rec);
 		for(i=0; i<count; i++) {
-			slurmdb_unpack_step_rec(&step, rpc_version, buffer);
-			if(step) {
-				step->job_ptr = job_ptr;
-				if(!job_ptr->first_step_ptr)
-					job_ptr->first_step_ptr = step;
-				list_append(job_ptr->steps, step);
-			}
+			if (slurmdb_unpack_step_rec(&step, rpc_version, buffer)
+			    == SLURM_ERROR)
+				goto unpack_error;
+
+			step->job_ptr = job_ptr;
+			if(!job_ptr->first_step_ptr)
+				job_ptr->first_step_ptr = step;
+			list_append(job_ptr->steps, step);
 		}
 
 		safe_unpack_time(&job_ptr->submit, buffer);
diff --git a/src/common/slurmdb_pack.h b/src/common/slurmdb_pack.h
index 7b4e6d1f6..a971e1397 100644
--- a/src/common/slurmdb_pack.h
+++ b/src/common/slurmdb_pack.h
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -38,7 +38,7 @@
 #ifndef _SLURMDB_PACK_H
 #define _SLURMDB_PACK_H
 
-#include <slurm/slurmdb.h>
+#include "slurm/slurmdb.h"
 #include "slurmdb_defs.h"
 #include "pack.h"
 #include "xmalloc.h"
diff --git a/src/common/slurmdbd_defs.c b/src/common/slurmdbd_defs.c
index d6514fd08..458c52e71 100644
--- a/src/common/slurmdbd_defs.c
+++ b/src/common/slurmdbd_defs.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -107,6 +107,8 @@ static bool      rollback_started    = 0;
 static bool      halt_agent          = 0;
 static slurm_trigger_callbacks_t callback;
 static bool      callbacks_requested = 0;
+static bool      from_ctld           = 0;
+static bool      need_to_register    = 0;
 
 static void * _agent(void *x);
 static void   _close_slurmdbd_fd(void);
@@ -236,13 +238,13 @@ extern int slurm_send_slurmdbd_recv_rc_msg(uint16_t rpc_version,
 	} else {	/* resp->msg_type == DBD_RC */
 		dbd_rc_msg_t *msg = resp->data;
 		*resp_code = msg->return_code;
-		if(msg->return_code != SLURM_SUCCESS
-		   && msg->return_code != ACCOUNTING_FIRST_REG) {
+		if (msg->return_code != SLURM_SUCCESS
+		    && msg->return_code != ACCOUNTING_FIRST_REG) {
 			char *comment = msg->comment;
-			if(!comment)
+			if (!comment)
 				comment = slurm_strerror(msg->return_code);
-			if(msg->sent_type == DBD_REGISTER_CTLD &&
-			   slurm_get_accounting_storage_enforce()) {
+			if (msg->sent_type == DBD_REGISTER_CTLD &&
+			    slurm_get_accounting_storage_enforce()) {
 				error("slurmdbd: Issue with call "
 				      "%s(%u): %u(%s)",
 				      slurmdbd_msg_type_2_str(
@@ -260,7 +262,8 @@ extern int slurm_send_slurmdbd_recv_rc_msg(uint16_t rpc_version,
 					      msg->sent_type, 1),
 				      msg->sent_type, msg->return_code,
 				      comment);
-		}
+		} else if (msg->sent_type == DBD_REGISTER_CTLD)
+			need_to_register = 0;
 		slurmdbd_free_rc_msg(msg);
 	}
 	xfree(resp);
@@ -434,10 +437,15 @@ again:
 			int rc;
 			fd_set_nonblocking(slurmdbd_fd);
 			rc = _send_init_msg();
-			if ((rc == SLURM_SUCCESS) && callbacks_requested) {
-				(callback.dbd_resumed)();
-				(callback.db_resumed)();
+			if (rc == SLURM_SUCCESS) {
+				if (from_ctld)
+					need_to_register = 1;
+				if (callbacks_requested) {
+					(callback.dbd_resumed)();
+					(callback.db_resumed)();
+				}
 			}
+
 			if ((!need_db && (rc == ESLURM_DB_CONNECTION)) ||
 			    (rc == SLURM_SUCCESS)) {
 				debug("slurmdbd: Sent DbdInit msg");
@@ -539,11 +547,6 @@ extern Buf pack_slurmdbd_msg(slurmdbd_msg_t *req, uint16_t rpc_version)
 			(dbd_usage_msg_t *)req->data, rpc_version,
 			req->msg_type, buffer);
 		break;
-	case DBD_GET_JOBS:
-		slurmdbd_pack_get_jobs_msg((dbd_get_jobs_msg_t *)req->data,
-					   rpc_version,
-					   buffer);
-		break;
 	case DBD_INIT:
 		slurmdbd_pack_init_msg((dbd_init_msg_t *)req->data, rpc_version,
 				       buffer, slurmdbd_auth_info);
@@ -598,6 +601,8 @@ extern Buf pack_slurmdbd_msg(slurmdbd_msg_t *req, uint16_t rpc_version)
 					     buffer);
 		break;
 	case DBD_REGISTER_CTLD:
+		from_ctld = 1;
+		need_to_register = 0;
 		slurmdbd_pack_register_ctld_msg(
 			(dbd_register_ctld_msg_t *)req->data, rpc_version,
 			buffer);
@@ -617,6 +622,8 @@ extern Buf pack_slurmdbd_msg(slurmdbd_msg_t *req, uint16_t rpc_version)
 	case DBD_GET_CONFIG:
 		/* No message to pack */
 		break;
+	case DBD_GET_JOBS:
+		/* Defunct RPC */
 	default:
 		error("slurmdbd: Invalid message type pack %u(%s:%u)",
 		      req->msg_type,
@@ -711,11 +718,6 @@ extern int unpack_slurmdbd_msg(slurmdbd_msg_t *resp,
 			(dbd_usage_msg_t **)&resp->data, rpc_version,
 			resp->msg_type, buffer);
 		break;
-	case DBD_GET_JOBS:
-		rc = slurmdbd_unpack_get_jobs_msg(
-			(dbd_get_jobs_msg_t **)&resp->data,
-			rpc_version, buffer);
-		break;
 	case DBD_INIT:
 		rc = slurmdbd_unpack_init_msg((dbd_init_msg_t **)&resp->data,
 					      buffer,
@@ -796,6 +798,8 @@ extern int unpack_slurmdbd_msg(slurmdbd_msg_t *resp,
 	case DBD_GET_CONFIG:
 		/* No message to unpack */
 		break;
+	case DBD_GET_JOBS:
+		/* Defunct RPC */
 	default:
 		error("slurmdbd: Invalid message type unpack %u(%s)",
 		      resp->msg_type,
@@ -1585,8 +1589,8 @@ static int _unpack_return_code(uint16_t rpc_version, Buf buffer)
 		    == SLURM_SUCCESS) {
 			rc = msg->return_code;
 			if (rc != SLURM_SUCCESS) {
-				if(msg->sent_type == DBD_REGISTER_CTLD &&
-				   slurm_get_accounting_storage_enforce()) {
+				if (msg->sent_type == DBD_REGISTER_CTLD &&
+				    slurm_get_accounting_storage_enforce()) {
 					error("slurmdbd: DBD_RC is %d from "
 					      "%s(%u): %s",
 					      rc,
@@ -1607,7 +1611,8 @@ static int _unpack_return_code(uint16_t rpc_version, Buf buffer)
 					      msg->sent_type,
 					      msg->comment);
 
-			}
+			} else if (msg->sent_type == DBD_REGISTER_CTLD)
+				need_to_register = 0;
 			slurmdbd_free_rc_msg(msg);
 		} else
 			error("slurmdbd: unpack message error");
@@ -1661,13 +1666,19 @@ static int _handle_mult_rc_ret(uint16_t rpc_version, int read_timeout)
 		if (agent_list) {
 			ListIterator itr =
 				list_iterator_create(list_msg->my_list);
-			while((out_buf = list_next(itr))) {
-				if((rc = _unpack_return_code(
+			while ((out_buf = list_next(itr))) {
+				Buf b;
+				if ((rc = _unpack_return_code(
 					    rpc_version, out_buf))
 				    != SLURM_SUCCESS)
 					break;
 
-				free_buf(list_dequeue(agent_list));
+				if ((b = list_dequeue(agent_list))) {
+					free_buf(b);
+				} else {
+					error("slurmdbd: DBD_GOT_MULT_MSG "
+					      "unpack message error");
+				}
 			}
 			list_iterator_destroy(itr);
 		}
@@ -1679,8 +1690,8 @@ static int _handle_mult_rc_ret(uint16_t rpc_version, int read_timeout)
 		    == SLURM_SUCCESS) {
 			rc = msg->return_code;
 			if (rc != SLURM_SUCCESS) {
-				if(msg->sent_type == DBD_REGISTER_CTLD &&
-				   slurm_get_accounting_storage_enforce()) {
+				if (msg->sent_type == DBD_REGISTER_CTLD &&
+				    slurm_get_accounting_storage_enforce()) {
 					error("slurmdbd: DBD_RC is %d from "
 					      "%s(%u): %s",
 					      rc,
@@ -1700,7 +1711,9 @@ static int _handle_mult_rc_ret(uint16_t rpc_version, int read_timeout)
 						      msg->sent_type, 1),
 					      msg->sent_type,
 					      msg->comment);
-			}
+			} else if (msg->sent_type == DBD_REGISTER_CTLD)
+				need_to_register = 0;
+
 			slurmdbd_free_rc_msg(msg);
 		} else
 			error("slurmdbd: unpack message error");
@@ -1869,6 +1882,8 @@ static int _fd_writeable(slurm_fd_t fd)
 		}
 		if (ufds.revents & POLLERR) {
 			error("SlurmDBD connection experienced an error: %m");
+			if (callbacks_requested)
+				(callback.dbd_fail)();
 			return 0;
 		}
 		if ((ufds.revents & POLLOUT) == 0) {
@@ -2080,6 +2095,15 @@ static void *_agent(void *x)
 		slurm_mutex_unlock(&agent_lock);
 		/* END_TIMER; */
 		/* info("at the end with %s", TIME_STR); */
+		if (need_to_register) {
+			need_to_register = 0;
+			/* This is going to be always using the
+			   SlurmDBD plugin so sending NULL as the
+			   connection should be ok.
+			*/
+			clusteracct_storage_g_register_ctld(
+				NULL, slurmctld_conf.slurmctld_port);
+		}
 	}
 
 	slurm_mutex_lock(&agent_lock);
@@ -2382,7 +2406,7 @@ static int _purge_job_start_req(void)
 /****************************************************************************\
  * Free data structures
 \****************************************************************************/
-inline void slurmdbd_free_acct_coord_msg(dbd_acct_coord_msg_t *msg)
+extern void slurmdbd_free_acct_coord_msg(dbd_acct_coord_msg_t *msg)
 {
 	if(msg) {
 		if(msg->acct_list) {
@@ -2394,7 +2418,7 @@ inline void slurmdbd_free_acct_coord_msg(dbd_acct_coord_msg_t *msg)
 	}
 }
 
-inline void slurmdbd_free_cluster_cpus_msg(dbd_cluster_cpus_msg_t *msg)
+extern void slurmdbd_free_cluster_cpus_msg(dbd_cluster_cpus_msg_t *msg)
 {
 	if (msg) {
 		xfree(msg->cluster_nodes);
@@ -2402,7 +2426,7 @@ inline void slurmdbd_free_cluster_cpus_msg(dbd_cluster_cpus_msg_t *msg)
 	}
 }
 
-inline void slurmdbd_free_rec_msg(dbd_rec_msg_t *msg,
+extern void slurmdbd_free_rec_msg(dbd_rec_msg_t *msg,
 				  slurmdbd_msg_type_t type)
 {
 	void (*my_destroy) (void *object);
@@ -2424,7 +2448,7 @@ inline void slurmdbd_free_rec_msg(dbd_rec_msg_t *msg,
 	}
 }
 
-inline void slurmdbd_free_cond_msg(dbd_cond_msg_t *msg,
+extern void slurmdbd_free_cond_msg(dbd_cond_msg_t *msg,
 				   slurmdbd_msg_type_t type)
 {
 	void (*my_destroy) (void *object);
@@ -2481,20 +2505,7 @@ inline void slurmdbd_free_cond_msg(dbd_cond_msg_t *msg,
 	}
 }
 
-inline void slurmdbd_free_get_jobs_msg(dbd_get_jobs_msg_t *msg)
-{
-	if (msg) {
-		xfree(msg->cluster_name);
-		if(msg->selected_steps)
-			list_destroy(msg->selected_steps);
-		if(msg->selected_parts)
-			list_destroy(msg->selected_parts);
-		xfree(msg->user);
-		xfree(msg);
-	}
-}
-
-inline void slurmdbd_free_init_msg(dbd_init_msg_t *msg)
+extern void slurmdbd_free_init_msg(dbd_init_msg_t *msg)
 {
 	if(msg) {
 		xfree(msg->cluster_name);
@@ -2502,20 +2513,21 @@ inline void slurmdbd_free_init_msg(dbd_init_msg_t *msg)
 	}
 }
 
-inline void slurmdbd_free_fini_msg(dbd_fini_msg_t *msg)
+extern void slurmdbd_free_fini_msg(dbd_fini_msg_t *msg)
 {
 	xfree(msg);
 }
 
-inline void slurmdbd_free_job_complete_msg(dbd_job_comp_msg_t *msg)
+extern void slurmdbd_free_job_complete_msg(dbd_job_comp_msg_t *msg)
 {
 	if (msg) {
+		xfree(msg->comment);
 		xfree(msg->nodes);
 		xfree(msg);
 	}
 }
 
-inline void slurmdbd_free_job_start_msg(void *in)
+extern void slurmdbd_free_job_start_msg(void *in)
 {
 	dbd_job_start_msg_t *msg = (dbd_job_start_msg_t *)in;
 	if (msg) {
@@ -2536,12 +2548,12 @@ extern void slurmdbd_free_id_rc_msg(void *in)
 	xfree(msg);
 }
 
-inline void slurmdbd_free_job_suspend_msg(dbd_job_suspend_msg_t *msg)
+extern void slurmdbd_free_job_suspend_msg(dbd_job_suspend_msg_t *msg)
 {
 	xfree(msg);
 }
 
-inline void slurmdbd_free_list_msg(dbd_list_msg_t *msg)
+extern void slurmdbd_free_list_msg(dbd_list_msg_t *msg)
 {
 	if (msg) {
 		if(msg->my_list)
@@ -2550,7 +2562,7 @@ inline void slurmdbd_free_list_msg(dbd_list_msg_t *msg)
 	}
 }
 
-inline void slurmdbd_free_modify_msg(dbd_modify_msg_t *msg,
+extern void slurmdbd_free_modify_msg(dbd_modify_msg_t *msg,
 				     slurmdbd_msg_type_t type)
 {
 	void (*destroy_cond) (void *object);
@@ -2595,7 +2607,7 @@ inline void slurmdbd_free_modify_msg(dbd_modify_msg_t *msg,
 	}
 }
 
-inline void slurmdbd_free_node_state_msg(dbd_node_state_msg_t *msg)
+extern void slurmdbd_free_node_state_msg(dbd_node_state_msg_t *msg)
 {
 	if (msg) {
 		xfree(msg->hostlist);
@@ -2604,7 +2616,7 @@ inline void slurmdbd_free_node_state_msg(dbd_node_state_msg_t *msg)
 	}
 }
 
-inline void slurmdbd_free_rc_msg(dbd_rc_msg_t *msg)
+extern void slurmdbd_free_rc_msg(dbd_rc_msg_t *msg)
 {
 	if(msg) {
 		xfree(msg->comment);
@@ -2612,17 +2624,17 @@ inline void slurmdbd_free_rc_msg(dbd_rc_msg_t *msg)
 	}
 }
 
-inline void slurmdbd_free_register_ctld_msg(dbd_register_ctld_msg_t *msg)
+extern void slurmdbd_free_register_ctld_msg(dbd_register_ctld_msg_t *msg)
 {
 	xfree(msg);
 }
 
-inline void slurmdbd_free_roll_usage_msg(dbd_roll_usage_msg_t *msg)
+extern void slurmdbd_free_roll_usage_msg(dbd_roll_usage_msg_t *msg)
 {
 	xfree(msg);
 }
 
-inline void slurmdbd_free_step_complete_msg(dbd_step_comp_msg_t *msg)
+extern void slurmdbd_free_step_complete_msg(dbd_step_comp_msg_t *msg)
 {
 	if (msg) {
 		xfree(msg->jobacct);
@@ -2630,7 +2642,7 @@ inline void slurmdbd_free_step_complete_msg(dbd_step_comp_msg_t *msg)
 	}
 }
 
-inline void slurmdbd_free_step_start_msg(dbd_step_start_msg_t *msg)
+extern void slurmdbd_free_step_start_msg(dbd_step_start_msg_t *msg)
 {
 	if (msg) {
 		xfree(msg->name);
@@ -2640,7 +2652,7 @@ inline void slurmdbd_free_step_start_msg(dbd_step_start_msg_t *msg)
 	}
 }
 
-inline void slurmdbd_free_usage_msg(dbd_usage_msg_t *msg,
+extern void slurmdbd_free_usage_msg(dbd_usage_msg_t *msg,
 				    slurmdbd_msg_type_t type)
 {
 	void (*destroy_rec) (void *object);
@@ -2672,7 +2684,7 @@ inline void slurmdbd_free_usage_msg(dbd_usage_msg_t *msg,
 /****************************************************************************\
  * Pack and unpack data structures
 \****************************************************************************/
-inline void
+extern void
 slurmdbd_pack_acct_coord_msg(dbd_acct_coord_msg_t *msg,
 			     uint16_t rpc_version, Buf buffer)
 {
@@ -2696,7 +2708,7 @@ slurmdbd_pack_acct_coord_msg(dbd_acct_coord_msg_t *msg,
 	slurmdb_pack_user_cond(msg->cond, rpc_version, buffer);
 }
 
-inline int
+extern int
 slurmdbd_unpack_acct_coord_msg(dbd_acct_coord_msg_t **msg,
 			       uint16_t rpc_version, Buf buffer)
 {
@@ -2727,7 +2739,7 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
-inline void
+extern void
 slurmdbd_pack_cluster_cpus_msg(dbd_cluster_cpus_msg_t *msg,
 			       uint16_t rpc_version, Buf buffer)
 {
@@ -2743,7 +2755,7 @@ slurmdbd_pack_cluster_cpus_msg(dbd_cluster_cpus_msg_t *msg,
 	}
 }
 
-inline int
+extern int
 slurmdbd_unpack_cluster_cpus_msg(dbd_cluster_cpus_msg_t **msg,
 				 uint16_t rpc_version, Buf buffer)
 {
@@ -2775,7 +2787,7 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
-inline void slurmdbd_pack_rec_msg(dbd_rec_msg_t *msg,
+extern void slurmdbd_pack_rec_msg(dbd_rec_msg_t *msg,
 				  uint16_t rpc_version,
 				  slurmdbd_msg_type_t type, Buf buffer)
 {
@@ -2795,7 +2807,7 @@ inline void slurmdbd_pack_rec_msg(dbd_rec_msg_t *msg,
 	(*(my_function))(msg->rec, rpc_version, buffer);
 }
 
-inline int slurmdbd_unpack_rec_msg(dbd_rec_msg_t **msg,
+extern int slurmdbd_unpack_rec_msg(dbd_rec_msg_t **msg,
 				   uint16_t rpc_version,
 				   slurmdbd_msg_type_t type, Buf buffer)
 {
@@ -2827,7 +2839,7 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
-inline void slurmdbd_pack_cond_msg(dbd_cond_msg_t *msg,
+extern void slurmdbd_pack_cond_msg(dbd_cond_msg_t *msg,
 				   uint16_t rpc_version,
 				   slurmdbd_msg_type_t type, Buf buffer)
 {
@@ -2882,7 +2894,7 @@ inline void slurmdbd_pack_cond_msg(dbd_cond_msg_t *msg,
 	(*(my_function))(msg->cond, rpc_version, buffer);
 }
 
-inline int slurmdbd_unpack_cond_msg(dbd_cond_msg_t **msg,
+extern int slurmdbd_unpack_cond_msg(dbd_cond_msg_t **msg,
 				    uint16_t rpc_version,
 				    slurmdbd_msg_type_t type, Buf buffer)
 {
@@ -2949,99 +2961,7 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
-inline void slurmdbd_pack_get_jobs_msg(dbd_get_jobs_msg_t *msg,
-				       uint16_t rpc_version, Buf buffer)
-{
-	uint32_t i = 0;
-	ListIterator itr = NULL;
-	slurmdb_selected_step_t *job = NULL;
-	char *part = NULL;
-
-	packstr(msg->cluster_name, buffer);
-
-	pack16(msg->completion, buffer);
-
-	pack32(msg->gid, buffer);
-
-	pack_time(msg->last_update, buffer);
-
-	if(msg->selected_steps)
-		i = list_count(msg->selected_steps);
-
-	pack32(i, buffer);
-	if(i) {
-		itr = list_iterator_create(msg->selected_steps);
-		while((job = list_next(itr))) {
-			slurmdb_pack_selected_step(job, rpc_version, buffer);
-		}
-		list_iterator_destroy(itr);
-	}
-
-	i = 0;
-	if(msg->selected_parts)
-		i = list_count(msg->selected_parts);
-
-	pack32(i, buffer);
-	if(i) {
-		itr = list_iterator_create(msg->selected_parts);
-		while((part = list_next(itr))) {
-			packstr(part, buffer);
-		}
-		list_iterator_destroy(itr);
-	}
-	packstr(msg->user, buffer);
-}
-
-inline int slurmdbd_unpack_get_jobs_msg(dbd_get_jobs_msg_t **msg,
-					uint16_t rpc_version, Buf buffer)
-{
-	int i;
-	uint32_t count = 0;
-	uint32_t uint32_tmp;
-	dbd_get_jobs_msg_t *msg_ptr;
-	slurmdb_selected_step_t *job = NULL;
-	char *part = NULL;
-
-	msg_ptr = xmalloc(sizeof(dbd_get_jobs_msg_t));
-	*msg = msg_ptr;
-
-	safe_unpackstr_xmalloc(&msg_ptr->cluster_name, &uint32_tmp, buffer);
-
-	safe_unpack16(&msg_ptr->completion, buffer);
-
-	safe_unpack32(&msg_ptr->gid, buffer);
-
-	safe_unpack_time(&msg_ptr->last_update, buffer);
-
-	safe_unpack32(&count, buffer);
-	if(count) {
-		msg_ptr->selected_steps =
-			list_create(slurmdb_destroy_selected_step);
-		for(i=0; i<count; i++) {
-			slurmdb_unpack_selected_step(&job, rpc_version, buffer);
-			list_append(msg_ptr->selected_steps, job);
-		}
-	}
-	safe_unpack32(&count, buffer);
-	if(count) {
-		msg_ptr->selected_parts = list_create(slurm_destroy_char);
-		for(i=0; i<count; i++) {
-			safe_unpackstr_xmalloc(&part, &uint32_tmp, buffer);
-			list_append(msg_ptr->selected_parts, part);
-		}
-	}
-
-	safe_unpackstr_xmalloc(&msg_ptr->user, &uint32_tmp, buffer);
-
-	return SLURM_SUCCESS;
-
-unpack_error:
-	slurmdbd_free_get_jobs_msg(msg_ptr);
-	*msg = NULL;
-	return SLURM_ERROR;
-}
-
-inline void
+extern void
 slurmdbd_pack_init_msg(dbd_init_msg_t *msg, uint16_t rpc_version,
 		       Buf buffer, char *auth_info)
 {
@@ -3074,7 +2994,7 @@ slurmdbd_pack_init_msg(dbd_init_msg_t *msg, uint16_t rpc_version,
 	}
 }
 
-inline int
+extern int
 slurmdbd_unpack_init_msg(dbd_init_msg_t **msg,
 			 Buf buffer, char *auth_info)
 {
@@ -3120,14 +3040,14 @@ unpack_error:
 	return rc;
 }
 
-inline void
+extern void
 slurmdbd_pack_fini_msg(dbd_fini_msg_t *msg, uint16_t rpc_version, Buf buffer)
 {
 	pack16(msg->close_conn, buffer);
 	pack16(msg->commit, buffer);
 }
 
-inline int
+extern int
 slurmdbd_unpack_fini_msg(dbd_fini_msg_t **msg, uint16_t rpc_version, Buf buffer)
 {
 	dbd_fini_msg_t *msg_ptr = xmalloc(sizeof(dbd_fini_msg_t));
@@ -3144,11 +3064,24 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
-inline void
+extern void
 slurmdbd_pack_job_complete_msg(dbd_job_comp_msg_t *msg,
 			       uint16_t rpc_version, Buf buffer)
 {
-	if (rpc_version >= 8) {
+	if (rpc_version >= 9) {
+		pack32(msg->assoc_id, buffer);
+		packstr(msg->comment, buffer);
+		pack32(msg->db_index, buffer);
+		pack32(msg->derived_ec, buffer);
+		pack_time(msg->end_time, buffer);
+		pack32(msg->exit_code, buffer);
+		pack32(msg->job_id, buffer);
+		pack16(msg->job_state, buffer);
+		packstr(msg->nodes, buffer);
+		pack32(msg->req_uid, buffer);
+		pack_time(msg->start_time, buffer);
+		pack_time(msg->submit_time, buffer);
+	} else if (rpc_version >= 8) {
 		pack32(msg->assoc_id, buffer);
 		pack32(msg->db_index, buffer);
 		pack32(msg->derived_ec, buffer);
@@ -3174,7 +3107,7 @@ slurmdbd_pack_job_complete_msg(dbd_job_comp_msg_t *msg,
 	}
 }
 
-inline int
+extern int
 slurmdbd_unpack_job_complete_msg(dbd_job_comp_msg_t **msg,
 				 uint16_t rpc_version, Buf buffer)
 {
@@ -3182,7 +3115,20 @@ slurmdbd_unpack_job_complete_msg(dbd_job_comp_msg_t **msg,
 	dbd_job_comp_msg_t *msg_ptr = xmalloc(sizeof(dbd_job_comp_msg_t));
 	*msg = msg_ptr;
 
-	if (rpc_version >= 8) {
+	if (rpc_version >= 9) {
+		safe_unpack32(&msg_ptr->assoc_id, buffer);
+		safe_unpackstr_xmalloc(&msg_ptr->comment, &uint32_tmp, buffer);
+		safe_unpack32(&msg_ptr->db_index, buffer);
+		safe_unpack32(&msg_ptr->derived_ec, buffer);
+		safe_unpack_time(&msg_ptr->end_time, buffer);
+		safe_unpack32(&msg_ptr->exit_code, buffer);
+		safe_unpack32(&msg_ptr->job_id, buffer);
+		safe_unpack16(&msg_ptr->job_state, buffer);
+		safe_unpackstr_xmalloc(&msg_ptr->nodes, &uint32_tmp, buffer);
+		safe_unpack32(&msg_ptr->req_uid, buffer);
+		safe_unpack_time(&msg_ptr->start_time, buffer);
+		safe_unpack_time(&msg_ptr->submit_time, buffer);
+	} else if (rpc_version >= 8) {
 		safe_unpack32(&msg_ptr->assoc_id, buffer);
 		safe_unpack32(&msg_ptr->db_index, buffer);
 		safe_unpack32(&msg_ptr->derived_ec, buffer);
@@ -3214,7 +3160,7 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
-inline void
+extern void
 slurmdbd_pack_job_start_msg(void *in,
 			    uint16_t rpc_version, Buf buffer)
 {
@@ -3270,7 +3216,7 @@ slurmdbd_pack_job_start_msg(void *in,
 	}
 }
 
-inline int
+extern int
 slurmdbd_unpack_job_start_msg(void **msg,
 			      uint16_t rpc_version, Buf buffer)
 {
@@ -3339,7 +3285,7 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
-inline void
+extern void
 slurmdbd_pack_id_rc_msg(void *in,
 			uint16_t rpc_version, Buf buffer)
 {
@@ -3355,7 +3301,7 @@ slurmdbd_pack_id_rc_msg(void *in,
 	}
 }
 
-inline int
+extern int
 slurmdbd_unpack_id_rc_msg(void **msg,
 			  uint16_t rpc_version, Buf buffer)
 {
@@ -3378,7 +3324,7 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
-inline void
+extern void
 slurmdbd_pack_job_suspend_msg(dbd_job_suspend_msg_t *msg,
 			      uint16_t rpc_version, Buf buffer)
 {
@@ -3390,7 +3336,7 @@ slurmdbd_pack_job_suspend_msg(dbd_job_suspend_msg_t *msg,
 	pack_time(msg->suspend_time, buffer);
 }
 
-inline int
+extern int
 slurmdbd_unpack_job_suspend_msg(dbd_job_suspend_msg_t **msg,
 				uint16_t rpc_version, Buf buffer)
 {
@@ -3410,7 +3356,7 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
-inline void slurmdbd_pack_list_msg(dbd_list_msg_t *msg,
+extern void slurmdbd_pack_list_msg(dbd_list_msg_t *msg,
 				   uint16_t rpc_version,
 				   slurmdbd_msg_type_t type,
 				   Buf buffer)
@@ -3498,7 +3444,7 @@ inline void slurmdbd_pack_list_msg(dbd_list_msg_t *msg,
 		pack32(msg->return_code, buffer);
 }
 
-inline int slurmdbd_unpack_list_msg(dbd_list_msg_t **msg, uint16_t rpc_version,
+extern int slurmdbd_unpack_list_msg(dbd_list_msg_t **msg, uint16_t rpc_version,
 				    slurmdbd_msg_type_t type, Buf buffer)
 {
 	int i;
@@ -3611,7 +3557,7 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
-inline void slurmdbd_pack_modify_msg(dbd_modify_msg_t *msg,
+extern void slurmdbd_pack_modify_msg(dbd_modify_msg_t *msg,
 				     uint16_t rpc_version,
 				     slurmdbd_msg_type_t type,
 				     Buf buffer)
@@ -3652,7 +3598,7 @@ inline void slurmdbd_pack_modify_msg(dbd_modify_msg_t *msg,
 	(*(my_rec))(msg->rec, rpc_version, buffer);
 }
 
-inline int slurmdbd_unpack_modify_msg(dbd_modify_msg_t **msg,
+extern int slurmdbd_unpack_modify_msg(dbd_modify_msg_t **msg,
 				      uint16_t rpc_version,
 				      slurmdbd_msg_type_t type,
 				      Buf buffer)
@@ -3707,7 +3653,7 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
-inline void
+extern void
 slurmdbd_pack_node_state_msg(dbd_node_state_msg_t *msg,
 			     uint16_t rpc_version, Buf buffer)
 {
@@ -3730,7 +3676,7 @@ slurmdbd_pack_node_state_msg(dbd_node_state_msg_t *msg,
 	}
 }
 
-inline int
+extern int
 slurmdbd_unpack_node_state_msg(dbd_node_state_msg_t **msg,
 			       uint16_t rpc_version, Buf buffer)
 {
@@ -3769,7 +3715,7 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
-inline void
+extern void
 slurmdbd_pack_rc_msg(dbd_rc_msg_t *msg,
 		     uint16_t rpc_version, Buf buffer)
 {
@@ -3778,7 +3724,7 @@ slurmdbd_pack_rc_msg(dbd_rc_msg_t *msg,
 	pack16(msg->sent_type, buffer);
 }
 
-inline int
+extern int
 slurmdbd_unpack_rc_msg(dbd_rc_msg_t **msg,
 		       uint16_t rpc_version, Buf buffer)
 {
@@ -3796,7 +3742,7 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
-inline void
+extern void
 slurmdbd_pack_register_ctld_msg(dbd_register_ctld_msg_t *msg,
 				uint16_t rpc_version, Buf buffer)
 {
@@ -3811,7 +3757,7 @@ slurmdbd_pack_register_ctld_msg(dbd_register_ctld_msg_t *msg,
 	}
 }
 
-inline int
+extern int
 slurmdbd_unpack_register_ctld_msg(dbd_register_ctld_msg_t **msg,
 				  uint16_t rpc_version, Buf buffer)
 {
@@ -3837,7 +3783,7 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
-inline void
+extern void
 slurmdbd_pack_roll_usage_msg(dbd_roll_usage_msg_t *msg,
 			     uint16_t rpc_version, Buf buffer)
 {
@@ -3848,7 +3794,7 @@ slurmdbd_pack_roll_usage_msg(dbd_roll_usage_msg_t *msg,
 	}
 }
 
-inline int
+extern int
 slurmdbd_unpack_roll_usage_msg(dbd_roll_usage_msg_t **msg,
 			       uint16_t rpc_version, Buf buffer)
 {
@@ -3869,7 +3815,7 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
-inline void
+extern void
 slurmdbd_pack_step_complete_msg(dbd_step_comp_msg_t *msg,
 				uint16_t rpc_version, Buf buffer)
 {
@@ -3887,7 +3833,7 @@ slurmdbd_pack_step_complete_msg(dbd_step_comp_msg_t *msg,
 	pack32(msg->total_cpus, buffer);
 }
 
-inline int
+extern int
 slurmdbd_unpack_step_complete_msg(dbd_step_comp_msg_t **msg,
 				  uint16_t rpc_version, Buf buffer)
 {
@@ -3913,7 +3859,7 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
-inline void
+extern void
 slurmdbd_pack_step_start_msg(dbd_step_start_msg_t *msg, uint16_t rpc_version,
 			     Buf buffer)
 {
@@ -3934,7 +3880,7 @@ slurmdbd_pack_step_start_msg(dbd_step_start_msg_t *msg, uint16_t rpc_version,
 	}
 }
 
-inline int
+extern int
 slurmdbd_unpack_step_start_msg(dbd_step_start_msg_t **msg,
 			       uint16_t rpc_version, Buf buffer)
 {
@@ -3965,7 +3911,7 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
-inline void slurmdbd_pack_usage_msg(dbd_usage_msg_t *msg,
+extern void slurmdbd_pack_usage_msg(dbd_usage_msg_t *msg,
 				    uint16_t rpc_version,
 				    slurmdbd_msg_type_t type,
 				    Buf buffer)
@@ -3995,7 +3941,7 @@ inline void slurmdbd_pack_usage_msg(dbd_usage_msg_t *msg,
 	pack_time(msg->end, buffer);
 }
 
-inline int slurmdbd_unpack_usage_msg(dbd_usage_msg_t **msg,
+extern int slurmdbd_unpack_usage_msg(dbd_usage_msg_t **msg,
 				     uint16_t rpc_version,
 				     slurmdbd_msg_type_t type,
 				     Buf buffer)
@@ -4039,7 +3985,7 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
-inline void slurmdbd_pack_buffer(void *in,
+extern void slurmdbd_pack_buffer(void *in,
 				 uint16_t rpc_version,
 				 Buf buffer)
 {
@@ -4048,7 +3994,7 @@ inline void slurmdbd_pack_buffer(void *in,
 	packmem(get_buf_data(object), get_buf_offset(object), buffer);
 }
 
-inline int slurmdbd_unpack_buffer(void **out,
+extern int slurmdbd_unpack_buffer(void **out,
 				  uint16_t rpc_version,
 				  Buf buffer)
 {
diff --git a/src/common/slurmdbd_defs.h b/src/common/slurmdbd_defs.h
index f9ce8ca20..aa823ed82 100644
--- a/src/common/slurmdbd_defs.h
+++ b/src/common/slurmdbd_defs.h
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -52,7 +52,7 @@
 #  include <inttypes.h>
 #endif				/*  HAVE_CONFIG_H */
 
-#include <slurm/slurm.h>
+#include "slurm/slurm.h"
 
 #include "src/common/pack.h"
 #include "src/common/list.h"
@@ -76,7 +76,7 @@
  *	communicating with it (e.g. it will not accept messages with a
  *	version higher than SLURMDBD_VERSION).
  */
-#define SLURMDBD_VERSION	8 /* already changed for 2.2 */
+#define SLURMDBD_VERSION	9 /* already changed for 2.3 */
 #define SLURMDBD_VERSION_MIN	7
 
 /* SLURM DBD message types */
@@ -97,7 +97,7 @@ typedef enum {
 	DBD_GET_ASSOC_USAGE,  	/* Get assoc usage information  	*/
 	DBD_GET_CLUSTERS,	/* Get account information		*/
 	DBD_GET_CLUSTER_USAGE, 	/* Get cluster usage information	*/
-	DBD_GET_JOBS,		/* Get job information			*/
+	DBD_GET_JOBS,		/* VESTIGIAL / DEFUNCT RPC		*/
 	DBD_GET_USERS,  	/* Get account information		*/
 	DBD_GOT_ACCOUNTS,	/* Response to DBD_GET_ACCOUNTS		*/
 	DBD_GOT_ASSOCS, 	/* Response to DBD_GET_ASSOCS   	*/
@@ -240,6 +240,7 @@ typedef struct dbd_fini_msg {
 typedef struct dbd_job_comp_msg {
 	uint32_t assoc_id;	/* accounting association id needed to
 				 * find job record in db */
+	char *	 comment;	/* job comment field */
 	uint32_t db_index;	/* index into the db for this job */
 	uint32_t derived_ec;	/* derived job exit code or signal */
 	time_t   end_time;	/* job termintation time */
@@ -434,159 +435,154 @@ extern void slurmdbd_free_buffer(void *x);
 /*****************************************************************************\
  * Free various SlurmDBD message structures
 \*****************************************************************************/
-inline void slurmdbd_free_acct_coord_msg(dbd_acct_coord_msg_t *msg);
-inline void slurmdbd_free_cluster_cpus_msg(dbd_cluster_cpus_msg_t *msg);
-inline void slurmdbd_free_rec_msg(dbd_rec_msg_t *msg, slurmdbd_msg_type_t type);
-inline void slurmdbd_free_cond_msg(dbd_cond_msg_t *msg,
+extern void slurmdbd_free_acct_coord_msg(dbd_acct_coord_msg_t *msg);
+extern void slurmdbd_free_cluster_cpus_msg(dbd_cluster_cpus_msg_t *msg);
+extern void slurmdbd_free_rec_msg(dbd_rec_msg_t *msg, slurmdbd_msg_type_t type);
+extern void slurmdbd_free_cond_msg(dbd_cond_msg_t *msg,
 				   slurmdbd_msg_type_t type);
-inline void slurmdbd_free_get_jobs_msg(dbd_get_jobs_msg_t *msg);
-inline void slurmdbd_free_init_msg(dbd_init_msg_t *msg);
-inline void slurmdbd_free_fini_msg(dbd_fini_msg_t *msg);
-inline void slurmdbd_free_job_complete_msg(dbd_job_comp_msg_t *msg);
-inline void slurmdbd_free_job_start_msg(void *in);
+extern void slurmdbd_free_init_msg(dbd_init_msg_t *msg);
+extern void slurmdbd_free_fini_msg(dbd_fini_msg_t *msg);
+extern void slurmdbd_free_job_complete_msg(dbd_job_comp_msg_t *msg);
+extern void slurmdbd_free_job_start_msg(void *in);
 extern void slurmdbd_free_id_rc_msg(void *in);
-inline void slurmdbd_free_job_suspend_msg(dbd_job_suspend_msg_t *msg);
-inline void slurmdbd_free_list_msg(dbd_list_msg_t *msg);
-inline void slurmdbd_free_modify_msg(dbd_modify_msg_t *msg,
+extern void slurmdbd_free_job_suspend_msg(dbd_job_suspend_msg_t *msg);
+extern void slurmdbd_free_list_msg(dbd_list_msg_t *msg);
+extern void slurmdbd_free_modify_msg(dbd_modify_msg_t *msg,
 				     slurmdbd_msg_type_t type);
-inline void slurmdbd_free_node_state_msg(dbd_node_state_msg_t *msg);
-inline void slurmdbd_free_rc_msg(dbd_rc_msg_t *msg);
-inline void slurmdbd_free_register_ctld_msg(dbd_register_ctld_msg_t *msg);
-inline void slurmdbd_free_roll_usage_msg(dbd_roll_usage_msg_t *msg);
-inline void slurmdbd_free_step_complete_msg(dbd_step_comp_msg_t *msg);
-inline void slurmdbd_free_step_start_msg(dbd_step_start_msg_t *msg);
-inline void slurmdbd_free_usage_msg(dbd_usage_msg_t *msg,
+extern void slurmdbd_free_node_state_msg(dbd_node_state_msg_t *msg);
+extern void slurmdbd_free_rc_msg(dbd_rc_msg_t *msg);
+extern void slurmdbd_free_register_ctld_msg(dbd_register_ctld_msg_t *msg);
+extern void slurmdbd_free_roll_usage_msg(dbd_roll_usage_msg_t *msg);
+extern void slurmdbd_free_step_complete_msg(dbd_step_comp_msg_t *msg);
+extern void slurmdbd_free_step_start_msg(dbd_step_start_msg_t *msg);
+extern void slurmdbd_free_usage_msg(dbd_usage_msg_t *msg,
 				    slurmdbd_msg_type_t type);
 
 /*****************************************************************************\
  * Pack various SlurmDBD message structures into a buffer
 \*****************************************************************************/
-inline void slurmdbd_pack_acct_coord_msg(dbd_acct_coord_msg_t *msg,
+extern void slurmdbd_pack_acct_coord_msg(dbd_acct_coord_msg_t *msg,
 					 uint16_t rpc_version,
 					 Buf buffer);
-inline void slurmdbd_pack_cluster_cpus_msg(dbd_cluster_cpus_msg_t *msg,
+extern void slurmdbd_pack_cluster_cpus_msg(dbd_cluster_cpus_msg_t *msg,
 					   uint16_t rpc_version,
 					   Buf buffer);
-inline void slurmdbd_pack_rec_msg(dbd_rec_msg_t *msg,
+extern void slurmdbd_pack_rec_msg(dbd_rec_msg_t *msg,
 				  uint16_t rpc_version,
 				  slurmdbd_msg_type_t type, Buf buffer);
-inline void slurmdbd_pack_cond_msg(dbd_cond_msg_t *msg,
+extern void slurmdbd_pack_cond_msg(dbd_cond_msg_t *msg,
 				   uint16_t rpc_version,
 				   slurmdbd_msg_type_t type, Buf buffer);
-inline void slurmdbd_pack_get_jobs_msg(dbd_get_jobs_msg_t *msg,
-				       uint16_t rpc_version, Buf buffer);
-inline void slurmdbd_pack_init_msg(dbd_init_msg_t *msg, uint16_t rpc_version,
+extern void slurmdbd_pack_init_msg(dbd_init_msg_t *msg, uint16_t rpc_version,
 				   Buf buffer, char *auth_info);
-inline void slurmdbd_pack_fini_msg(dbd_fini_msg_t *msg,
+extern void slurmdbd_pack_fini_msg(dbd_fini_msg_t *msg,
 				   uint16_t rpc_version, Buf buffer);
-inline void slurmdbd_pack_job_complete_msg(dbd_job_comp_msg_t *msg,
+extern void slurmdbd_pack_job_complete_msg(dbd_job_comp_msg_t *msg,
 					   uint16_t rpc_version,
 					   Buf buffer);
-inline void slurmdbd_pack_job_start_msg(void *in,
+extern void slurmdbd_pack_job_start_msg(void *in,
 					uint16_t rpc_version,
 					Buf buffer);
-inline void slurmdbd_pack_id_rc_msg(void *in,
+extern void slurmdbd_pack_id_rc_msg(void *in,
 				    uint16_t rpc_version,
 				    Buf buffer);
-inline void slurmdbd_pack_job_suspend_msg(dbd_job_suspend_msg_t *msg,
+extern void slurmdbd_pack_job_suspend_msg(dbd_job_suspend_msg_t *msg,
 					  uint16_t rpc_version,
 					  Buf buffer);
-inline void slurmdbd_pack_list_msg(dbd_list_msg_t *msg,
+extern void slurmdbd_pack_list_msg(dbd_list_msg_t *msg,
 				   uint16_t rpc_version,
 				   slurmdbd_msg_type_t type,
 				   Buf buffer);
-inline void slurmdbd_pack_modify_msg(dbd_modify_msg_t *msg,
+extern void slurmdbd_pack_modify_msg(dbd_modify_msg_t *msg,
 				     uint16_t rpc_version,
 				     slurmdbd_msg_type_t type,
 				     Buf buffer);
-inline void slurmdbd_pack_node_state_msg(dbd_node_state_msg_t *msg,
+extern void slurmdbd_pack_node_state_msg(dbd_node_state_msg_t *msg,
 					 uint16_t rpc_version,
 					 Buf buffer);
-inline void slurmdbd_pack_rc_msg(dbd_rc_msg_t *msg,
+extern void slurmdbd_pack_rc_msg(dbd_rc_msg_t *msg,
 				 uint16_t rpc_version, Buf buffer);
-inline void slurmdbd_pack_register_ctld_msg(dbd_register_ctld_msg_t *msg,
+extern void slurmdbd_pack_register_ctld_msg(dbd_register_ctld_msg_t *msg,
 					    uint16_t rpc_version,
 					    Buf buffer);
-inline void slurmdbd_pack_roll_usage_msg(dbd_roll_usage_msg_t *msg,
+extern void slurmdbd_pack_roll_usage_msg(dbd_roll_usage_msg_t *msg,
 					 uint16_t rpc_version, Buf buffer);
-inline void slurmdbd_pack_step_complete_msg(dbd_step_comp_msg_t *msg,
+extern void slurmdbd_pack_step_complete_msg(dbd_step_comp_msg_t *msg,
 					    uint16_t rpc_version,
 					    Buf buffer);
-inline void slurmdbd_pack_step_start_msg(dbd_step_start_msg_t *msg,
+extern void slurmdbd_pack_step_start_msg(dbd_step_start_msg_t *msg,
 					 uint16_t rpc_version,
 					 Buf buffer);
-inline void slurmdbd_pack_usage_msg(dbd_usage_msg_t *msg,
+extern void slurmdbd_pack_usage_msg(dbd_usage_msg_t *msg,
 				    uint16_t rpc_version,
 				    slurmdbd_msg_type_t type,
 				    Buf buffer);
-inline void slurmdbd_pack_buffer(void *in,
+extern void slurmdbd_pack_buffer(void *in,
 				 uint16_t rpc_version,
 				 Buf buffer);
 
 /*****************************************************************************\
  * Unpack various SlurmDBD message structures from a buffer
 \*****************************************************************************/
-inline int slurmdbd_unpack_acct_coord_msg(dbd_acct_coord_msg_t **msg,
+extern int slurmdbd_unpack_acct_coord_msg(dbd_acct_coord_msg_t **msg,
 					  uint16_t rpc_version,
 					  Buf buffer);
-inline int slurmdbd_unpack_cluster_cpus_msg(dbd_cluster_cpus_msg_t **msg,
+extern int slurmdbd_unpack_cluster_cpus_msg(dbd_cluster_cpus_msg_t **msg,
 					    uint16_t rpc_version,
 					    Buf buffer);
-inline int slurmdbd_unpack_rec_msg(dbd_rec_msg_t **msg,
+extern int slurmdbd_unpack_rec_msg(dbd_rec_msg_t **msg,
 				   uint16_t rpc_version,
 				   slurmdbd_msg_type_t type,
 				   Buf buffer);
-inline int slurmdbd_unpack_cond_msg(dbd_cond_msg_t **msg,
+extern int slurmdbd_unpack_cond_msg(dbd_cond_msg_t **msg,
 				    uint16_t rpc_version,
 				    slurmdbd_msg_type_t type, Buf buffer);
-inline int slurmdbd_unpack_get_jobs_msg(dbd_get_jobs_msg_t **msg,
-					uint16_t rpc_version, Buf buffer);
-inline int slurmdbd_unpack_init_msg(dbd_init_msg_t **msg, Buf buffer,
+extern int slurmdbd_unpack_init_msg(dbd_init_msg_t **msg, Buf buffer,
 				    char *auth_info);
-inline int slurmdbd_unpack_fini_msg(dbd_fini_msg_t **msg,
+extern int slurmdbd_unpack_fini_msg(dbd_fini_msg_t **msg,
 				    uint16_t rpc_version, Buf buffer);
-inline int slurmdbd_unpack_job_complete_msg(dbd_job_comp_msg_t **msg,
+extern int slurmdbd_unpack_job_complete_msg(dbd_job_comp_msg_t **msg,
 					    uint16_t rpc_version,
 					    Buf buffer);
-inline int slurmdbd_unpack_job_start_msg(void **msg,
+extern int slurmdbd_unpack_job_start_msg(void **msg,
 					 uint16_t rpc_version,
 					 Buf buffer);
-inline int slurmdbd_unpack_id_rc_msg(void **msg,
+extern int slurmdbd_unpack_id_rc_msg(void **msg,
 				     uint16_t rpc_version,
 				     Buf buffer);
-inline int slurmdbd_unpack_job_suspend_msg(dbd_job_suspend_msg_t **msg,
+extern int slurmdbd_unpack_job_suspend_msg(dbd_job_suspend_msg_t **msg,
 					   uint16_t rpc_version,
 					   Buf buffer);
-inline int slurmdbd_unpack_list_msg(dbd_list_msg_t **msg,
+extern int slurmdbd_unpack_list_msg(dbd_list_msg_t **msg,
 				    uint16_t rpc_version,
 				    slurmdbd_msg_type_t type,
 				    Buf buffer);
-inline int slurmdbd_unpack_modify_msg(dbd_modify_msg_t **msg,
+extern int slurmdbd_unpack_modify_msg(dbd_modify_msg_t **msg,
 				      uint16_t rpc_version,
 				      slurmdbd_msg_type_t type,
 				      Buf buffer);
-inline int slurmdbd_unpack_node_state_msg(dbd_node_state_msg_t **msg,
+extern int slurmdbd_unpack_node_state_msg(dbd_node_state_msg_t **msg,
 					  uint16_t rpc_version,
 					  Buf buffer);
-inline int slurmdbd_unpack_rc_msg(dbd_rc_msg_t **msg,
+extern int slurmdbd_unpack_rc_msg(dbd_rc_msg_t **msg,
 				  uint16_t rpc_version, Buf buffer);
-inline int slurmdbd_unpack_register_ctld_msg(dbd_register_ctld_msg_t **msg,
+extern int slurmdbd_unpack_register_ctld_msg(dbd_register_ctld_msg_t **msg,
 					     uint16_t rpc_version,
 					     Buf buffer);
-inline int slurmdbd_unpack_roll_usage_msg(dbd_roll_usage_msg_t **msg,
+extern int slurmdbd_unpack_roll_usage_msg(dbd_roll_usage_msg_t **msg,
 					  uint16_t rpc_version,
 					  Buf buffer);
-inline int slurmdbd_unpack_step_complete_msg(dbd_step_comp_msg_t **msg,
+extern int slurmdbd_unpack_step_complete_msg(dbd_step_comp_msg_t **msg,
 					     uint16_t rpc_version,
 					     Buf buffer);
-inline int slurmdbd_unpack_step_start_msg(dbd_step_start_msg_t **msg,
+extern int slurmdbd_unpack_step_start_msg(dbd_step_start_msg_t **msg,
 					  uint16_t rpc_version,
 					  Buf buffer);
-inline int slurmdbd_unpack_usage_msg(dbd_usage_msg_t **msg,
+extern int slurmdbd_unpack_usage_msg(dbd_usage_msg_t **msg,
 				     uint16_t rpc_version,
 				     slurmdbd_msg_type_t type,
 				     Buf buffer);
-inline int slurmdbd_unpack_buffer(void **in,
+extern int slurmdbd_unpack_buffer(void **in,
 				  uint16_t rpc_version,
 				  Buf buffer);
 #endif	/* !_SLURMDBD_DEFS_H */
diff --git a/src/common/stepd_api.c b/src/common/stepd_api.c
index a0bc4e864..5339b9686 100644
--- a/src/common/stepd_api.c
+++ b/src/common/stepd_api.c
@@ -10,7 +10,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -108,7 +108,7 @@ _handle_stray_socket(const char *socket_name)
 
 	if ((uid = getuid()) != buf.st_uid) {
 		debug3("_handle_stray_socket: socket %s is not owned by uid %d",
-		       socket_name, uid);
+		       socket_name, (int)uid);
 		return;
 	}
 
@@ -752,7 +752,7 @@ rwfail:
 /*
  * Suspend execution of the job step.  Only root or SlurmUser is
  * authorized to use this call. Since this activity includes a 'sleep 1'
- * in the slurmstepd, initiate the the "suspend" in parallel
+ * in the slurmstepd, initiate the "suspend" in parallel.
  *
  * Returns SLURM_SUCCESS is successful.  On error returns SLURM_ERROR
  * and sets errno.
diff --git a/src/common/stepd_api.h b/src/common/stepd_api.h
index db682e0c3..62b2e281a 100644
--- a/src/common/stepd_api.h
+++ b/src/common/stepd_api.h
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/common/switch.c b/src/common/switch.c
index b4e2d559d..bbfc9c9ae 100644
--- a/src/common/switch.c
+++ b/src/common/switch.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/common/switch.h b/src/common/switch.h
index 1e530727d..9dcc78bcd 100644
--- a/src/common/switch.h
+++ b/src/common/switch.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/common/timers.c b/src/common/timers.c
index bf3e4d81c..c52ddd99a 100644
--- a/src/common/timers.c
+++ b/src/common/timers.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -38,29 +38,30 @@
 
 #include <stdio.h>
 #include <sys/time.h>
-#include <src/common/log.h>
+#include "src/common/log.h"
 
 /*
- * diff_tv_str - build a string showing the time difference between two times
+ * slurm_diff_tv_str - build a string showing the time difference between two
+ *		       times
  * IN tv1 - start of event
  * IN tv2 - end of event
  * OUT tv_str - place to put delta time in format "usec=%ld"
  * IN len_tv_str - size of tv_str in bytes
  * IN from - where the function was called form
  */
-inline void diff_tv_str(struct timeval *tv1,struct timeval *tv2,
-			char *tv_str, int len_tv_str, char *from,
-			long limit)
+extern void slurm_diff_tv_str(struct timeval *tv1,struct timeval *tv2,
+			      char *tv_str, int len_tv_str, char *from,
+			      long limit)
 {
 	long delta_t;
 
 	delta_t  = (tv2->tv_sec  - tv1->tv_sec) * 1000000;
 	delta_t +=  tv2->tv_usec - tv1->tv_usec;
 	snprintf(tv_str, len_tv_str, "usec=%ld", delta_t);
-	if(from) {
-		if(!limit)
+	if (from) {
+		if (!limit)
 			limit = 1000000;
-		if(delta_t > limit) {
+		if (delta_t > limit) {
 			verbose("Warning: Note very large processing "
 				"time from %s: %s",
 				from, tv_str);
@@ -69,12 +70,12 @@ inline void diff_tv_str(struct timeval *tv1,struct timeval *tv2,
 }
 
 /*
- * diff_tv - return the difference between two times
+ * slurm_diff_tv - return the difference between two times
  * IN tv1 - start of event
  * IN tv2 - end of event
  * RET time in micro-seconds
  */
-inline long diff_tv(struct timeval *tv1, struct timeval *tv2)
+extern long slurm_diff_tv(struct timeval *tv1, struct timeval *tv2)
 {
 	long delta_t;
 	delta_t  = (tv2->tv_sec  - tv1->tv_sec) * 1000000;
diff --git a/src/common/timers.h b/src/common/timers.h
index 817126ca9..dc1bc2500 100644
--- a/src/common/timers.h
+++ b/src/common/timers.h
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -44,16 +44,17 @@
 #define DEF_TIMERS	struct timeval tv1, tv2; char tv_str[20]
 #define START_TIMER	gettimeofday(&tv1, NULL)
 #define END_TIMER	gettimeofday(&tv2, NULL); \
-                	diff_tv_str(&tv1, &tv2, tv_str, 20, NULL, 0)
+                	slurm_diff_tv_str(&tv1, &tv2, tv_str, 20, NULL, 0)
 #define END_TIMER2(from) gettimeofday(&tv2, NULL); \
-	                 diff_tv_str(&tv1, &tv2, tv_str, 20, from, 0)
+	                 slurm_diff_tv_str(&tv1, &tv2, tv_str, 20, from, 0)
 #define END_TIMER3(from, limit) gettimeofday(&tv2, NULL); \
-	                        diff_tv_str(&tv1, &tv2, tv_str, 20, from, limit)
-#define DELTA_TIMER	diff_tv(&tv1, &tv2)
+	                        slurm_diff_tv_str(&tv1, &tv2, tv_str, 20, from, limit)
+#define DELTA_TIMER	slurm_diff_tv(&tv1, &tv2)
 #define TIME_STR 	tv_str
 
 /*
- * diff_tv_str - build a string showing the time difference between two times
+ * slurm_diff_tv_str - build a string showing the time difference between two
+ *		       times
  * IN tv1 - start of event
  * IN tv2 - end of event
  * OUT tv_str - place to put delta time in format "usec=%ld"
@@ -61,16 +62,16 @@
  * IN from - Name to be printed on long diffs
  * IN limit - limit to wait
  */
-extern inline void diff_tv_str(struct timeval *tv1,struct timeval *tv2,
-			       char *tv_str, int len_tv_str, char *from,
-			       long limit);
+extern void slurm_diff_tv_str(struct timeval *tv1,struct timeval *tv2,
+			      char *tv_str, int len_tv_str, char *from,
+			      long limit);
 
 /*
- * diff_tv - return the difference between two times
+ * slurm_diff_tv - return the difference between two times
  * IN tv1 - start of event
  * IN tv2 - end of event
  * RET time in micro-seconds
  */
-inline long diff_tv(struct timeval *tv1, struct timeval *tv2);
+extern long slurm_diff_tv(struct timeval *tv1, struct timeval *tv2);
 
 #endif
diff --git a/src/common/uid.c b/src/common/uid.c
index 874a14864..bb127c92e 100644
--- a/src/common/uid.c
+++ b/src/common/uid.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/common/uid.h b/src/common/uid.h
index c83f640ba..1a756474b 100644
--- a/src/common/uid.h
+++ b/src/common/uid.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/common/unsetenv.c b/src/common/unsetenv.c
index 1ae4a2a14..5f7598d27 100644
--- a/src/common/unsetenv.c
+++ b/src/common/unsetenv.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/common/unsetenv.h b/src/common/unsetenv.h
index a2e031b69..b3b5159a5 100644
--- a/src/common/unsetenv.h
+++ b/src/common/unsetenv.h
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/common/working_cluster.c b/src/common/working_cluster.c
index 1b4c67e97..12e3eed1e 100644
--- a/src/common/working_cluster.c
+++ b/src/common/working_cluster.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -42,6 +42,7 @@
 #include "src/common/slurmdb_defs.h"
 #include "src/common/xmalloc.h"
 #include "src/common/xstring.h"
+#include "src/common/node_select.h"
 
 /*
  * This functions technically should go in the slurmdb_defs.c, but
@@ -60,6 +61,33 @@ extern uint16_t slurmdb_setup_cluster_dims(void)
 		working_cluster_rec->dimensions : SYSTEM_DIMENSIONS;
 }
 
+extern int *slurmdb_setup_cluster_dim_size(void)
+{
+	if (working_cluster_rec)
+		return working_cluster_rec->dim_size;
+
+	return select_g_ba_get_dims();
+}
+
+extern bool is_cray_system(void)
+{
+	if (working_cluster_rec)
+		return working_cluster_rec->flags & CLUSTER_FLAG_CRAYXT;
+#ifdef HAVE_CRAY
+	return true;
+#endif
+	return false;
+}
+
+extern uint16_t slurmdb_setup_cluster_name_dims(void)
+{
+	if (is_cray_system())
+		return 1;	/* Cray uses 1-dimensional hostlists */
+	else if (working_cluster_rec)
+		return working_cluster_rec->dimensions;
+	return SYSTEM_DIMENSIONS;
+}
+
 extern uint32_t slurmdb_setup_cluster_flags(void)
 {
 	static uint32_t cluster_flags = NO_VAL;
diff --git a/src/common/working_cluster.h b/src/common/working_cluster.h
index 2bb2b0797..c747349b0 100644
--- a/src/common/working_cluster.h
+++ b/src/common/working_cluster.h
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -41,6 +41,17 @@
 /* Return the number of dimensions in the current working cluster */
 extern uint16_t slurmdb_setup_cluster_dims(void);
 
+/* Return the size of each dimensions in the current working cluster.
+ * Returns NULL if information not available or not applicable. */
+extern int * slurmdb_setup_cluster_dim_size(void);
+
+/* Return the number of digits required in the numeric suffix of hostnames
+ * in the current working cluster */
+extern uint16_t slurmdb_setup_cluster_name_dims(void);
+
+/* Return true if the working cluster is a native Cray system */
+extern bool is_cray_system(void);
+
 /* Return the architecture flags in the current working cluster */
 extern uint32_t slurmdb_setup_cluster_flags(void);
 
diff --git a/src/common/write_labelled_message.c b/src/common/write_labelled_message.c
index 3bec7938f..47ecc4ced 100644
--- a/src/common/write_labelled_message.c
+++ b/src/common/write_labelled_message.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/common/write_labelled_message.h b/src/common/write_labelled_message.h
index 52044b867..415955d13 100644
--- a/src/common/write_labelled_message.h
+++ b/src/common/write_labelled_message.h
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/common/xassert.c b/src/common/xassert.c
index c0897ce0b..9a001aa84 100644
--- a/src/common/xassert.c
+++ b/src/common/xassert.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/common/xassert.h b/src/common/xassert.h
index 2cf072f64..4fcbcba25 100644
--- a/src/common/xassert.h
+++ b/src/common/xassert.h
@@ -10,7 +10,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/common/xcgroup.c b/src/common/xcgroup.c
new file mode 100644
index 000000000..568696fc5
--- /dev/null
+++ b/src/common/xcgroup.c
@@ -0,0 +1,1114 @@
+/*****************************************************************************\
+ *  xcgroup.c - cgroup related primitives
+ *****************************************************************************
+ *  Copyright (C) 2009 CEA/DAM/DIF
+ *  Written by Matthieu Hautreux <matthieu.hautreux@cea.fr>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#if HAVE_CONFIG_H
+#   include "config.h"
+#endif
+
+#if HAVE_STDINT_H
+#  include <stdint.h>
+#endif
+#if HAVE_INTTYPES_H
+#  include <inttypes.h>
+#endif
+
+#include <sys/file.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <stdlib.h>
+#include <string.h>
+#include <strings.h>
+#include <dirent.h>
+
+#include "slurm/slurm.h"
+#include "slurm/slurm_errno.h"
+#include "src/common/log.h"
+#include "src/common/xmalloc.h"
+#include "src/common/xstring.h"
+#include "src/slurmd/slurmstepd/slurmstepd_job.h"
+
+#include "xcgroup.h"
+
+#ifndef PATH_MAX
+#define PATH_MAX 256
+#endif
+
+/* internal functions */
+size_t _file_getsize(int fd);
+int _file_read_uint32s(char* file_path, uint32_t** pvalues, int* pnb);
+int _file_write_uint32s(char* file_path, uint32_t* values, int nb);
+int _file_read_uint64s(char* file_path, uint64_t** pvalues, int* pnb);
+int _file_write_uint64s(char* file_path, uint64_t* values, int nb);
+int _file_read_content(char* file_path, char** content, size_t *csize);
+int _file_write_content(char* file_path, char* content, size_t csize);
+
+
+/*
+ * -----------------------------------------------------------------------------
+ * xcgroup_ns primitives xcgroup_ns primitives xcgroup_ns primitives
+ * xcgroup_ns primitives xcgroup_ns primitives xcgroup_ns primitives
+ * xcgroup_ns primitives xcgroup_ns primitives xcgroup_ns primitives
+ * -----------------------------------------------------------------------------
+ */
+
+/*
+ * create a cgroup namespace for tasks containment
+ *
+ * returned values:
+ *  - XCGROUP_ERROR
+ *  - XCGROUP_SUCCESS
+ */
+int xcgroup_ns_create(slurm_cgroup_conf_t *conf,
+		xcgroup_ns_t* cgns, char* mnt_point, char* mnt_args,
+		      char* subsys, char* notify_prog) {
+
+	cgns->mnt_point = xstrdup(conf->cgroup_mountpoint);
+	xstrcat(cgns->mnt_point, mnt_point);
+
+	cgns->mnt_args = xstrdup(mnt_args);
+	cgns->subsystems = xstrdup(subsys);
+	cgns->notify_prog = xstrdup(notify_prog);
+	return XCGROUP_SUCCESS;
+}
+
+/*
+ * destroy a cgroup namespace
+ *
+ * returned values:
+ *  - XCGROUP_ERROR
+ *  - XCGROUP_SUCCESS
+ */
+int xcgroup_ns_destroy(xcgroup_ns_t* cgns) {
+
+	xfree(cgns->mnt_point);
+	xfree(cgns->mnt_args);
+	xfree(cgns->subsystems);
+	xfree(cgns->notify_prog);
+
+	return XCGROUP_SUCCESS;
+}
+
+/*
+ * mount a cgroup namespace
+ *
+ * returned values:
+ *  - XCGROUP_ERROR
+ *  - XCGROUP_SUCCESS
+ */
+int xcgroup_ns_mount(xcgroup_ns_t* cgns)
+{
+	int fstatus;
+	char* mount_cmd_fmt;
+	char mount_cmd[1024];
+	char* mnt_args;
+
+	char* mnt_point;
+	char* p;
+
+	xcgroup_t cg;
+
+	mode_t cmask;
+	mode_t omask;
+
+	/* disable group/other write permission on the dirs we create */
+	cmask = S_IWGRP | S_IWOTH;
+	omask = umask(cmask);
+
+	fstatus = mkdir(cgns->mnt_point, 0755);
+	if (fstatus && errno != EEXIST) {
+		if (cgns->mnt_point[0] != '/') {
+			debug("unable to create cgroup ns directory '%s'"
+			      " : do not start with '/'", cgns->mnt_point);
+			umask(omask);
+			return XCGROUP_ERROR;
+		}
+		/* create the missing parent directories one by one */
+		mnt_point = xstrdup(cgns->mnt_point);
+		p = mnt_point;
+		while ((p = index(p+1, '/')) != NULL) {
+			*p = '\0';
+			/* test mkdir's return value, not errno alone :
+			 * a successful mkdir leaves errno with its
+			 * previous (stale) value */
+			if (mkdir(mnt_point, 0755) && errno != EEXIST) {
+				debug("unable to create cgroup ns required "
+				      "directory '%s'", mnt_point);
+				xfree(mnt_point);
+				umask(omask);
+				return XCGROUP_ERROR;
+			}
+			*p='/';
+		}
+		xfree(mnt_point);
+		fstatus = mkdir(cgns->mnt_point, 0755);
+	}
+
+	if (fstatus && errno != EEXIST) {
+		debug("unable to create cgroup ns directory '%s'"
+		      " : %m", cgns->mnt_point);
+		umask(omask);
+		return XCGROUP_ERROR;
+	}
+	umask(omask);
+
+	/* build the mount command line ; mount options must be a single
+	 * comma separated token ("subsys,args"), and a NULL mnt_args
+	 * must never reach the %s conversion (undefined behavior) */
+	mnt_args = cgns->mnt_args ? cgns->mnt_args : "";
+	if (*mnt_args == '\0')
+		mount_cmd_fmt = "/bin/mount -o %s%s -t cgroup none %s";
+	else
+		mount_cmd_fmt = "/bin/mount -o %s,%s -t cgroup none %s";
+
+	if (snprintf(mount_cmd, 1024, mount_cmd_fmt, cgns->subsystems,
+		      mnt_args, cgns->mnt_point) >= 1024) {
+		debug2("unable to build cgroup ns mount cmd line");
+		return XCGROUP_ERROR;
+	}
+	else
+		debug3("cgroup mount cmd line is '%s'", mount_cmd);
+
+	if (system(mount_cmd))
+		return XCGROUP_ERROR;
+	else {
+		/* we then set the release_agent if necessary */
+		if (cgns->notify_prog) {
+			if (xcgroup_create(cgns, &cg, "/", 0, 0) ==
+			     XCGROUP_ERROR)
+				return XCGROUP_SUCCESS;
+			xcgroup_set_param(&cg, "release_agent",
+					  cgns->notify_prog);
+			/* release the xstrdup'ed name/path of cg */
+			xcgroup_destroy(&cg);
+		}
+		return XCGROUP_SUCCESS;
+	}
+}
+
+/*
+ * umount a cgroup namespace
+ *
+ * returned values:
+ *  - XCGROUP_ERROR
+ *  - XCGROUP_SUCCESS
+ */
+int xcgroup_ns_umount(xcgroup_ns_t* cgns)
+{
+	char* umount_cmd_fmt;
+	char umount_cmd[1024];
+
+	umount_cmd_fmt = "/bin/umount %s";
+
+	/* refuse to go on if the mount point does not fit in the buffer */
+	if (snprintf(umount_cmd, 1024, umount_cmd_fmt,
+		      cgns->mnt_point) >= 1024) {
+		debug2("unable to build cgroup ns umount cmd line");
+		return XCGROUP_ERROR;
+	}
+	else
+		debug3("cgroup ns umount cmd line is '%s'", umount_cmd);
+
+	/* /bin/umount exits with 0 on success */
+	if (system(umount_cmd))
+		return XCGROUP_ERROR;
+	else
+		return XCGROUP_SUCCESS;
+}
+
+/*
+ * check that a cgroup namespace is ready to be used
+ *
+ * returned values:
+ *  - 0 : not available
+ *  - 1 : ready to be used
+ */
+int xcgroup_ns_is_available(xcgroup_ns_t* cgns)
+{
+	int fstatus;
+	char* value;
+	size_t s;
+	xcgroup_t cg;
+
+	/* NB: unlike most of this API, this returns 0/1, not XCGROUP_* */
+	if (xcgroup_create(cgns, &cg, "/", 0, 0) == XCGROUP_ERROR)
+		return 0;
+
+	/* being able to read release_agent in the root cgroup of the ns
+	 * is used as the evidence that the ns is mounted and usable */
+	if (xcgroup_get_param(&cg, "release_agent",
+			       &value, &s) != XCGROUP_SUCCESS)
+		fstatus = 0;
+	else {
+		xfree(value);
+		fstatus = 1;
+	}
+
+	xcgroup_destroy(&cg);
+
+	return fstatus;
+}
+
+/*
+ * Look for the cgroup in a specific cgroup namespace that owns
+ * a particular pid
+ *
+ * returned values:
+ *  - XCGROUP_ERROR
+ *  - XCGROUP_SUCCESS
+ */
+int xcgroup_ns_find_by_pid(xcgroup_ns_t* cgns, xcgroup_t* cg, pid_t pid)
+{
+	/* NOTE(review): initialized to SLURM_ERROR but every later
+	 * assignment uses XCGROUP_* values ; callers should only test
+	 * against XCGROUP_SUCCESS */
+	int fstatus = SLURM_ERROR;
+	char file_path[PATH_MAX];
+	char* buf;
+	size_t fsize;
+	char* p;
+	char* e;
+	char* entry;
+	char* subsys;
+	int found=0;
+
+	/* build pid cgroup meta filepath */
+	if (snprintf(file_path, PATH_MAX, "/proc/%u/cgroup",
+		      pid) >= PATH_MAX) {
+		debug2("unable to build cgroup meta filepath for pid=%u : %m",
+		       pid);
+		return XCGROUP_ERROR;
+	}
+
+	/*
+	 * read file content
+	 * multiple lines of the form :
+	 * num_mask:subsystems:relative_path
+	 */
+	fstatus = _file_read_content(file_path, &buf, &fsize);
+	if (fstatus == XCGROUP_SUCCESS) {
+		fstatus = XCGROUP_ERROR;
+		p = buf;
+		/* walk the lines until an entry whose subsystems list
+		 * matches the one of the namespace is found ; the buffer
+		 * is split in place by overwriting separators with NULs */
+		while (found==0 && (e = index(p, '\n')) != NULL) {
+			*e='\0';
+			/* get subsystems entry */
+			subsys = index(p, ':');
+			p = e + 1;
+			if (subsys == NULL)
+				continue;
+			subsys++;
+			/* get relative path entry */
+			entry = index(subsys, ':');
+			if (entry == NULL)
+				continue;
+			*entry='\0';
+			/* check subsystem versus ns one (exact string
+			 * compare : "cpu" does not match "cpu,cpuset") */
+			if (strcmp(cgns->subsystems, subsys) != 0) {
+				debug("skipping cgroup subsys %s(%s)",
+				      subsys, cgns->subsystems);
+				continue;
+			}
+			else
+				found=1;
+			entry++;
+			/* load the matching cgroup into 'cg' */
+			fstatus = xcgroup_load(cgns, cg, entry);
+			break;
+		}
+		xfree(buf);
+	}
+
+	return fstatus;
+}
+
+
+/*
+ * -----------------------------------------------------------------------------
+ * xcgroup primitives xcgroup primitives xcgroup primitives xcgroup primitives
+ * xcgroup primitives xcgroup primitives xcgroup primitives xcgroup primitives
+ * xcgroup primitives xcgroup primitives xcgroup primitives xcgroup primitives
+ * -----------------------------------------------------------------------------
+ */
+
+/* initialize 'cg' to describe cgroup 'uri' of namespace 'cgns' owned
+ * by uid/gid ; nothing is created on disk, see xcgroup_instanciate.
+ * cg->name and cg->path are xstrdup'ed : release them with
+ * xcgroup_destroy */
+int xcgroup_create(xcgroup_ns_t* cgns, xcgroup_t* cg,
+		   char* uri, uid_t uid,  gid_t gid)
+{
+	int fstatus = XCGROUP_ERROR;
+	char file_path[PATH_MAX];
+
+	/* build cgroup absolute path*/
+	if (snprintf(file_path, PATH_MAX, "%s%s", cgns->mnt_point,
+		      uri) >= PATH_MAX) {
+		debug2("unable to build cgroup '%s' absolute path in ns '%s' "
+		       ": %m", uri, cgns->subsystems);
+		return fstatus;
+	}
+
+	/* fill xcgroup structure */
+	cg->ns = cgns;
+	cg->name = xstrdup(uri);
+	cg->path = xstrdup(file_path);
+	cg->uid = uid;
+	cg->gid = gid;
+
+	return XCGROUP_SUCCESS;
+}
+
+int xcgroup_destroy(xcgroup_t* cg)
+{
+	/* release the strings owned by the structure and reset the
+	 * remaining fields to neutral values */
+	xfree(cg->name);
+	xfree(cg->path);
+	cg->ns = NULL;
+	cg->uid = -1;
+	cg->gid = -1;
+	return XCGROUP_SUCCESS;
+}
+
+/* take an exclusive advisory lock on the cgroup directory using
+ * flock ; the descriptor is kept in cg->fd and must be released with
+ * xcgroup_unlock */
+int xcgroup_lock(xcgroup_t* cg)
+{
+	int fstatus = XCGROUP_ERROR;
+
+	if ((cg->fd = open(cg->path, O_RDONLY)) < 0) {
+		debug2("xcgroup_lock: error from open of cgroup '%s' : %m",
+		       cg->path);
+		return fstatus;
+	}
+
+	if (flock(cg->fd,  LOCK_EX) < 0) {
+		debug2("xcgroup_lock: error locking cgroup '%s' : %m",
+		       cg->path);
+		/* NOTE(review): the descriptor is closed but cg->fd keeps
+		 * its stale value ; callers must not reuse it on failure */
+		close(cg->fd);
+	}
+	else
+		fstatus = XCGROUP_SUCCESS;
+
+	return fstatus;
+}
+
+/*
+ * release the lock taken by xcgroup_lock ; the file descriptor
+ * stored in cg->fd is closed in all cases
+ *
+ * returned values:
+ *  - XCGROUP_ERROR
+ *  - XCGROUP_SUCCESS
+ */
+int xcgroup_unlock(xcgroup_t* cg)
+{
+	int fstatus = XCGROUP_ERROR;
+
+	if (flock(cg->fd,  LOCK_UN) < 0) {
+		/* fixed copy/paste : this is unlock, not lock */
+		debug2("xcgroup_unlock: error unlocking cgroup '%s' : %m",
+		       cg->path);
+	}
+	else
+		fstatus = XCGROUP_SUCCESS;
+
+	close(cg->fd);
+	return fstatus;
+}
+
+/* create on disk the directory backing the cgroup described by 'cg'
+ * (see xcgroup_create), chown it to cg->uid/cg->gid and set its
+ * notify_on_release flag according to the namespace configuration */
+int xcgroup_instanciate(xcgroup_t* cg)
+{
+	int fstatus = XCGROUP_ERROR;
+	mode_t cmask;
+	mode_t omask;
+
+	xcgroup_ns_t* cgns;
+	char* file_path;
+	uid_t uid;
+	gid_t gid;
+	int create_only;
+	int notify;
+
+	/* init variables based on input cgroup */
+	cgns = cg->ns;
+	file_path = cg->path;
+	uid = cg->uid;
+	gid = cg->gid;
+	/* both flags are currently hardcoded : an already existing
+	 * directory is not an error (create_only=0) and notification
+	 * is requested when the ns has a notify prog (notify=1) */
+	create_only=0;
+	notify=1;
+
+	/* save current mask and apply working one */
+	cmask = S_IWGRP | S_IWOTH;
+	omask = umask(cmask);
+
+	/* build cgroup */
+ 	if (mkdir(file_path, 0755)) {
+		if (create_only || errno != EEXIST) {
+			debug2("unable to create cgroup '%s' : %m",
+			       file_path);
+			umask(omask);
+			return fstatus;
+		}
+	}
+	umask(omask);
+
+	/* change cgroup ownership as requested */
+	if (chown(file_path, uid, gid)) {
+		debug2("unable to chown %d:%d cgroup '%s' : %m",
+		       uid, gid, file_path);
+		return fstatus;
+	}
+
+	/* following operations failure might not result in a general
+	 * failure so set output status to success */
+	fstatus = XCGROUP_SUCCESS;
+
+	/* set notify on release flag */
+	if (notify && cgns->notify_prog)
+		xcgroup_set_params(cg, "notify_on_release=1");
+	else
+		xcgroup_set_params(cg, "notify_on_release=0");
+	return fstatus;
+}
+
+/* like xcgroup_create but for an already instanciated cgroup : the
+ * backing directory must exist and the owner uid/gid recorded in
+ * 'cg' are read from the directory itself */
+int xcgroup_load(xcgroup_ns_t* cgns, xcgroup_t* cg, char* uri)
+{
+	int fstatus = XCGROUP_ERROR;
+	char file_path[PATH_MAX];
+
+	struct stat buf;
+
+	/* build cgroup absolute path*/
+	if (snprintf(file_path, PATH_MAX, "%s%s", cgns->mnt_point,
+		      uri) >= PATH_MAX) {
+		debug2("unable to build cgroup '%s' absolute path in ns '%s' "
+		       ": %m", uri, cgns->subsystems);
+		return fstatus;
+	}
+
+	if (stat((const char*)file_path, &buf)) {
+		debug2("unable to get cgroup '%s' entry '%s' properties"
+		       ": %m", cgns->mnt_point, file_path);
+		return fstatus;
+	}
+
+	/* fill xcgroup structure */
+	cg->ns = cgns;
+	/* name and path are xstrdup'ed : release with xcgroup_destroy */
+	cg->name = xstrdup(uri);
+	cg->path = xstrdup(file_path);
+	cg->uid = buf.st_uid;
+	cg->gid = buf.st_gid;
+
+	return XCGROUP_SUCCESS;
+}
+
+/* remove the directory backing the cgroup (rmdir requires it to be
+ * empty, i.e. no tasks and no sub-cgroups) */
+int xcgroup_delete(xcgroup_t* cg)
+{
+	return rmdir(cg->path) ? XCGROUP_ERROR : XCGROUP_SUCCESS;
+}
+
+/* attach the 'npids' pids of 'pids' to the cgroup by writing them
+ * to its 'tasks' file */
+int xcgroup_add_pids(xcgroup_t* cg, pid_t* pids, int npids)
+{
+	int fstatus = XCGROUP_ERROR;
+	char* cpath = cg->path;
+	char file_path[PATH_MAX];
+
+	if (snprintf(file_path, PATH_MAX, "%s/tasks",
+		      cpath) >= PATH_MAX) {
+		debug2("unable to add pids to '%s' : %m", cpath);
+		return fstatus;
+	}
+
+	/* NOTE(review): the cast assumes pid_t is a 32 bit type —
+	 * confirm on every supported target */
+	fstatus = _file_write_uint32s(file_path, (uint32_t*)pids, npids);
+	if (fstatus != XCGROUP_SUCCESS)
+		debug2("unable to add pids to '%s'", cpath);
+	return fstatus;
+}
+
+/*
+ * extract the list of pids attached to the cgroup (its 'tasks' file)
+ *
+ * on success, *pids must be freed using xfree
+ *
+ * returned values:
+ *  - XCGROUP_ERROR
+ *  - XCGROUP_SUCCESS
+ */
+int
+xcgroup_get_pids(xcgroup_t* cg, pid_t **pids, int *npids)
+{
+	int fstatus = XCGROUP_ERROR;
+	char* cpath = cg->path;
+	char file_path[PATH_MAX];
+
+	/* use XCGROUP_ERROR rather than SLURM_ERROR for consistency
+	 * with every other return value of this API */
+	if (pids == NULL || npids == NULL)
+		return XCGROUP_ERROR;
+
+	if (snprintf(file_path, PATH_MAX, "%s/tasks",
+		      cpath) >= PATH_MAX) {
+		debug2("unable to get pids of '%s' : %m", cpath);
+		return fstatus;
+	}
+
+	fstatus = _file_read_uint32s(file_path, (uint32_t**)pids, npids);
+	if (fstatus != XCGROUP_SUCCESS)
+		debug2("unable to get pids of '%s'", cpath);
+	return fstatus;
+}
+
+/* apply a space separated list of "param=value" pairs to the cgroup :
+ * each 'param' names a file of the cgroup directory that is written
+ * with 'value'.
+ * the returned status reflects the last pair processed */
+int xcgroup_set_params(xcgroup_t* cg, char* parameters)
+{
+	int fstatus = XCGROUP_ERROR;
+	char file_path[PATH_MAX];
+	char* cpath = cg->path;
+	char* params;
+	char* value;
+	char* p;
+	char* next;
+
+	/* work on a private copy as the parsing writes NULs in place */
+	params = (char*) xstrdup(parameters);
+
+	p = params;
+	while (p != NULL && *p != '\0') {
+		/* isolate the current pair, skipping repeated spaces */
+		next = index(p, ' ');
+		if (next) {
+			*next='\0';
+			next++;
+			while (*next == ' ')
+				next++;
+		}
+		/* split the pair at the '=' sign */
+		value = index(p, '=');
+		if (value != NULL) {
+			*value='\0';
+			value++;
+			if (snprintf(file_path, PATH_MAX, "%s/%s", cpath, p)
+			     >= PATH_MAX) {
+				debug2("unable to build filepath for '%s' and"
+				       " parameter '%s' : %m", cpath, p);
+				goto next_loop;
+			}
+			fstatus = _file_write_content(file_path, value,
+						      strlen(value));
+			if (fstatus != XCGROUP_SUCCESS)
+				debug2("unable to set parameter '%s' to "
+				       "'%s' for '%s'", p, value, cpath);
+			else
+				debug3("parameter '%s' set to '%s' for '%s'",
+				       p, value, cpath);
+		}
+		else
+			debug2("bad parameters format for entry '%s'", p);
+	next_loop:
+		p = next;
+	}
+
+	xfree(params);
+	return fstatus;
+}
+
+/* write 'content' into the cgroup file named 'param' */
+int xcgroup_set_param(xcgroup_t* cg, char* param, char* content)
+{
+	char file_path[PATH_MAX];
+	char* cpath = cg->path;
+	int fstatus;
+
+	/* build the absolute path of the parameter file */
+	if (snprintf(file_path, PATH_MAX, "%s/%s", cpath, param) >= PATH_MAX) {
+		debug2("unable to build filepath for '%s' and"
+		       " parameter '%s' : %m", cpath, param);
+		return XCGROUP_ERROR;
+	}
+
+	fstatus = _file_write_content(file_path, content, strlen(content));
+	if (fstatus == XCGROUP_SUCCESS)
+		debug3("parameter '%s' set to '%s' for '%s'",
+		       param, content, cpath);
+	else
+		debug2("unable to set parameter '%s' to "
+		       "'%s' for '%s'", param, content, cpath);
+
+	return fstatus;
+}
+
+/* read the content of the cgroup file named 'param' ; on success
+ * *content must be freed using xfree and *csize holds its size */
+int xcgroup_get_param(xcgroup_t* cg, char* param, char **content, size_t *csize)
+{
+	int fstatus = XCGROUP_ERROR;
+	char file_path[PATH_MAX];
+	char* cpath = cg->path;
+
+	if (snprintf(file_path, PATH_MAX, "%s/%s", cpath, param) >= PATH_MAX) {
+		debug2("unable to build filepath for '%s' and"
+		       " parameter '%s' : %m", cpath, param);
+	}
+	else {
+		fstatus = _file_read_content(file_path, content, csize);
+		if (fstatus != XCGROUP_SUCCESS)
+			debug2("unable to get parameter '%s' for '%s'",
+			       param, cpath);
+	}
+	return fstatus;
+}
+
+/* write 'value' as a decimal string into the cgroup file 'param' */
+int xcgroup_set_uint32_param(xcgroup_t* cg, char* param, uint32_t value)
+{
+	int fstatus = XCGROUP_ERROR;
+	char file_path[PATH_MAX];
+	char* cpath = cg->path;
+
+	if (snprintf(file_path, PATH_MAX, "%s/%s", cpath, param) >= PATH_MAX) {
+		debug2("unable to build filepath for '%s' and"
+		       " parameter '%s' : %m", cpath, param);
+		return fstatus;
+	}
+
+	fstatus = _file_write_uint32s(file_path, &value, 1);
+	if (fstatus != XCGROUP_SUCCESS)
+		debug2("unable to set parameter '%s' to "
+		       "'%u' for '%s'", param, value, cpath);
+	else
+		debug3("parameter '%s' set to '%u' for '%s'",
+		       param, value, cpath);
+
+	return fstatus;
+}
+
+/*
+ * read the first uint32 value stored in the cgroup file 'param'
+ * into *value
+ *
+ * returned values:
+ *  - XCGROUP_ERROR
+ *  - XCGROUP_SUCCESS
+ */
+int xcgroup_get_uint32_param(xcgroup_t* cg, char* param, uint32_t* value)
+{
+	int fstatus = XCGROUP_ERROR;
+	char file_path[PATH_MAX];
+	char* cpath = cg->path;
+	uint32_t* values;
+	int vnb;
+
+	if (snprintf(file_path, PATH_MAX, "%s/%s", cpath, param) >= PATH_MAX) {
+		debug2("unable to build filepath for '%s' and"
+		       " parameter '%s' : %m", cpath, param);
+	}
+	else {
+		fstatus = _file_read_uint32s(file_path, &values, &vnb);
+		if (fstatus != XCGROUP_SUCCESS)
+			debug2("unable to get parameter '%s' for '%s'",
+			       param, cpath);
+		else if (vnb < 1) {
+			debug2("empty parameter '%s' for '%s'",
+			       param, cpath);
+			/* an empty file must not be reported as success :
+			 * *value has not been set */
+			fstatus = XCGROUP_ERROR;
+		}
+		else {
+			/* keep the first value, drop the others */
+			*value = values[0];
+			xfree(values);
+			fstatus = XCGROUP_SUCCESS;
+		}
+	}
+	return fstatus;
+}
+
+/* write 'value' as a decimal string into the cgroup file 'param' */
+int xcgroup_set_uint64_param(xcgroup_t* cg, char* param, uint64_t value)
+{
+	int fstatus = XCGROUP_ERROR;
+	char file_path[PATH_MAX];
+	char* cpath = cg->path;
+
+	if (snprintf(file_path, PATH_MAX, "%s/%s", cpath, param) >= PATH_MAX) {
+		debug2("unable to build filepath for '%s' and"
+		       " parameter '%s' : %m", cpath, param);
+		return fstatus;
+	}
+
+	fstatus = _file_write_uint64s(file_path, &value, 1);
+	if (fstatus != XCGROUP_SUCCESS)
+		debug2("unable to set parameter '%s' to "
+		       "'%"PRIu64"' for '%s'", param, value, cpath);
+	else
+		debug3("parameter '%s' set to '%"PRIu64"' for '%s'",
+		       param, value, cpath);
+
+	return fstatus;
+}
+
+/*
+ * read the first uint64 value stored in the cgroup file 'param'
+ * into *value
+ *
+ * returned values:
+ *  - XCGROUP_ERROR
+ *  - XCGROUP_SUCCESS
+ */
+int xcgroup_get_uint64_param(xcgroup_t* cg, char* param, uint64_t* value)
+{
+	int fstatus = XCGROUP_ERROR;
+	char file_path[PATH_MAX];
+	char* cpath = cg->path;
+	uint64_t* values;
+	int vnb;
+
+	if (snprintf(file_path, PATH_MAX, "%s/%s", cpath, param) >= PATH_MAX) {
+		debug2("unable to build filepath for '%s' and"
+		       " parameter '%s' : %m", cpath, param);
+	}
+	else {
+		fstatus = _file_read_uint64s(file_path, &values, &vnb);
+		if (fstatus != XCGROUP_SUCCESS)
+			debug2("unable to get parameter '%s' for '%s'",
+			       param, cpath);
+		else if (vnb < 1) {
+			debug2("empty parameter '%s' for '%s'",
+			       param, cpath);
+			/* an empty file must not be reported as success :
+			 * *value has not been set */
+			fstatus = XCGROUP_ERROR;
+		}
+		else {
+			/* keep the first value, drop the others */
+			*value = values[0];
+			xfree(values);
+			fstatus = XCGROUP_SUCCESS;
+		}
+	}
+	return fstatus;
+}
+
+
+/*
+ * -----------------------------------------------------------------------------
+ * internal primitives internal primitives internal primitives
+ * internal primitives internal primitives internal primitives
+ * internal primitives internal primitives internal primitives
+ * -----------------------------------------------------------------------------
+ */
+
+/* return the number of bytes readable from 'fd', or (size_t)-1 on
+ * error ; the current file offset is saved and restored.
+ * the size is computed by reading the file one byte at a time,
+ * presumably because the pseudo files exposed by cgroup/proc report
+ * a null size through stat(2) — TODO confirm */
+size_t _file_getsize(int fd)
+{
+	int rc;
+	size_t fsize;
+	off_t offset;
+	char c;
+
+	/* store current position and rewind */
+	offset = lseek(fd, 0, SEEK_CUR);
+	if (offset < 0)
+		return -1;
+	lseek(fd, 0, SEEK_SET);
+
+	/* get file size */
+	fsize=0;
+	do {
+		rc = read(fd, (void*)&c, 1);
+		if (rc > 0)
+			fsize++;
+	}
+	while ((rc < 0 && errno == EINTR) || rc > 0);
+
+	/* restore position */
+	lseek(fd, offset, SEEK_SET);
+
+	/* callers compare against -1, which matches (size_t)-1 after
+	 * the implicit conversion */
+	if (rc < 0)
+		return -1;
+	else
+		return fsize;
+}
+
+/*
+ * write each of the 'nb' values of 'values' to the file 'file_path',
+ * one NUL terminated decimal string per write call
+ *
+ * returned values:
+ *  - XCGROUP_ERROR
+ *  - XCGROUP_SUCCESS
+ */
+int _file_write_uint64s(char* file_path, uint64_t* values, int nb)
+{
+	int fstatus;
+	int rc;
+	int fd;
+	char tstr[256];
+	uint64_t value;
+	int i;
+
+	/* open file for writing */
+	fd = open(file_path, O_WRONLY, 0700);
+	if (fd < 0) {
+		debug2("unable to open '%s' for writing : %m", file_path);
+		return XCGROUP_ERROR;
+	}
+
+	/* add one value per line */
+	fstatus = XCGROUP_SUCCESS;
+	for (i=0 ; i < nb ; i++) {
+
+		value = values[i];
+
+		rc = snprintf(tstr, sizeof(tstr), "%"PRIu64"", value);
+		if (rc < 0) {
+			debug2("unable to build %"PRIu64" string value, "
+			       "skipping", value);
+			fstatus = XCGROUP_ERROR;
+			continue;
+		}
+
+		/* retry only when interrupted by a signal : the former
+		 * 'rc != 0' condition could loop (rewriting the value)
+		 * after a successful write if errno was left set to
+		 * EINTR by an earlier call ; the uint32 variant already
+		 * used 'rc < 0' */
+		do {
+			rc = write(fd, tstr, strlen(tstr)+1);
+		}
+		while (rc < 0 && errno == EINTR);
+		if (rc < 1) {
+			debug2("unable to add value '%s' to file '%s' : %m",
+			       tstr, file_path);
+			if ( errno != ESRCH )
+				fstatus = XCGROUP_ERROR;
+		}
+
+	}
+
+	/* close file */
+	close(fd);
+
+	return fstatus;
+}
+
+/* read the list of newline separated uint64 values stored in
+ * 'file_path' ; on success *pvalues (to be xfree'd) and *pnb
+ * describe the parsed array (*pvalues is NULL when *pnb is 0) */
+int _file_read_uint64s(char* file_path, uint64_t** pvalues, int* pnb)
+{
+	int rc;
+	int fd;
+
+	size_t fsize;
+	char* buf;
+	char* p;
+
+	uint64_t* pa=NULL;
+	int i;
+
+	/* check input pointers */
+	if (pvalues == NULL || pnb == NULL)
+		return XCGROUP_ERROR;
+
+	/* open file for reading */
+	fd = open(file_path, O_RDONLY, 0700);
+	if (fd < 0) {
+		debug2("unable to open '%s' for reading : %m", file_path);
+		return XCGROUP_ERROR;
+	}
+
+	/* get file size */
+	fsize=_file_getsize(fd);
+	if (fsize == -1) {
+		close(fd);
+		return XCGROUP_ERROR;
+	}
+
+	/* read file contents
+	 * NOTE(review): a single read(2) call is assumed to return the
+	 * whole content — confirm for large files */
+	buf = (char*) xmalloc((fsize+1)*sizeof(char));
+	do {
+		rc = read(fd, buf, fsize);
+	}
+	while (rc < 0 && errno == EINTR);
+	close(fd);
+	buf[fsize]='\0';
+
+	/* count values (splitted by \n) */
+	i=0;
+	if (rc > 0) {
+		p = buf;
+		while (index(p, '\n') != NULL) {
+			i++;
+			p = index(p, '\n') + 1;
+		}
+	}
+
+	/* build uint64_t list */
+	if (i > 0) {
+		pa = (uint64_t*) xmalloc(sizeof(uint64_t) * i);
+		p = buf;
+		i = 0;
+		while (index(p, '\n') != NULL) {
+			/* parse through an unsigned long long, the
+			 * portable sscanf conversion for 64 bit values */
+			long long unsigned int ll_tmp;
+			sscanf(p, "%llu", &ll_tmp);
+			pa[i++] = ll_tmp;
+			p = index(p, '\n') + 1;
+		}
+	}
+
+	/* free buffer */
+	xfree(buf);
+
+	/* set output values */
+	*pvalues = pa;
+	*pnb = i;
+
+	return XCGROUP_SUCCESS;
+}
+
+/* write each of the 'nb' values of 'values' to the file 'file_path',
+ * one NUL terminated decimal string per write call */
+int _file_write_uint32s(char* file_path, uint32_t* values, int nb)
+{
+	int fstatus;
+	int rc;
+	int fd;
+	char tstr[256];
+	uint32_t value;
+	int i;
+
+	/* open file for writing */
+	fd = open(file_path, O_WRONLY, 0700);
+	if (fd < 0) {
+		debug2("unable to open '%s' for writing : %m", file_path);
+		return XCGROUP_ERROR;
+	}
+
+	/* add one value per line */
+	fstatus = XCGROUP_SUCCESS;
+	for (i=0 ; i < nb ; i++) {
+
+		value = values[i];
+
+		rc = snprintf(tstr, sizeof(tstr), "%u", value);
+		if (rc < 0) {
+			debug2("unable to build %u string value, skipping",
+			       value);
+			fstatus = XCGROUP_ERROR;
+			continue;
+		}
+
+		/* retry only when interrupted by a signal */
+		do {
+			rc = write(fd, tstr, strlen(tstr)+1);
+		}
+		while (rc < 0 && errno == EINTR);
+		if (rc < 1) {
+			debug2("unable to add value '%s' to file '%s' : %m",
+			       tstr, file_path);
+			/* ESRCH is not treated as a failure — presumably a
+			 * pid written to a tasks file whose task already
+			 * exited ; confirm */
+			if ( errno != ESRCH )
+				fstatus = XCGROUP_ERROR;
+		}
+
+	}
+
+	/* close file */
+	close(fd);
+
+	return fstatus;
+}
+
+/* read the list of newline separated uint32 values stored in
+ * 'file_path' ; on success *pvalues (to be xfree'd) and *pnb
+ * describe the parsed array (*pvalues is NULL when *pnb is 0) */
+int _file_read_uint32s(char* file_path, uint32_t** pvalues, int* pnb)
+{
+	int rc;
+	int fd;
+
+	size_t fsize;
+	char* buf;
+	char* p;
+
+	uint32_t* pa=NULL;
+	int i;
+
+	/* check input pointers */
+	if (pvalues == NULL || pnb == NULL)
+		return XCGROUP_ERROR;
+
+	/* open file for reading */
+	fd = open(file_path, O_RDONLY, 0700);
+	if (fd < 0) {
+		debug2("unable to open '%s' for reading : %m", file_path);
+		return XCGROUP_ERROR;
+	}
+
+	/* get file size */
+	fsize=_file_getsize(fd);
+	if (fsize == -1) {
+		close(fd);
+		return XCGROUP_ERROR;
+	}
+
+	/* read file contents
+	 * NOTE(review): a single read(2) call is assumed to return the
+	 * whole content — confirm for large files */
+	buf = (char*) xmalloc((fsize+1)*sizeof(char));
+	do {
+		rc = read(fd, buf, fsize);
+	}
+	while (rc < 0 && errno == EINTR);
+	close(fd);
+	buf[fsize]='\0';
+
+	/* count values (splitted by \n) */
+	i=0;
+	if (rc > 0) {
+		p = buf;
+		while (index(p, '\n') != NULL) {
+			i++;
+			p = index(p, '\n') + 1;
+		}
+	}
+
+	/* build uint32_t list */
+	if (i > 0) {
+		pa = (uint32_t*) xmalloc(sizeof(uint32_t) * i);
+		p = buf;
+		i = 0;
+		while (index(p, '\n') != NULL) {
+			sscanf(p, "%u", pa+i);
+			p = index(p, '\n') + 1;
+			i++;
+		}
+	}
+
+	/* free buffer */
+	xfree(buf);
+
+	/* set output values */
+	*pvalues = pa;
+	*pnb = i;
+
+	return XCGROUP_SUCCESS;
+}
+
+/*
+ * write 'csize' bytes of 'content' to the file 'file_path'
+ *
+ * returned values:
+ *  - XCGROUP_ERROR
+ *  - XCGROUP_SUCCESS
+ */
+int _file_write_content(char* file_path, char* content, size_t csize)
+{
+	int fstatus;
+	int rc;
+	int fd;
+
+	/* open file for writing */
+	fd = open(file_path, O_WRONLY, 0700);
+	if (fd < 0) {
+		debug2("unable to open '%s' for writing : %m", file_path);
+		return XCGROUP_ERROR;
+	}
+
+	/* write content, retrying only when interrupted by a signal :
+	 * the former 'rc != 0' condition could loop after a successful
+	 * write if errno was left set to EINTR by an earlier call */
+	do {
+		rc = write(fd, content, csize);
+	}
+	while (rc < 0 && errno == EINTR);
+
+	/* check written size ; test the error case in the signed domain
+	 * first : a negative rc implicitly converted to size_t would
+	 * wrongly pass the former 'rc < csize' test and report success */
+	if (rc < 0 || (size_t)rc < csize) {
+		debug2("unable to write %lu bytes to file '%s' : %m",
+		       (long unsigned int) csize, file_path);
+		fstatus = XCGROUP_ERROR;
+	}
+	else
+		fstatus = XCGROUP_SUCCESS;
+
+	/* close file */
+	close(fd);
+
+	return fstatus;
+}
+
+/*
+ * read the whole content of 'file_path' into a freshly xmalloc'ed
+ * NUL terminated buffer
+ *
+ * on success, *content must be freed using xfree and *csize holds
+ * the number of bytes read
+ *
+ * returned values:
+ *  - XCGROUP_ERROR
+ *  - XCGROUP_SUCCESS
+ */
+int _file_read_content(char* file_path, char** content, size_t *csize)
+{
+	int fstatus;
+	int rc;
+	int fd;
+
+	size_t fsize;
+	char* buf;
+
+	fstatus = XCGROUP_ERROR;
+
+	/* check input pointers */
+	if (content == NULL || csize == NULL)
+		return fstatus;
+
+	/* open file for reading */
+	fd = open(file_path, O_RDONLY, 0700);
+	if (fd < 0) {
+		debug2("unable to open '%s' for reading : %m", file_path);
+		return fstatus;
+	}
+
+	/* get file size */
+	fsize=_file_getsize(fd);
+	if (fsize == -1) {
+		close(fd);
+		return fstatus;
+	}
+
+	/* read file contents */
+	buf = (char*) xmalloc((fsize+1)*sizeof(char));
+	buf[fsize]='\0';
+	do {
+		rc = read(fd, buf, fsize);
+	}
+	while (rc < 0 && errno == EINTR);
+
+	/* set output values */
+	if (rc >= 0) {
+		*content = buf;
+		*csize = rc;
+		fstatus = XCGROUP_SUCCESS;
+	}
+	else {
+		/* do not leak the buffer when the read failed */
+		xfree(buf);
+	}
+
+	/* close file */
+	close(fd);
+
+	return fstatus;
+}
diff --git a/src/common/xcgroup.h b/src/common/xcgroup.h
new file mode 100644
index 000000000..7b83d2788
--- /dev/null
+++ b/src/common/xcgroup.h
@@ -0,0 +1,317 @@
+/*****************************************************************************\
+ *  xcgroup.h - cgroup related primitives headers
+ *****************************************************************************
+ *  Copyright (C) 2009 CEA/DAM/DIF
+ *  Written by Matthieu Hautreux <matthieu.hautreux@cea.fr>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#if HAVE_CONFIG_H
+#   include "config.h"
+#endif
+
+#ifndef _XCGROUP_H_
+#define _XCGROUP_H_
+
+#include <sys/types.h>
+#include <dirent.h>
+#include "xcgroup_read_config.h"
+
+#define XCGROUP_ERROR    1
+#define XCGROUP_SUCCESS  0
+
+typedef struct xcgroup_ns {
+
+	char* mnt_point;  /* mount point to use for the associated cgroup */
+	char* mnt_args;   /* mount args to use in addition */
+
+	char* subsystems; /* list of comma separated subsystems to provide */
+
+	char* notify_prog;/* prog to use with notify on release action */
+
+} xcgroup_ns_t;
+
+typedef struct xcgroup {
+
+	xcgroup_ns_t* ns; /* xcgroup namespace of this xcgroup */
+	char* name;       /* name of the xcgroup relative to the ns */
+	char* path;       /* absolute path of the xcgroup in the ns */
+	uid_t uid;        /* uid of the owner */
+	gid_t gid;        /* gid of the owner */
+	int   fd;         /* used for locking */
+
+} xcgroup_t;
+
+/*
+ * create a cgroup namespace for tasks containment
+ *
+ * returned values:
+ *  - XCGROUP_ERROR
+ *  - XCGROUP_SUCCESS
+ */
+int xcgroup_ns_create(slurm_cgroup_conf_t *conf,
+		      xcgroup_ns_t* cgns,
+		      char* mnt_point,char* mnt_args,
+		      char* subsys,char* notify_prog);
+
+/*
+ * destroy a cgroup namespace
+ *
+ * returned values:
+ *  - XCGROUP_ERROR
+ *  - XCGROUP_SUCCESS
+ */
+int xcgroup_ns_destroy(xcgroup_ns_t* cgns);
+
+/*
+ * mount a cgroup namespace
+ *
+ * returned values:
+ *  - XCGROUP_ERROR
+ *  - XCGROUP_SUCCESS
+ */
+int xcgroup_ns_mount(xcgroup_ns_t* cgns);
+
+/*
+ * umount a cgroup namespace
+ *
+ * returned values:
+ *  - XCGROUP_ERROR
+ *  - XCGROUP_SUCCESS
+ */
+int xcgroup_ns_umount(xcgroup_ns_t* cgns);
+
+/*
+ * test if cgroup namespace is currently available (mounted)
+ *
+ * returned values:
+ *  - 0 if not available
+ *  - 1 if available
+ */
+int xcgroup_ns_is_available(xcgroup_ns_t* cgns);
+
+/*
+ * load a cgroup from a cgroup namespace given a pid
+ *
+ * returned values:
+ *  - XCGROUP_ERROR
+ *  - XCGROUP_SUCCESS
+ */
+int xcgroup_ns_find_by_pid(xcgroup_ns_t* cgns,xcgroup_t* cg,pid_t pid);
+
+/*
+ * create a cgroup structure
+ *
+ * returned values:
+ *  - XCGROUP_ERROR
+ *  - XCGROUP_SUCCESS
+ */
+int xcgroup_create(xcgroup_ns_t* cgns,xcgroup_t* cg,
+		   char* uri,uid_t uid, gid_t gid);
+
+/*
+ * destroy a cgroup internal structure
+ *
+ * returned values:
+ *  - XCGROUP_ERROR
+ *  - XCGROUP_SUCCESS
+ */
+int xcgroup_destroy(xcgroup_t* cg);
+
+/*
+ * lock a cgroup (must have been instanciated)
+ * (system level using flock)
+ *
+ * returned values:
+ *  - XCGROUP_ERROR
+ *  - XCGROUP_SUCCESS
+ */
+int xcgroup_lock(xcgroup_t* cg);
+
+/*
+ * unlock a cgroup
+ *
+ * returned values:
+ *  - XCGROUP_ERROR
+ *  - XCGROUP_SUCCESS
+ */
+int xcgroup_unlock(xcgroup_t* cg);
+
+/*
+ * instanciate a cgroup in a cgroup namespace (mkdir)
+ *
+ * returned values:
+ *  - XCGROUP_ERROR
+ *  - XCGROUP_SUCCESS
+ */
+int xcgroup_instanciate(xcgroup_t* cg);
+
+/*
+ * load a cgroup from a cgroup namespace into a structure
+ *
+ * returned values:
+ *  - XCGROUP_ERROR
+ *  - XCGROUP_SUCCESS
+ */
+int xcgroup_load(xcgroup_ns_t* cgns,xcgroup_t* cg,
+		 char* uri);
+
+/*
+ * delete a cgroup instance in a cgroup namespace (rmdir)
+ *
+ * returned values:
+ *  - XCGROUP_ERROR
+ *  - XCGROUP_SUCCESS
+ */
+int xcgroup_delete(xcgroup_t* cg);
+
+/*
+ * add a list of pids to a cgroup
+ *
+ * returned values:
+ *  - XCGROUP_ERROR
+ *  - XCGROUP_SUCCESS
+ */
+int xcgroup_add_pids(xcgroup_t* cg,pid_t* pids,int npids);
+
+/*
+ * extract the pids list of a cgroup
+ *
+ * pids array must be freed using xfree(...)
+ *
+ * returned values:
+ *  - XCGROUP_ERROR
+ *  - XCGROUP_SUCCESS
+ */
+int xcgroup_get_pids(xcgroup_t* cg, pid_t **pids, int *npids);
+
+/*
+ * set cgroup parameters using string of the form :
+ * parameteres="param=value[ param=value]*"
+ *
+ * param must correspond to a file of the cgroup that
+ * will be written with the value content
+ *
+ * i.e. xcgroup_set_params(&cg,"memory.swappiness=10");
+ *
+ * returned values:
+ *  - XCGROUP_ERROR
+ *  - XCGROUP_SUCCESS
+ */
+int xcgroup_set_params(xcgroup_t* cg,char* parameters);
+
+/*
+ * set a cgroup parameter
+ *
+ * param must correspond to a file of the cgroup that
+ * will be written with the value content
+ *
+ * e.g. xcgroup_set_param(&cg,"memory.swappiness","10");
+ *
+ * returned values:
+ *  - XCGROUP_ERROR
+ *  - XCGROUP_SUCCESS
+ */
+int xcgroup_set_param(xcgroup_t* cg,char* parameter,char* content);
+
+/*
+ * get a cgroup parameter
+ *
+ * param must correspond to a file of the cgroup that
+ * will be read for its content
+ *
+ * i.e. xcgroup_get_param(&cg,"memory.swappiness",&value,&size);
+ *
+ * on success, content must be free using xfree
+ *
+ * returned values:
+ *  - XCGROUP_ERROR
+ *  - XCGROUP_SUCCESS
+ */
+int xcgroup_get_param(xcgroup_t* cg,char* param,char **content,size_t *csize);
+
+/*
+ * set a cgroup parameter in the form of a uint32_t
+ *
+ * param must correspond to a file of the cgroup that
+ * will be written with the uint32_t value
+ *
+ * e.g. xcgroup_set_uint32_param(&cg,"memory.swappiness",value);
+ *
+ * returned values:
+ *  - XCGROUP_ERROR
+ *  - XCGROUP_SUCCESS
+ */
+int xcgroup_set_uint32_param(xcgroup_t* cg,char* parameter,uint32_t value);
+
+/*
+ * get a cgroup parameter in the form of a uint32_t
+ *
+ * param must correspond to a file of the cgroup that
+ * will be read for its content
+ *
+ * i.e. xcgroup_get_uint32_param(&cg,"memory.swappiness",&value);
+ *
+ * returned values:
+ *  - XCGROUP_ERROR
+ *  - XCGROUP_SUCCESS
+ */
+int xcgroup_get_uint32_param(xcgroup_t* cg,char* param,uint32_t* value);
+
+/*
+ * set a cgroup parameter in the form of a uint64_t
+ *
+ * param must correspond to a file of the cgroup that
+ * will be written with the uint64_t value
+ *
+ * e.g. xcgroup_set_uint64_param(&cg,"memory.swappiness",value);
+ *
+ * returned values:
+ *  - XCGROUP_ERROR
+ *  - XCGROUP_SUCCESS
+ */
+int xcgroup_set_uint64_param(xcgroup_t* cg,char* parameter,uint64_t value);
+
+/*
+ * get a cgroup parameter in the form of a uint64_t
+ *
+ * param must correspond to a file of the cgroup that
+ * will be read for its content
+ *
+ * i.e. xcgroup_get_uint64_param(&cg,"memory.swappiness",&value);
+ *
+ * returned values:
+ *  - XCGROUP_ERROR
+ *  - XCGROUP_SUCCESS
+ */
+int xcgroup_get_uint64_param(xcgroup_t* cg,char* param,uint64_t* value);
+
+#endif
diff --git a/src/plugins/proctrack/cgroup/read_config.c b/src/common/xcgroup_read_config.c
similarity index 56%
rename from src/plugins/proctrack/cgroup/read_config.c
rename to src/common/xcgroup_read_config.c
index 21fcc82ae..48fdf7bbb 100644
--- a/src/plugins/proctrack/cgroup/read_config.c
+++ b/src/common/xcgroup_read_config.c
@@ -1,11 +1,11 @@
 /*****************************************************************************\
- *  read_config.c - functions for reading cgroup.conf
+ *  xcgroup_read_config.c - functions for reading cgroup.conf
  *****************************************************************************
  *  Copyright (C) 2009 CEA/DAM/DIF
  *  Written by Matthieu Hautreux <matthieu.hautreux@cea.fr>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -41,8 +41,8 @@
 #include <sys/stat.h>
 #include <sys/types.h>
 #include <unistd.h>
-#include <slurm/slurm_errno.h>
 
+#include "slurm/slurm_errno.h"
 #include "src/common/log.h"
 #include "src/common/list.h"
 #include "src/common/macros.h"
@@ -52,65 +52,103 @@
 #include "src/common/xmalloc.h"
 #include "src/common/xstring.h"
 
-#include "read_config.h"
+#include "xcgroup_read_config.h"
+
+#define DEFAULT_CGROUP_BASEDIR "/cgroup"
 
 slurm_cgroup_conf_t *slurm_cgroup_conf = NULL;
 
 /* Local functions */
-static void _clear_slurm_cgroup_conf(void);
+static void _clear_slurm_cgroup_conf(slurm_cgroup_conf_t *slurm_cgroup_conf);
 static char * _get_conf_path(void);
 
 /*
  * free_slurm_cgroup_conf - free storage associated with the global variable
  *	slurm_cgroup_conf
  */
-extern void free_slurm_cgroup_conf(void)
+extern void free_slurm_cgroup_conf(slurm_cgroup_conf_t *slurm_cgroup_conf)
 {
-	_clear_slurm_cgroup_conf();
-	xfree(slurm_cgroup_conf);
+	_clear_slurm_cgroup_conf(slurm_cgroup_conf);
 }
 
-static void _clear_slurm_cgroup_conf(void)
+static void _clear_slurm_cgroup_conf(slurm_cgroup_conf_t *slurm_cgroup_conf)
 {
 	if (slurm_cgroup_conf) {
 		slurm_cgroup_conf->cgroup_automount = false ;
-		xfree(slurm_cgroup_conf->cgroup_mount_opts);
+		xfree(slurm_cgroup_conf->cgroup_mountpoint);
+		xfree(slurm_cgroup_conf->cgroup_subsystems);
 		xfree(slurm_cgroup_conf->cgroup_release_agent);
-		xfree(slurm_cgroup_conf->user_cgroup_params);
-		xfree(slurm_cgroup_conf->job_cgroup_params);
-		xfree(slurm_cgroup_conf->jobstep_cgroup_params);
+		xfree(slurm_cgroup_conf->cgroup_prepend);
+		slurm_cgroup_conf->constrain_cores = false ;
+		slurm_cgroup_conf->task_affinity = false ;
 		slurm_cgroup_conf->constrain_ram_space = false ;
 		slurm_cgroup_conf->allowed_ram_space = 100 ;
+		slurm_cgroup_conf->max_ram_percent = 100 ;
+		slurm_cgroup_conf->min_ram_space = XCGROUP_DEFAULT_MIN_RAM;
 		slurm_cgroup_conf->constrain_swap_space = false ;
 		slurm_cgroup_conf->allowed_swap_space = 0 ;
-		slurm_cgroup_conf->constrain_cores = false ;
+		slurm_cgroup_conf->max_swap_percent = 100 ;
 		slurm_cgroup_conf->memlimit_enforcement = 0 ;
 		slurm_cgroup_conf->memlimit_threshold = 100 ;
+		slurm_cgroup_conf->constrain_devices = false ;
+		xfree(slurm_cgroup_conf->allowed_devices_file);
 	}
 }
 
+/*
+ *   Parse a floating point value in s and return in val
+ *    Return -1 on error and leave *val unchanged.
+ */
+static int str_to_float (char *s, float *val)
+{
+	float f;
+	char *p;
+
+	errno = 0;
+	f = strtof (s, &p);
+
+	if ((*p != '\0') || (errno != 0))
+		return (-1);
+
+	*val = f;
+	return (0);
+}
+
+static void conf_get_float (s_p_hashtbl_t *t, char *name, float *fp)
+{
+	char *str;
+	if (!s_p_get_string(&str, name, t))
+		return;
+	if (str_to_float (str, fp) < 0)
+		fatal ("cgroup.conf: Invalid value '%s' for %s", str, name);
+}
+
 /*
  * read_slurm_cgroup_conf - load the Slurm cgroup configuration from the
- *	cgroup.conf file. Store result into global variable slurm_cgroup_conf.
- *	This function can be called more than once.
+ *	cgroup.conf file.
  * RET SLURM_SUCCESS if no error, otherwise an error code
  */
-extern int read_slurm_cgroup_conf(void)
+extern int read_slurm_cgroup_conf(slurm_cgroup_conf_t *slurm_cgroup_conf)
 {
 	s_p_options_t options[] = {
 		{"CgroupAutomount", S_P_BOOLEAN},
-		{"CgroupMountOptions", S_P_STRING},
-		{"CgroupReleaseAgent", S_P_STRING},
-		{"UserCgroupParams", S_P_STRING},
-		{"JobCgroupParams", S_P_STRING},
-		{"JobStepCgroupParams", S_P_STRING},
+		{"CgroupMountpoint", S_P_STRING},
+		{"CgroupSubsystems", S_P_STRING},
+		{"CgroupReleaseAgentDir", S_P_STRING},
+		{"ConstrainCores", S_P_BOOLEAN},
+		{"TaskAffinity", S_P_BOOLEAN},
 		{"ConstrainRAMSpace", S_P_BOOLEAN},
-		{"AllowedRAMSpace", S_P_UINT32},
+		{"AllowedRAMSpace", S_P_STRING},
+		{"MaxRAMPercent", S_P_STRING},
+		{"MinRAMSpace", S_P_UINT32},
 		{"ConstrainSwapSpace", S_P_BOOLEAN},
-		{"AllowedSwapSpace", S_P_UINT32},
+		{"AllowedSwapSpace", S_P_STRING},
+		{"MaxSwapPercent", S_P_STRING},
 		{"ConstrainCores", S_P_BOOLEAN},
 		{"MemoryLimitEnforcement", S_P_BOOLEAN},
-		{"MemoryLimitThreshold", S_P_UINT32},
+		{"MemoryLimitThreshold", S_P_STRING},
+		{"ConstrainDevices", S_P_BOOLEAN},
+		{"AllowedDevicesFile", S_P_STRING},
 		{NULL} };
 	s_p_hashtbl_t *tbl = NULL;
 	char *conf_path = NULL;
@@ -118,11 +156,11 @@ extern int read_slurm_cgroup_conf(void)
 
 	/* Set initial values */
 	if (slurm_cgroup_conf == NULL) {
-		slurm_cgroup_conf = xmalloc(sizeof(slurm_cgroup_conf_t));
+		return SLURM_ERROR;
 	}
-	_clear_slurm_cgroup_conf();
+	_clear_slurm_cgroup_conf(slurm_cgroup_conf);
 
-	/* Get the slurmdbd.conf path and validate the file */
+	/* Get the cgroup.conf path and validate the file */
 	conf_path = _get_conf_path();
 	if ((conf_path == NULL) || (stat(conf_path, &buf) == -1)) {
 		info("No cgroup.conf file (%s)", conf_path);
@@ -130,57 +168,92 @@ extern int read_slurm_cgroup_conf(void)
 		debug("Reading cgroup.conf file %s", conf_path);
 
 		tbl = s_p_hashtbl_create(options);
-		if (s_p_parse_file(tbl, NULL, conf_path) == SLURM_ERROR) {
+		if (s_p_parse_file(tbl, NULL, conf_path, false) ==
+		    SLURM_ERROR) {
 			fatal("Could not open/read/parse cgroup.conf file %s",
 			      conf_path);
 		}
 
 		/* cgroup initialisation parameters */
 		if (!s_p_get_boolean(&slurm_cgroup_conf->cgroup_automount,
-				     "CgroupAutomount", tbl))
+			        "CgroupAutomount", tbl))
 			slurm_cgroup_conf->cgroup_automount = false;
-		s_p_get_string(&slurm_cgroup_conf->cgroup_mount_opts,
-			       "CgroupMountOptions", tbl);
+
+		if (!s_p_get_string(&slurm_cgroup_conf->cgroup_mountpoint,
+				"CgroupMountpoint", tbl))
+			slurm_cgroup_conf->cgroup_mountpoint =
+				xstrdup(DEFAULT_CGROUP_BASEDIR);
+
+		s_p_get_string(&slurm_cgroup_conf->cgroup_subsystems,
+			       "CgroupSubsystems", tbl);
 		s_p_get_string(&slurm_cgroup_conf->cgroup_release_agent,
-			       "CgroupReleaseAgent", tbl);
-		if ( ! slurm_cgroup_conf->cgroup_release_agent )
+			       "CgroupReleaseAgentDir", tbl);
+		if (! slurm_cgroup_conf->cgroup_release_agent)
 			slurm_cgroup_conf->cgroup_release_agent =
-				xstrdup("memory,cpuset");
+				xstrdup("/etc/slurm/cgroup");
+
+		/* cgroup prepend directory */
+#ifndef MULTIPLE_SLURMD
+		slurm_cgroup_conf->cgroup_prepend = xstrdup("/slurm");
+#else
+		slurm_cgroup_conf->cgroup_prepend = xstrdup("/slurm_%n");
+#endif
 
-		/* job and jobsteps cgroup parameters */
-		s_p_get_string(&slurm_cgroup_conf->user_cgroup_params,
-			       "UserCgroupParams", tbl);
-		s_p_get_string(&slurm_cgroup_conf->job_cgroup_params,
-			       "JobCgroupParams", tbl);
-		s_p_get_string(&slurm_cgroup_conf->jobstep_cgroup_params,
-			       "JobStepCgroupParams", tbl);
+		/* Cores constraints related conf items */
+		if (!s_p_get_boolean(&slurm_cgroup_conf->constrain_cores,
+				     "ConstrainCores", tbl))
+			slurm_cgroup_conf->constrain_cores = false;
+		if (!s_p_get_boolean(&slurm_cgroup_conf->task_affinity,
+				     "TaskAffinity", tbl))
+			slurm_cgroup_conf->task_affinity = false;
 
 		/* RAM and Swap constraints related conf items */
 		if (!s_p_get_boolean(&slurm_cgroup_conf->constrain_ram_space,
 				     "ConstrainRAMSpace", tbl))
 			slurm_cgroup_conf->constrain_ram_space = false;
-		if (!s_p_get_uint32(&slurm_cgroup_conf->allowed_ram_space,
-				    "AllowedRAMSpace", tbl))
-			slurm_cgroup_conf->allowed_ram_space = 100;
+
+		conf_get_float (tbl,
+				"AllowedRAMSpace",
+				&slurm_cgroup_conf->allowed_ram_space);
+
+		conf_get_float (tbl,
+				"MaxRAMPercent",
+				&slurm_cgroup_conf->max_ram_percent);
+
 		if (!s_p_get_boolean(&slurm_cgroup_conf->constrain_swap_space,
 				     "ConstrainSwapSpace", tbl))
 			slurm_cgroup_conf->constrain_swap_space = false;
-		if (!s_p_get_uint32(&slurm_cgroup_conf->allowed_swap_space,
-				    "AllowedSwapSpace", tbl))
-			slurm_cgroup_conf->allowed_swap_space = 0;
 
-		/* Cores constraints */
-		if (!s_p_get_boolean(&slurm_cgroup_conf->constrain_cores,
-				     "ConstrainCores", tbl))
-			slurm_cgroup_conf->constrain_cores = false;
+		conf_get_float (tbl,
+				"AllowedSwapSpace",
+				&slurm_cgroup_conf->allowed_swap_space);
+
+		conf_get_float (tbl,
+				"MaxSwapPercent",
+				&slurm_cgroup_conf->max_swap_percent);
+
+		s_p_get_uint32 (&slurm_cgroup_conf->min_ram_space,
+		                "MinRAMSpace", tbl);
 
 		/* Memory limits */
 		if (!s_p_get_boolean(&slurm_cgroup_conf->memlimit_enforcement,
 				     "MemoryLimitEnforcement", tbl))
 			slurm_cgroup_conf->memlimit_enforcement = false;
-		if (!s_p_get_uint32(&slurm_cgroup_conf->memlimit_threshold,
-				    "MemoryLimitThreshold", tbl))
-			slurm_cgroup_conf->memlimit_threshold = 0;
+
+		conf_get_float (tbl,
+				"MemoryLimitThreshold",
+				&slurm_cgroup_conf->memlimit_threshold);
+
+		/* Devices constraint related conf items */
+		if (!s_p_get_boolean(&slurm_cgroup_conf->constrain_devices,
+				     "ConstrainDevices", tbl))
+			slurm_cgroup_conf->constrain_devices = false;
+
+		s_p_get_string(&slurm_cgroup_conf->allowed_devices_file,
+                               "AllowedDevicesFile", tbl);
+                if (! slurm_cgroup_conf->allowed_devices_file)
+                        slurm_cgroup_conf->allowed_devices_file =
+                                xstrdup("/etc/slurm/cgroup_allowed_devices_file.conf");
 
 		s_p_hashtbl_destroy(tbl);
 	}
diff --git a/src/plugins/proctrack/cgroup/read_config.h b/src/common/xcgroup_read_config.h
similarity index 70%
rename from src/plugins/proctrack/cgroup/read_config.h
rename to src/common/xcgroup_read_config.h
index b1619ae95..a3d0738fa 100644
--- a/src/plugins/proctrack/cgroup/read_config.h
+++ b/src/common/xcgroup_read_config.h
@@ -1,34 +1,34 @@
 /*****************************************************************************\
- *  read_config.h - functions and declarations for reading cgroup.conf
+ *  xcgroup_read_config.h - functions and declarations for reading cgroup.conf
  *****************************************************************************
  *  Copyright (C) 2009 CEA/DAM/DIF
  *  Written by Matthieu Hautreux <matthieu.hautreux@cea.fr>
- *  
+ *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
- *  
+ *
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
  *  Software Foundation; either version 2 of the License, or (at your option)
  *  any later version.
  *
- *  In addition, as a special exception, the copyright holders give permission 
- *  to link the code of portions of this program with the OpenSSL library under 
- *  certain conditions as described in each individual source file, and 
- *  distribute linked combinations including the two. You must obey the GNU 
- *  General Public License in all respects for all of the code used other than 
- *  OpenSSL. If you modify file(s) with this exception, you may extend this 
- *  exception to your version of the file(s), but you are not obligated to do 
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
  *  so. If you do not wish to do so, delete this exception statement from your
- *  version.  If you delete this exception statement from all source files in 
+ *  version.  If you delete this exception statement from all source files in
  *  the program, then also delete it here.
- *  
+ *
  *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
  *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
  *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
  *  details.
- *  
+ *
  *  You should have received a copy of the GNU General Public License along
  *  with SLURM; if not, write to the Free Software Foundation, Inc.,
  *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
@@ -50,45 +50,55 @@
 #include <stdint.h>
 #endif  /* HAVE_CONFIG_H */
 
+/*  Default lower bound on memory limit in MB. This is required so we
+ *   don't immediately kill slurmstepd on mem cgroup creation if
+ *   an administrator or user sets and absurdly low mem limit.
+ */
+#define XCGROUP_DEFAULT_MIN_RAM 30
 
 /* Slurm cgroup plugins configuration parameters */
 typedef struct slurm_cgroup_conf {
 
 	bool      cgroup_automount;
-	char *    cgroup_mount_opts;
+	char *    cgroup_mountpoint;
+	char *    cgroup_subsystems;
 	char *    cgroup_release_agent;
 
-	char *    user_cgroup_params;
-	char *    job_cgroup_params;
-	char *    jobstep_cgroup_params;
+	char *    cgroup_prepend;
+
+	bool      constrain_cores;
+	bool      task_affinity;
 
 	bool      constrain_ram_space;
-	uint32_t  allowed_ram_space;
+	float     allowed_ram_space;
+	float     max_ram_percent;       /* Upper bound on memory as % of RAM*/
 
-	bool      constrain_swap_space;
-	uint32_t  allowed_swap_space;
+	uint32_t  min_ram_space;         /* Lower bound on memory limit (MB) */
 
-	bool      constrain_cores;
+	bool      constrain_swap_space;
+	float     allowed_swap_space;
+	float     max_swap_percent;      /* Upper bound on swap as % of RAM  */
 
 	bool      memlimit_enforcement;
-	uint32_t  memlimit_threshold;
+	float     memlimit_threshold;
 
-} slurm_cgroup_conf_t;
+	bool      constrain_devices;
+	char *    allowed_devices_file;
 
-extern slurm_cgroup_conf_t *slurm_cgroup_conf;
+} slurm_cgroup_conf_t;
 
 /*
- * read_slurm_cgroup_conf - load the Slurm cgroup configuration from the 
- *      cgroup.conf  file. 
+ * read_slurm_cgroup_conf - load the Slurm cgroup configuration from the
+ *      cgroup.conf  file.
  *      This function can be called more than once if so desired.
  * RET SLURM_SUCCESS if no error, otherwise an error code
  */
-extern int read_slurm_cgroup_conf(void);
+extern int read_slurm_cgroup_conf(slurm_cgroup_conf_t *slurm_cgroup_conf);
 
 /*
- * free_slurm_cgroup_conf - free storage associated with the global variable 
+ * free_slurm_cgroup_conf - free storage associated with the global variable
  *	slurm_cgroup_conf
  */
-extern void free_slurm_cgroup_conf(void);
+extern void free_slurm_cgroup_conf(slurm_cgroup_conf_t *slurm_cgroup_conf);
 
 #endif /* !_DBD_READ_CONFIG_H */
diff --git a/src/common/xcpuinfo.c b/src/common/xcpuinfo.c
new file mode 100644
index 000000000..19e7431b5
--- /dev/null
+++ b/src/common/xcpuinfo.c
@@ -0,0 +1,976 @@
+/*****************************************************************************\
+ *  xcpuinfo.c - cpuinfo related primitives
+ *****************************************************************************
+ *  Copyright (C) 2009 CEA/DAM/DIF
+ *  Written by Matthieu Hautreux <matthieu.hautreux@cea.fr>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#if HAVE_CONFIG_H
+#   include "config.h"
+#endif
+
+#if HAVE_STDINT_H
+#  include <stdint.h>
+#endif
+#if HAVE_INTTYPES_H
+#  include <inttypes.h>
+#endif
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <ctype.h>
+#include <fcntl.h>
+#include <stdlib.h>
+#include <string.h>
+#include <strings.h>
+
+#include "slurm/slurm.h"
+#include "slurm/slurm_errno.h"
+#include "src/common/log.h"
+#include "src/common/xmalloc.h"
+#include "src/common/xstring.h"
+#include "src/slurmd/slurmd/get_mach_stat.h"
+
+#include "xcpuinfo.h"
+
+static char* _cpuinfo_path = "/proc/cpuinfo";
+
+static int _compute_block_map(uint16_t numproc,
+			      uint16_t **block_map, uint16_t **block_map_inv);
+static int _chk_cpuinfo_str(char *buffer, char *keyword, char **valptr);
+static int _chk_cpuinfo_uint32(char *buffer, char *keyword, uint32_t *val);
+
+static int _ranges_conv(char* lrange, char** prange, int mode);
+static int _range_to_map(char* range, uint16_t *map, uint16_t map_size,
+			 int add_threads);
+static int _map_to_range(uint16_t *map, uint16_t map_size, char** prange);
+
+bool     initialized = false;
+uint16_t procs, sockets, cores, threads=1;
+uint16_t block_map_size;
+uint16_t *block_map, *block_map_inv;
+
+/*
+ * get_procs - Return the count of procs on this system
+ * Input: procs - buffer for the CPU count
+ * Output: procs - filled in with CPU count, "1" if error
+ *         return code - 0 if no error, otherwise errno
+ */
+extern int
+get_procs(uint16_t *procs)
+{
+#ifdef LPAR_INFO_FORMAT2
+	/* AIX 5.3 only */
+	lpar_info_format2_t info;
+
+	*procs = 1;
+	if (lpar_get_info(LPAR_INFO_FORMAT2, &info, sizeof(info)) != 0) {
+		error("lpar_get_info() failed");
+		return EINVAL;
+	}
+
+	*procs = (uint16_t) info.online_vcpus;
+#else /* !LPAR_INFO_FORMAT2 */
+
+#  ifdef _SC_NPROCESSORS_ONLN
+	int my_proc_tally;
+
+	*procs = 1;
+	my_proc_tally = (int)sysconf(_SC_NPROCESSORS_ONLN);
+	if (my_proc_tally < 1) {
+		error ("get_procs: error running sysconf(_SC_NPROCESSORS_ONLN)");
+		return EINVAL;
+	}
+
+	*procs = (uint16_t) my_proc_tally;
+#  else
+#    ifdef HAVE_SYSCTLBYNAME
+	int ncpu;
+	size_t len = sizeof(ncpu);
+
+	*procs = 1;
+	if (sysctlbyname("hw.ncpus", &ncpu, &len, NULL, 0) == -1) {
+		error("get_procs: error running sysctl(HW_NCPU)");
+		return EINVAL;
+	}
+	*procs = (uint16_t) ncpu;
+#    else /* !HAVE_SYSCTLBYNAME */
+	*procs = 1;
+#    endif /* HAVE_SYSCTLBYNAME */
+#  endif /* _SC_NPROCESSORS_ONLN */
+#endif /* LPAR_INFO_FORMAT2 */
+
+	return 0;
+}
+
+/*
+ * get_cpuinfo - Return detailed cpuinfo on this system
+ * Input:  numproc - number of processors on the system
+ * Output: p_sockets - number of physical processor sockets
+ *         p_cores - total number of physical CPU cores
+ *         p_threads - total number of hardware execution threads
+ *         block_map - asbtract->physical block distribution map
+ *         block_map_inv - physical->abstract block distribution map (inverse)
+ *         return code - 0 if no error, otherwise errno
+ * NOTE: User must xfree block_map and block_map_inv
+ */
+typedef struct cpuinfo {
+	uint16_t seen;
+	uint32_t cpuid;
+	uint32_t physid;
+	uint16_t physcnt;
+	uint32_t coreid;
+	uint16_t corecnt;
+	uint16_t siblings;
+	uint16_t cores;
+} cpuinfo_t;
+static cpuinfo_t *cpuinfo = NULL; /* array of CPU information for get_cpuinfo */
+				  /* Note: file static for qsort/_compare_cpus*/
+extern int
+get_cpuinfo(uint16_t numproc,
+		uint16_t *p_sockets, uint16_t *p_cores, uint16_t *p_threads,
+		uint16_t *block_map_size,
+		uint16_t **block_map, uint16_t **block_map_inv)
+{
+	int retval;
+	uint16_t numcpu	   = 0;		/* number of cpus seen */
+	uint16_t numphys   = 0;		/* number of unique "physical id"s */
+	uint16_t numcores  = 0;		/* number of unique "cores id"s */
+
+	uint16_t maxsibs   = 0;		/* maximum value of "siblings" */
+	uint16_t maxcores  = 0;		/* maximum value of "cores" */
+	uint16_t minsibs   = 0xffff;	/* minimum value of "siblings" */
+	uint16_t mincores  = 0xffff;	/* minimum value of "cores" */
+
+	uint32_t maxcpuid  = 0;		/* maximum CPU ID ("processor") */
+	uint32_t maxphysid = 0;		/* maximum "physical id" */
+	uint32_t maxcoreid = 0;		/* maximum "core id" */
+	uint32_t mincpuid  = 0xffffffff;/* minimum CPU ID ("processor") */
+	uint32_t minphysid = 0xffffffff;/* minimum "physical id" */
+	uint32_t mincoreid = 0xffffffff;/* minimum "core id" */
+	int i;
+#if defined (__sun)
+#if defined (_LP64)
+	int64_t curcpu, val, sockets, cores, threads;
+#else
+	int32_t curcpu, val, sockets, cores, threads;
+#endif
+	int32_t chip_id, core_id, ncore_per_chip, ncpu_per_chip;
+#else
+	FILE *cpu_info_file;
+	char buffer[128];
+	uint16_t curcpu, sockets, cores, threads;
+#endif
+
+	*p_sockets = numproc;		/* initially all single core/thread */
+	*p_cores   = 1;
+	*p_threads = 1;
+	*block_map_size = 0;
+	*block_map      = NULL;
+	*block_map_inv  = NULL;
+
+#if defined (__sun)
+	kstat_ctl_t   *kc;
+	kstat_t       *ksp;
+	kstat_named_t *knp;
+
+	kc = kstat_open();
+	if (kc == NULL) {
+		error ("get speed: kstat error %d", errno);
+		return errno;
+	}
+#else
+	cpu_info_file = fopen(_cpuinfo_path, "r");
+	if (cpu_info_file == NULL) {
+		error ("get_cpuinfo: error %d opening %s",
+			errno, _cpuinfo_path);
+		return errno;
+	}
+#endif
+
+	/* Note: assumes all processor IDs are within [0:numproc-1] */
+	/*       treats physical/core IDs as tokens, not indices */
+	if (cpuinfo)
+		memset(cpuinfo, 0, numproc * sizeof(cpuinfo_t));
+	else
+		cpuinfo = xmalloc(numproc * sizeof(cpuinfo_t));
+
+#if defined (__sun)
+	ksp = kstat_lookup(kc, "cpu_info", -1, NULL);
+	for (; ksp != NULL; ksp = ksp->ks_next) {
+		if (strcmp(ksp->ks_module, "cpu_info"))
+			continue;
+
+		numcpu++;
+		kstat_read(kc, ksp, NULL);
+
+		knp = kstat_data_lookup(ksp, "chip_id");
+		chip_id = knp->value.l;
+		knp = kstat_data_lookup(ksp, "core_id");
+		core_id = knp->value.l;
+		knp = kstat_data_lookup(ksp, "ncore_per_chip");
+		ncore_per_chip = knp->value.l;
+		knp = kstat_data_lookup(ksp, "ncpu_per_chip");
+		ncpu_per_chip = knp->value.l;
+
+		if (chip_id >= numproc) {
+			debug("cpuid is %ld (> %d), ignored", curcpu, numproc);
+			continue;
+		}
+
+		cpuinfo[chip_id].seen = 1;
+		cpuinfo[chip_id].cpuid = chip_id;
+
+		maxcpuid = MAX(maxcpuid, chip_id);
+		mincpuid = MIN(mincpuid, chip_id);
+
+		for (i = 0; i < numproc; i++) {
+			if ((cpuinfo[i].coreid == core_id) &&
+			    (cpuinfo[i].corecnt))
+				break;
+		}
+
+		if (i == numproc) {
+			numcores++;
+		} else {
+			cpuinfo[i].corecnt++;
+		}
+
+		if (chip_id < numproc) {
+			cpuinfo[chip_id].corecnt++;
+			cpuinfo[chip_id].coreid = core_id;
+		}
+
+		maxcoreid = MAX(maxcoreid, core_id);
+		mincoreid = MIN(mincoreid, core_id);
+
+		if (ncore_per_chip > numproc) {
+			debug("cores is %u (> %d), ignored",
+			      ncore_per_chip, numproc);
+				continue;
+		}
+
+		if (chip_id < numproc)
+			cpuinfo[chip_id].cores = ncore_per_chip;
+
+		maxcores = MAX(maxcores, ncore_per_chip);
+		mincores = MIN(mincores, ncore_per_chip);
+	}
+#else
+
+	curcpu = 0;
+	while (fgets(buffer, sizeof(buffer), cpu_info_file) != NULL) {
+		uint32_t val;
+		if (_chk_cpuinfo_uint32(buffer, "processor", &val)) {
+			numcpu++;
+			curcpu = val;
+		    	if (val >= numproc) {	/* out of bounds, ignore */
+				debug("cpuid is %u (> %d), ignored",
+					val, numproc);
+				continue;
+			}
+			cpuinfo[val].seen = 1;
+			cpuinfo[val].cpuid = val;
+			maxcpuid = MAX(maxcpuid, val);
+			mincpuid = MIN(mincpuid, val);
+		} else if (_chk_cpuinfo_uint32(buffer, "physical id", &val)) {
+			/* see if the ID has already been seen */
+			for (i=0; i<numproc; i++) {
+				if ((cpuinfo[i].physid == val)
+				&&  (cpuinfo[i].physcnt))
+					break;
+			}
+
+			if (i == numproc) {		/* new ID... */
+				numphys++;		/* ...increment total */
+			} else {			/* existing ID... */
+				cpuinfo[i].physcnt++;	/* ...update ID cnt */
+			}
+
+			if (curcpu < numproc) {
+				cpuinfo[curcpu].physcnt++;
+				cpuinfo[curcpu].physid = val;
+			}
+
+			maxphysid = MAX(maxphysid, val);
+			minphysid = MIN(minphysid, val);
+		} else if (_chk_cpuinfo_uint32(buffer, "core id", &val)) {
+			/* see if the ID has already been seen */
+			for (i = 0; i < numproc; i++) {
+				if ((cpuinfo[i].coreid == val)
+				&&  (cpuinfo[i].corecnt))
+					break;
+			}
+
+			if (i == numproc) {		/* new ID... */
+				numcores++;		/* ...increment total */
+			} else {			/* existing ID... */
+				cpuinfo[i].corecnt++;	/* ...update ID cnt */
+			}
+
+			if (curcpu < numproc) {
+				cpuinfo[curcpu].corecnt++;
+				cpuinfo[curcpu].coreid = val;
+			}
+
+			maxcoreid = MAX(maxcoreid, val);
+			mincoreid = MIN(mincoreid, val);
+		} else if (_chk_cpuinfo_uint32(buffer, "siblings", &val)) {
+			/* Note: this value is a count, not an index */
+		    	if (val > numproc) {	/* out of bounds, ignore */
+				debug("siblings is %u (> %d), ignored",
+					val, numproc);
+				continue;
+			}
+			if (curcpu < numproc)
+				cpuinfo[curcpu].siblings = val;
+			maxsibs = MAX(maxsibs, val);
+			minsibs = MIN(minsibs, val);
+		} else if (_chk_cpuinfo_uint32(buffer, "cpu cores", &val)) {
+			/* Note: this value is a count, not an index */
+		    	if (val > numproc) {	/* out of bounds, ignore */
+				debug("cores is %u (> %d), ignored",
+					val, numproc);
+				continue;
+			}
+			if (curcpu < numproc)
+				cpuinfo[curcpu].cores = val;
+			maxcores = MAX(maxcores, val);
+			mincores = MIN(mincores, val);
+		}
+	}
+
+	fclose(cpu_info_file);
+#endif
+
+	/*** Sanity check ***/
+	if (minsibs == 0) minsibs = 1;		/* guaranteee non-zero */
+	if (maxsibs == 0) {
+	    	minsibs = 1;
+	    	maxsibs = 1;
+	}
+	if (maxcores == 0) {			/* no core data */
+	    	mincores = 0;
+	    	maxcores = 0;
+	}
+
+	/*** Compute Sockets/Cores/Threads ***/
+	if ((minsibs == maxsibs) &&		/* homogeneous system */
+	    (mincores == maxcores)) {
+		sockets = numphys; 		/* unique "physical id" */
+		if (sockets <= 1) {		/* verify single socket */
+			sockets = numcpu / maxsibs; /* maximum "siblings" */
+		}
+		if (sockets == 0)
+			sockets = 1;		/* guarantee non-zero */
+
+		cores = numcores / sockets;	/* unique "core id" */
+		cores = MAX(maxcores, cores);	/* maximum "cpu cores" */
+
+		if (cores == 0) {
+			cores = numcpu / sockets;	/* assume multi-core */
+			if (cores > 1) {
+				debug3("Warning: cpuinfo missing 'core id' or "
+					"'cpu cores' but assuming multi-core");
+			}
+		}
+		if (cores == 0)
+			cores = 1;	/* guarantee non-zero */
+
+		threads = numcpu / (sockets * cores); /* solve for threads */
+		if (threads == 0)
+			threads = 1;	/* guarantee non-zero */
+	} else {				/* heterogeneous system */
+		sockets = numcpu;
+		cores   = 1;			/* one core per socket */
+		threads = 1;			/* one core per core */
+	}
+
+	*p_sockets = sockets;		/* update output parameters */
+	*p_cores   = cores;
+	*p_threads = threads;
+
+#if DEBUG_DETAIL
+	/*** Display raw data ***/
+	debug3("");
+	debug3("numcpu:     %u", numcpu);
+	debug3("numphys:    %u", numphys);
+	debug3("numcores:   %u", numcores);
+
+	debug3("cores:      %u->%u", mincores, maxcores);
+	debug3("sibs:       %u->%u", minsibs,  maxsibs);
+
+	debug3("cpuid:      %u->%u", mincpuid,  maxcpuid);
+	debug3("physid:     %u->%u", minphysid, maxphysid);
+	debug3("coreid:     %u->%u", mincoreid, maxcoreid);
+
+	for (i = 0; i <= maxcpuid; i++) {
+		debug3("CPU %d:", i);
+		debug3(" seen:     %u", cpuinfo[i].seen);
+		debug3(" physid:   %u", cpuinfo[i].physid);
+		debug3(" physcnt:  %u", cpuinfo[i].physcnt);
+		debug3(" siblings: %u", cpuinfo[i].siblings);
+		debug3(" cores:    %u", cpuinfo[i].cores);
+		debug3(" coreid:   %u", cpuinfo[i].coreid);
+		debug3(" corecnt:  %u", cpuinfo[i].corecnt);
+		debug3("");
+	}
+
+	debug3("");
+	debug3("Sockets:          %u", sockets);
+	debug3("Cores per socket: %u", cores);
+	debug3("Threads per core: %u", threads);
+#endif
+
+	*block_map_size = numcpu;
+	retval = _compute_block_map(*block_map_size, block_map, block_map_inv);
+
+	xfree(cpuinfo);		/* done with raw cpuinfo data */
+
+	return retval;
+}
+
+/* _chk_cpuinfo_str
+ *	check a line of cpuinfo data (buffer) for a keyword.  If it
+ *	exists, return the string value for that keyword in *valptr.
+ * Input:  buffer - single line of cpuinfo data
+ *	   keyword - keyword to check for
+ * Output: valptr - string value corresponding to keyword
+ *         return code - true if keyword found, false if not found
+ */
+static int _chk_cpuinfo_str(char *buffer, char *keyword, char **valptr)
+{
+	char *ptr;
+	if (strncmp(buffer, keyword, strlen(keyword)))
+		return false;
+
+	ptr = strstr(buffer, ":");
+	if (ptr != NULL)
+		ptr++;
+	*valptr = ptr;
+	return true;
+}
+
+/* _chk_cpuinfo_uint32
+ *	check a line of cpuinfo data (buffer) for a keyword.  If it
+ *	exists, return the uint16 value for that keyword in *valptr.
+ * Input:  buffer - single line of cpuinfo data
+ *	   keyword - keyword to check for
+ * Output: valptr - uint32 value corresponding to keyword
+ *         return code - true if keyword found, false if not found
+ */
+static int _chk_cpuinfo_uint32(char *buffer, char *keyword, uint32_t *val)
+{
+	char *valptr;
+	if (_chk_cpuinfo_str(buffer, keyword, &valptr)) {
+		*val = strtoul(valptr, (char **)NULL, 10);
+		return true;
+	} else {
+		return false;
+	}
+}
+
+/*
+ * _compute_block_map - Compute abstract->machine block mapping (and inverse)
+ *   allows computation of CPU ID masks for an abstract block distribution
+ *   of logical processors which can then be mapped the IDs used in the
+ *   actual machine processor ID ordering (which can be BIOS/OS dependendent)
+ * Input:  numproc - number of processors on the system
+ *	   cpu - array of cpuinfo (file static for qsort/_compare_cpus)
+ * Output: block_map, block_map_inv - asbtract->physical block distribution map
+ *         return code - 0 if no error, otherwise errno
+ * NOTE: User must free block_map and block_map_inv
+ *
+ * For example, given a system with 8 logical processors arranged as:
+ *
+ *	Sockets:          4
+ *	Cores per socket: 2
+ *	Threads per core: 1
+ *
+ * and a logical CPU ID assignment of:
+ *
+ *	Machine logical CPU ID assignment:
+ *	Logical CPU ID:        0  1  2  3  4  5  6  7
+ *	Physical Socket ID:    0  1  3  2  0  1  3  2
+ *
+ * The block_map would be:
+ *
+ *	Abstract -> Machine logical CPU ID block mapping:
+ *	Input: (Abstract ID)   0  1  2  3  4  5  6  7
+ *	Output: (Machine ID)   0  4  1  5  3  7  2  6  <--- block_map[]
+ *	Physical Socket ID:    0  0  1  1  2  2  3  3
+ *
+ * and it's inverse would be:
+ *
+ *	Machine -> Abstract logical CPU ID block mapping: (inverse)
+ *	Input: (Machine ID)    0  1  2  3  4  5  6  7
+ *	Output: (Abstract ID)  0  2  6  4  1  3  7  5  <--- block_map_inv[]
+ *	Physical Socket ID:    0  1  3  2  0  1  3  2
+ */
+
+/* physical cpu comparison with void * arguments to allow use with
+ * libc qsort()
+ */
+static int _icmp16(uint16_t a, uint16_t b)
+{
+    	if (a < b) {
+		return -1;
+	} else if (a == b) {
+		return 0;
+	} else {
+		return 1;
+	}
+}
+static int _icmp32(uint32_t a, uint32_t b)
+{
+	if (a < b) {
+		return -1;
+	} else if (a == b) {
+		return 0;
+	} else {
+		return 1;
+	}
+}
+
+static int _compare_cpus(const void *a1, const void *b1) {
+	uint16_t *a = (uint16_t *) a1;
+	uint16_t *b = (uint16_t *) b1;
+	int cmp;
+
+	cmp = -1 * _icmp16(cpuinfo[*a].seen,cpuinfo[*b].seen); /* seen to front */
+	if (cmp != 0)
+		return cmp;
+
+	cmp = _icmp32(cpuinfo[*a].physid, cpuinfo[*b].physid); /* key 1: physid */
+	if (cmp != 0)
+		return cmp;
+
+	cmp = _icmp32(cpuinfo[*a].coreid, cpuinfo[*b].coreid); /* key 2: coreid */
+	if (cmp != 0)
+		return cmp;
+
+	cmp = _icmp32(cpuinfo[*a].cpuid, cpuinfo[*b].cpuid);   /* key 3: cpu id */
+	return cmp;
+}
+
+static int _compute_block_map(uint16_t numproc,
+			      uint16_t **block_map, uint16_t **block_map_inv)
+{
+	uint16_t i;
+	/* Compute abstract->machine block mapping (and inverse) */
+	if (block_map) {
+		*block_map = xmalloc(numproc * sizeof(uint16_t));
+		for (i = 0; i < numproc; i++) {
+			(*block_map)[i] = i;
+		}
+		qsort(*block_map, numproc, sizeof(uint16_t), &_compare_cpus);
+	}
+	if (block_map_inv) {
+		*block_map_inv = xmalloc(numproc * sizeof(uint16_t));
+		for (i = 0; i < numproc; i++) {
+			uint16_t idx = (*block_map)[i];
+			(*block_map_inv)[idx] = i;
+		}
+	}
+
+#if DEBUG_DETAIL
+	/* Display the mapping tables */
+
+	debug3("\nMachine logical CPU ID assignment:");
+	debug3("Logical CPU ID:      ");
+	for (i = 0; i < numproc; i++) {
+		debug3("%3d", i);
+	}
+	debug3("");
+	debug3("Physical Socket ID:  ");
+	for (i = 0; i < numproc; i++) {
+		debug3("%3u", cpuinfo[i].physid);
+	}
+	debug3("");
+
+	if (block_map) {
+		debug3("\nAbstract -> Machine logical CPU ID block mapping:");
+		debug3("Input: (Abstract ID) ");
+		for (i = 0; i < numproc; i++) {
+			debug3("%3d", i);
+		}
+		debug3("");
+		debug3("Output: (Machine ID) ");
+		for (i = 0; i < numproc; i++) {
+			debug3("%3u", (*block_map)[i]);
+		}
+		debug3("");
+		debug3("Physical Socket ID:  ");
+		for (i = 0; i < numproc; i++) {
+			uint16_t id = (*block_map)[i];
+			debug3("%3u", cpuinfo[id].physid);
+		}
+		debug3("");
+	}
+
+	if (block_map_inv) {
+		debug3("\nMachine -> Abstract logical CPU ID block mapping: "
+			"(inverse)");
+		debug3("Input: (Machine ID)  ");
+		for (i = 0; i < numproc; i++) {
+			debug3("%3d", i);
+		}
+		debug3("");
+		debug3("Output: (Abstract ID)");
+		for (i = 0; i < numproc; i++) {
+			debug3("%3u", (*block_map_inv)[i]);
+		}
+		debug3("");
+		debug3("Physical Socket ID:  ");
+		for (i = 0; i < numproc; i++) {
+			debug3("%3u", cpuinfo[i].physid);
+		}
+		debug3("");
+	}
+#endif
+	return 0;
+}
+
+int _ranges_conv(char* lrange,char** prange,int mode);
+
+/* for testing purpose */
+/* uint16_t procs=8, sockets=2, cores=2, threads=2; */
+/* uint16_t block_map_size=8; */
+/* uint16_t block_map[] = { 0, 4, 2, 6, 1, 5, 3, 7 }; */
+/* uint16_t block_map_inv[] = { 0, 4, 2, 6, 1, 5, 3, 7 }; */
+/* xcpuinfo_abs_to_mac("0,2,4,6",&mach); */
+/* xcpuinfo_mac_to_abs(mach,&abs); */
+
+int
+xcpuinfo_init()
+{
+	if ( initialized )
+		return XCPUINFO_SUCCESS;
+
+	if ( get_procs(&procs) )
+		return XCPUINFO_ERROR;
+
+	if ( get_cpuinfo(procs,&sockets,&cores,&threads,
+			 &block_map_size,&block_map,&block_map_inv) )
+		return XCPUINFO_ERROR;
+
+	initialized = true ;
+
+	return XCPUINFO_SUCCESS;
+}
+
+int
+xcpuinfo_fini()
+{
+	if ( ! initialized )
+		return XCPUINFO_SUCCESS;
+
+	initialized = false ;
+	procs = sockets = cores = threads = 0;
+	block_map_size = 0;
+	xfree(block_map);
+	xfree(block_map_inv);
+
+	return XCPUINFO_SUCCESS;
+}
+
+int
+xcpuinfo_abs_to_mac(char* lrange,char** prange)
+{
+	return _ranges_conv(lrange,prange,0);
+}
+
+int
+xcpuinfo_mac_to_abs(char* lrange,char** prange)
+{
+	return _ranges_conv(lrange,prange,1);
+}
+
+int
+xcpuinfo_abs_to_map(char* lrange,uint16_t **map,uint16_t *map_size)
+{
+	*map_size = block_map_size;
+	*map = (uint16_t*) xmalloc(block_map_size*sizeof(uint16_t));
+	/* abstract range does not already include the hyperthreads */
+	return _range_to_map(lrange,*map,*map_size,1);
+}
+
+int
+xcpuinfo_map_to_mac(uint16_t *map,uint16_t map_size,char** range)
+{
+	return _map_to_range(map,map_size,range);
+}
+
+int
+xcpuinfo_mac_to_map(char* lrange,uint16_t **map,uint16_t *map_size)
+{
+	*map_size = block_map_size;
+	*map = (uint16_t*) xmalloc(block_map_size*sizeof(uint16_t));
+	/* machine range already includes the hyperthreads */
+	return _range_to_map(lrange,*map,*map_size,0);
+}
+
+int
+xcpuinfo_absmap_to_macmap(uint16_t *amap,uint16_t amap_size,
+			  uint16_t **bmap,uint16_t *bmap_size)
+{
+	/* int i; */
+
+	/* abstract to machine conversion using block map */
+	uint16_t *map_out;
+
+	*bmap_size = amap_size;
+	map_out = (uint16_t*) xmalloc(amap_size*sizeof(uint16_t));
+	*bmap = map_out;
+
+	return XCPUINFO_SUCCESS;
+}
+
+int
+xcpuinfo_macmap_to_absmap(uint16_t *amap,uint16_t amap_size,
+			  uint16_t **bmap,uint16_t *bmap_size)
+{
+	int i;
+
+	/* machine to abstract conversion using inverted block map */
+	uint16_t *cmap;
+	cmap = block_map_inv;
+	*bmap_size = amap_size;
+	*bmap = (uint16_t*) xmalloc(amap_size*sizeof(uint16_t));
+	for( i = 0 ; i < amap_size ; i++) {
+		if ( amap[i] )
+			(*bmap)[cmap[i]]=1;
+		else
+			(*bmap)[cmap[i]]=0;
+	}
+	return XCPUINFO_SUCCESS;
+}
+
+/*
+ * set to 1 each element of already allocated map of size
+ * map_size if they are present in the input range
+ * if add_thread does not equal 0, the input range is a treated
+ * as a core range, and it will be mapped to an array of uint16_t
+ * that will include all the hyperthreads associated to the cores.
+ */
+static int
+_range_to_map(char* range,uint16_t *map,uint16_t map_size,int add_threads)
+{
+	int bad_nb=0;
+	int num_fl=0;
+	int con_fl=0;
+	int last=0;
+
+	char *dup;
+	char *p;
+	char *s=NULL;
+
+	uint16_t start=0,end=0,i;
+
+	/* duplicate input range */
+	dup = xstrdup(range);
+	p = dup;
+	while ( ! last ) {
+		if ( isdigit(*p) ) {
+			if ( !num_fl ) {
+				num_fl++;
+				s=p;
+			}
+		}
+		else if ( *p == '-' ) {
+			if ( s && num_fl ) {
+				*p = '\0';
+				start = (uint16_t) atoi(s);
+				con_fl=1;
+				num_fl=0;
+				s=NULL;
+			}
+		}
+		else if ( *p == ',' || *p == '\0') {
+			if ( *p == '\0' )
+				last = 1;
+			if ( s && num_fl ) {
+				*p = '\0';
+				end = (uint16_t) atoi(s);
+				if ( !con_fl )
+					start = end ;
+				con_fl=2;
+				num_fl=0;
+				s=NULL;
+			}
+		}
+		else {
+			bad_nb++;
+			break;
+		}
+		if ( con_fl == 2 ) {
+			if ( add_threads ) {
+				start = start * threads;
+				end = (end+1)*threads - 1 ;
+			}
+			for( i = start ; i <= end && i < map_size ; i++) {
+				map[i]=1;
+			}
+			con_fl=0;
+		}
+		p++;
+	}
+
+	xfree(dup);
+
+	if ( bad_nb > 0 ) {
+		/* bad format for input range */
+		return XCPUINFO_ERROR;
+	}
+
+	return XCPUINFO_SUCCESS;
+}
+
+
+/*
+ * allocate and build a range of ids using an input map
+ * having printable element set to 1
+ */
+static int
+_map_to_range(uint16_t *map,uint16_t map_size,char** prange)
+{
+	size_t len;
+	int num_fl=0;
+	int con_fl=0;
+
+	char id[12];
+	char *str;
+
+	uint16_t start=0,end=0,i;
+
+	str = xstrdup("");
+	for ( i = 0 ; i < map_size ; i++ ) {
+
+		if ( map[i] ) {
+			num_fl=1;
+			end=i;
+			if ( !con_fl ) {
+				start=end;
+				con_fl=1;
+			}
+		}
+		else if ( num_fl ) {
+			if ( start < end ) {
+				sprintf(id,"%u-%u,",start,end);
+				xstrcat(str,id);
+			}
+			else {
+				sprintf(id,"%u,",start);
+				xstrcat(str,id);
+			}
+			con_fl = num_fl = 0;
+		}
+	}
+	if ( num_fl ) {
+		if ( start < end ) {
+			sprintf(id,"%u-%u,",start,end);
+			xstrcat(str,id);
+		}
+		else {
+			sprintf(id,"%u,",start);
+			xstrcat(str,id);
+		}
+	}
+
+	len = strlen(str);
+	if ( len > 0 ) {
+		str[len-1]='\0';
+	}
+	else {
+		xfree(str);
+		return XCPUINFO_ERROR;
+	}
+
+	if ( prange != NULL )
+		*prange = str;
+	else
+		xfree(str);
+
+	return XCPUINFO_SUCCESS;
+}
+
+/*
+ * convert a range into an other one according to
+ * a modus operandi being 0 or 1 for abstract to machine
+ * or machine to abstract representation of cores
+ */
+static int
+_ranges_conv(char* lrange,char** prange,int mode)
+{
+	int fstatus;
+	int i;
+	uint16_t *amap;
+	uint16_t *map;
+	uint16_t *map_out;
+
+	/* init internal data if not already done */
+	if ( xcpuinfo_init() != XCPUINFO_SUCCESS )
+		return XCPUINFO_ERROR;
+
+	if ( mode ) {
+		/* machine to abstract conversion */
+		amap = block_map_inv;
+	}
+	else {
+		/* abstract to machine conversion */
+		amap = block_map;
+	}
+
+	/* allocate map for local work */
+	map = (uint16_t*) xmalloc(block_map_size*sizeof(uint16_t));
+	map_out = (uint16_t*) xmalloc(block_map_size*sizeof(uint16_t));
+
+	/* extract the input map */
+	fstatus = _range_to_map(lrange,map,block_map_size,!mode);
+	if ( fstatus ) {
+		goto exit;
+	}
+
+	/* do the conversion (see src/slurmd/slurmd/get_mach_stat.c) */
+	for( i = 0 ; i < block_map_size ; i++) {
+		if ( map[i] )
+			map_out[amap[i]]=1;
+	}
+
+	/* build the ouput range */
+	fstatus = _map_to_range(map_out,block_map_size,prange);
+
+exit:
+	xfree(map);
+	xfree(map_out);
+	return fstatus;
+}
diff --git a/src/plugins/proctrack/cgroup/xcpuinfo.h b/src/common/xcpuinfo.h
similarity index 57%
rename from src/plugins/proctrack/cgroup/xcpuinfo.h
rename to src/common/xcpuinfo.h
index f8cf8af5a..1e8cdd5ba 100644
--- a/src/plugins/proctrack/cgroup/xcpuinfo.h
+++ b/src/common/xcpuinfo.h
@@ -3,32 +3,32 @@
  *****************************************************************************
  *  Copyright (C) 2009 CEA/DAM/DIF
  *  Written by Matthieu Hautreux <matthieu.hautreux@cea.fr>
- *  
+ *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
- *  
+ *
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
  *  Software Foundation; either version 2 of the License, or (at your option)
  *  any later version.
  *
- *  In addition, as a special exception, the copyright holders give permission 
- *  to link the code of portions of this program with the OpenSSL library under 
- *  certain conditions as described in each individual source file, and 
- *  distribute linked combinations including the two. You must obey the GNU 
- *  General Public License in all respects for all of the code used other than 
- *  OpenSSL. If you modify file(s) with this exception, you may extend this 
- *  exception to your version of the file(s), but you are not obligated to do 
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
  *  so. If you do not wish to do so, delete this exception statement from your
- *  version.  If you delete this exception statement from all source files in 
+ *  version.  If you delete this exception statement from all source files in
  *  the program, then also delete it here.
- *  
+ *
  *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
  *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
  *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
  *  details.
- *  
+ *
  *  You should have received a copy of the GNU General Public License along
  *  with SLURM; if not, write to the Free Software Foundation, Inc.,
  *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
@@ -44,6 +44,12 @@
 #define XCPUINFO_ERROR    1
 #define XCPUINFO_SUCCESS  0
 
+extern int get_procs(uint16_t *procs);
+extern int get_cpuinfo(uint16_t numproc,
+		       uint16_t *sockets, uint16_t *cores, uint16_t *threads,
+		       uint16_t *block_map_size,
+		       uint16_t **block_map, uint16_t **block_map_inv);
+
 /*
  * Initialize xcpuinfo internal data
  *
@@ -90,4 +96,71 @@ int xcpuinfo_abs_to_mac(char* lrange,char** prange);
  */
 int xcpuinfo_mac_to_abs(char* lrange,char** prange);
 
+/*
+ * Use xcpuinfo internal data to convert an abstract range
+ * of cores (slurm internal format) into the equivalent 
+ * map of cores
+ *
+ * range is of the form 0-1,4-5
+ *
+ * on success, the output map must be freed using xfree
+ *
+ * returned values:
+ *  - XCPUINFO_ERROR
+ *  - XCPUINFO_SUCCESS
+ */
+int xcpuinfo_abs_to_map(char* lrange,uint16_t **map,uint16_t *map_size);
+
+/*
+ * Use xcpuinfo internal data to convert a machine range
+ * of cores into the equivalent map of cores
+ *
+ * range is of the form 0-1,4-5
+ *
+ * on success, the output map must be freed using xfree
+ *
+ * returned values:
+ *  - XCPUINFO_ERROR
+ *  - XCPUINFO_SUCCESS
+ */
+int xcpuinfo_mac_to_map(char* lrange,uint16_t **map,uint16_t *map_size);
+
+/*
+ * Use xcpuinfo internal data to convert a machine map
+ * of cores into the equivalent machine range of cores
+ *
+ * on success, the output map must be freed using xfree
+ *
+ * returned values:
+ *  - XCPUINFO_ERROR
+ *  - XCPUINFO_SUCCESS
+ */
+int xcpuinfo_map_to_mac(uint16_t *map,uint16_t map_size,char** range);
+
+/*
+ * Use xcpuinfo internal data to convert an abstract map of cores
+ * into the equivalent machine map of cores
+ *
+ * on success, the output map must be freed using xfree
+ *
+ * returned values:
+ *  - XCPUINFO_ERROR
+ *  - XCPUINFO_SUCCESS
+ */
+int xcpuinfo_absmap_to_macmap(uint16_t *amap,uint16_t amap_size,
+			      uint16_t **bmap,uint16_t *bmap_size);
+
+/*
+ * Use xcpuinfo internal data to convert a machine map of cores
+ * into the equivalent abstract map of cores
+ *
+ * on success, the output map must be freed using xfree
+ *
+ * returned values:
+ *  - XCPUINFO_ERROR
+ *  - XCPUINFO_SUCCESS
+ */
+int xcpuinfo_macmap_to_absmap(uint16_t *amap,uint16_t amap_size,
+			      uint16_t **bmap,uint16_t *bmap_size);
+
 #endif
diff --git a/src/common/xmalloc.c b/src/common/xmalloc.c
index 49e111125..38b6d85f4 100644
--- a/src/common/xmalloc.c
+++ b/src/common/xmalloc.c
@@ -11,7 +11,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/common/xmalloc.h b/src/common/xmalloc.h
index 2db328a20..de0d00751 100644
--- a/src/common/xmalloc.h
+++ b/src/common/xmalloc.h
@@ -12,7 +12,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/common/xsignal.c b/src/common/xsignal.c
index 3edb13c24..c665bb743 100644
--- a/src/common/xsignal.c
+++ b/src/common/xsignal.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -43,7 +43,7 @@
 #include <errno.h>
 #include <signal.h>
 
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm_errno.h"
 
 #include "src/common/log.h"
 #include "src/common/macros.h"
diff --git a/src/common/xsignal.h b/src/common/xsignal.h
index bd760fd6e..37d4a88df 100644
--- a/src/common/xsignal.h
+++ b/src/common/xsignal.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/common/xstring.c b/src/common/xstring.c
index ed937a06d..8c17fe632 100644
--- a/src/common/xstring.c
+++ b/src/common/xstring.c
@@ -10,7 +10,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -57,8 +57,9 @@
 
 #include <stdarg.h>
 #include <ctype.h>
+#include <time.h>
 
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm_errno.h"
 
 #include "src/common/macros.h"
 #include "src/common/strlcpy.h"
@@ -325,8 +326,7 @@ char *xstrdup_printf(const char *fmt, ...)
  */
 char * xstrndup(const char *str, size_t n)
 {
-	size_t siz,
-	       rsiz;
+	size_t siz;
 	char   *result;
 
 	if (str == NULL)
@@ -338,7 +338,7 @@ char * xstrndup(const char *str, size_t n)
 	siz++;
 	result = (char *)xmalloc(siz);
 
-	rsiz = strlcpy(result, str, siz);
+	(void) strlcpy(result, str, siz);
 
 	return result;
 }
@@ -469,7 +469,7 @@ bool xstring_is_whitespace(const char *str)
 
 	len = strlen(str);
 	for (i = 0; i < len; i++) {
-		if (!isspace(str[i])) {
+		if (!isspace((int)str[i])) {
 			return false;
 		}
 	}
@@ -485,7 +485,7 @@ char *xstrtolower(char *str)
 	if(str) {
 		int j = 0;
 		while(str[j]) {
-			str[j] = tolower(str[j]);
+			str[j] = tolower((int)str[j]);
 			j++;
 		}
 	}
diff --git a/src/common/xstring.h b/src/common/xstring.h
index ef52c6757..fd2c50222 100644
--- a/src/common/xstring.h
+++ b/src/common/xstring.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/database/Makefile.in b/src/database/Makefile.in
index 74c4dd819..b71354035 100644
--- a/src/database/Makefile.in
+++ b/src/database/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -143,7 +145,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -180,6 +185,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -237,6 +243,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -272,6 +279,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/database/mysql_common.c b/src/database/mysql_common.c
index 80ed86278..d165e8230 100644
--- a/src/database/mysql_common.c
+++ b/src/database/mysql_common.c
@@ -1,5 +1,5 @@
 /*****************************************************************************\
- *  mysql_common.c - common functions for the the mysql storage plugin.
+ *  mysql_common.c - common functions for the mysql storage plugin.
  *****************************************************************************
  *
  *  Copyright (C) 2004-2007 The Regents of the University of California.
@@ -7,7 +7,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/database/mysql_common.h b/src/database/mysql_common.h
index b4cc95ba0..eb600aadd 100644
--- a/src/database/mysql_common.h
+++ b/src/database/mysql_common.h
@@ -1,5 +1,5 @@
 /*****************************************************************************\
- *  mysql_common.h - common functions for the the mysql storage plugin.
+ *  mysql_common.h - common functions for the mysql storage plugin.
  *****************************************************************************
  *
  *  Copyright (C) 2004-2007 The Regents of the University of California.
@@ -7,7 +7,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -57,7 +57,7 @@
 #endif
 
 #include <stdio.h>
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm_errno.h"
 #include "src/common/list.h"
 #include "src/common/xstring.h"
 #include <mysql.h>
diff --git a/src/database/pgsql_common.c b/src/database/pgsql_common.c
index 9223c89d2..f46bf86f3 100644
--- a/src/database/pgsql_common.c
+++ b/src/database/pgsql_common.c
@@ -1,5 +1,5 @@
 /*****************************************************************************\
- *  pgsql_common.c - common functions for the the pgsql storage plugin.
+ *  pgsql_common.c - common functions for the pgsql storage plugin.
  *****************************************************************************
  *
  *  Copyright (C) 2004-2007 The Regents of the University of California.
@@ -7,7 +7,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/database/pgsql_common.h b/src/database/pgsql_common.h
index 34194edf7..e21500af3 100644
--- a/src/database/pgsql_common.h
+++ b/src/database/pgsql_common.h
@@ -1,5 +1,5 @@
 /*****************************************************************************\
- *  pgsql_common.h - common functions for the the pgsql storage plugin.
+ *  pgsql_common.h - common functions for the pgsql storage plugin.
  *****************************************************************************
  *
  *  Copyright (C) 2004-2007 The Regents of the University of California.
@@ -7,7 +7,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -53,7 +53,7 @@
 #endif
 
 #include <stdio.h>
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm_errno.h"
 #include "src/slurmctld/slurmctld.h"
 #include "src/common/xstring.h"
 #include <libpq-fe.h>
diff --git a/src/db_api/Makefile.am b/src/db_api/Makefile.am
index 99b5958f1..4b392b532 100644
--- a/src/db_api/Makefile.am
+++ b/src/db_api/Makefile.am
@@ -51,7 +51,8 @@ BUILT_SOURCES = $(VERSION_SCRIPT) libslurmdb.la
 # and libslurmdb in the same program because of extra symbols this is needed.
 # libslurmdb.o are for convenience, and it is not installed.
 noinst_PROGRAMS = libslurmdb.o
-
+# This is needed if compiling on windows
+EXEEXT=
 
 libslurmdb_la_SOURCES =	\
 	account_functions.c \
diff --git a/src/db_api/Makefile.in b/src/db_api/Makefile.in
index 1e5ec642e..2ddd2a3ec 100644
--- a/src/db_api/Makefile.in
+++ b/src/db_api/Makefile.in
@@ -66,6 +66,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -76,6 +77,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -152,7 +154,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -178,7 +183,8 @@ ECHO_N = @ECHO_N@
 ECHO_T = @ECHO_T@
 EGREP = @EGREP@
 ELAN_LIBS = @ELAN_LIBS@
-EXEEXT = @EXEEXT@
+# This is needed if compiling on windows
+EXEEXT = 
 FEDERATION_LDFLAGS = @FEDERATION_LDFLAGS@
 FGREP = @FGREP@
 GREP = @GREP@
@@ -189,6 +195,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -246,6 +253,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -281,6 +289,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/db_api/account_functions.c b/src/db_api/account_functions.c
index 9d8b020f5..6f7037b72 100644
--- a/src/db_api/account_functions.c
+++ b/src/db_api/account_functions.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -41,10 +41,9 @@
 #  include "config.h"
 #endif
 
-#include <slurm/slurm.h>
-#include <slurm/slurm_errno.h>
-
-#include <slurm/slurmdb.h>
+#include "slurm/slurm.h"
+#include "slurm/slurm_errno.h"
+#include "slurm/slurmdb.h"
 
 #include "src/common/slurm_accounting_storage.h"
 
diff --git a/src/db_api/archive_functions.c b/src/db_api/archive_functions.c
index 585321b2a..775662318 100644
--- a/src/db_api/archive_functions.c
+++ b/src/db_api/archive_functions.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -41,10 +41,9 @@
 #  include "config.h"
 #endif
 
-#include <slurm/slurm.h>
-#include <slurm/slurm_errno.h>
-
-#include <slurm/slurmdb.h>
+#include "slurm/slurm.h"
+#include "slurm/slurm_errno.h"
+#include "slurm/slurmdb.h"
 
 #include "src/common/slurm_accounting_storage.h"
 
diff --git a/src/db_api/assoc_functions.c b/src/db_api/assoc_functions.c
index 1323738a5..fac2588f4 100644
--- a/src/db_api/assoc_functions.c
+++ b/src/db_api/assoc_functions.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -41,10 +41,9 @@
 #  include "config.h"
 #endif
 
-#include <slurm/slurm.h>
-#include <slurm/slurm_errno.h>
-
-#include <slurm/slurmdb.h>
+#include "slurm/slurm.h"
+#include "slurm/slurm_errno.h"
+#include "slurm/slurmdb.h"
 
 #include "src/common/slurm_accounting_storage.h"
 
diff --git a/src/db_api/cluster_functions.c b/src/db_api/cluster_functions.c
index a9f401301..fce9d0310 100644
--- a/src/db_api/cluster_functions.c
+++ b/src/db_api/cluster_functions.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -41,10 +41,9 @@
 #  include "config.h"
 #endif
 
-#include <slurm/slurm.h>
-#include <slurm/slurm_errno.h>
-
-#include <slurm/slurmdb.h>
+#include "slurm/slurm.h"
+#include "slurm/slurm_errno.h"
+#include "slurm/slurmdb.h"
 
 #include "src/common/slurm_accounting_storage.h"
 
diff --git a/src/db_api/cluster_report_functions.c b/src/db_api/cluster_report_functions.c
index 790cd5fc5..7330bc0bf 100644
--- a/src/db_api/cluster_report_functions.c
+++ b/src/db_api/cluster_report_functions.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -41,8 +41,8 @@
 #  include "config.h"
 #endif
 
-#include <slurm/slurm.h>
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm.h"
+#include "slurm/slurm_errno.h"
 
 #include "src/common/slurmdb_defs.h"
 #include "src/common/slurm_accounting_storage.h"
diff --git a/src/db_api/connection_functions.c b/src/db_api/connection_functions.c
index 2e7ae1e78..c72edce0e 100644
--- a/src/db_api/connection_functions.c
+++ b/src/db_api/connection_functions.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -41,10 +41,9 @@
 #  include "config.h"
 #endif
 
-#include <slurm/slurm.h>
-#include <slurm/slurm_errno.h>
-
-#include <slurm/slurmdb.h>
+#include "slurm/slurm.h"
+#include "slurm/slurm_errno.h"
+#include "slurm/slurmdb.h"
 
 #include "src/common/slurm_accounting_storage.h"
 /*
diff --git a/src/db_api/coord_functions.c b/src/db_api/coord_functions.c
index 2a148ee3f..bc0544f06 100644
--- a/src/db_api/coord_functions.c
+++ b/src/db_api/coord_functions.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -41,10 +41,9 @@
 #  include "config.h"
 #endif
 
-#include <slurm/slurm.h>
-#include <slurm/slurm_errno.h>
-
-#include <slurm/slurmdb.h>
+#include "slurm/slurm.h"
+#include "slurm/slurm_errno.h"
+#include "slurm/slurmdb.h"
 
 #include "src/common/slurm_accounting_storage.h"
 
diff --git a/src/db_api/extra_get_functions.c b/src/db_api/extra_get_functions.c
index bd256d973..c87166b52 100644
--- a/src/db_api/extra_get_functions.c
+++ b/src/db_api/extra_get_functions.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -42,10 +42,9 @@
 #  include "config.h"
 #endif
 
-#include <slurm/slurm.h>
-#include <slurm/slurm_errno.h>
-
-#include <slurm/slurmdb.h>
+#include "slurm/slurm.h"
+#include "slurm/slurm_errno.h"
+#include "slurm/slurmdb.h"
 
 #include "src/common/slurm_accounting_storage.h"
 
diff --git a/src/db_api/job_report_functions.c b/src/db_api/job_report_functions.c
index 8d702571c..aa540f45d 100644
--- a/src/db_api/job_report_functions.c
+++ b/src/db_api/job_report_functions.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -42,10 +42,9 @@
 
 #include <stdlib.h>
 
-#include <slurm/slurm.h>
-#include <slurm/slurm_errno.h>
-
-#include <slurm/slurmdb.h>
+#include "slurm/slurm.h"
+#include "slurm/slurm_errno.h"
+#include "slurm/slurmdb.h"
 
 #include "src/common/slurm_accounting_storage.h"
 #include "src/common/xstring.h"
diff --git a/src/db_api/qos_functions.c b/src/db_api/qos_functions.c
index 36c9a092d..fcea72de1 100644
--- a/src/db_api/qos_functions.c
+++ b/src/db_api/qos_functions.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -41,10 +41,9 @@
 #  include "config.h"
 #endif
 
-#include <slurm/slurm.h>
-#include <slurm/slurm_errno.h>
-
-#include <slurm/slurmdb.h>
+#include "slurm/slurm.h"
+#include "slurm/slurm_errno.h"
+#include "slurm/slurmdb.h"
 
 #include "src/common/slurm_accounting_storage.h"
 
diff --git a/src/db_api/resv_report_functions.c b/src/db_api/resv_report_functions.c
index 7ab75e842..7e76cb02a 100644
--- a/src/db_api/resv_report_functions.c
+++ b/src/db_api/resv_report_functions.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -41,9 +41,8 @@
 #  include "config.h"
 #endif
 
-#include <slurm/slurm.h>
-#include <slurm/slurm_errno.h>
-
-#include <slurm/slurmdb.h>
+#include "slurm/slurm.h"
+#include "slurm/slurm_errno.h"
+#include "slurm/slurmdb.h"
 
 #include "src/common/slurm_accounting_storage.h"
diff --git a/src/db_api/usage_functions.c b/src/db_api/usage_functions.c
index 4f9471107..7b122c503 100644
--- a/src/db_api/usage_functions.c
+++ b/src/db_api/usage_functions.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -41,10 +41,9 @@
 #  include "config.h"
 #endif
 
-#include <slurm/slurm.h>
-#include <slurm/slurm_errno.h>
-
-#include <slurm/slurmdb.h>
+#include "slurm/slurm.h"
+#include "slurm/slurm_errno.h"
+#include "slurm/slurmdb.h"
 
 #include "src/common/slurm_accounting_storage.h"
 
diff --git a/src/db_api/user_functions.c b/src/db_api/user_functions.c
index 65bae5636..75f1702a2 100644
--- a/src/db_api/user_functions.c
+++ b/src/db_api/user_functions.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -41,10 +41,9 @@
 #  include "config.h"
 #endif
 
-#include <slurm/slurm.h>
-#include <slurm/slurm_errno.h>
-
-#include <slurm/slurmdb.h>
+#include "slurm/slurm.h"
+#include "slurm/slurm_errno.h"
+#include "slurm/slurmdb.h"
 
 #include "src/common/slurm_accounting_storage.h"
 
diff --git a/src/db_api/user_report_functions.c b/src/db_api/user_report_functions.c
index a90da09cf..9083949bc 100644
--- a/src/db_api/user_report_functions.c
+++ b/src/db_api/user_report_functions.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -40,10 +40,9 @@
 #  include "config.h"
 #endif
 
-#include <slurm/slurm.h>
-#include <slurm/slurm_errno.h>
-
-#include <slurm/slurmdb.h>
+#include "slurm/slurm.h"
+#include "slurm/slurm_errno.h"
+#include "slurm/slurmdb.h"
 
 #include "src/common/slurmdb_defs.h"
 #include "src/common/slurm_accounting_storage.h"
diff --git a/src/db_api/wckey_functions.c b/src/db_api/wckey_functions.c
index 8d5d0e899..e02658d66 100644
--- a/src/db_api/wckey_functions.c
+++ b/src/db_api/wckey_functions.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -41,10 +41,9 @@
 #  include "config.h"
 #endif
 
-#include <slurm/slurm.h>
-#include <slurm/slurm_errno.h>
-
-#include <slurm/slurmdb.h>
+#include "slurm/slurm.h"
+#include "slurm/slurm_errno.h"
+#include "slurm/slurmdb.h"
 
 #include "src/common/slurm_accounting_storage.h"
 
diff --git a/src/plugins/Makefile.in b/src/plugins/Makefile.in
index b72d9e206..b2d5ead51 100644
--- a/src/plugins/Makefile.in
+++ b/src/plugins/Makefile.in
@@ -60,6 +60,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -70,6 +71,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -131,7 +133,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -168,6 +173,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -225,6 +231,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -260,6 +267,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/accounting_storage/Makefile.in b/src/plugins/accounting_storage/Makefile.in
index bf44f179a..ea02d16f9 100644
--- a/src/plugins/accounting_storage/Makefile.in
+++ b/src/plugins/accounting_storage/Makefile.in
@@ -62,6 +62,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -72,6 +73,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -133,7 +135,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -170,6 +175,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -227,6 +233,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -262,6 +269,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/accounting_storage/common/Makefile.in b/src/plugins/accounting_storage/common/Makefile.in
index 47fff3d36..dab8409bf 100644
--- a/src/plugins/accounting_storage/common/Makefile.in
+++ b/src/plugins/accounting_storage/common/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -114,7 +116,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -151,6 +156,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -208,6 +214,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -243,6 +250,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/accounting_storage/common/common_as.c b/src/plugins/accounting_storage/common/common_as.c
index 027685a85..21beb505e 100644
--- a/src/plugins/accounting_storage/common/common_as.c
+++ b/src/plugins/accounting_storage/common/common_as.c
@@ -9,7 +9,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -205,10 +205,14 @@ extern int addto_update_list(List update_list, slurmdb_update_type_t type,
 			qos->max_cpu_run_mins_pu = (uint64_t)INFINITE;
 		if(qos->max_cpus_pj == NO_VAL)
 			qos->max_cpus_pj = INFINITE;
+		if(qos->max_cpus_pu == NO_VAL)
+			qos->max_cpus_pu = INFINITE;
 		if(qos->max_jobs_pu == NO_VAL)
 			qos->max_jobs_pu = INFINITE;
 		if(qos->max_nodes_pj == NO_VAL)
 			qos->max_nodes_pj = INFINITE;
+		if(qos->max_nodes_pu == NO_VAL)
+			qos->max_nodes_pu = INFINITE;
 		if(qos->max_submit_jobs_pu == NO_VAL)
 			qos->max_submit_jobs_pu = INFINITE;
 		if(qos->max_wall_pj == NO_VAL)
@@ -626,16 +630,10 @@ extern int archive_run_script(slurmdb_archive_cond_t *arch_cond,
 		   char *cluster_name, time_t last_submit)
 {
 	char * args[] = {arch_cond->archive_script, NULL};
-	const char *tmpdir;
 	struct stat st;
 	char **env = NULL;
 	time_t curr_end;
 
-#ifdef _PATH_TMP
-	tmpdir = _PATH_TMP;
-#else
-	tmpdir = "/tmp";
-#endif
 	if (stat(arch_cond->archive_script, &st) < 0) {
 		errno = errno;
 		error("archive_run_script: failed to stat %s: %m",
@@ -827,11 +825,12 @@ extern int archive_write_file(Buf buffer, char *cluster_name,
 	if (rc)
 		(void) unlink(new_file);
 	else {			/* file shuffle */
-		int ign;	/* avoid warning */
 		(void) unlink(old_file);
-		ign =  link(reg_file, old_file);
+		if (link(reg_file, old_file))
+			error("Link(%s, %s): %m", reg_file, old_file);
 		(void) unlink(reg_file);
-		ign =  link(new_file, reg_file);
+		if (link(new_file, reg_file))
+			error("Link(%s, %s): %m", new_file, reg_file);
 		(void) unlink(new_file);
 	}
 	xfree(old_file);
diff --git a/src/plugins/accounting_storage/common/common_as.h b/src/plugins/accounting_storage/common/common_as.h
index 641480975..8da209dcb 100644
--- a/src/plugins/accounting_storage/common/common_as.h
+++ b/src/plugins/accounting_storage/common/common_as.h
@@ -9,7 +9,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/accounting_storage/filetxt/Makefile.in b/src/plugins/accounting_storage/filetxt/Makefile.in
index 462e62436..81cf73dc7 100644
--- a/src/plugins/accounting_storage/filetxt/Makefile.in
+++ b/src/plugins/accounting_storage/filetxt/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -141,7 +143,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -178,6 +183,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -235,6 +241,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -270,6 +277,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/accounting_storage/filetxt/accounting_storage_filetxt.c b/src/plugins/accounting_storage/filetxt/accounting_storage_filetxt.c
index 7fb49e101..4f368d29c 100644
--- a/src/plugins/accounting_storage/filetxt/accounting_storage_filetxt.c
+++ b/src/plugins/accounting_storage/filetxt/accounting_storage_filetxt.c
@@ -9,7 +9,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -533,6 +533,19 @@ extern int clusteracct_storage_p_register_ctld(void *db_conn, uint16_t port)
 	return SLURM_SUCCESS;
 }
 
+extern int clusteracct_storage_p_register_disconn_ctld(
+	void *db_conn, char *control_host)
+{
+	return SLURM_SUCCESS;
+}
+
+extern int clusteracct_storage_p_fini_ctld(void *db_conn,
+					   char *ip, uint16_t port,
+					   char *cluster_nodes)
+{
+	return SLURM_SUCCESS;
+}
+
 extern int clusteracct_storage_p_cluster_cpus(void *db_conn,
 					      char *cluster_nodes,
 					      uint32_t cpus,
diff --git a/src/plugins/accounting_storage/filetxt/filetxt_jobacct_process.c b/src/plugins/accounting_storage/filetxt/filetxt_jobacct_process.c
index e95096036..a8b9e2c8f 100644
--- a/src/plugins/accounting_storage/filetxt/filetxt_jobacct_process.c
+++ b/src/plugins/accounting_storage/filetxt/filetxt_jobacct_process.c
@@ -9,7 +9,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -46,6 +46,7 @@
 
 #include "src/common/xstring.h"
 #include "src/common/xmalloc.h"
+#include "src/common/list.h"
 #include "filetxt_jobacct_process.h"
 #include "src/slurmctld/slurmctld.h"
 #include "src/slurmdbd/read_config.h"
@@ -1074,12 +1075,15 @@ extern List filetxt_jobacct_process_get_jobs(slurmdb_job_cond_t *job_cond)
 				if (fptr)
 					*fptr = 0;
 				break;
-			} else
+			} else {
 				*fptr++ = 0;
+			}
 		}
-		f[++i] = 0;
+		if (i < MAX_RECORD_FIELDS)
+			i++;
+		f[i] = 0;
 
-		if(i < HEADER_LENGTH) {
+		if (i < HEADER_LENGTH) {
 			continue;
 		}
 
diff --git a/src/plugins/accounting_storage/filetxt/filetxt_jobacct_process.h b/src/plugins/accounting_storage/filetxt/filetxt_jobacct_process.h
index 310955612..d177375dc 100644
--- a/src/plugins/accounting_storage/filetxt/filetxt_jobacct_process.h
+++ b/src/plugins/accounting_storage/filetxt/filetxt_jobacct_process.h
@@ -9,7 +9,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/accounting_storage/mysql/Makefile.in b/src/plugins/accounting_storage/mysql/Makefile.in
index 1e8beca3d..e13d6c434 100644
--- a/src/plugins/accounting_storage/mysql/Makefile.in
+++ b/src/plugins/accounting_storage/mysql/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -191,7 +193,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -228,6 +233,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -285,6 +291,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -320,6 +327,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/accounting_storage/mysql/accounting_storage_mysql.c b/src/plugins/accounting_storage/mysql/accounting_storage_mysql.c
index 1d923fc6e..97898014d 100644
--- a/src/plugins/accounting_storage/mysql/accounting_storage_mysql.c
+++ b/src/plugins/accounting_storage/mysql/accounting_storage_mysql.c
@@ -5,11 +5,12 @@
  *****************************************************************************
  *  Copyright (C) 2004-2007 The Regents of the University of California.
  *  Copyright (C) 2008-2010 Lawrence Livermore National Security.
+ *  Copyright (C) 2011 SchedMD LLC.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
- *  Written by Danny Auble <da@llnl.gov>
+ *  Written by Danny Auble <da@schedmd.com, da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -467,6 +468,7 @@ static int _as_mysql_acct_check_tables(mysql_conn_t *mysql_conn)
 		{ "name", "tinytext not null" },
 		{ "control_host", "tinytext not null default ''" },
 		{ "control_port", "int unsigned not null default 0" },
+		{ "last_port", "int unsigned not null default 0" },
 		{ "rpc_version", "smallint unsigned not null default 0" },
 		{ "classification", "smallint unsigned default 0" },
 		{ "dimensions", "smallint unsigned default 1" },
@@ -483,10 +485,13 @@ static int _as_mysql_acct_check_tables(mysql_conn_t *mysql_conn)
 		{ "name", "tinytext not null" },
 		{ "description", "text" },
 		{ "flags", "int unsigned default 0" },
+		{ "grace_time", "int unsigned default NULL" },
 		{ "max_jobs_per_user", "int default NULL" },
 		{ "max_submit_jobs_per_user", "int default NULL" },
 		{ "max_cpus_per_job", "int default NULL" },
+		{ "max_cpus_per_user", "int default NULL" },
 		{ "max_nodes_per_job", "int default NULL" },
+		{ "max_nodes_per_user", "int default NULL" },
 		{ "max_wall_duration_per_job", "int default NULL" },
 		{ "max_cpu_mins_per_job", "bigint default NULL" },
 		{ "max_cpu_run_mins_per_user", "bigint default NULL" },
@@ -2145,7 +2150,8 @@ extern void *acct_storage_p_get_connection(const slurm_trigger_callbacks_t *cb,
 			error("couldn't set sql_mode");
 			acct_storage_p_close_connection(&mysql_conn);
 			errno = rc;
-		}
+		} else
+			errno = SLURM_SUCCESS;
 	}
 
 	return (void *)mysql_conn;
@@ -2207,7 +2213,6 @@ extern int acct_storage_p_commit(mysql_conn_t *mysql_conn, bool commit)
 	}
 
 	if (commit && list_count(mysql_conn->update_list)) {
-		int rc;
 		char *query = NULL;
 		MYSQL_RES *result = NULL;
 		MYSQL_ROW row;
@@ -2227,7 +2232,7 @@ extern int acct_storage_p_commit(mysql_conn_t *mysql_conn, bool commit)
 		}
 		xfree(query);
 		while ((row = mysql_fetch_row(result))) {
-			rc = slurmdb_send_accounting_update(
+			(void) slurmdb_send_accounting_update(
 				mysql_conn->update_list,
 				row[2], row[0],
 				slurm_atoul(row[1]),
@@ -2235,7 +2240,7 @@ extern int acct_storage_p_commit(mysql_conn_t *mysql_conn, bool commit)
 		}
 		mysql_free_result(result);
 	skip:
-		rc = assoc_mgr_update(mysql_conn->update_list);
+		(void) assoc_mgr_update(mysql_conn->update_list);
 
 		slurm_mutex_lock(&as_mysql_cluster_list_lock);
 		itr2 = list_iterator_create(as_mysql_cluster_list);
@@ -2581,6 +2586,72 @@ extern int clusteracct_storage_p_register_ctld(mysql_conn_t *mysql_conn,
 		mysql_conn, mysql_conn->cluster_name, port);
 }
 
+extern uint16_t clusteracct_storage_p_register_disconn_ctld(
+	mysql_conn_t *mysql_conn, char *control_host)
+{
+	uint16_t control_port = 0;
+	char *query = NULL;
+	MYSQL_RES *result = NULL;
+	MYSQL_ROW row;
+
+	if (!mysql_conn->cluster_name) {
+		error("%s:%d no cluster name", THIS_FILE, __LINE__);
+		return control_port;
+	} else if (!control_host) {
+		error("%s:%d no control host for cluster %s",
+		      THIS_FILE, __LINE__, mysql_conn->cluster_name);
+		return control_port;
+	}
+
+	query = xstrdup_printf("select last_port from %s where name='%s';",
+			       cluster_table, mysql_conn->cluster_name);
+	if (!(result = mysql_db_query_ret(mysql_conn, query, 0))) {
+		xfree(query);
+		error("register_disconn_ctld: no result given for cluster %s",
+		      mysql_conn->cluster_name);
+		return control_port;
+	}
+	xfree(query);
+
+	if ((row = mysql_fetch_row(result))) {
+		control_port = slurm_atoul(row[0]);
+		/* If there is ever a network issue talking to the DBD, and
+		   both the DBD and the ctrl stay up when the ctld goes to
+		   talk to the DBD again it may not re-register (<=2.2).
+		   Since the slurmctld didn't go down we can presume the port
+		   is still the same and just use the last information as the
+		   information we should use and go along our merry way.
+		*/
+		query = xstrdup_printf(
+			"update %s set control_host='%s', "
+			"control_port=%u where name='%s';",
+			cluster_table, control_host, control_port,
+			mysql_conn->cluster_name);
+		debug3("%d(%s:%d) query\n%s",
+		       mysql_conn->conn, THIS_FILE, __LINE__, query);
+		if (mysql_db_query(mysql_conn, query) != SLURM_SUCCESS)
+			control_port = 0;
+		xfree(query);
+	}
+	mysql_free_result(result);
+
+	return control_port;
+}
+
+extern int clusteracct_storage_p_fini_ctld(mysql_conn_t *mysql_conn,
+					   slurmdb_cluster_rec_t *cluster_rec)
+{
+	if (!cluster_rec || (!mysql_conn->cluster_name && !cluster_rec->name)) {
+		error("%s:%d no cluster name", THIS_FILE, __LINE__);
+		return SLURM_ERROR;
+	}
+
+	if (!cluster_rec->name)
+		cluster_rec->name = mysql_conn->cluster_name;
+
+	return as_mysql_fini_ctld(mysql_conn, cluster_rec);
+}
+
 extern int clusteracct_storage_p_cluster_cpus(mysql_conn_t *mysql_conn,
 					      char *cluster_nodes,
 					      uint32_t cpus,
diff --git a/src/plugins/accounting_storage/mysql/accounting_storage_mysql.h b/src/plugins/accounting_storage/mysql/accounting_storage_mysql.h
index 962211080..0e0fa9c4f 100644
--- a/src/plugins/accounting_storage/mysql/accounting_storage_mysql.h
+++ b/src/plugins/accounting_storage/mysql/accounting_storage_mysql.h
@@ -9,7 +9,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -60,6 +60,7 @@
 #define	debug5			slurm_debug5
 
 #include "src/common/assoc_mgr.h"
+#include "src/common/macros.h"
 #include "src/common/slurmdbd_defs.h"
 #include "src/common/slurm_auth.h"
 #include "src/common/uid.h"
diff --git a/src/plugins/accounting_storage/mysql/as_mysql_acct.c b/src/plugins/accounting_storage/mysql/as_mysql_acct.c
index 6065f5161..f4aae5c9b 100644
--- a/src/plugins/accounting_storage/mysql/as_mysql_acct.c
+++ b/src/plugins/accounting_storage/mysql/as_mysql_acct.c
@@ -8,7 +8,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -115,6 +115,7 @@ static int _get_account_coords(mysql_conn_t *mysql_conn,
 		coord->name = xstrdup(row[0]);
 		coord->direct = 0;
 	}
+	mysql_free_result(result);
 	return SLURM_SUCCESS;
 }
 
diff --git a/src/plugins/accounting_storage/mysql/as_mysql_acct.h b/src/plugins/accounting_storage/mysql/as_mysql_acct.h
index 187cbea5b..d4c27037d 100644
--- a/src/plugins/accounting_storage/mysql/as_mysql_acct.h
+++ b/src/plugins/accounting_storage/mysql/as_mysql_acct.h
@@ -8,7 +8,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/accounting_storage/mysql/as_mysql_archive.c b/src/plugins/accounting_storage/mysql/as_mysql_archive.c
index 2e7987841..2c81e597e 100644
--- a/src/plugins/accounting_storage/mysql/as_mysql_archive.c
+++ b/src/plugins/accounting_storage/mysql/as_mysql_archive.c
@@ -8,7 +8,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/accounting_storage/mysql/as_mysql_archive.h b/src/plugins/accounting_storage/mysql/as_mysql_archive.h
index fe5c3bbed..03de8d496 100644
--- a/src/plugins/accounting_storage/mysql/as_mysql_archive.h
+++ b/src/plugins/accounting_storage/mysql/as_mysql_archive.h
@@ -8,7 +8,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/accounting_storage/mysql/as_mysql_assoc.c b/src/plugins/accounting_storage/mysql/as_mysql_assoc.c
index 3b696b704..64b531ac5 100644
--- a/src/plugins/accounting_storage/mysql/as_mysql_assoc.c
+++ b/src/plugins/accounting_storage/mysql/as_mysql_assoc.c
@@ -8,7 +8,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -2379,15 +2379,14 @@ extern int as_mysql_add_assocs(mysql_conn_t *mysql_conn, uint32_t uid,
 	int rc = SLURM_SUCCESS;
 	int i=0;
 	slurmdb_association_rec_t *object = NULL;
-	char *cols = NULL, *vals = NULL, *txn_query = NULL,
-		*extra = NULL, *query = NULL, *update = NULL, *tmp_extra = NULL;
+	char *cols = NULL, *vals = NULL, *txn_query = NULL;
+	char *extra = NULL, *query = NULL, *update = NULL, *tmp_extra = NULL;
 	char *parent = NULL;
 	time_t now = time(NULL);
 	char *user_name = NULL;
 	char *tmp_char = NULL;
 	int assoc_id = 0;
 	int incr = 0, my_left = 0, my_par_id = 0;
-	int affect_rows = 0;
 	int moved_parent = 0;
 	MYSQL_RES *result = NULL;
 	MYSQL_ROW row;
@@ -2657,7 +2656,6 @@ extern int as_mysql_add_assocs(mysql_conn_t *mysql_conn, uint32_t uid,
 				object->rgt = rgt;
 			}
 
-			affect_rows = 2;
 			xstrfmtcat(query,
 				   "update \"%s_%s\" set deleted=0, "
 				   "id_assoc=LAST_INSERT_ID(id_assoc)%s %s;",
@@ -2682,7 +2680,7 @@ extern int as_mysql_add_assocs(mysql_conn_t *mysql_conn, uint32_t uid,
 		 * the assoc_id will already be set
 		 */
 		if (!assoc_id) {
-			affect_rows = last_affected_rows(mysql_conn);
+			(void) last_affected_rows(mysql_conn);
 			assoc_id = mysql_insert_id(mysql_conn->db_conn);
 			//info("last id was %d", assoc_id);
 		}
@@ -2968,7 +2966,7 @@ extern List as_mysql_modify_assocs(mysql_conn_t *mysql_conn, uint32_t uid,
 	int rc = SLURM_SUCCESS;
 	char *object = NULL;
 	char *vals = NULL, *extra = NULL, *query = NULL;
-	int set = 0, i = 0;
+	int i = 0;
 	bool is_admin=0, same_user=0;
 	MYSQL_RES *result = NULL;
 	slurmdb_user_rec_t user;
@@ -3028,7 +3026,7 @@ is_same_user:
 	    || assoc_cond->with_sub_accts)
 		prefix = "t2";
 
-	set = _setup_association_cond_limits(assoc_cond, prefix, &extra);
+	(void) _setup_association_cond_limits(assoc_cond, prefix, &extra);
 
 	/* This needs to be here to make sure we only modify the
 	   correct set of associations The first clause was already
@@ -3132,7 +3130,7 @@ extern List as_mysql_remove_assocs(mysql_conn_t *mysql_conn, uint32_t uid,
 	int rc = SLURM_SUCCESS;
 	char *object = NULL, *cluster_name = NULL;
 	char *extra = NULL, *query = NULL, *name_char = NULL;
-	int set = 0, i = 0, is_admin=0;
+	int i = 0, is_admin = 0;
 	MYSQL_RES *result = NULL;
 	MYSQL_ROW row;
 	slurmdb_user_rec_t user;
@@ -3165,7 +3163,7 @@ extern List as_mysql_remove_assocs(mysql_conn_t *mysql_conn, uint32_t uid,
 	    || assoc_cond->with_sub_accts)
 		prefix = "t2";
 
-	set = _setup_association_cond_limits(assoc_cond, prefix, &extra);
+	(void)_setup_association_cond_limits(assoc_cond, prefix, &extra);
 
 	xstrcat(object, rassoc_req_inx[0]);
 	for(i=1; i<RASSOC_COUNT; i++)
@@ -3279,7 +3277,6 @@ extern List as_mysql_get_assocs(mysql_conn_t *mysql_conn, uid_t uid,
 	char *tmp = NULL;
 	List assoc_list = NULL;
 	ListIterator itr = NULL;
-	int set = 0;
 	int i=0, is_admin=1;
 	uint16_t private_data = 0;
 	slurmdb_user_rec_t user;
@@ -3315,7 +3312,7 @@ extern List as_mysql_get_assocs(mysql_conn_t *mysql_conn, uid_t uid,
 	    || assoc_cond->with_sub_accts)
 		prefix = "t2";
 
-	set = _setup_association_cond_limits(assoc_cond, prefix, &extra);
+	(void) _setup_association_cond_limits(assoc_cond, prefix, &extra);
 
 	if (assoc_cond->cluster_list && list_count(assoc_cond->cluster_list))
 		use_cluster_list = assoc_cond->cluster_list;
diff --git a/src/plugins/accounting_storage/mysql/as_mysql_assoc.h b/src/plugins/accounting_storage/mysql/as_mysql_assoc.h
index 2b4d25bc5..5174f6829 100644
--- a/src/plugins/accounting_storage/mysql/as_mysql_assoc.h
+++ b/src/plugins/accounting_storage/mysql/as_mysql_assoc.h
@@ -8,7 +8,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/accounting_storage/mysql/as_mysql_cluster.c b/src/plugins/accounting_storage/mysql/as_mysql_cluster.c
index 4fea7753f..1fc7a8433 100644
--- a/src/plugins/accounting_storage/mysql/as_mysql_cluster.c
+++ b/src/plugins/accounting_storage/mysql/as_mysql_cluster.c
@@ -8,7 +8,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -353,7 +353,8 @@ extern List as_mysql_modify_clusters(mysql_conn_t *mysql_conn, uint32_t uid,
 	}
 
 	if (cluster->control_port) {
-		xstrfmtcat(vals, ", control_port=%u", cluster->control_port);
+		xstrfmtcat(vals, ", control_port=%u, last_port=%u",
+			   cluster->control_port, cluster->control_port);
 		set++;
 		clust_reg = true;
 	}
@@ -1119,6 +1120,7 @@ extern int as_mysql_register_ctld(mysql_conn_t *mysql_conn,
 	char hostname[255];
 	time_t now = time(NULL);
 	uint32_t flags = slurmdb_setup_cluster_flags();
+	int rc = SLURM_SUCCESS;
 
 	if (slurmdbd_conf)
 		fatal("clusteracct_storage_g_register_ctld "
@@ -1143,9 +1145,10 @@ extern int as_mysql_register_ctld(mysql_conn_t *mysql_conn,
 
 	query = xstrdup_printf(
 		"update %s set deleted=0, mod_time=%ld, "
-		"control_host='%s', control_port=%u, rpc_version=%d, "
-		"dimensions=%d, flags=%u, plugin_id_select=%d where name='%s';",
-		cluster_table, now, address, port, SLURMDBD_VERSION,
+		"control_host='%s', control_port=%u, last_port=%u, "
+		"rpc_version=%d, dimensions=%d, flags=%u, "
+		"plugin_id_select=%d where name='%s';",
+		cluster_table, now, address, port, port, SLURMDBD_VERSION,
 		SYSTEM_DIMENSIONS, flags, select_get_plugin_id(), cluster);
 	xstrfmtcat(query,
 		   "insert into %s "
@@ -1159,7 +1162,79 @@ extern int as_mysql_register_ctld(mysql_conn_t *mysql_conn,
 	debug3("%d(%s:%d) query\n%s",
 	       mysql_conn->conn, THIS_FILE, __LINE__, query);
 
-	return mysql_db_query(mysql_conn, query);
+	rc = mysql_db_query(mysql_conn, query);
+	xfree(query);
+	return rc;
+}
+
+extern int as_mysql_fini_ctld(mysql_conn_t *mysql_conn,
+			      slurmdb_cluster_rec_t *cluster_rec)
+{
+	int rc = SLURM_SUCCESS;
+	time_t now = time(NULL);
+	char *query = NULL;
+
+	if (check_connection(mysql_conn) != SLURM_SUCCESS)
+		return ESLURM_DB_CONNECTION;
+
+	/* Here we need to check make sure we are updating the entry
+	   correctly just incase the backup has already gained
+	   control.  If we check the ip and port it is a pretty safe
+	   bet we have the right ctld.
+	*/
+	query = xstrdup_printf(
+		"update %s set mod_time=%ld, control_host='', "
+		"control_port=0 where name='%s' && "
+		"control_host='%s' && control_port=%u;",
+		cluster_table, now, cluster_rec->name,
+		cluster_rec->control_host, cluster_rec->control_port);
+	debug3("%d(%s:%d) query\n%s",
+	       mysql_conn->conn, THIS_FILE, __LINE__, query);
+	rc = mysql_db_query(mysql_conn, query);
+	xfree(query);
+
+	if (rc != SLURM_SUCCESS)
+		return SLURM_ERROR;
+
+	if (!last_affected_rows(mysql_conn)
+	    || (slurmdbd_conf && !slurmdbd_conf->track_ctld))
+		return rc;
+
+	/* If cpus is 0 we can get the current number of cpus by
+	   sending 0 for the cpus param in the as_mysql_cluster_cpus
+	   function.
+	*/
+	if (!cluster_rec->cpu_count) {
+		cluster_rec->cpu_count = as_mysql_cluster_cpus(
+			mysql_conn, cluster_rec->control_host, 0, now);
+	}
+
+	/* Since as_mysql_cluster_cpus could change the
+	   last_affected_rows we can't group this with the above
+	   return.
+	*/
+	if (!cluster_rec->cpu_count)
+		return rc;
+
+	/* If we affected things we need to now drain the nodes in the
+	 * cluster.  This is to give better stats on accounting that
+	 * the ctld was gone so no jobs were able to be scheduled.  We
+	 * drain the nodes since the rollup functionality understands
+	 * how to deal with that and running jobs so we don't get bad
+	 * info.
+	 */
+	query = xstrdup_printf(
+		"insert into \"%s_%s\" (cpu_count, state, "
+		"time_start, reason) "
+		"values ('%u', %u, %ld, 'slurmctld disconnect')",
+		cluster_rec->name, event_table,
+		cluster_rec->cpu_count, NODE_STATE_DOWN, (long)now);
+	debug3("%d(%s:%d) query\n%s",
+	       mysql_conn->conn, THIS_FILE, __LINE__, query);
+	rc = mysql_db_query(mysql_conn, query);
+	xfree(query);
+
+	return rc;
 }
 
 extern int as_mysql_cluster_cpus(mysql_conn_t *mysql_conn,
@@ -1178,10 +1253,9 @@ extern int as_mysql_cluster_cpus(mysql_conn_t *mysql_conn,
 	/* Record the processor count */
 	query = xstrdup_printf(
 		"select cpu_count, cluster_nodes from \"%s_%s\" where "
-		"time_end=0 and node_name='' limit 1",
+		"time_end=0 and node_name='' and state=0 limit 1",
 		mysql_conn->cluster_name, event_table);
-	if (!(result = mysql_db_query_ret(
-		      mysql_conn, query, 0))) {
+	if (!(result = mysql_db_query_ret(mysql_conn, query, 0))) {
 		xfree(query);
 		if (mysql_errno(mysql_conn->db_conn) == ER_NO_SUCH_TABLE)
 			rc = ESLURM_ACCESS_DENIED;
@@ -1205,10 +1279,21 @@ extern int as_mysql_cluster_cpus(mysql_conn_t *mysql_conn,
 		 * may not be up when we run this in the controller or
 		 * in the slurmdbd.
 		 */
+		if (!cpus) {
+			rc = 0;
+			goto end_it;
+		}
+
 		first = 1;
 		goto add_it;
 	}
 
+	/* If cpus is 0 we want to return the cpu count for this cluster */
+	if (!cpus) {
+		rc = atoi(row[0]);
+		goto end_it;
+	}
+
 	if (slurm_atoul(row[0]) == cpus) {
 		debug3("we have the same cpu count as before for %s, "
 		       "no need to update the database.",
@@ -1226,12 +1311,12 @@ extern int as_mysql_cluster_cpus(mysql_conn_t *mysql_conn,
 					event_table, cluster_nodes);
 				rc = mysql_db_query(mysql_conn, query);
 				xfree(query);
-				goto end_it;
+				goto update_it;
 			} else if (!strcmp(cluster_nodes, row[1])) {
 				debug3("we have the same nodes in the cluster "
 				       "as before no need to "
 				       "update the database.");
-				goto end_it;
+				goto update_it;
 			}
 		} else
 			goto end_it;
@@ -1259,6 +1344,14 @@ add_it:
 		cluster_nodes, cpus, event_time);
 	rc = mysql_db_query(mysql_conn, query);
 	xfree(query);
+update_it:
+	query = xstrdup_printf(
+		"update \"%s_%s\" set time_end=%ld where time_end=0 "
+		"and state=%u and node_name='';",
+		mysql_conn->cluster_name, event_table, event_time,
+		NODE_STATE_DOWN);
+	rc = mysql_db_query(mysql_conn, query);
+	xfree(query);
 end_it:
 	mysql_free_result(result);
 	if (first && rc == SLURM_SUCCESS)
diff --git a/src/plugins/accounting_storage/mysql/as_mysql_cluster.h b/src/plugins/accounting_storage/mysql/as_mysql_cluster.h
index 8495c945f..3b2c6873b 100644
--- a/src/plugins/accounting_storage/mysql/as_mysql_cluster.h
+++ b/src/plugins/accounting_storage/mysql/as_mysql_cluster.h
@@ -8,7 +8,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -69,6 +69,9 @@ extern int as_mysql_node_up(mysql_conn_t *mysql_conn,
 extern int as_mysql_register_ctld(mysql_conn_t *mysql_conn,
 				  char *cluster, uint16_t port);
 
+extern int as_mysql_fini_ctld(mysql_conn_t *mysql_conn,
+			      slurmdb_cluster_rec_t *cluster_rec);
+
 extern int as_mysql_cluster_cpus(mysql_conn_t *mysql_conn,
 				 char *cluster_nodes, uint32_t cpus,
 				 time_t event_time);
diff --git a/src/plugins/accounting_storage/mysql/as_mysql_convert.c b/src/plugins/accounting_storage/mysql/as_mysql_convert.c
index d9398b150..a13625fd2 100644
--- a/src/plugins/accounting_storage/mysql/as_mysql_convert.c
+++ b/src/plugins/accounting_storage/mysql/as_mysql_convert.c
@@ -9,7 +9,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/accounting_storage/mysql/as_mysql_convert.h b/src/plugins/accounting_storage/mysql/as_mysql_convert.h
index e4291b18c..99644f73b 100644
--- a/src/plugins/accounting_storage/mysql/as_mysql_convert.h
+++ b/src/plugins/accounting_storage/mysql/as_mysql_convert.h
@@ -9,7 +9,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/accounting_storage/mysql/as_mysql_job.c b/src/plugins/accounting_storage/mysql/as_mysql_job.c
index d4b4e8d1e..c5a37deaa 100644
--- a/src/plugins/accounting_storage/mysql/as_mysql_job.c
+++ b/src/plugins/accounting_storage/mysql/as_mysql_job.c
@@ -8,7 +8,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -236,33 +236,40 @@ extern int as_mysql_job_start(mysql_conn_t *mysql_conn,
 
 	job_state = job_ptr->job_state;
 
+	if (job_ptr->resize_time) {
+		begin_time  = job_ptr->resize_time;
+		submit_time = job_ptr->resize_time;
+		start_time  = job_ptr->resize_time;
+	} else {
+		begin_time  = job_ptr->details->begin_time;
+		submit_time = job_ptr->details->submit_time;
+		start_time  = job_ptr->start_time;
+	}
+
 	/* Since we need a new db_inx make sure the old db_inx
 	 * removed. This is most likely the only time we are going to
 	 * be notified of the change also so make the state without
 	 * the resize. */
 	if (IS_JOB_RESIZING(job_ptr)) {
 		/* If we have a db_index lets end the previous record. */
-		if (job_ptr->db_index)
-			as_mysql_job_complete(mysql_conn, job_ptr);
-		else
+		if (!job_ptr->db_index) {
 			error("We don't have a db_index for job %u, "
 			      "this should never happen.", job_ptr->job_id);
+			job_ptr->db_index = _get_db_index(mysql_conn,
+							  submit_time,
+							  job_ptr->job_id,
+							  job_ptr->assoc_id);
+		}
+
+		if (job_ptr->db_index)
+			as_mysql_job_complete(mysql_conn, job_ptr);
+
 		job_state &= (~JOB_RESIZING);
 		job_ptr->db_index = 0;
 	}
 
 	job_state &= JOB_STATE_BASE;
 
-	if (job_ptr->resize_time) {
-		begin_time  = job_ptr->resize_time;
-		submit_time = job_ptr->resize_time;
-		start_time  = job_ptr->resize_time;
-	} else {
-		begin_time  = job_ptr->details->begin_time;
-		submit_time = job_ptr->details->submit_time;
-		start_time  = job_ptr->start_time;
-	}
-
 	/* See what we are hearing about here if no start time. If
 	 * this job latest time is before the last roll up we will
 	 * need to reset it to look at this job. */
@@ -614,8 +621,8 @@ extern List as_mysql_modify_job(mysql_conn_t *mysql_conn, uint32_t uid,
 		list_append(ret_list, object);
 		mysql_free_result(result);
 	} else {
-		errno = SLURM_NO_CHANGE_IN_DATA;
-		debug3("didn't effect anything\n%s", query);
+		errno = ESLURM_INVALID_JOB_ID;
+		debug3("as_mysql_modify_job: Job not found\n%s", query);
 		xfree(vals);
 		xfree(query);
 		mysql_free_result(result);
@@ -718,14 +725,26 @@ extern int as_mysql_job_complete(mysql_conn_t *mysql_conn,
 		}
 	}
 
+	/*
+	 * make sure we handle any quotes that may be in the comment
+	 */
+
 	query = xstrdup_printf("update \"%s_%s\" set "
-			       "time_end=%ld, state=%d, nodelist='%s', "
-			       "derived_ec=%d, exit_code=%d, "
-			       "kill_requid=%d where job_db_inx=%d;",
+			       "time_end=%ld, state=%d, nodelist='%s'",
 			       mysql_conn->cluster_name, job_table,
-			       end_time, job_state, nodes,
-			       job_ptr->derived_ec, job_ptr->exit_code,
-			       job_ptr->requid, job_ptr->db_index);
+			       end_time, job_state, nodes);
+
+	if (job_ptr->derived_ec != NO_VAL)
+		xstrfmtcat(query, ", derived_ec=%u", job_ptr->derived_ec);
+
+	if (job_ptr->comment) {
+		char *comment = slurm_add_slash_to_quotes(job_ptr->comment);
+		xstrfmtcat(query, ", derived_es='%s'", comment);
+		xfree(comment);
+	}
+
+	xstrfmtcat(query, ", exit_code=%d, kill_requid=%d where job_db_inx=%d;",
+		   job_ptr->exit_code, job_ptr->requid, job_ptr->db_index);
 
 	debug3("%d(%s:%d) query\n%s",
 	       mysql_conn->conn, THIS_FILE, __LINE__, query);
@@ -743,10 +762,6 @@ extern int as_mysql_step_start(mysql_conn_t *mysql_conn,
 	char node_list[BUFFER_SIZE];
 	char *node_inx = NULL, *step_name = NULL;
 	time_t start_time, submit_time;
-
-#ifdef HAVE_BG
-	char *ionodes = NULL;
-#endif
 	char *query = NULL;
 
 	if (!step_ptr->job_ptr->db_index
@@ -790,43 +805,52 @@ extern int as_mysql_step_start(mysql_conn_t *mysql_conn,
 		snprintf(node_list, BUFFER_SIZE, "%s", step_ptr->gres);
 		nodes = cpus = tasks = 1;
 	} else {
+		char *ionodes = NULL, *temp_nodes = NULL;
 		char temp_bit[BUF_SIZE];
 
 		if (step_ptr->step_node_bitmap) {
 			node_inx = bit_fmt(temp_bit, sizeof(temp_bit),
 					   step_ptr->step_node_bitmap);
 		}
-#ifdef HAVE_BG
-		tasks = cpus = step_ptr->job_ptr->details->min_cpus;
-		select_g_select_jobinfo_get(step_ptr->job_ptr->select_jobinfo,
-					    SELECT_JOBDATA_IONODES,
-					    &ionodes);
-		if (ionodes) {
-			snprintf(node_list, BUFFER_SIZE,
-				 "%s[%s]", step_ptr->job_ptr->nodes, ionodes);
-			xfree(ionodes);
-		} else
-			snprintf(node_list, BUFFER_SIZE, "%s",
-				 step_ptr->job_ptr->nodes);
+#ifdef HAVE_BG_L_P
+		/* Only L and P use this code */
+		if (step_ptr->job_ptr->details)
+			tasks = cpus = step_ptr->job_ptr->details->min_cpus;
+		else
+			tasks = cpus = step_ptr->job_ptr->cpu_cnt;
 		select_g_select_jobinfo_get(step_ptr->job_ptr->select_jobinfo,
 					    SELECT_JOBDATA_NODE_CNT,
 					    &nodes);
+		temp_nodes = step_ptr->job_ptr->nodes;
 #else
 		if (!step_ptr->step_layout
 		    || !step_ptr->step_layout->task_cnt) {
 			tasks = cpus = step_ptr->job_ptr->total_cpus;
-			snprintf(node_list, BUFFER_SIZE, "%s",
-				 step_ptr->job_ptr->nodes);
 			nodes = step_ptr->job_ptr->total_nodes;
+			temp_nodes = step_ptr->job_ptr->nodes;
 		} else {
 			cpus = step_ptr->cpu_count;
 			tasks = step_ptr->step_layout->task_cnt;
+#ifdef HAVE_BGQ
+			select_g_select_jobinfo_get(step_ptr->select_jobinfo,
+						    SELECT_JOBDATA_NODE_CNT,
+						    &nodes);
+#else
 			nodes = step_ptr->step_layout->node_cnt;
+#endif
 			task_dist = step_ptr->step_layout->task_dist;
-			snprintf(node_list, BUFFER_SIZE, "%s",
-				 step_ptr->step_layout->node_list);
+			temp_nodes = step_ptr->step_layout->node_list;
 		}
 #endif
+		select_g_select_jobinfo_get(step_ptr->select_jobinfo,
+					    SELECT_JOBDATA_IONODES,
+					    &ionodes);
+		if (ionodes) {
+			snprintf(node_list, BUFFER_SIZE, "%s[%s]",
+				 temp_nodes, ionodes);
+			xfree(ionodes);
+		} else
+			snprintf(node_list, BUFFER_SIZE, "%s", temp_nodes);
 	}
 
 	if (!step_ptr->job_ptr->db_index) {
@@ -884,7 +908,7 @@ extern int as_mysql_step_complete(mysql_conn_t *mysql_conn,
 	time_t now;
 	int elapsed;
 	int comp_status;
-	int cpus = 0, tasks = 0;
+	int cpus = 0;
 	struct jobacctinfo *jobacct = (struct jobacctinfo *)step_ptr->jobacct;
 	struct jobacctinfo dummy_jobacct;
 	double ave_vsize = 0, ave_rss = 0, ave_pages = 0;
@@ -923,23 +947,20 @@ extern int as_mysql_step_complete(mysql_conn_t *mysql_conn,
 
 	if (slurmdbd_conf) {
 		now = step_ptr->job_ptr->end_time;
-		tasks = step_ptr->job_ptr->details->num_tasks;
 		cpus = step_ptr->cpu_count;
 	} else if (step_ptr->step_id == SLURM_BATCH_SCRIPT) {
 		now = time(NULL);
-		cpus = tasks = 1;
+		cpus = 1;
 	} else {
 		now = time(NULL);
-#ifdef HAVE_BG
-		tasks = cpus = step_ptr->job_ptr->details->min_cpus;
-
+#ifdef HAVE_BG_L_P
+		/* Only L and P use this code */
+		cpus = step_ptr->job_ptr->details->min_cpus;
 #else
 		if (!step_ptr->step_layout || !step_ptr->step_layout->task_cnt)
-			tasks = cpus = step_ptr->job_ptr->total_cpus;
-		else {
+			cpus = step_ptr->job_ptr->total_cpus;
+		else
 			cpus = step_ptr->cpu_count;
-			tasks = step_ptr->step_layout->task_cnt;
-		}
 #endif
 	}
 
diff --git a/src/plugins/accounting_storage/mysql/as_mysql_job.h b/src/plugins/accounting_storage/mysql/as_mysql_job.h
index 6267478af..2de6601fe 100644
--- a/src/plugins/accounting_storage/mysql/as_mysql_job.h
+++ b/src/plugins/accounting_storage/mysql/as_mysql_job.h
@@ -8,7 +8,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/accounting_storage/mysql/as_mysql_jobacct_process.c b/src/plugins/accounting_storage/mysql/as_mysql_jobacct_process.c
index 6befc4416..f279b81f6 100644
--- a/src/plugins/accounting_storage/mysql/as_mysql_jobacct_process.c
+++ b/src/plugins/accounting_storage/mysql/as_mysql_jobacct_process.c
@@ -10,7 +10,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -273,6 +273,7 @@ static void _state_time_string(char **extra, uint32_t state,
 	case JOB_FAILED:
 	case JOB_TIMEOUT:
 	case JOB_NODE_FAIL:
+	case JOB_PREEMPTED:
 	default:
 		xstrfmtcat(*extra, "(t1.state='%u' && (t1.time_end && ", state);
 		if (start) {
@@ -327,7 +328,7 @@ static int _cluster_get_jobs(mysql_conn_t *mysql_conn,
 	int set = 0;
 	char *prefix="t2";
 	int rc = SLURM_SUCCESS;
-	int last_id = -1, curr_id = -1, last_state = -1;
+	int last_id = -1, curr_id = -1;
 	local_cluster_t *curr_cluster = NULL;
 
 	/* This is here to make sure we are looking at only this user
@@ -451,7 +452,6 @@ static int _cluster_get_jobs(mysql_conn_t *mysql_conn,
 
 		job = slurmdb_create_job_rec();
 		job->state = slurm_atoul(row[JOB_REQ_STATE]);
-		last_state = job->state;
 		if (curr_id == last_id)
 			/* put in reverse so we order by the submit getting
 			   larger which it is given to us in reverse
@@ -830,6 +830,7 @@ extern List setup_cluster_list_with_inx(mysql_conn_t *mysql_conn,
 	hostlist_t temp_hl = NULL;
 	hostlist_iterator_t h_itr = NULL;
 	char *query = NULL;
+	int dims = 0;
 
 	if (!job_cond || !job_cond->used_nodes)
 		return NULL;
@@ -842,7 +843,28 @@ extern List setup_cluster_list_with_inx(mysql_conn_t *mysql_conn,
 		return NULL;
 	}
 
-	temp_hl = hostlist_create(job_cond->used_nodes);
+	/* get the dimensions of this cluster so we know how to deal
+	   with the hostlists */
+	query = xstrdup_printf("select dimensions from %s where name='%s'",
+			       cluster_table,
+			       (char *)list_peek(job_cond->cluster_list));
+
+	debug4("%d(%s:%d) query\n%s",
+	       mysql_conn->conn, THIS_FILE, __LINE__, query);
+	if (!(result = mysql_db_query_ret(mysql_conn, query, 0))) {
+		xfree(query);
+		return NULL;
+	}
+	xfree(query);
+
+	if (!(row = mysql_fetch_row(result))) {
+		error("Couldn't get the dimensions of cluster '%s'.",
+		      (char *)list_peek(job_cond->cluster_list));
+		return NULL;
+	}
+	dims = atoi(row[0]);
+
+	temp_hl = hostlist_create_dims(job_cond->used_nodes, dims);
 	if (hostlist_count(temp_hl) <= 0) {
 		error("we didn't get any real hosts to look for.");
 		goto no_hosts;
@@ -879,12 +901,12 @@ extern List setup_cluster_list_with_inx(mysql_conn_t *mysql_conn,
 		int loc = 0;
 		local_cluster_t *local_cluster =
 			xmalloc(sizeof(local_cluster_t));
-		local_cluster->hl = hostlist_create(row[0]);
+		local_cluster->hl = hostlist_create_dims(row[0], dims);
 		local_cluster->start = slurm_atoul(row[1]);
 		local_cluster->end   = slurm_atoul(row[2]);
 		local_cluster->asked_bitmap =
 			bit_alloc(hostlist_count(local_cluster->hl));
-		while ((host = hostlist_next(h_itr))) {
+		while ((host = hostlist_next_dims(h_itr, dims))) {
 			if ((loc = hostlist_find(
 				     local_cluster->hl, host)) != -1)
 				bit_set(local_cluster->asked_bitmap, loc);
diff --git a/src/plugins/accounting_storage/mysql/as_mysql_jobacct_process.h b/src/plugins/accounting_storage/mysql/as_mysql_jobacct_process.h
index 39f3347a6..47cdd3bf9 100644
--- a/src/plugins/accounting_storage/mysql/as_mysql_jobacct_process.h
+++ b/src/plugins/accounting_storage/mysql/as_mysql_jobacct_process.h
@@ -9,7 +9,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/accounting_storage/mysql/as_mysql_problems.c b/src/plugins/accounting_storage/mysql/as_mysql_problems.c
index 787c70ba1..df1f2ef24 100644
--- a/src/plugins/accounting_storage/mysql/as_mysql_problems.c
+++ b/src/plugins/accounting_storage/mysql/as_mysql_problems.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/accounting_storage/mysql/as_mysql_problems.h b/src/plugins/accounting_storage/mysql/as_mysql_problems.h
index 94c3713a3..2b48911ad 100644
--- a/src/plugins/accounting_storage/mysql/as_mysql_problems.h
+++ b/src/plugins/accounting_storage/mysql/as_mysql_problems.h
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/accounting_storage/mysql/as_mysql_qos.c b/src/plugins/accounting_storage/mysql/as_mysql_qos.c
index 2434d2916..556eb6f70 100644
--- a/src/plugins/accounting_storage/mysql/as_mysql_qos.c
+++ b/src/plugins/accounting_storage/mysql/as_mysql_qos.c
@@ -8,7 +8,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -112,19 +112,23 @@ static int _setup_qos_limits(slurmdb_qos_rec_t *qos,
 			qos->grp_cpu_run_mins = (uint64_t)INFINITE;
 		if (qos->max_cpus_pj == NO_VAL)
 			qos->max_cpus_pj = INFINITE;
+		if (qos->max_cpus_pu == NO_VAL)
+			qos->max_cpus_pu = INFINITE;
 		if (qos->max_jobs_pu == NO_VAL)
 			qos->max_jobs_pu = INFINITE;
 		if (qos->max_nodes_pj == NO_VAL)
 			qos->max_nodes_pj = INFINITE;
+		if (qos->max_nodes_pu == NO_VAL)
+			qos->max_nodes_pu = INFINITE;
 		if (qos->max_submit_jobs_pu == NO_VAL)
 			qos->max_submit_jobs_pu = INFINITE;
 		if (qos->max_wall_pj == NO_VAL)
 			qos->max_wall_pj = INFINITE;
 		if (qos->preempt_mode == (uint16_t)NO_VAL)
 			qos->preempt_mode = (uint16_t)INFINITE;
-		if (qos->usage_factor == (double)NO_VAL)
+		if (fuzzy_equal(qos->usage_factor, NO_VAL))
 			qos->usage_factor = (double)INFINITE;
-		if (qos->usage_thres == (double)NO_VAL)
+		if (fuzzy_equal(qos->usage_thres, NO_VAL))
 			qos->usage_thres = (double)INFINITE;
 	}
 
@@ -156,6 +160,17 @@ static int _setup_qos_limits(slurmdb_qos_rec_t *qos,
 		}
 	}
 
+	if (qos->grace_time == INFINITE) {
+		xstrcat(*cols, ", grace_time");
+		xstrcat(*vals, ", NULL");
+		xstrcat(*extra, ", grace_time=NULL");
+	} else if ((qos->grace_time != NO_VAL) &&
+		   ((int32_t)qos->grace_time >= 0)) {
+		xstrcat(*cols, ", grace_time");
+		xstrfmtcat(*vals, ", %u", qos->grace_time);
+		xstrfmtcat(*extra, ", grace_time=%u", qos->grace_time);
+	}
+
 	if (qos->grp_cpu_mins == (uint64_t)INFINITE) {
 		xstrcat(*cols, ", grp_cpu_mins");
 		xstrcat(*vals, ", NULL");
@@ -275,6 +290,17 @@ static int _setup_qos_limits(slurmdb_qos_rec_t *qos,
 		xstrfmtcat(*extra, ", max_cpus_per_job=%u", qos->max_cpus_pj);
 	}
 
+	if (qos->max_cpus_pu == INFINITE) {
+		xstrcat(*cols, ", max_cpus_per_user");
+		xstrcat(*vals, ", NULL");
+		xstrcat(*extra, ", max_cpus_per_user=NULL");
+	} else if ((qos->max_cpus_pu != NO_VAL)
+		   && ((int32_t)qos->max_cpus_pu >= 0)) {
+		xstrcat(*cols, ", max_cpus_per_user");
+		xstrfmtcat(*vals, ", %u", qos->max_cpus_pu);
+		xstrfmtcat(*extra, ", max_cpus_per_user=%u", qos->max_cpus_pu);
+	}
+
 	if (qos->max_jobs_pu == INFINITE) {
 		xstrcat(*cols, ", max_jobs_per_user");
 		xstrcat(*vals, ", NULL");
@@ -298,6 +324,18 @@ static int _setup_qos_limits(slurmdb_qos_rec_t *qos,
 			   qos->max_nodes_pj);
 	}
 
+	if (qos->max_nodes_pu == INFINITE) {
+		xstrcat(*cols, ", max_nodes_per_user");
+		xstrcat(*vals, ", NULL");
+		xstrcat(*extra, ", max_nodes_per_user=NULL");
+	} else if ((qos->max_nodes_pu != NO_VAL)
+		   && ((int32_t)qos->max_nodes_pu >= 0)) {
+		xstrcat(*cols, ", max_nodes_per_user");
+		xstrfmtcat(*vals, ", %u", qos->max_nodes_pu);
+		xstrfmtcat(*extra, ", max_nodes_per_user=%u",
+			   qos->max_nodes_pu);
+	}
+
 	if (qos->max_submit_jobs_pu == INFINITE) {
 		xstrcat(*cols, ", max_submit_jobs_per_user");
 		xstrcat(*vals, ", NULL");
@@ -385,22 +423,22 @@ static int _setup_qos_limits(slurmdb_qos_rec_t *qos,
 		xstrfmtcat(*extra, ", priority=%u", qos->priority);
 	}
 
-	if (qos->usage_factor == (double)INFINITE) {
+	if (fuzzy_equal(qos->usage_factor, INFINITE)) {
 		xstrcat(*cols, ", usage_factor");
 		xstrcat(*vals, ", 1");
 		xstrcat(*extra, ", usage_factor=1");
-	} else if ((qos->usage_factor != (double)NO_VAL)
+	} else if (!fuzzy_equal(qos->usage_factor, NO_VAL)
 		   && (qos->usage_factor >= 0)) {
 		xstrcat(*cols, ", usage_factor");
 		xstrfmtcat(*vals, ", %f", qos->usage_factor);
 		xstrfmtcat(*extra, ", usage_factor=%f", qos->usage_factor);
 	}
 
-	if (qos->usage_thres == (double)INFINITE) {
+	if (fuzzy_equal(qos->usage_thres, INFINITE)) {
 		xstrcat(*cols, ", usage_thres");
 		xstrcat(*vals, ", NULL");
 		xstrcat(*extra, ", usage_thres=NULL");
-	} else if ((qos->usage_thres != (double)NO_VAL)
+	} else if (!fuzzy_equal(qos->usage_thres, NO_VAL)
 		   && (qos->usage_thres >= 0)) {
 		xstrcat(*cols, ", usage_thres");
 		xstrfmtcat(*vals, ", %f", qos->usage_thres);
@@ -640,6 +678,7 @@ extern List as_mysql_modify_qos(mysql_conn_t *mysql_conn, uint32_t uid,
 		qos_rec->flags = qos->flags;
 
 		qos_rec->grp_cpus = qos->grp_cpus;
+		qos_rec->grace_time = qos->grace_time;
 		qos_rec->grp_cpu_mins = qos->grp_cpu_mins;
 		qos_rec->grp_cpu_run_mins = qos->grp_cpu_run_mins;
 		qos_rec->grp_jobs = qos->grp_jobs;
@@ -648,10 +687,12 @@ extern List as_mysql_modify_qos(mysql_conn_t *mysql_conn, uint32_t uid,
 		qos_rec->grp_wall = qos->grp_wall;
 
 		qos_rec->max_cpus_pj = qos->max_cpus_pj;
+		qos_rec->max_cpus_pu = qos->max_cpus_pu;
 		qos_rec->max_cpu_mins_pj = qos->max_cpu_mins_pj;
 		qos_rec->max_cpu_run_mins_pu = qos->max_cpu_run_mins_pu;
 		qos_rec->max_jobs_pu  = qos->max_jobs_pu;
 		qos_rec->max_nodes_pj = qos->max_nodes_pj;
+		qos_rec->max_nodes_pu = qos->max_nodes_pu;
 		qos_rec->max_submit_jobs_pu  = qos->max_submit_jobs_pu;
 		qos_rec->max_wall_pj = qos->max_wall_pj;
 
@@ -921,6 +962,7 @@ extern List as_mysql_get_qos(mysql_conn_t *mysql_conn, uid_t uid,
 		"description",
 		"id",
 		"flags",
+		"grace_time",
 		"grp_cpu_mins",
 		"grp_cpu_run_mins",
 		"grp_cpus",
@@ -931,8 +973,10 @@ extern List as_mysql_get_qos(mysql_conn_t *mysql_conn, uid_t uid,
 		"max_cpu_mins_per_job",
 		"max_cpu_run_mins_per_user",
 		"max_cpus_per_job",
+		"max_cpus_per_user",
 		"max_jobs_per_user",
 		"max_nodes_per_job",
+		"max_nodes_per_user",
 		"max_submit_jobs_per_user",
 		"max_wall_duration_per_job",
 		"preempt",
@@ -946,6 +990,7 @@ extern List as_mysql_get_qos(mysql_conn_t *mysql_conn, uid_t uid,
 		QOS_REQ_DESC,
 		QOS_REQ_ID,
 		QOS_REQ_FLAGS,
+		QOS_REQ_GRACE,
 		QOS_REQ_GCM,
 		QOS_REQ_GCRM,
 		QOS_REQ_GC,
@@ -956,8 +1001,10 @@ extern List as_mysql_get_qos(mysql_conn_t *mysql_conn, uid_t uid,
 		QOS_REQ_MCMPJ,
 		QOS_REQ_MCRM,
 		QOS_REQ_MCPJ,
+		QOS_REQ_MCPU,
 		QOS_REQ_MJPU,
 		QOS_REQ_MNPJ,
+		QOS_REQ_MNPU,
 		QOS_REQ_MSJPU,
 		QOS_REQ_MWPJ,
 		QOS_REQ_PREE,
@@ -1064,6 +1111,11 @@ empty:
 		if (row[QOS_REQ_NAME] && row[QOS_REQ_NAME][0])
 			qos->name = xstrdup(row[QOS_REQ_NAME]);
 
+		if (row[QOS_REQ_GRACE])
+			qos->grace_time = slurm_atoul(row[QOS_REQ_GRACE]);
+		else
+			qos->grace_time = (uint32_t)NO_VAL;
+
 		if (row[QOS_REQ_GCM])
 			qos->grp_cpu_mins = slurm_atoull(row[QOS_REQ_GCM]);
 		else
@@ -1106,6 +1158,10 @@ empty:
 			qos->max_cpus_pj = slurm_atoul(row[QOS_REQ_MCPJ]);
 		else
 			qos->max_cpus_pj = INFINITE;
+		if (row[QOS_REQ_MCPU])
+			qos->max_cpus_pu = slurm_atoul(row[QOS_REQ_MCPU]);
+		else
+			qos->max_cpus_pu = INFINITE;
 		if (row[QOS_REQ_MJPU])
 			qos->max_jobs_pu = slurm_atoul(row[QOS_REQ_MJPU]);
 		else
@@ -1114,6 +1170,10 @@ empty:
 			qos->max_nodes_pj = slurm_atoul(row[QOS_REQ_MNPJ]);
 		else
 			qos->max_nodes_pj = INFINITE;
+		if (row[QOS_REQ_MNPU])
+			qos->max_nodes_pu = slurm_atoul(row[QOS_REQ_MNPU]);
+		else
+			qos->max_nodes_pu = INFINITE;
 		if (row[QOS_REQ_MSJPU])
 			qos->max_submit_jobs_pu =
 				slurm_atoul(row[QOS_REQ_MSJPU]);
diff --git a/src/plugins/accounting_storage/mysql/as_mysql_qos.h b/src/plugins/accounting_storage/mysql/as_mysql_qos.h
index e7f174716..4b2cedbad 100644
--- a/src/plugins/accounting_storage/mysql/as_mysql_qos.h
+++ b/src/plugins/accounting_storage/mysql/as_mysql_qos.h
@@ -8,7 +8,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/accounting_storage/mysql/as_mysql_resv.c b/src/plugins/accounting_storage/mysql/as_mysql_resv.c
index 8df6b955f..902ccc589 100644
--- a/src/plugins/accounting_storage/mysql/as_mysql_resv.c
+++ b/src/plugins/accounting_storage/mysql/as_mysql_resv.c
@@ -8,7 +8,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -471,7 +471,6 @@ extern List as_mysql_get_resvs(mysql_conn_t *mysql_conn, uid_t uid,
 	char *extra = NULL;
 	char *tmp = NULL;
 	List resv_list = NULL;
-	int set = 0;
 	int i=0, is_admin=1;
 	MYSQL_RES *result = NULL;
 	MYSQL_ROW row;
@@ -542,7 +541,7 @@ extern List as_mysql_get_resvs(mysql_conn_t *mysql_conn, uid_t uid,
 		job_cond.usage_end = resv_cond->time_end;
 	}
 
-	set = _setup_resv_cond_limits(resv_cond, &extra);
+	(void) _setup_resv_cond_limits(resv_cond, &extra);
 
 	with_usage = resv_cond->with_usage;
 
diff --git a/src/plugins/accounting_storage/mysql/as_mysql_resv.h b/src/plugins/accounting_storage/mysql/as_mysql_resv.h
index f2f0e8604..f0ddae89f 100644
--- a/src/plugins/accounting_storage/mysql/as_mysql_resv.h
+++ b/src/plugins/accounting_storage/mysql/as_mysql_resv.h
@@ -8,7 +8,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/accounting_storage/mysql/as_mysql_rollup.c b/src/plugins/accounting_storage/mysql/as_mysql_rollup.c
index 416e360a6..c24c2f812 100644
--- a/src/plugins/accounting_storage/mysql/as_mysql_rollup.c
+++ b/src/plugins/accounting_storage/mysql/as_mysql_rollup.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -40,6 +40,7 @@
 
 #include "as_mysql_rollup.h"
 #include "as_mysql_archive.h"
+#include "src/common/parse_time.h"
 
 typedef struct {
 	int id;
@@ -146,6 +147,305 @@ static int _process_purge(mysql_conn_t *mysql_conn,
 	return rc;
 }
 
+static int _process_cluster_usage(mysql_conn_t *mysql_conn,
+				  char *cluster_name,
+				  time_t curr_start, time_t curr_end,
+				  time_t now, local_cluster_usage_t *c_usage)
+{
+	int rc = SLURM_SUCCESS;
+	char *query = NULL;
+	uint64_t total_used;
+	char start_char[20], end_char[20];
+
+	if (!c_usage)
+		return rc;
+	/* Now put the lists into the usage tables */
+
+	/* sanity check to make sure we don't have more
+	   allocated cpus than possible. */
+	if (c_usage->total_time < c_usage->a_cpu) {
+		slurm_make_time_str(&curr_start, start_char,
+				    sizeof(start_char));
+		slurm_make_time_str(&curr_end, end_char,
+				    sizeof(end_char));
+		error("We have more allocated time than is "
+		      "possible (%"PRIu64" > %"PRIu64") for "
+		      "cluster %s(%d) from %s - %s",
+		      c_usage->a_cpu, c_usage->total_time,
+		      cluster_name, c_usage->cpu_count,
+		      start_char, end_char);
+		c_usage->a_cpu = c_usage->total_time;
+	}
+
+	total_used = c_usage->a_cpu + c_usage->d_cpu + c_usage->pd_cpu;
+
+	/* Make sure the total time we care about
+	   doesn't go over the limit */
+	if (c_usage->total_time < total_used) {
+		int64_t overtime;
+
+		slurm_make_time_str(&curr_start, start_char,
+				    sizeof(start_char));
+		slurm_make_time_str(&curr_end, end_char,
+				    sizeof(end_char));
+		error("We have more time than is "
+		      "possible (%"PRIu64"+%"PRIu64"+%"
+		      PRIu64")(%"PRIu64") > %"PRIu64" for "
+		      "cluster %s(%d) from %s - %s",
+		      c_usage->a_cpu, c_usage->d_cpu,
+		      c_usage->pd_cpu, total_used,
+		      c_usage->total_time,
+		      cluster_name, c_usage->cpu_count,
+		      start_char, end_char);
+
+		/* First figure out how much actual down time
+		   we have and then how much
+		   planned down time we have. */
+		overtime = (int64_t)(c_usage->total_time -
+				     (c_usage->a_cpu + c_usage->d_cpu));
+		if (overtime < 0) {
+			c_usage->d_cpu += overtime;
+			if ((int64_t)c_usage->d_cpu < 0)
+				c_usage->d_cpu = 0;
+		}
+
+		overtime = (int64_t)(c_usage->total_time -
+				     (c_usage->a_cpu + c_usage->d_cpu
+				      + c_usage->pd_cpu));
+		if (overtime < 0) {
+			c_usage->pd_cpu += overtime;
+			if ((int64_t)c_usage->pd_cpu < 0)
+				c_usage->pd_cpu = 0;
+		}
+
+		total_used = c_usage->a_cpu +
+			c_usage->d_cpu + c_usage->pd_cpu;
+		/* info("We now have (%"PRIu64"+%"PRIu64"+" */
+		/*      "%"PRIu64")(%"PRIu64") " */
+		/*       "?= %"PRIu64"", */
+		/*       c_usage->a_cpu, c_usage->d_cpu, */
+		/*       c_usage->pd_cpu, total_used, */
+		/*       c_usage->total_time); */
+	}
+
+	c_usage->i_cpu = c_usage->total_time - total_used - c_usage->r_cpu;
+	/* sanity check just to make sure we have a
+	 * legitimate time after we calculated
+	 * idle/reserved time put extra in the over
+	 * commit field
+	 */
+	/* info("%s got idle of %lld", c_usage->name, */
+	/*      (int64_t)c_usage->i_cpu); */
+	if ((int64_t)c_usage->i_cpu < 0) {
+		/* info("got %d %d %d", c_usage->r_cpu, */
+		/*      c_usage->i_cpu, c_usage->o_cpu); */
+		c_usage->r_cpu += (int64_t)c_usage->i_cpu;
+		c_usage->o_cpu -= (int64_t)c_usage->i_cpu;
+		c_usage->i_cpu = 0;
+		if ((int64_t)c_usage->r_cpu < 0)
+			c_usage->r_cpu = 0;
+	}
+
+	/* info("cluster %s(%u) down %"PRIu64" alloc %"PRIu64" " */
+	/*      "resv %"PRIu64" idle %"PRIu64" over %"PRIu64" " */
+	/*      "total= %"PRIu64" ?= %"PRIu64" from %s", */
+	/*      cluster_name, */
+	/*      c_usage->cpu_count, c_usage->d_cpu, c_usage->a_cpu, */
+	/*      c_usage->r_cpu, c_usage->i_cpu, c_usage->o_cpu, */
+	/*      c_usage->d_cpu + c_usage->a_cpu + */
+	/*      c_usage->r_cpu + c_usage->i_cpu, */
+	/*      c_usage->total_time, */
+	/*      ctime(&c_usage->start)); */
+	/* info("to %s", ctime(&c_usage->end)); */
+	query = xstrdup_printf("insert into \"%s_%s\" "
+			       "(creation_time, "
+			       "mod_time, time_start, "
+			       "cpu_count, alloc_cpu_secs, "
+			       "down_cpu_secs, pdown_cpu_secs, "
+			       "idle_cpu_secs, over_cpu_secs, "
+			       "resv_cpu_secs) "
+			       "values (%ld, %ld, %ld, %d, "
+			       "%"PRIu64", %"PRIu64", %"PRIu64", "
+			       "%"PRIu64", %"PRIu64", %"PRIu64")",
+			       cluster_name, cluster_hour_table,
+			       now, now,
+			       c_usage->start,
+			       c_usage->cpu_count,
+			       c_usage->a_cpu, c_usage->d_cpu,
+			       c_usage->pd_cpu, c_usage->i_cpu,
+			       c_usage->o_cpu, c_usage->r_cpu);
+
+	/* Spacing out the inserts here instead of doing them
+	   all at once in the end proves to be faster.  Just FYI
+	   so we don't go testing again and again.
+	*/
+	if (query) {
+		xstrfmtcat(query,
+			   " on duplicate key update "
+			   "mod_time=%ld, cpu_count=VALUES(cpu_count), "
+			   "alloc_cpu_secs=VALUES(alloc_cpu_secs), "
+			   "down_cpu_secs=VALUES(down_cpu_secs), "
+			   "pdown_cpu_secs=VALUES(pdown_cpu_secs), "
+			   "idle_cpu_secs=VALUES(idle_cpu_secs), "
+			   "over_cpu_secs=VALUES(over_cpu_secs), "
+			   "resv_cpu_secs=VALUES(resv_cpu_secs)",
+			   now);
+		debug3("%d(%s:%d) query\n%s",
+		       mysql_conn->conn, THIS_FILE, __LINE__, query);
+		rc = mysql_db_query(mysql_conn, query);
+		xfree(query);
+		if (rc != SLURM_SUCCESS)
+			error("Couldn't add cluster hour rollup");
+	}
+
+	return rc;
+}
+
+static local_cluster_usage_t *_setup_cluster_usage(mysql_conn_t *mysql_conn,
+						   char *cluster_name,
+						   time_t curr_start,
+						   time_t curr_end,
+						   List cluster_down_list)
+{
+	local_cluster_usage_t *c_usage = NULL;
+	char *query = NULL;
+	MYSQL_RES *result = NULL;
+	MYSQL_ROW row;
+	int i = 0;
+
+	char *event_req_inx[] = {
+		"node_name",
+		"cpu_count",
+		"time_start",
+		"time_end",
+		"state",
+	};
+	char *event_str = NULL;
+	enum {
+		EVENT_REQ_NAME,
+		EVENT_REQ_CPU,
+		EVENT_REQ_START,
+		EVENT_REQ_END,
+		EVENT_REQ_STATE,
+		EVENT_REQ_COUNT
+	};
+
+	xstrfmtcat(event_str, "%s", event_req_inx[i]);
+	for(i=1; i<EVENT_REQ_COUNT; i++) {
+		xstrfmtcat(event_str, ", %s", event_req_inx[i]);
+	}
+
+	/* first get the events during this time.  All that is
+	 * except things with the maintenance flag set in the
+	 * state.  We handle those later with the reservations.
+	 */
+	query = xstrdup_printf("select %s from \"%s_%s\" where "
+			       "!(state & %d) && (time_start < %ld "
+			       "&& (time_end >= %ld "
+			       "|| time_end = 0)) "
+			       "order by node_name, time_start",
+			       event_str, cluster_name, event_table,
+			       NODE_STATE_MAINT,
+			       curr_end, curr_start);
+	xfree(event_str);
+
+	debug3("%d(%s:%d) query\n%s",
+	       mysql_conn->conn, THIS_FILE, __LINE__, query);
+	if (!(result = mysql_db_query_ret(mysql_conn, query, 0))) {
+		xfree(query);
+		return NULL;
+	}
+	xfree(query);
+
+	while ((row = mysql_fetch_row(result))) {
+		time_t row_start = slurm_atoul(row[EVENT_REQ_START]);
+		time_t row_end = slurm_atoul(row[EVENT_REQ_END]);
+		uint32_t row_cpu = slurm_atoul(row[EVENT_REQ_CPU]);
+		uint16_t state = slurm_atoul(row[EVENT_REQ_STATE]);
+		if (row_start < curr_start)
+			row_start = curr_start;
+
+		if (!row_end || row_end > curr_end)
+			row_end = curr_end;
+
+		/* Don't worry about it if the time is less
+		 * than 1 second.
+		 */
+		if ((row_end - row_start) < 1)
+			continue;
+
+		/* this means we are a cluster registration
+		   entry */
+		if (!row[EVENT_REQ_NAME][0]) {
+			/* if the cpu count changes we will
+			 * only care about the last cpu count but
+			 * we will keep a total of the time for
+			 * all cpus to get the correct cpu time
+			 * for the entire period.
+			 */
+			if (state || !c_usage) {
+				local_cluster_usage_t *loc_c_usage;
+
+				loc_c_usage = xmalloc(
+					sizeof(local_cluster_usage_t));
+				loc_c_usage->cpu_count = row_cpu;
+				loc_c_usage->total_time =
+					(row_end - row_start) * row_cpu;
+				loc_c_usage->start = row_start;
+				loc_c_usage->end = row_end;
+				/* If this has a state it
+				   means the slurmctld went
+				   down and we should put this
+				   on the list and remove any
+				   jobs from this time that
+				   were running later.
+				*/
+				if (state)
+					list_append(cluster_down_list,
+						    loc_c_usage);
+				else
+					c_usage = loc_c_usage;
+				loc_c_usage = NULL;
+			} else {
+				c_usage->cpu_count = row_cpu;
+				c_usage->total_time +=
+					(row_end - row_start) * row_cpu;
+				c_usage->end = row_end;
+			}
+			continue;
+		}
+
+		/* only record down time for the cluster we
+		   are looking for.  If it was during this
+		   time period we would already have it.
+		*/
+		if (c_usage) {
+			int local_start = row_start;
+			int local_end = row_end;
+			int seconds;
+			if (c_usage->start > local_start)
+				local_start = c_usage->start;
+			if (c_usage->end < local_end)
+				local_end = c_usage->end;
+			seconds = (local_end - local_start);
+			if (seconds > 0) {
+				/* info("node %s adds " */
+				/*      "(%d)(%d-%d) * %d = %d " */
+				/*      "to %d", */
+				/*      row[EVENT_REQ_NAME], */
+				/*      seconds, */
+				/*      local_end, local_start, */
+				/*      row_cpu, */
+				/*      seconds * row_cpu, */
+				/*      row_cpu); */
+				c_usage->d_cpu += seconds * row_cpu;
+			}
+		}
+	}
+	mysql_free_result(result);
+	return c_usage;
+}
+
 extern int as_mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 				  char *cluster_name,
 				  time_t start, time_t end,
@@ -165,25 +465,11 @@ extern int as_mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 	ListIterator w_itr = NULL;
 	ListIterator r_itr = NULL;
 	List assoc_usage_list = list_create(_destroy_local_id_usage);
-	List cluster_usage_list = list_create(_destroy_local_cluster_usage);
+	List cluster_down_list = list_create(_destroy_local_cluster_usage);
 	List wckey_usage_list = list_create(_destroy_local_id_usage);
 	List resv_usage_list = list_create(_destroy_local_resv_usage);
 	uint16_t track_wckey = slurm_get_track_wckey();
-
-	char *event_req_inx[] = {
-		"node_name",
-		"cpu_count",
-		"time_start",
-		"time_end",
-	};
-	char *event_str = NULL;
-	enum {
-		EVENT_REQ_NAME,
-		EVENT_REQ_CPU,
-		EVENT_REQ_START,
-		EVENT_REQ_END,
-		EVENT_REQ_COUNT
-	};
+	/* char start_char[20], end_char[20]; */
 
 	char *job_req_inx[] = {
 		"job_db_inx",
@@ -245,12 +531,6 @@ extern int as_mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 		RESV_REQ_COUNT
 	};
 
-	i=0;
-	xstrfmtcat(event_str, "%s", event_req_inx[i]);
-	for(i=1; i<EVENT_REQ_COUNT; i++) {
-		xstrfmtcat(event_str, ", %s", event_req_inx[i]);
-	}
-
 	i=0;
 	xstrfmtcat(job_str, "%s", job_req_inx[i]);
 	for(i=1; i<JOB_REQ_COUNT; i++) {
@@ -272,13 +552,15 @@ extern int as_mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 /* 	info("begin start %s", ctime(&curr_start)); */
 /* 	info("begin end %s", ctime(&curr_end)); */
 	a_itr = list_iterator_create(assoc_usage_list);
-	c_itr = list_iterator_create(cluster_usage_list);
+	c_itr = list_iterator_create(cluster_down_list);
 	w_itr = list_iterator_create(wckey_usage_list);
 	r_itr = list_iterator_create(resv_usage_list);
 	while (curr_start < end) {
 		int last_id = -1;
 		int last_wckeyid = -1;
 		int seconds = 0;
+		int tot_time = 0;
+		local_cluster_usage_t *loc_c_usage = NULL;
 		local_cluster_usage_t *c_usage = NULL;
 		local_resv_usage_t *r_usage = NULL;
 		local_id_usage_t *a_usage = NULL;
@@ -289,102 +571,9 @@ extern int as_mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 /* 		info("start %s", ctime(&curr_start)); */
 /* 		info("end %s", ctime(&curr_end)); */
 
-		/* first get the events during this time.  All that is
-		 * except things with the maintainance flag set in the
-		 * state.  We handle those later with the reservations.
-		 */
-		query = xstrdup_printf("select %s from \"%s_%s\" where "
-				       "!(state & %d) && (time_start < %ld "
-				       "&& (time_end >= %ld "
-				       "|| time_end = 0)) "
-				       "order by node_name, time_start",
-				       event_str, cluster_name, event_table,
-				       NODE_STATE_MAINT,
-				       curr_end, curr_start);
-
-		debug3("%d(%s:%d) query\n%s",
-		       mysql_conn->conn, THIS_FILE, __LINE__, query);
-		if (!(result = mysql_db_query_ret(
-			     mysql_conn, query, 0))) {
-			xfree(query);
-			return SLURM_ERROR;
-		}
-		xfree(query);
-
-		while ((row = mysql_fetch_row(result))) {
-			time_t row_start = slurm_atoul(row[EVENT_REQ_START]);
-			time_t row_end = slurm_atoul(row[EVENT_REQ_END]);
-			uint32_t row_cpu = slurm_atoul(row[EVENT_REQ_CPU]);
-
-			if (row_start < curr_start)
-				row_start = curr_start;
-
-			if (!row_end || row_end > curr_end)
-				row_end = curr_end;
-
-			/* Don't worry about it if the time is less
-			 * than 1 second.
-			 */
-			if ((row_end - row_start) < 1)
-				continue;
-
-			/* this means we are a cluster registration
-			   entry */
-			if (!row[EVENT_REQ_NAME][0]) {
-				/* if the cpu count changes we will
-				 * only care about the last cpu count but
-				 * we will keep a total of the time for
-				 * all cpus to get the correct cpu time
-				 * for the entire period.
-				 */
-				if (!c_usage) {
-					c_usage = xmalloc(
-						sizeof(local_cluster_usage_t));
-					c_usage->cpu_count = row_cpu;
-					c_usage->total_time =
-						(row_end - row_start) * row_cpu;
-					c_usage->start = row_start;
-					c_usage->end = row_end;
-					list_append(cluster_usage_list,
-						    c_usage);
-				} else {
-					c_usage->cpu_count = row_cpu;
-					c_usage->total_time +=
-						(row_end - row_start) * row_cpu;
-					c_usage->end = row_end;
-				}
-				continue;
-			}
-
-			/* only record down time for the cluster we
-			   are looking for.  If it was during this
-			   time period we would already have it.
-			*/
-			if (c_usage) {
-				int local_start = row_start;
-				int local_end = row_end;
-				if (c_usage->start > local_start)
-					local_start = c_usage->start;
-				if (c_usage->end < local_end)
-					local_end = c_usage->end;
-
-				if ((local_end - local_start) > 0) {
-					seconds = (local_end - local_start);
-
-/* 					info("node %s adds " */
-/* 					     "(%d)(%d-%d) * %d = %d " */
-/* 					     "to %d", */
-/* 					     row[EVENT_REQ_NAME], */
-/* 					     seconds, */
-/* 					     local_end, local_start, */
-/* 					     row_cpu,  */
-/* 					     seconds * row_cpu,  */
-/* 					     row_cpu); */
-					c_usage->d_cpu += seconds * row_cpu;
-				}
-			}
-		}
-		mysql_free_result(result);
+		c_usage = _setup_cluster_usage(mysql_conn, cluster_name,
+					       curr_start, curr_end,
+					       cluster_down_list);
 
 		// now get the reservations during this time
 		query = xstrdup_printf("select %s from \"%s_%s\" where "
@@ -398,6 +587,7 @@ extern int as_mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 		if (!(result = mysql_db_query_ret(
 			     mysql_conn, query, 0))) {
 			xfree(query);
+			_destroy_local_cluster_usage(c_usage);
 			return SLURM_ERROR;
 		}
 		xfree(query);
@@ -470,16 +660,15 @@ extern int as_mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 				c_usage->pd_cpu += r_usage->total_time;
 			else
 				c_usage->a_cpu += r_usage->total_time;
-			/* char *start_char = xstrdup(ctime(&r_usage->start));*/
-			/* char *end_char = xstrdup(ctime(&r_usage->end)); */
-			/* start_char[strlen(start_char)-1] = '\0'; */
+			/* slurm_make_time_str(&r_usage->start, start_char, */
+			/* 		    sizeof(start_char)); */
+			/* slurm_make_time_str(&r_usage->end, end_char, */
+			/* 		    sizeof(end_char)); */
 			/* info("adding this much %lld to cluster %s " */
 			/*      "%d %d %s - %s", */
 			/*      r_usage->total_time, c_usage->name, */
 			/*      (row_flags & RESERVE_FLAG_MAINT),  */
 			/*      r_usage->id, start_char, end_char); */
-			/* xfree(start_char); */
-			/* xfree(end_char); */
 		}
 		mysql_free_result(result);
 
@@ -496,6 +685,7 @@ extern int as_mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 		if (!(result = mysql_db_query_ret(
 			     mysql_conn, query, 0))) {
 			xfree(query);
+			_destroy_local_cluster_usage(c_usage);
 			return SLURM_ERROR;
 		}
 		xfree(query);
@@ -547,6 +737,7 @@ extern int as_mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 					     mysql_conn,
 					     query, 0))) {
 					xfree(query);
+					_destroy_local_cluster_usage(c_usage);
 					return SLURM_ERROR;
 				}
 				xfree(query);
@@ -563,11 +754,11 @@ extern int as_mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 						local_start = row_start;
 					if (row_end < local_end)
 						local_end = row_end;
-
-					if ((local_end - local_start) < 1)
+					tot_time = (local_end - local_start);
+					if (tot_time < 1)
 						continue;
 
-					seconds -= (local_end - local_start);
+					seconds -= tot_time;
 				}
 				mysql_free_result(result2);
 			}
@@ -610,6 +801,32 @@ extern int as_mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 			/* do the cluster allocated calculation */
 		calc_cluster:
 
+			/* Now figure out if there was a disconnected
+			   slurmctld during this job.
+			*/
+			list_iterator_reset(c_itr);
+			while ((loc_c_usage = list_next(c_itr))) {
+				int temp_end = row_end;
+				int temp_start = row_start;
+				if (loc_c_usage->start > temp_start)
+					temp_start = loc_c_usage->start;
+				if (loc_c_usage->end < temp_end)
+					temp_end = loc_c_usage->end;
+				seconds = (temp_end - temp_start);
+				if (seconds > 0) {
+					/* info(" Job %u was running for " */
+					/*      "%"PRIu64" seconds while " */
+					/*      "cluster %s's slurmctld " */
+					/*      "wasn't responding", */
+					/*      job_id, */
+					/*      (uint64_t) */
+					/*      (seconds * row_acpu), */
+					/*      cluster_name); */
+					loc_c_usage->total_time -=
+						seconds * row_acpu;
+				}
+			}
+
 			/* first figure out the reservation */
 			if (resv_id) {
 				if (seconds <= 0)
@@ -622,9 +839,9 @@ extern int as_mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 				   unused time over the associations
 				   able to run in the reservation.
 				   Since the job was to run, or ran a
-				   reservation we don't care about eligible time
-				   since that could totally skew the
-				   clusters reserved time
+				   reservation we don't care about
+				   eligible time since that could
+				   totally skew the clusters reserved time
 				   since the job may be able to run
 				   outside of the reservation. */
 				list_iterator_reset(r_itr);
@@ -666,44 +883,41 @@ extern int as_mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 				continue;
 
 			if (row_start && (seconds > 0)) {
-/* 					info("%d assoc %d adds " */
-/* 					     "(%d)(%d-%d) * %d = %d " */
-/* 					     "to %d", */
-/* 					     job_id, */
-/* 					     a_usage->id, */
-/* 					     seconds, */
-/* 					     row_end, row_start, */
-/* 					     row_acpu, */
-/* 					     seconds * row_acpu, */
-/* 					     row_acpu); */
+				/* info("%d assoc %d adds " */
+				/*      "(%d)(%d-%d) * %d = %d " */
+				/*      "to %d", */
+				/*      job_id, */
+				/*      a_usage->id, */
+				/*      seconds, */
+				/*      row_end, row_start, */
+				/*      row_acpu, */
+				/*      seconds * row_acpu, */
+				/*      row_acpu); */
 
 				c_usage->a_cpu += seconds * row_acpu;
 			}
 
 			/* now reserved time */
 			if (!row_start || (row_start >= c_usage->start)) {
-				row_end = row_start;
-				row_start = row_eligible;
-				if (c_usage->start > row_start)
-					row_start = c_usage->start;
-				if (c_usage->end < row_end)
-					row_end = c_usage->end;
-
-				if ((row_end - row_start) > 0) {
-					seconds = (row_end - row_start)
-						* row_rcpu;
-
-/* 					info("%d assoc %d reserved " */
-/* 					     "(%d)(%d-%d) * %d = %d " */
-/* 					     "to %d", */
-/* 					     job_id, */
-/* 					     assoc_id, */
-/* 					     seconds, */
-/* 					     row_end, row_start, */
-/* 					     row_rcpu, */
-/* 					     seconds * row_rcpu, */
-/* 					     row_rcpu); */
-					c_usage->r_cpu += seconds;
+				int temp_end = row_start;
+				int temp_start = row_eligible;
+				if (c_usage->start > temp_start)
+					temp_start = c_usage->start;
+				if (c_usage->end < temp_end)
+					temp_end = c_usage->end;
+				seconds = (temp_end - temp_start);
+				if (seconds > 0) {
+					/* info("%d assoc %d reserved " */
+					/*      "(%d)(%d-%d) * %d = %d " */
+					/*      "to %d", */
+					/*      job_id, */
+					/*      assoc_id, */
+					/*      seconds, */
+					/*      temp_end, temp_start, */
+					/*      row_rcpu, */
+					/*      seconds * row_rcpu, */
+					/*      row_rcpu); */
+					c_usage->r_cpu += seconds * row_rcpu;
 				}
 			}
 		}
@@ -754,170 +968,16 @@ extern int as_mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 			list_iterator_destroy(tmp_itr);
 		}
 
-		/* Now put the lists into the usage tables */
+		/* now apply the down time from the slurmctld disconnects */
 		list_iterator_reset(c_itr);
-		while ((c_usage = list_next(c_itr))) {
-			uint64_t total_used = 0;
-
-			/* sanity check to make sure we don't have more
-			   allocated cpus than possible. */
-			if (c_usage->total_time < c_usage->a_cpu) {
-				char *start_char = xstrdup(ctime(&curr_start));
-				char *end_char = xstrdup(ctime(&curr_end));
-				start_char[strlen(start_char)-1] = '\0';
-				error("We have more allocated time than is "
-				      "possible (%"PRIu64" > %"PRIu64") for "
-				      "cluster %s(%d) from %s - %s",
-				      c_usage->a_cpu, c_usage->total_time,
-				      cluster_name, c_usage->cpu_count,
-				      start_char, end_char);
-				xfree(start_char);
-				xfree(end_char);
-				c_usage->a_cpu = c_usage->total_time;
-			}
-
-			total_used = c_usage->a_cpu +
-				c_usage->d_cpu + c_usage->pd_cpu;
-
-			/* Make sure the total time we care about
-			   doesn't go over the limit */
-			if (c_usage->total_time < (total_used)) {
-				char *start_char = xstrdup(ctime(&curr_start));
-				char *end_char = xstrdup(ctime(&curr_end));
-				int64_t overtime;
-
-				start_char[strlen(start_char)-1] = '\0';
-				error("We have more time than is "
-				      "possible (%"PRIu64"+%"PRIu64"+%"
-				      PRIu64")(%"PRIu64") > %"PRIu64" for "
-				      "cluster %s(%d) from %s - %s",
-				      c_usage->a_cpu, c_usage->d_cpu,
-				      c_usage->pd_cpu, total_used,
-				      c_usage->total_time,
-				      cluster_name, c_usage->cpu_count,
-				      start_char, end_char);
-				xfree(start_char);
-				xfree(end_char);
-
-				/* First figure out how much actual down time
-				   we have and then how much
-				   planned down time we have. */
-				overtime = (int64_t)(c_usage->total_time -
-						     (c_usage->a_cpu
-						      + c_usage->d_cpu));
-				if (overtime < 0) {
-					c_usage->d_cpu += overtime;
-					if ((int64_t)c_usage->d_cpu < 0)
-						c_usage->d_cpu = 0;
-				}
-
-				overtime = (int64_t)(c_usage->total_time -
-						     (c_usage->a_cpu
-						      + c_usage->d_cpu
-						      + c_usage->pd_cpu));
-				if (overtime < 0) {
-					c_usage->pd_cpu += overtime;
-					if ((int64_t)c_usage->pd_cpu < 0)
-						c_usage->pd_cpu = 0;
-				}
-
-				total_used = c_usage->a_cpu +
-					c_usage->d_cpu + c_usage->pd_cpu;
-				/* info("We now have (%"PRIu64"+%"PRIu64"+" */
-				/*      "%"PRIu64")(%"PRIu64") " */
-				/*       "?= %"PRIu64"", */
-				/*       c_usage->a_cpu, c_usage->d_cpu, */
-				/*       c_usage->pd_cpu, total_used, */
-				/*       c_usage->total_time); */
-			}
-
-			c_usage->i_cpu = c_usage->total_time -
-				total_used - c_usage->r_cpu;
-			/* sanity check just to make sure we have a
-			 * legitimate time after we calulated
-			 * idle/reserved time put extra in the over
-			 * commit field
-			 */
-/* 			info("%s got idle of %lld", c_usage->name,  */
-/* 			     (int64_t)c_usage->i_cpu); */
-			if ((int64_t)c_usage->i_cpu < 0) {
-/* 				info("got %d %d %d", c_usage->r_cpu, */
-/* 				     c_usage->i_cpu, c_usage->o_cpu); */
-				c_usage->r_cpu += (int64_t)c_usage->i_cpu;
-				c_usage->o_cpu -= (int64_t)c_usage->i_cpu;
-				c_usage->i_cpu = 0;
-				if ((int64_t)c_usage->r_cpu < 0)
-					c_usage->r_cpu = 0;
-			}
-
-/* 			info("cluster %s(%d) down %d alloc %d " */
-/* 			     "resv %d idle %d over %d " */
-/* 			     "total= %d = %d from %s", */
-/* 			     c_usage->name, */
-/* 			     c_usage->cpu_count, c_usage->d_cpu, */
-/* 			     c_usage->a_cpu, */
-/* 			     c_usage->r_cpu, c_usage->i_cpu, c_usage->o_cpu, */
-/* 			     c_usage->d_cpu + c_usage->a_cpu + */
-/* 			     c_usage->r_cpu + c_usage->i_cpu, */
-/* 			     c_usage->total_time, */
-/* 			     ctime(&c_usage->start)); */
-/* 			info("to %s", ctime(&c_usage->end)); */
-			if (query) {
-				xstrfmtcat(query,
-					   ", (%ld, %ld, %ld, %d, "
-					   "%"PRIu64", %"PRIu64", %"PRIu64", "
-					   "%"PRIu64", %"PRIu64", %"PRIu64")",
-					   now, now,
-					   c_usage->start,
-					   c_usage->cpu_count, c_usage->a_cpu,
-					   c_usage->d_cpu, c_usage->pd_cpu,
-					   c_usage->i_cpu, c_usage->o_cpu,
-					   c_usage->r_cpu);
-			} else {
-				xstrfmtcat(query,
-					   "insert into \"%s_%s\" "
-					   "(creation_time, "
-					   "mod_time, time_start, "
-					   "cpu_count, alloc_cpu_secs, "
-					   "down_cpu_secs, pdown_cpu_secs, "
-					   "idle_cpu_secs, over_cpu_secs, "
-					   "resv_cpu_secs) "
-					   "values (%ld, %ld, %ld, %d, "
-					   "%"PRIu64", %"PRIu64", %"PRIu64", "
-					   "%"PRIu64", %"PRIu64", %"PRIu64")",
-					   cluster_name, cluster_hour_table,
-					   now, now,
-					   c_usage->start,
-					   c_usage->cpu_count,
-					   c_usage->a_cpu, c_usage->d_cpu,
-					   c_usage->pd_cpu, c_usage->i_cpu,
-					   c_usage->o_cpu, c_usage->r_cpu);
-			}
-		}
-
-		/* Spacing out the inserts here instead of doing them
-		   all at once in the end proves to be faster.  Just FYI
-		   so we don't go testing again and again.
-		*/
-		if (query) {
-			xstrfmtcat(query,
-				   " on duplicate key update "
-				   "mod_time=%ld, cpu_count=VALUES(cpu_count), "
-				   "alloc_cpu_secs=VALUES(alloc_cpu_secs), "
-				   "down_cpu_secs=VALUES(down_cpu_secs), "
-				   "pdown_cpu_secs=VALUES(pdown_cpu_secs), "
-				   "idle_cpu_secs=VALUES(idle_cpu_secs), "
-				   "over_cpu_secs=VALUES(over_cpu_secs), "
-				   "resv_cpu_secs=VALUES(resv_cpu_secs)",
-				   now);
-			debug3("%d(%s:%d) query\n%s",
-			       mysql_conn->conn, THIS_FILE, __LINE__, query);
-			rc = mysql_db_query(mysql_conn, query);
-			xfree(query);
-			if (rc != SLURM_SUCCESS) {
-				error("Couldn't add cluster hour rollup");
-				goto end_it;
-			}
+		while ((loc_c_usage = list_next(c_itr)))
+			c_usage->d_cpu += loc_c_usage->total_time;
+
+		if ((rc = _process_cluster_usage(
+			     mysql_conn, cluster_name, curr_start,
+			     curr_end, now, c_usage)) != SLURM_SUCCESS) {
+			_destroy_local_cluster_usage(c_usage);
+			goto end_it;
 		}
 
 		list_iterator_reset(a_itr);
@@ -957,6 +1017,7 @@ extern int as_mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 			xfree(query);
 			if (rc != SLURM_SUCCESS) {
 				error("Couldn't add assoc hour rollup");
+				_destroy_local_cluster_usage(c_usage);
 				goto end_it;
 			}
 		}
@@ -1001,13 +1062,15 @@ extern int as_mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 			xfree(query);
 			if (rc != SLURM_SUCCESS) {
 				error("Couldn't add wckey hour rollup");
+				_destroy_local_cluster_usage(c_usage);
 				goto end_it;
 			}
 		}
 
 	end_loop:
+		_destroy_local_cluster_usage(c_usage);
 		list_flush(assoc_usage_list);
-		list_flush(cluster_usage_list);
+		list_flush(cluster_down_list);
 		list_flush(wckey_usage_list);
 		list_flush(resv_usage_list);
 		curr_start = curr_end;
@@ -1015,7 +1078,6 @@ extern int as_mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 	}
 end_it:
 	xfree(suspend_str);
-	xfree(event_str);
 	xfree(job_str);
 	xfree(resv_str);
 	list_iterator_destroy(a_itr);
@@ -1024,7 +1086,7 @@ end_it:
 	list_iterator_destroy(r_itr);
 
 	list_destroy(assoc_usage_list);
-	list_destroy(cluster_usage_list);
+	list_destroy(cluster_down_list);
 	list_destroy(wckey_usage_list);
 	list_destroy(resv_usage_list);
 
diff --git a/src/plugins/accounting_storage/mysql/as_mysql_rollup.h b/src/plugins/accounting_storage/mysql/as_mysql_rollup.h
index 8388dd241..276dd3f21 100644
--- a/src/plugins/accounting_storage/mysql/as_mysql_rollup.h
+++ b/src/plugins/accounting_storage/mysql/as_mysql_rollup.h
@@ -10,7 +10,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/accounting_storage/mysql/as_mysql_txn.c b/src/plugins/accounting_storage/mysql/as_mysql_txn.c
index dcc838c89..6933e4fb8 100644
--- a/src/plugins/accounting_storage/mysql/as_mysql_txn.c
+++ b/src/plugins/accounting_storage/mysql/as_mysql_txn.c
@@ -8,7 +8,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/accounting_storage/mysql/as_mysql_txn.h b/src/plugins/accounting_storage/mysql/as_mysql_txn.h
index dbd1f1330..d607c0d1a 100644
--- a/src/plugins/accounting_storage/mysql/as_mysql_txn.h
+++ b/src/plugins/accounting_storage/mysql/as_mysql_txn.h
@@ -8,7 +8,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/accounting_storage/mysql/as_mysql_usage.c b/src/plugins/accounting_storage/mysql/as_mysql_usage.c
index d17c233d4..c5552cfcf 100644
--- a/src/plugins/accounting_storage/mysql/as_mysql_usage.c
+++ b/src/plugins/accounting_storage/mysql/as_mysql_usage.c
@@ -8,7 +8,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/accounting_storage/mysql/as_mysql_usage.h b/src/plugins/accounting_storage/mysql/as_mysql_usage.h
index 1651eb75b..b73c1f2bb 100644
--- a/src/plugins/accounting_storage/mysql/as_mysql_usage.h
+++ b/src/plugins/accounting_storage/mysql/as_mysql_usage.h
@@ -8,7 +8,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/accounting_storage/mysql/as_mysql_user.c b/src/plugins/accounting_storage/mysql/as_mysql_user.c
index 4d7095497..a4fb90cb4 100644
--- a/src/plugins/accounting_storage/mysql/as_mysql_user.c
+++ b/src/plugins/accounting_storage/mysql/as_mysql_user.c
@@ -8,7 +8,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/accounting_storage/mysql/as_mysql_user.h b/src/plugins/accounting_storage/mysql/as_mysql_user.h
index 75e7386a0..f0de0f7a3 100644
--- a/src/plugins/accounting_storage/mysql/as_mysql_user.h
+++ b/src/plugins/accounting_storage/mysql/as_mysql_user.h
@@ -8,7 +8,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/accounting_storage/mysql/as_mysql_wckey.c b/src/plugins/accounting_storage/mysql/as_mysql_wckey.c
index 8956b4964..dcbe189d6 100644
--- a/src/plugins/accounting_storage/mysql/as_mysql_wckey.c
+++ b/src/plugins/accounting_storage/mysql/as_mysql_wckey.c
@@ -8,7 +8,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -635,7 +635,6 @@ extern List as_mysql_modify_wckeys(mysql_conn_t *mysql_conn,
 	int rc = SLURM_SUCCESS;
 	char *extra = NULL, *object = NULL, *vals = NULL;
 	char *user_name = NULL;
-	int set = 0;
 	List use_cluster_list = as_mysql_cluster_list;
 	ListIterator itr;
 
@@ -671,7 +670,7 @@ extern List as_mysql_modify_wckeys(mysql_conn_t *mysql_conn,
 	}
 is_same_user:
 
-	set = _setup_wckey_cond_limits(wckey_cond, &extra);
+	(void) _setup_wckey_cond_limits(wckey_cond, &extra);
 
 	if (wckey->is_def == 1)
 		xstrcat(vals, ", is_def=1");
@@ -721,7 +720,6 @@ extern List as_mysql_remove_wckeys(mysql_conn_t *mysql_conn,
 	int rc = SLURM_SUCCESS;
 	char *extra = NULL, *object = NULL;
 	char *user_name = NULL;
-	int set = 0;
 	List use_cluster_list = as_mysql_cluster_list;
 	ListIterator itr;
 
@@ -733,7 +731,7 @@ extern List as_mysql_remove_wckeys(mysql_conn_t *mysql_conn,
 	if (check_connection(mysql_conn) != SLURM_SUCCESS)
 		return NULL;
 
-	set = _setup_wckey_cond_limits(wckey_cond, &extra);
+	(void) _setup_wckey_cond_limits(wckey_cond, &extra);
 
 	if (wckey_cond->cluster_list && list_count(wckey_cond->cluster_list))
 		use_cluster_list = wckey_cond->cluster_list;
@@ -778,7 +776,6 @@ extern List as_mysql_get_wckeys(mysql_conn_t *mysql_conn, uid_t uid,
 	char *tmp = NULL;
 	char *cluster_name = NULL;
 	List wckey_list = NULL;
-	int set = 0;
 	int i=0, is_admin=1;
 	uint16_t private_data = 0;
 	slurmdb_user_rec_t user;
@@ -809,7 +806,7 @@ extern List as_mysql_get_wckeys(mysql_conn_t *mysql_conn, uid_t uid,
 		}
 	}
 
-	set = _setup_wckey_cond_limits(wckey_cond, &extra);
+	(void) _setup_wckey_cond_limits(wckey_cond, &extra);
 
 	if (wckey_cond->cluster_list && list_count(wckey_cond->cluster_list))
 		use_cluster_list = wckey_cond->cluster_list;
diff --git a/src/plugins/accounting_storage/mysql/as_mysql_wckey.h b/src/plugins/accounting_storage/mysql/as_mysql_wckey.h
index f9e121bf3..42bd53c85 100644
--- a/src/plugins/accounting_storage/mysql/as_mysql_wckey.h
+++ b/src/plugins/accounting_storage/mysql/as_mysql_wckey.h
@@ -8,7 +8,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/accounting_storage/none/Makefile.in b/src/plugins/accounting_storage/none/Makefile.in
index 7268f316f..dc4a137b4 100644
--- a/src/plugins/accounting_storage/none/Makefile.in
+++ b/src/plugins/accounting_storage/none/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -140,7 +142,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -177,6 +182,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -234,6 +240,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -269,6 +276,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/accounting_storage/none/accounting_storage_none.c b/src/plugins/accounting_storage/none/accounting_storage_none.c
index 3d0e7e124..877bd4f42 100644
--- a/src/plugins/accounting_storage/none/accounting_storage_none.c
+++ b/src/plugins/accounting_storage/none/accounting_storage_none.c
@@ -9,7 +9,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -359,6 +359,19 @@ extern int clusteracct_storage_p_register_ctld(void *db_conn, uint16_t port)
 	return SLURM_SUCCESS;
 }
 
+extern int clusteracct_storage_p_register_disconn_ctld(
+	void *db_conn, char *control_host)
+{
+	return SLURM_SUCCESS;
+}
+
+extern int clusteracct_storage_p_fini_ctld(void *db_conn,
+					   char *ip, uint16_t port,
+					   char *cluster_nodes)
+{
+	return SLURM_SUCCESS;
+}
+
 extern int clusteracct_storage_p_cluster_cpus(void *db_conn,
 					      char *cluster_nodes,
 					      uint32_t cpus,
diff --git a/src/plugins/accounting_storage/pgsql/Makefile.in b/src/plugins/accounting_storage/pgsql/Makefile.in
index 8aad60da5..03aaf5098 100644
--- a/src/plugins/accounting_storage/pgsql/Makefile.in
+++ b/src/plugins/accounting_storage/pgsql/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -186,7 +188,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -223,6 +228,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -280,6 +286,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -315,6 +322,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/accounting_storage/pgsql/accounting_storage_pgsql.c b/src/plugins/accounting_storage/pgsql/accounting_storage_pgsql.c
index 61b3abc3b..9c52aa74b 100644
--- a/src/plugins/accounting_storage/pgsql/accounting_storage_pgsql.c
+++ b/src/plugins/accounting_storage/pgsql/accounting_storage_pgsql.c
@@ -9,7 +9,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -595,6 +595,19 @@ extern int clusteracct_storage_p_register_ctld(pgsql_conn_t *pg_conn,
 	return cs_pg_register_ctld(pg_conn, pg_conn->cluster_name, port);
 }
 
+extern int clusteracct_storage_p_register_disconn_ctld(
+	pgsql_conn_t *pg_conn, char *control_host)
+{
+	return SLURM_SUCCESS;
+}
+
+extern int clusteracct_storage_p_fini_ctld(void *db_conn,
+					   char *ip, uint16_t port,
+					   char *cluster_nodes)
+{
+	return SLURM_SUCCESS;
+}
+
 extern int clusteracct_storage_p_cluster_cpus(pgsql_conn_t *pg_conn,
 					      char *cluster_nodes,
 					      uint32_t cpus,
diff --git a/src/plugins/accounting_storage/pgsql/accounting_storage_pgsql.h b/src/plugins/accounting_storage/pgsql/accounting_storage_pgsql.h
index 448e4c7cd..66cb201b2 100644
--- a/src/plugins/accounting_storage/pgsql/accounting_storage_pgsql.h
+++ b/src/plugins/accounting_storage/pgsql/accounting_storage_pgsql.h
@@ -9,7 +9,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/accounting_storage/pgsql/as_pg_acct.c b/src/plugins/accounting_storage/pgsql/as_pg_acct.c
index 618831317..a239e70e7 100644
--- a/src/plugins/accounting_storage/pgsql/as_pg_acct.c
+++ b/src/plugins/accounting_storage/pgsql/as_pg_acct.c
@@ -9,7 +9,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/accounting_storage/pgsql/as_pg_acct.h b/src/plugins/accounting_storage/pgsql/as_pg_acct.h
index 99449a60a..b38a1c645 100644
--- a/src/plugins/accounting_storage/pgsql/as_pg_acct.h
+++ b/src/plugins/accounting_storage/pgsql/as_pg_acct.h
@@ -9,7 +9,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/accounting_storage/pgsql/as_pg_archive.c b/src/plugins/accounting_storage/pgsql/as_pg_archive.c
index a8567d309..9ae7cf655 100644
--- a/src/plugins/accounting_storage/pgsql/as_pg_archive.c
+++ b/src/plugins/accounting_storage/pgsql/as_pg_archive.c
@@ -9,7 +9,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/accounting_storage/pgsql/as_pg_archive.h b/src/plugins/accounting_storage/pgsql/as_pg_archive.h
index 4cfbcd6ab..f33cd0a16 100644
--- a/src/plugins/accounting_storage/pgsql/as_pg_archive.h
+++ b/src/plugins/accounting_storage/pgsql/as_pg_archive.h
@@ -9,7 +9,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/accounting_storage/pgsql/as_pg_assoc.c b/src/plugins/accounting_storage/pgsql/as_pg_assoc.c
index 663e376b1..3f8ec1661 100644
--- a/src/plugins/accounting_storage/pgsql/as_pg_assoc.c
+++ b/src/plugins/accounting_storage/pgsql/as_pg_assoc.c
@@ -10,7 +10,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/accounting_storage/pgsql/as_pg_assoc.h b/src/plugins/accounting_storage/pgsql/as_pg_assoc.h
index 7f7e5beda..f5db3ada5 100644
--- a/src/plugins/accounting_storage/pgsql/as_pg_assoc.h
+++ b/src/plugins/accounting_storage/pgsql/as_pg_assoc.h
@@ -10,7 +10,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/accounting_storage/pgsql/as_pg_cluster.c b/src/plugins/accounting_storage/pgsql/as_pg_cluster.c
index 9e70fe968..c2ce59fac 100644
--- a/src/plugins/accounting_storage/pgsql/as_pg_cluster.c
+++ b/src/plugins/accounting_storage/pgsql/as_pg_cluster.c
@@ -10,7 +10,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/accounting_storage/pgsql/as_pg_cluster.h b/src/plugins/accounting_storage/pgsql/as_pg_cluster.h
index 1e7dab8d4..68829a574 100644
--- a/src/plugins/accounting_storage/pgsql/as_pg_cluster.h
+++ b/src/plugins/accounting_storage/pgsql/as_pg_cluster.h
@@ -10,7 +10,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/accounting_storage/pgsql/as_pg_common.c b/src/plugins/accounting_storage/pgsql/as_pg_common.c
index b746c5c1a..a1fff6606 100644
--- a/src/plugins/accounting_storage/pgsql/as_pg_common.c
+++ b/src/plugins/accounting_storage/pgsql/as_pg_common.c
@@ -9,7 +9,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/accounting_storage/pgsql/as_pg_common.h b/src/plugins/accounting_storage/pgsql/as_pg_common.h
index 2e48f20d1..98c84f504 100644
--- a/src/plugins/accounting_storage/pgsql/as_pg_common.h
+++ b/src/plugins/accounting_storage/pgsql/as_pg_common.h
@@ -9,7 +9,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/accounting_storage/pgsql/as_pg_event.c b/src/plugins/accounting_storage/pgsql/as_pg_event.c
index 74a5ce63c..3d6aa243c 100644
--- a/src/plugins/accounting_storage/pgsql/as_pg_event.c
+++ b/src/plugins/accounting_storage/pgsql/as_pg_event.c
@@ -10,7 +10,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/accounting_storage/pgsql/as_pg_event.h b/src/plugins/accounting_storage/pgsql/as_pg_event.h
index 6325e998b..3ea609e4a 100644
--- a/src/plugins/accounting_storage/pgsql/as_pg_event.h
+++ b/src/plugins/accounting_storage/pgsql/as_pg_event.h
@@ -10,7 +10,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/accounting_storage/pgsql/as_pg_get_jobs.c b/src/plugins/accounting_storage/pgsql/as_pg_get_jobs.c
index ddffbc3ba..c1de50ea0 100644
--- a/src/plugins/accounting_storage/pgsql/as_pg_get_jobs.c
+++ b/src/plugins/accounting_storage/pgsql/as_pg_get_jobs.c
@@ -9,7 +9,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -265,6 +265,7 @@ static void _state_time_string(char **extra, uint32_t state,
 	case JOB_FAILED:
 	case JOB_TIMEOUT:
 	case JOB_NODE_FAIL:
+	case JOB_PREEMPTED:
 	default:
 		xstrfmtcat(*extra, "(t1.state=%u AND (t1.time_end!=0 AND ", state);
 		if(start && !end) {
diff --git a/src/plugins/accounting_storage/pgsql/as_pg_job.c b/src/plugins/accounting_storage/pgsql/as_pg_job.c
index 5f9224d44..fa84e6b2d 100644
--- a/src/plugins/accounting_storage/pgsql/as_pg_job.c
+++ b/src/plugins/accounting_storage/pgsql/as_pg_job.c
@@ -9,7 +9,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -860,7 +860,7 @@ js_pg_step_complete(pgsql_conn_t *pg_conn,
 	time_t now;
 	int elapsed;
 	int comp_status;
-	int cpus = 0, tasks = 0;
+	int cpus = 0;
 	struct jobacctinfo *jobacct = (struct jobacctinfo *)step_ptr->jobacct;
 	struct jobacctinfo dummy_jobacct;
 	double ave_vsize = 0, ave_rss = 0, ave_pages = 0;
@@ -904,20 +904,17 @@ js_pg_step_complete(pgsql_conn_t *pg_conn,
 
 	if(slurmdbd_conf) {
 		now = step_ptr->job_ptr->end_time;
-		tasks = step_ptr->job_ptr->details->num_tasks;
 		cpus = step_ptr->cpu_count;
 	} else {
 		now = time(NULL);
 #ifdef HAVE_BG
-		tasks = cpus = step_ptr->job_ptr->details->min_cpus;
+		cpus = step_ptr->job_ptr->details->min_cpus;
 
 #else
 		if(!step_ptr->step_layout || !step_ptr->step_layout->task_cnt)
-			tasks = cpus = step_ptr->job_ptr->total_cpus;
-		else {
+			cpus = step_ptr->job_ptr->total_cpus;
+		else
 			cpus = step_ptr->cpu_count;
-			tasks = step_ptr->step_layout->task_cnt;
-		}
 #endif
 	}
 
@@ -1014,10 +1011,9 @@ js_pg_suspend(pgsql_conn_t *pg_conn, uint32_t old_db_inx,
 {
  	char *query = NULL;
  	int rc = SLURM_SUCCESS;
- 	time_t submit_time;
  	uint32_t job_db_inx;
 
- 	if(check_db_connection(pg_conn) != SLURM_SUCCESS)
+ 	if (check_db_connection(pg_conn) != SLURM_SUCCESS)
  		return ESLURM_DB_CONNECTION;
 
  	if (! cluster_in_db(pg_conn, pg_conn->cluster_name) ) {
@@ -1025,16 +1021,11 @@ js_pg_suspend(pgsql_conn_t *pg_conn, uint32_t old_db_inx,
  		return SLURM_ERROR;
  	}
 
- 	if (job_ptr->resize_time)
- 		submit_time = job_ptr->resize_time;
- 	else
- 		submit_time = job_ptr->details->submit_time;
-
  	if (_check_job_db_index(pg_conn, job_ptr) != SLURM_SUCCESS)
  		return SLURM_SUCCESS;
 
- 	if(IS_JOB_RESIZING(job_ptr)) {
- 		if(!old_db_inx) {
+ 	if (IS_JOB_RESIZING(job_ptr)) {
+ 		if (!old_db_inx) {
  			error("No old db inx given for job %u cluster %s, "
  			      "can't update suspend table.",
  			      job_ptr->job_id, pg_conn->cluster_name);
@@ -1050,14 +1041,14 @@ js_pg_suspend(pgsql_conn_t *pg_conn, uint32_t old_db_inx,
  	} else
  		job_db_inx = job_ptr->db_index;
 
- 	query = xstrdup_printf(
+ 	xstrfmtcat(query,
  		"UPDATE %s.%s SET time_suspended=%d-time_suspended, state=%d "
- 		"WHERE job_db_inx=%d", pg_conn->cluster_name, job_table,
+ 		"WHERE job_db_inx=%d;", pg_conn->cluster_name, job_table,
  		(int)job_ptr->suspend_time,
  		(int)(job_ptr->job_state & JOB_STATE_BASE),
  		(int)job_ptr->db_index);
 
- 	if(IS_JOB_SUSPENDED(job_ptr))
+ 	if (IS_JOB_SUSPENDED(job_ptr))
  		xstrfmtcat(query,
  			   "INSERT INTO %s.%s (job_db_inx, id_assoc, "
  			   "  time_start, time_end) VALUES (%d, %d, %ld, 0);",
@@ -1073,10 +1064,10 @@ js_pg_suspend(pgsql_conn_t *pg_conn, uint32_t old_db_inx,
  			   job_ptr->db_index);
 
  	rc = DEF_QUERY_RET_RC;
- 	if(rc == SLURM_SUCCESS) {
+ 	if (rc == SLURM_SUCCESS) {
  		query = xstrdup_printf(
  			"UPDATE %s.%s SET time_suspended=%d-time_suspended, "
- 			"state=%d WHERE job_db_inx=%d and time_end=0",
+ 			"state=%d WHERE job_db_inx=%d and time_end=0;",
  			pg_conn->cluster_name,
  			step_table, (int)job_ptr->suspend_time,
  			(int)job_ptr->job_state, (int)job_ptr->db_index);
diff --git a/src/plugins/accounting_storage/pgsql/as_pg_job.h b/src/plugins/accounting_storage/pgsql/as_pg_job.h
index 13bad45f3..403d20dde 100644
--- a/src/plugins/accounting_storage/pgsql/as_pg_job.h
+++ b/src/plugins/accounting_storage/pgsql/as_pg_job.h
@@ -9,7 +9,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/accounting_storage/pgsql/as_pg_problem.c b/src/plugins/accounting_storage/pgsql/as_pg_problem.c
index d4a6708d6..230ad0b52 100644
--- a/src/plugins/accounting_storage/pgsql/as_pg_problem.c
+++ b/src/plugins/accounting_storage/pgsql/as_pg_problem.c
@@ -10,7 +10,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/accounting_storage/pgsql/as_pg_problem.h b/src/plugins/accounting_storage/pgsql/as_pg_problem.h
index f9959d73c..d8e066711 100644
--- a/src/plugins/accounting_storage/pgsql/as_pg_problem.h
+++ b/src/plugins/accounting_storage/pgsql/as_pg_problem.h
@@ -10,7 +10,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/accounting_storage/pgsql/as_pg_qos.c b/src/plugins/accounting_storage/pgsql/as_pg_qos.c
index bf4f66ae3..4cad080d3 100644
--- a/src/plugins/accounting_storage/pgsql/as_pg_qos.c
+++ b/src/plugins/accounting_storage/pgsql/as_pg_qos.c
@@ -9,7 +9,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/accounting_storage/pgsql/as_pg_qos.h b/src/plugins/accounting_storage/pgsql/as_pg_qos.h
index 00d20c513..c031b9ac8 100644
--- a/src/plugins/accounting_storage/pgsql/as_pg_qos.h
+++ b/src/plugins/accounting_storage/pgsql/as_pg_qos.h
@@ -9,7 +9,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/accounting_storage/pgsql/as_pg_resv.c b/src/plugins/accounting_storage/pgsql/as_pg_resv.c
index 9780e6c9e..57b7263b1 100644
--- a/src/plugins/accounting_storage/pgsql/as_pg_resv.c
+++ b/src/plugins/accounting_storage/pgsql/as_pg_resv.c
@@ -10,7 +10,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/accounting_storage/pgsql/as_pg_resv.h b/src/plugins/accounting_storage/pgsql/as_pg_resv.h
index f90ae54fb..05ee26b57 100644
--- a/src/plugins/accounting_storage/pgsql/as_pg_resv.h
+++ b/src/plugins/accounting_storage/pgsql/as_pg_resv.h
@@ -10,7 +10,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/accounting_storage/pgsql/as_pg_rollup.c b/src/plugins/accounting_storage/pgsql/as_pg_rollup.c
index 970de84b3..d55c52e5e 100644
--- a/src/plugins/accounting_storage/pgsql/as_pg_rollup.c
+++ b/src/plugins/accounting_storage/pgsql/as_pg_rollup.c
@@ -9,7 +9,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/accounting_storage/pgsql/as_pg_rollup.h b/src/plugins/accounting_storage/pgsql/as_pg_rollup.h
index 2bf60df0d..75912196d 100644
--- a/src/plugins/accounting_storage/pgsql/as_pg_rollup.h
+++ b/src/plugins/accounting_storage/pgsql/as_pg_rollup.h
@@ -9,7 +9,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/accounting_storage/pgsql/as_pg_txn.c b/src/plugins/accounting_storage/pgsql/as_pg_txn.c
index 549f14fa2..42eef0cef 100644
--- a/src/plugins/accounting_storage/pgsql/as_pg_txn.c
+++ b/src/plugins/accounting_storage/pgsql/as_pg_txn.c
@@ -9,7 +9,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/accounting_storage/pgsql/as_pg_txn.h b/src/plugins/accounting_storage/pgsql/as_pg_txn.h
index 8a09f7d7c..0f3fdd2f5 100644
--- a/src/plugins/accounting_storage/pgsql/as_pg_txn.h
+++ b/src/plugins/accounting_storage/pgsql/as_pg_txn.h
@@ -9,7 +9,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/accounting_storage/pgsql/as_pg_usage.c b/src/plugins/accounting_storage/pgsql/as_pg_usage.c
index 101b41ceb..ac0353e0a 100644
--- a/src/plugins/accounting_storage/pgsql/as_pg_usage.c
+++ b/src/plugins/accounting_storage/pgsql/as_pg_usage.c
@@ -10,7 +10,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/accounting_storage/pgsql/as_pg_usage.h b/src/plugins/accounting_storage/pgsql/as_pg_usage.h
index e850a67ef..703b31a05 100644
--- a/src/plugins/accounting_storage/pgsql/as_pg_usage.h
+++ b/src/plugins/accounting_storage/pgsql/as_pg_usage.h
@@ -10,7 +10,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/accounting_storage/pgsql/as_pg_user.c b/src/plugins/accounting_storage/pgsql/as_pg_user.c
index e9b1c525b..211df3a10 100644
--- a/src/plugins/accounting_storage/pgsql/as_pg_user.c
+++ b/src/plugins/accounting_storage/pgsql/as_pg_user.c
@@ -9,7 +9,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/accounting_storage/pgsql/as_pg_user.h b/src/plugins/accounting_storage/pgsql/as_pg_user.h
index 7021291ba..3a6f49a58 100644
--- a/src/plugins/accounting_storage/pgsql/as_pg_user.h
+++ b/src/plugins/accounting_storage/pgsql/as_pg_user.h
@@ -9,7 +9,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/accounting_storage/pgsql/as_pg_wckey.c b/src/plugins/accounting_storage/pgsql/as_pg_wckey.c
index 65bf4831a..4077e5bc5 100644
--- a/src/plugins/accounting_storage/pgsql/as_pg_wckey.c
+++ b/src/plugins/accounting_storage/pgsql/as_pg_wckey.c
@@ -9,7 +9,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -397,7 +397,7 @@ as_pg_get_wckeys(pgsql_conn_t *pg_conn, uid_t uid,
 {
 	char *cond = NULL;
 	List wckey_list = NULL;
-	int with_usage, is_admin;
+	int is_admin;
 	slurmdb_user_rec_t user;
 
 	if (check_db_connection(pg_conn) != SLURM_SUCCESS)
@@ -410,7 +410,6 @@ as_pg_get_wckeys(pgsql_conn_t *pg_conn, uid_t uid,
 	}
 
 	if (wckey_cond) {
-		with_usage = wckey_cond->with_usage;
 		cond = _make_wckey_cond(wckey_cond);
 	}
 	if (!is_admin)
diff --git a/src/plugins/accounting_storage/pgsql/as_pg_wckey.h b/src/plugins/accounting_storage/pgsql/as_pg_wckey.h
index ca2ffdd4c..be6c980e5 100644
--- a/src/plugins/accounting_storage/pgsql/as_pg_wckey.h
+++ b/src/plugins/accounting_storage/pgsql/as_pg_wckey.h
@@ -9,7 +9,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/accounting_storage/slurmdbd/Makefile.in b/src/plugins/accounting_storage/slurmdbd/Makefile.in
index c3ced56a3..25b63c4c6 100644
--- a/src/plugins/accounting_storage/slurmdbd/Makefile.in
+++ b/src/plugins/accounting_storage/slurmdbd/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -141,7 +143,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -178,6 +183,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -235,6 +241,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -270,6 +277,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/accounting_storage/slurmdbd/accounting_storage_slurmdbd.c b/src/plugins/accounting_storage/slurmdbd/accounting_storage_slurmdbd.c
index 60f467805..3cac99a56 100644
--- a/src/plugins/accounting_storage/slurmdbd/accounting_storage_slurmdbd.c
+++ b/src/plugins/accounting_storage/slurmdbd/accounting_storage_slurmdbd.c
@@ -7,7 +7,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -51,7 +51,7 @@
 #include <sys/types.h>
 #include <pwd.h>
 
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm_errno.h"
 
 #include "src/common/slurm_xlator.h"
 #include "src/common/jobacct_common.h"
@@ -67,8 +67,13 @@
  * the slurmctld we will have these symbols defined.  They will get
  * overwritten when linking with the slurmctld.
  */
+#if defined(__APPLE__)
+slurm_ctl_conf_t slurmctld_conf __attribute__((weak_import));
+List job_list __attribute__((weak_import)) = NULL;
+#else
 slurm_ctl_conf_t slurmctld_conf;
 List job_list = NULL;
+#endif
 
 /*
  * These variables are required by the generic plugin interface.  If they
@@ -225,7 +230,11 @@ static void *_set_db_inx_thread(void *no_data)
 		 * is can make submitting jobs much
 		 * faster and not lock up the
 		 * controller waiting for the db inx
-		 * back from the database. */
+		 * back from the database.
+		 * Even though there is potential of modifying the
+		 * job db_index here we use a read lock since the
+		 * data isn't that sensitive and will only be updated
+		 * later in this function. */
 		lock_slurmctld(job_read_lock);
 		itr = list_iterator_create(job_list);
 		while ((job_ptr = list_next(itr))) {
@@ -237,6 +246,23 @@ static void *_set_db_inx_thread(void *no_data)
 					_partial_destroy_dbd_job_start(req);
 					continue;
 				}
+
+				/* We set the db_index to NO_VAL here
+				 * to avoid a potential race condition
+				 * where at this moment in time the
+				 * job is only eligible to run and
+				 * before this call to the DBD returns,
+				 * the job starts and needs to send
+				 * the start message as well, but
+				 * won't if the db_index is 0
+				 * resulting in lost information about
+				 * the allocation.  Setting
+				 * it to NO_VAL will inform the DBD of
+				 * this situation and it will handle
+				 * it accordingly.
+				 */
+				job_ptr->db_index = NO_VAL;
+
 				/* we only want to destory the pointer
 				   here not the contents (except
 				   block_id) so call special function
@@ -1948,6 +1974,19 @@ extern int clusteracct_storage_p_register_ctld(void *db_conn, uint16_t port)
 	return rc;
 }
 
+extern int clusteracct_storage_p_register_disconn_ctld(
+	void *db_conn, char *control_host)
+{
+	return SLURM_SUCCESS;
+}
+
+extern int clusteracct_storage_p_fini_ctld(void *db_conn,
+					   char *ip, uint16_t port,
+					   char *cluster_nodes)
+{
+	return SLURM_SUCCESS;
+}
+
 /*
  * load into the storage the start of a job
  */
@@ -2029,6 +2068,10 @@ extern int jobacct_storage_p_job_complete(void *db_conn,
 	memset(&req, 0, sizeof(dbd_job_comp_msg_t));
 
 	req.assoc_id    = job_ptr->assoc_id;
+	if (slurmctld_conf.acctng_store_job_comment)
+		req.comment     = job_ptr->comment;
+	else
+		req.comment     = NULL;
 	req.db_index    = job_ptr->db_index;
 	req.derived_ec  = job_ptr->derived_ec;
 	req.exit_code   = job_ptr->exit_code;
@@ -2072,43 +2115,47 @@ extern int jobacct_storage_p_step_start(void *db_conn,
 	slurmdbd_msg_t msg;
 	dbd_step_start_msg_t req;
 	char temp_bit[BUF_SIZE];
-
-#ifdef HAVE_BG
+	char *temp_nodes = NULL;
 	char *ionodes = NULL;
 
+#ifdef HAVE_BG_L_P
+
 	if (step_ptr->job_ptr->details)
-		cpus = step_ptr->job_ptr->details->min_cpus;
+		tasks = cpus = step_ptr->job_ptr->details->min_cpus;
 	else
-		cpus = step_ptr->job_ptr->cpu_cnt;
-	select_g_select_jobinfo_get(step_ptr->job_ptr->select_jobinfo,
-				    SELECT_JOBDATA_IONODES,
-				    &ionodes);
-	if (ionodes) {
-		snprintf(node_list, BUFFER_SIZE,
-			 "%s[%s]", step_ptr->job_ptr->nodes, ionodes);
-		xfree(ionodes);
-	} else {
-		snprintf(node_list, BUFFER_SIZE, "%s",
-			 step_ptr->job_ptr->nodes);
-	}
+		tasks = cpus = step_ptr->job_ptr->cpu_cnt;
 	select_g_select_jobinfo_get(step_ptr->job_ptr->select_jobinfo,
 				    SELECT_JOBDATA_NODE_CNT,
 				    &nodes);
+	temp_nodes = step_ptr->job_ptr->nodes;
 #else
 	if (!step_ptr->step_layout || !step_ptr->step_layout->task_cnt) {
 		cpus = tasks = step_ptr->job_ptr->total_cpus;
-		snprintf(node_list, BUFFER_SIZE, "%s",
-			 step_ptr->job_ptr->nodes);
 		nodes = step_ptr->job_ptr->total_nodes;
+		temp_nodes = step_ptr->job_ptr->nodes;
 	} else {
 		cpus = step_ptr->cpu_count;
 		tasks = step_ptr->step_layout->task_cnt;
+#ifdef HAVE_BGQ
+		select_g_select_jobinfo_get(step_ptr->select_jobinfo,
+					    SELECT_JOBDATA_NODE_CNT,
+					    &nodes);
+#else
 		nodes = step_ptr->step_layout->node_cnt;
+#endif
 		task_dist = step_ptr->step_layout->task_dist;
-		snprintf(node_list, BUFFER_SIZE, "%s",
-			 step_ptr->step_layout->node_list);
+		temp_nodes = step_ptr->step_layout->node_list;
 	}
 #endif
+	select_g_select_jobinfo_get(step_ptr->select_jobinfo,
+				    SELECT_JOBDATA_IONODES,
+				    &ionodes);
+	if (ionodes) {
+		snprintf(node_list, BUFFER_SIZE, "%s[%s]", temp_nodes, ionodes);
+		xfree(ionodes);
+	} else
+		snprintf(node_list, BUFFER_SIZE, "%s", temp_nodes);
+
 	if (step_ptr->step_id == SLURM_BATCH_SCRIPT) {
 		/* We overload gres with the node name of where the
 		   script was running.
@@ -2172,7 +2219,7 @@ extern int jobacct_storage_p_step_complete(void *db_conn,
 	slurmdbd_msg_t msg;
 	dbd_step_comp_msg_t req;
 
-#ifdef HAVE_BG
+#ifdef HAVE_BG_L_P
 	if (step_ptr->job_ptr->details)
 		tasks = cpus = step_ptr->job_ptr->details->min_cpus;
 	else
diff --git a/src/plugins/auth/Makefile.in b/src/plugins/auth/Makefile.in
index 7c983adae..658bb9914 100644
--- a/src/plugins/auth/Makefile.in
+++ b/src/plugins/auth/Makefile.in
@@ -62,6 +62,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -72,6 +73,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -133,7 +135,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -170,6 +175,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -227,6 +233,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -262,6 +269,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/auth/authd/Makefile.in b/src/plugins/auth/authd/Makefile.in
index 2050c67b0..120b42108 100644
--- a/src/plugins/auth/authd/Makefile.in
+++ b/src/plugins/auth/authd/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -139,7 +141,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -176,6 +181,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -233,6 +239,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -268,6 +275,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/auth/authd/auth_authd.c b/src/plugins/auth/authd/auth_authd.c
index 4c044eec3..76865ae38 100644
--- a/src/plugins/auth/authd/auth_authd.c
+++ b/src/plugins/auth/authd/auth_authd.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -61,7 +61,7 @@
 #define UNIX_PATH_MAX 108  /* Cribbed from linux/un.h */
 #endif
 
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm_errno.h"
 #include "src/common/slurm_xlator.h"
 
 /*
diff --git a/src/plugins/auth/munge/Makefile.in b/src/plugins/auth/munge/Makefile.in
index cacce455b..c9b7b4b46 100644
--- a/src/plugins/auth/munge/Makefile.in
+++ b/src/plugins/auth/munge/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -140,7 +142,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -177,6 +182,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -234,6 +240,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -269,6 +276,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/auth/munge/auth_munge.c b/src/plugins/auth/munge/auth_munge.c
index f898067e8..710064264 100644
--- a/src/plugins/auth/munge/auth_munge.c
+++ b/src/plugins/auth/munge/auth_munge.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -69,7 +69,7 @@
 
 #include <munge.h>
 
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm_errno.h"
 #include "src/common/slurm_xlator.h"
 
 #define MUNGE_ERRNO_OFFSET	1000
diff --git a/src/plugins/auth/none/Makefile.in b/src/plugins/auth/none/Makefile.in
index 8b8297005..5755cb1ab 100644
--- a/src/plugins/auth/none/Makefile.in
+++ b/src/plugins/auth/none/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -138,7 +140,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -175,6 +180,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -232,6 +238,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -267,6 +274,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/auth/none/auth_none.c b/src/plugins/auth/none/auth_none.c
index 5e75aa7b1..dfd2d9525 100644
--- a/src/plugins/auth/none/auth_none.c
+++ b/src/plugins/auth/none/auth_none.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -64,7 +64,7 @@
 
 #include <stdio.h>
 
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm_errno.h"
 #include "src/common/slurm_xlator.h"
 
 /*
diff --git a/src/plugins/checkpoint/Makefile.am b/src/plugins/checkpoint/Makefile.am
index 4d42811d1..f762a4993 100644
--- a/src/plugins/checkpoint/Makefile.am
+++ b/src/plugins/checkpoint/Makefile.am
@@ -1,3 +1,3 @@
 # Makefile for checkpoint plugins
 
-SUBDIRS = aix blcr none ompi xlch
+SUBDIRS = aix blcr none ompi
diff --git a/src/plugins/checkpoint/Makefile.in b/src/plugins/checkpoint/Makefile.in
index aa9c3ec7a..8e9a8381b 100644
--- a/src/plugins/checkpoint/Makefile.in
+++ b/src/plugins/checkpoint/Makefile.in
@@ -62,6 +62,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -72,6 +73,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -133,7 +135,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -170,6 +175,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -227,6 +233,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -262,6 +269,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
@@ -314,7 +322,7 @@ target_vendor = @target_vendor@
 top_build_prefix = @top_build_prefix@
 top_builddir = @top_builddir@
 top_srcdir = @top_srcdir@
-SUBDIRS = aix blcr none ompi xlch
+SUBDIRS = aix blcr none ompi
 all: all-recursive
 
 .SUFFIXES:
diff --git a/src/plugins/checkpoint/aix/Makefile.in b/src/plugins/checkpoint/aix/Makefile.in
index e71be0ea1..0d8f1f48c 100644
--- a/src/plugins/checkpoint/aix/Makefile.in
+++ b/src/plugins/checkpoint/aix/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -143,7 +145,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -180,6 +185,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -237,6 +243,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -272,6 +279,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/checkpoint/aix/checkpoint_aix.c b/src/plugins/checkpoint/aix/checkpoint_aix.c
index ddfdfb572..d4b5ff7b5 100644
--- a/src/plugins/checkpoint/aix/checkpoint_aix.c
+++ b/src/plugins/checkpoint/aix/checkpoint_aix.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -55,8 +55,9 @@
 #include <signal.h>
 #include <stdio.h>
 #include <time.h>
-#include <slurm/slurm.h>
-#include <slurm/slurm_errno.h>
+
+#include "slurm/slurm.h"
+#include "slurm/slurm_errno.h"
 
 #include "src/common/slurm_xlator.h"
 #include "src/common/list.h"
diff --git a/src/plugins/checkpoint/blcr/Makefile.in b/src/plugins/checkpoint/blcr/Makefile.in
index 842f9a05d..ed2363090 100644
--- a/src/plugins/checkpoint/blcr/Makefile.in
+++ b/src/plugins/checkpoint/blcr/Makefile.in
@@ -66,6 +66,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -76,6 +77,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -149,7 +151,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -186,6 +191,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -243,6 +249,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -278,6 +285,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/checkpoint/blcr/checkpoint_blcr.c b/src/plugins/checkpoint/blcr/checkpoint_blcr.c
index 7cf7d7b92..dfc5c7e5b 100644
--- a/src/plugins/checkpoint/blcr/checkpoint_blcr.c
+++ b/src/plugins/checkpoint/blcr/checkpoint_blcr.c
@@ -58,8 +58,9 @@
 #include <libgen.h>
 #include <sys/types.h>
 #include <sys/stat.h>
-#include <slurm/slurm.h>
-#include <slurm/slurm_errno.h>
+
+#include "slurm/slurm.h"
+#include "slurm/slurm_errno.h"
 
 #include "src/common/list.h"
 #include "src/common/log.h"
diff --git a/src/plugins/checkpoint/none/Makefile.in b/src/plugins/checkpoint/none/Makefile.in
index 0975427ff..ed8d22524 100644
--- a/src/plugins/checkpoint/none/Makefile.in
+++ b/src/plugins/checkpoint/none/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -138,7 +140,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -175,6 +180,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -232,6 +238,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -267,6 +274,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/checkpoint/none/checkpoint_none.c b/src/plugins/checkpoint/none/checkpoint_none.c
index f77d2299e..fc977e031 100644
--- a/src/plugins/checkpoint/none/checkpoint_none.c
+++ b/src/plugins/checkpoint/none/checkpoint_none.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -49,8 +49,9 @@
 #endif
 
 #include <stdio.h>
-#include <slurm/slurm.h>
-#include <slurm/slurm_errno.h>
+
+#include "slurm/slurm.h"
+#include "slurm/slurm_errno.h"
 
 #include "src/slurmctld/slurmctld.h"
 
diff --git a/src/plugins/checkpoint/ompi/Makefile.in b/src/plugins/checkpoint/ompi/Makefile.in
index 2c1ebdc08..0481e4616 100644
--- a/src/plugins/checkpoint/ompi/Makefile.in
+++ b/src/plugins/checkpoint/ompi/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -138,7 +140,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -175,6 +180,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -232,6 +238,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -267,6 +274,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/checkpoint/ompi/checkpoint_ompi.c b/src/plugins/checkpoint/ompi/checkpoint_ompi.c
index 26f49ea06..8d38d6c4a 100644
--- a/src/plugins/checkpoint/ompi/checkpoint_ompi.c
+++ b/src/plugins/checkpoint/ompi/checkpoint_ompi.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -49,8 +49,9 @@
 #endif
 
 #include <stdio.h>
-#include <slurm/slurm.h>
-#include <slurm/slurm_errno.h>
+
+#include "slurm/slurm.h"
+#include "slurm/slurm_errno.h"
 
 #include "src/common/pack.h"
 #include "src/common/xassert.h"
diff --git a/src/plugins/checkpoint/xlch/Makefile.am b/src/plugins/checkpoint/xlch/Makefile.am
deleted file mode 100644
index 4f06518c6..000000000
--- a/src/plugins/checkpoint/xlch/Makefile.am
+++ /dev/null
@@ -1,17 +0,0 @@
-# Makefile for checkpoint/xlch plugin
-
-AUTOMAKE_OPTIONS = foreign
-
-PLUGIN_FLAGS = -module -avoid-version --export-dynamic
-
-INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common
-
-pkglib_LTLIBRARIES = checkpoint_xlch.la
-checkpoint_xlch_la_SOURCES = checkpoint_xlch.c
-checkpoint_xlch_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
-
-
-force:
-
-$(checkpoint_xlch_LDADD) : force
-	@cd `dirname $@` && $(MAKE) `basename $@`
diff --git a/src/plugins/checkpoint/xlch/checkpoint_xlch.c b/src/plugins/checkpoint/xlch/checkpoint_xlch.c
deleted file mode 100644
index 5562a7481..000000000
--- a/src/plugins/checkpoint/xlch/checkpoint_xlch.c
+++ /dev/null
@@ -1,714 +0,0 @@
-/*****************************************************************************\
- *  checkpoint_xlch.c - XLCH slurm checkpoint plugin.
- *  $Id: checkpoint_xlch.c 0001 2006-10-31 10:55:11Z hjcao $
- *****************************************************************************
- *  Derived from checkpoint_aix.c
- *  Copyright (C) 2007-2009 National University of Defense Technology, China.
- *  Written by Hongia Cao.
- *  CODE-OCEC-09-009. All rights reserved.
- *
- *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
- *  Please also read the included file: DISCLAIMER.
- *
- *  SLURM is free software; you can redistribute it and/or modify it under
- *  the terms of the GNU General Public License as published by the Free
- *  Software Foundation; either version 2 of the License, or (at your option)
- *  any later version.
- *
- *  In addition, as a special exception, the copyright holders give permission
- *  to link the code of portions of this program with the OpenSSL library under
- *  certain conditions as described in each individual source file, and
- *  distribute linked combinations including the two. You must obey the GNU
- *  General Public License in all respects for all of the code used other than
- *  OpenSSL. If you modify file(s) with this exception, you may extend this
- *  exception to your version of the file(s), but you are not obligated to do
- *  so. If you do not wish to do so, delete this exception statement from your
- *  version.  If you delete this exception statement from all source files in
- *  the program, then also delete it here.
- *
- *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
- *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
- *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
- *  details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with SLURM; if not, write to the Free Software Foundation, Inc.,
- *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
-\*****************************************************************************/
-
-#ifdef HAVE_CONFIG_H
-#  include "config.h"
-#endif
-
-#if HAVE_STDINT_H
-#  include <stdint.h>
-#endif
-#if HAVE_INTTYPES_H
-#  include <inttypes.h>
-#endif
-#ifdef WITH_PTHREADS
-#  include <pthread.h>
-#endif
-
-#include <signal.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <time.h>
-#include <unistd.h>
-#include <slurm/slurm.h>
-#include <slurm/slurm_errno.h>
-
-#include "src/common/list.h"
-#include "src/common/log.h"
-#include "src/common/pack.h"
-#include "src/common/xassert.h"
-#include "src/common/xstring.h"
-#include "src/common/xmalloc.h"
-#include "src/slurmctld/agent.h"
-#include "src/slurmctld/slurmctld.h"
-#include "src/slurmd/slurmstepd/slurmstepd_job.h"
-
-#define SIGCKPT SIGUSR2
-
-struct check_job_info {
-	uint16_t disabled;	/* counter, checkpointable only if zero */
-	uint16_t task_cnt;
-	uint16_t reply_cnt;
-	uint16_t wait_time;
-	time_t   time_stamp;	/* begin or end checkpoint time */
-	uint32_t error_code;
-	char    *error_msg;
-	uint16_t sig_done;
-	bitstr_t *replied;	/* which task has replied the checkpoint.
-				   XXX: only valid if in operation */
-	pthread_mutex_t mutex;
-};
-
-static void _send_sig(uint32_t job_id, uint32_t step_id, uint16_t signal,
-		      char *nodelist);
-
-static int _step_ckpt(struct step_record * step_ptr, uint16_t wait,
-		      char *image_dir, uint16_t sig_timeout);
-
-/* checkpoint request timeout processing */
-static pthread_t	ckpt_agent_tid = 0;
-static pthread_mutex_t	ckpt_agent_mutex = PTHREAD_MUTEX_INITIALIZER;
-static List		ckpt_timeout_list = NULL;
-struct ckpt_timeout_info {
-	uint32_t   job_id;
-	uint32_t   step_id;
-	uint16_t   signal;
-	time_t     start_time;
-	time_t     end_time;
-	char*      nodelist;
-};
-static void *_ckpt_agent_thr(void *arg);
-static void _ckpt_enqueue_timeout(uint32_t job_id, uint32_t step_id,
-				  time_t start_time, uint16_t signal,
-				  uint16_t wait_time, char *nodelist);
-static void  _ckpt_dequeue_timeout(uint32_t job_id, uint32_t step_id,
-				   time_t start_time);
-static void  _ckpt_timeout_free(void *rec);
-static void  _ckpt_signal_step(struct ckpt_timeout_info *rec);
-
-static int _on_ckpt_complete(struct step_record *step_ptr, uint32_t error_code);
-
-static char *scch_path = SLURM_PREFIX "/sbin/scch";
-
-/*
- * These variables are required by the generic plugin interface.  If they
- * are not found in the plugin, the plugin loader will ignore it.
- *
- * plugin_name - a string giving a human-readable description of the
- * plugin.  There is no maximum length, but the symbol must refer to
- * a valid string.
- *
- * plugin_type - a string suggesting the type of the plugin or its
- * applicability to a particular form of data or method of data handling.
- * If the low-level plugin API is used, the contents of this string are
- * unimportant and may be anything.  SLURM uses the higher-level plugin
- * interface which requires this string to be of the form
- *
- *	<application>/<method>
- *
- * where <application> is a description of the intended application of
- * the plugin (e.g., "checkpoint" for SLURM checkpoint) and <method>
- * is a description of how this plugin satisfies that application.  SLURM will
- * only load checkpoint plugins if the plugin_type string has a
- * prefix of "checkpoint/".
- *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as the checkpoint API matures.
- */
-const char plugin_name[]       	= "XLCH checkpoint plugin";
-const char plugin_type[]       	= "checkpoint/xlch";
-const uint32_t plugin_version	= 100;
-
-/*
- * init() is called when the plugin is loaded, before any other functions
- * are called.  Put global initialization here.
- */
-extern int init ( void )
-{
-	pthread_attr_t attr;
-
-	slurm_attr_init(&attr);
-	if (pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED))
-		error("pthread_attr_setdetachstate: %m");
-	if (pthread_create(&ckpt_agent_tid, &attr, _ckpt_agent_thr, NULL)) {
-		error("pthread_create: %m");
-		return SLURM_ERROR;
-	}
-	slurm_attr_destroy(&attr);
-
-	return SLURM_SUCCESS;
-}
-
-
-extern int fini ( void )
-{
-	int i;
-
-	if (!ckpt_agent_tid)
-		return SLURM_SUCCESS;
-
-	for (i=0; i<4; i++) {
-		if (pthread_cancel(ckpt_agent_tid)) {
-			ckpt_agent_tid = 0;
-			return SLURM_SUCCESS;
-		}
-		usleep(1000);
-	}
-	error("Could not kill checkpoint pthread");
-	return SLURM_ERROR;
-}
-
-/*
- * The remainder of this file implements the standard SLURM checkpoint API.
- */
-
-extern int slurm_ckpt_op (uint32_t job_id, uint32_t step_id,
-			  struct step_record *step_ptr, uint16_t op,
-			  uint16_t data, char *image_dir, time_t * event_time,
-			  uint32_t *error_code, char **error_msg )
-{
-	int rc = SLURM_SUCCESS;
-	struct check_job_info *check_ptr;
-
-	/* checkpoint/xlch does not support checkpoint batch jobs */
-	if (step_id == SLURM_BATCH_SCRIPT)
-		return ESLURM_NOT_SUPPORTED;
-
-	xassert(step_ptr);
-	check_ptr = (struct check_job_info *) step_ptr->check_job;
-	check_ptr->task_cnt = step_ptr->step_layout->task_cnt; /* set it early */
-	xassert(check_ptr);
-
-	slurm_mutex_lock (&check_ptr->mutex);
-
-	switch (op) {
-		case CHECK_ABLE:
-			if (check_ptr->disabled)
-				rc = ESLURM_DISABLED;
-			else {
-				if (check_ptr->reply_cnt < check_ptr->task_cnt)
-					*event_time = check_ptr->time_stamp;
-				rc = SLURM_SUCCESS;
-			}
-			break;
-		case CHECK_DISABLE:
-			check_ptr->disabled++;
-			break;
-		case CHECK_ENABLE:
-			check_ptr->disabled--;
-			break;
-		case CHECK_CREATE:
-			if (check_ptr->time_stamp != 0) {
-				rc = EALREADY;
-				break;
-			}
-			check_ptr->time_stamp = time(NULL);
-			check_ptr->reply_cnt = 0;
-			check_ptr->replied = bit_alloc(check_ptr->task_cnt);
-			check_ptr->error_code = 0;
-			check_ptr->sig_done = 0;
-			xfree(check_ptr->error_msg);
-			rc = _step_ckpt(step_ptr, data, image_dir, SIGKILL);
-			break;
-		case CHECK_VACATE:
-			if (check_ptr->time_stamp != 0) {
-				rc = EALREADY;
-				break;
-			}
-			check_ptr->time_stamp = time(NULL);
-			check_ptr->reply_cnt = 0;
-			check_ptr->replied = bit_alloc(check_ptr->task_cnt);
-			check_ptr->error_code = 0;
-			check_ptr->sig_done = SIGTERM; /* exit elegantly */
-			xfree(check_ptr->error_msg);
-			rc = _step_ckpt(step_ptr, data, image_dir, SIGKILL);
-			break;
-		case CHECK_RESTART:
-		case CHECK_REQUEUE:
-			rc = ESLURM_NOT_SUPPORTED;
-			break;
-		case CHECK_ERROR:
-			xassert(error_code);
-			xassert(error_msg);
-			*error_code = check_ptr->error_code;
-			xfree(*error_msg);
-			*error_msg = xstrdup(check_ptr->error_msg);
-			break;
-		default:
-			error("Invalid checkpoint operation: %d", op);
-			rc = EINVAL;
-	}
-
-	slurm_mutex_unlock (&check_ptr->mutex);
-
-	return rc;
-}
-
-/* this function will not be called by us */
-extern int slurm_ckpt_comp ( struct step_record * step_ptr, time_t event_time,
-		uint32_t error_code, char *error_msg )
-{
-	error("checkpoint/xlch: slurm_ckpt_comp not implemented");
-	return ESLURM_NOT_SUPPORTED;
-}
-
-extern int slurm_ckpt_task_comp ( struct step_record * step_ptr, uint32_t task_id,
-				  time_t event_time, uint32_t error_code, char *error_msg )
-{
-	struct check_job_info *check_ptr;
-	int rc = SLURM_SUCCESS;
-
-	xassert(step_ptr);
-	check_ptr = (struct check_job_info *) step_ptr->check_job;
-	xassert(check_ptr);
-
-	/* XXX: we need a mutex here, since in proc_req only JOB_READ locked */
-	debug3("slurm_ckpt_task_comp: job %u.%hu, task %u, error %d",
-	       step_ptr->job_ptr->job_id, step_ptr->step_id, task_id,
-	       error_code);
-
-	slurm_mutex_lock (&check_ptr->mutex);
-
-	/*
-	 * for now we do not use event_time to identify operation and always
-	 * set it 0
-	 * TODO: consider send event_time to the task via sigqueue().
-	 */
-	if (event_time && (event_time != check_ptr->time_stamp)) {
-		rc = ESLURM_ALREADY_DONE;
-		goto out;
-	}
-
-	if (!check_ptr->replied || bit_test (check_ptr->replied, task_id)) {
-		rc = ESLURM_ALREADY_DONE;
-		goto out;
-	}
-
-	if ((uint16_t)task_id >= check_ptr->task_cnt) {
-		error("invalid task_id %u, task_cnt: %hu", task_id,
-		      check_ptr->task_cnt);
-		rc = EINVAL;
-		goto out;
-	}
-	bit_set (check_ptr->replied, task_id);
-	check_ptr->reply_cnt ++;
-
-	/* TODO: check the error_code */
-	if (error_code > check_ptr->error_code) {
-		info("slurm_ckpt_task_comp error %u: %s", error_code, error_msg);
-		check_ptr->error_code = error_code;
-		xfree(check_ptr->error_msg);
-		check_ptr->error_msg = xstrdup(error_msg);
-	}
-
-	/* We need an error-free reply from each task to note completion */
-	if (check_ptr->reply_cnt == check_ptr->task_cnt) { /* all tasks done */
-		time_t now = time(NULL);
-		long delay = (long) difftime(now, check_ptr->time_stamp);
-		info("Checkpoint complete for job %u.%u in %ld seconds",
-		     step_ptr->job_ptr->job_id, step_ptr->step_id,
-		     delay);
-		/* remove the timeout */
-		_ckpt_dequeue_timeout(step_ptr->job_ptr->job_id,
-				      step_ptr->step_id, check_ptr->time_stamp);
-		/* free the replied bitstr */
-		FREE_NULL_BITMAP (check_ptr->replied);
-
-		if (check_ptr->sig_done) {
-			info ("checkpoint step %u.%hu done, sending signal %hu",
-			      step_ptr->job_ptr->job_id,
-			      step_ptr->step_id, check_ptr->sig_done);
-			_send_sig(step_ptr->job_ptr->job_id, step_ptr->step_id,
-				  check_ptr->sig_done,
-				  step_ptr->step_layout->node_list);
-		}
-
-		_on_ckpt_complete(step_ptr, check_ptr->error_code); /* how about we execute a program? */
-
-		check_ptr->time_stamp = 0; /* this enables checkpoint again */
-	}
-
- out:
-	slurm_mutex_unlock (&check_ptr->mutex);
-	return rc;
-}
-
-extern int slurm_ckpt_alloc_job(check_jobinfo_t *jobinfo)
-{
-	struct check_job_info *check_ptr;
-
-	check_ptr = xmalloc(sizeof(struct check_job_info));
-	slurm_mutex_init (&check_ptr->mutex);
-	*jobinfo = (check_jobinfo_t) check_ptr;
-	return SLURM_SUCCESS;
-}
-
-extern int slurm_ckpt_free_job(check_jobinfo_t jobinfo)
-{
-	struct check_job_info *check_ptr = (struct check_job_info *)jobinfo;
-	if (check_ptr) {
-		xfree (check_ptr->error_msg);
-		FREE_NULL_BITMAP (check_ptr->replied);
-	}
-	xfree(jobinfo);
-	return SLURM_SUCCESS;
-}
-
-extern int slurm_ckpt_pack_job(check_jobinfo_t jobinfo, Buf buffer,
-			       uint16_t protocol_version)
-{
-	struct check_job_info *check_ptr =
-		(struct check_job_info *)jobinfo;
-
-	if(protocol_version >= SLURM_2_1_PROTOCOL_VERSION) {
-		pack16(check_ptr->disabled, buffer);
-		pack16(check_ptr->task_cnt, buffer);
-		pack16(check_ptr->reply_cnt, buffer);
-		pack16(check_ptr->wait_time, buffer);
-		pack_bit_fmt(check_ptr->replied, buffer);
-
-		pack32(check_ptr->error_code, buffer);
-		packstr(check_ptr->error_msg, buffer);
-		pack_time(check_ptr->time_stamp, buffer);
-	}
-
-	return SLURM_SUCCESS;
-}
-
-extern int slurm_ckpt_unpack_job(check_jobinfo_t jobinfo, Buf buffer,
-				 uint16_t protocol_version)
-{
-	uint32_t uint32_tmp;
-	char *task_inx_str;
-	struct check_job_info *check_ptr =
-		(struct check_job_info *)jobinfo;
-
-	if(protocol_version >= SLURM_2_1_PROTOCOL_VERSION) {
-		safe_unpack16(&check_ptr->disabled, buffer);
-		safe_unpack16(&check_ptr->task_cnt, buffer);
-		safe_unpack16(&check_ptr->reply_cnt, buffer);
-		safe_unpack16(&check_ptr->wait_time, buffer);
-		safe_unpackstr_xmalloc(&task_inx_str, &uint32_tmp, buffer);
-		if (task_inx_str == NULL)
-			check_ptr->replied = NULL;
-		else {
-			check_ptr->replied = bit_alloc(check_ptr->task_cnt);
-			bit_unfmt(check_ptr->replied, task_inx_str);
-			xfree(task_inx_str);
-		}
-
-		safe_unpack32(&check_ptr->error_code, buffer);
-		safe_unpackstr_xmalloc(&check_ptr->error_msg,
-				       &uint32_tmp, buffer);
-		safe_unpack_time(&check_ptr->time_stamp, buffer);
-	}
-
-	return SLURM_SUCCESS;
-
-    unpack_error:
-	xfree(check_ptr->error_msg);
-	return SLURM_ERROR;
-}
-
-/* Send a signal RPC to a list of nodes */
-static void _send_sig(uint32_t job_id, uint32_t step_id, uint16_t signal,
-		      char *nodelist)
-{
-	agent_arg_t *agent_args;
-	kill_tasks_msg_t *kill_tasks_msg;
-
-	kill_tasks_msg = xmalloc(sizeof(kill_tasks_msg_t));
-	kill_tasks_msg->job_id		= job_id;
-	kill_tasks_msg->job_step_id	= step_id;
-	kill_tasks_msg->signal		= signal;
-
-	agent_args = xmalloc(sizeof(agent_arg_t));
-	agent_args->msg_type		= REQUEST_SIGNAL_TASKS;
-	agent_args->retry		= 1;
-	agent_args->msg_args		= kill_tasks_msg;
-	agent_args->hostlist            = hostlist_create(nodelist);
-	agent_args->node_count		= hostlist_count(agent_args->hostlist);
-
-	agent_queue_request(agent_args);
-}
-
-/* Send checkpoint request to the processes of a job step.
- * If the request times out, send sig_timeout. */
-static int _step_ckpt(struct step_record * step_ptr, uint16_t wait,
-		      char *image_dir, uint16_t sig_timeout)
-{
-	struct check_job_info *check_ptr;
-	struct job_record *job_ptr;
-
-	xassert(step_ptr);
-	check_ptr = (struct check_job_info *) step_ptr->check_job;
-	xassert(check_ptr);
-	job_ptr = step_ptr->job_ptr;
-	xassert(job_ptr);
-
-	if (IS_JOB_FINISHED(job_ptr))
-		return ESLURM_ALREADY_DONE;
-
-	if (check_ptr->disabled)
-		return ESLURM_DISABLED;
-
-	if (!check_ptr->task_cnt) {
-		error("_step_ckpt: job %u.%u has no tasks to checkpoint",
-			job_ptr->job_id,
-			step_ptr->step_id);
-		return ESLURM_INVALID_NODE_NAME;
-	}
-	char* nodelist = xstrdup (step_ptr->step_layout->node_list);
-	check_ptr->wait_time  = wait; /* TODO: how about change wait_time according to task_cnt? */
-
-	checkpoint_tasks(step_ptr->job_ptr->job_id, step_ptr->step_id,
-			 check_ptr->time_stamp, image_dir, wait, nodelist);
-
-	_ckpt_enqueue_timeout(step_ptr->job_ptr->job_id,
-			      step_ptr->step_id, check_ptr->time_stamp,
-			      sig_timeout, check_ptr->wait_time, nodelist);
-
-	info("checkpoint requested for job %u.%u", job_ptr->job_id,
-	     step_ptr->step_id);
-	xfree (nodelist);
-	return SLURM_SUCCESS;
-}
-
-
-static void _ckpt_signal_step(struct ckpt_timeout_info *rec)
-{
-	/* debug("signal %u.%u %u", rec->job_id, rec->step_id, rec->signal); */
-	_send_sig(rec->job_id, rec->step_id, rec->signal, rec->nodelist);
-}
-
-/* Checkpoint processing pthread
- * Never returns, but is cancelled on plugin termiantion */
-static void *_ckpt_agent_thr(void *arg)
-{
-	ListIterator iter;
-	struct ckpt_timeout_info *rec;
-	time_t now;
-
-	while (1) {
-		sleep(1);
-		if (!ckpt_timeout_list)
-			continue;
-
-		now = time(NULL);
-		iter = list_iterator_create(ckpt_timeout_list);
-		slurm_mutex_lock(&ckpt_agent_mutex);
-		/* look for and process any timeouts */
-		while ((rec = list_next(iter))) {
-			if (rec->end_time > now)
-				continue;
-			info("checkpoint timeout for %u.%u",
-				rec->job_id, rec->step_id);
-			_ckpt_signal_step(rec);
-			list_delete_item(iter);
-		}
-		slurm_mutex_unlock(&ckpt_agent_mutex);
-		list_iterator_destroy(iter);
-	}
-	return NULL;
-}
-
-/* Queue a checkpoint request timeout */
-static void _ckpt_enqueue_timeout(uint32_t job_id, uint32_t step_id,
-				  time_t start_time, uint16_t signal,
-				  uint16_t wait_time, char *nodelist)
-{
-	struct ckpt_timeout_info *rec;
-
-	if ((wait_time == 0) || (signal == 0)) /* if signal == 0, don't enqueue it */
-		return;
-
-	slurm_mutex_lock(&ckpt_agent_mutex);
-	if (!ckpt_timeout_list)
-		ckpt_timeout_list = list_create(_ckpt_timeout_free);
-	rec = xmalloc(sizeof(struct ckpt_timeout_info));
-	rec->job_id	= job_id;
-	rec->step_id	= step_id;
-	rec->signal     = signal;
-	rec->start_time	= start_time;
-	rec->end_time	= start_time + wait_time;
-	rec->nodelist	= xstrdup(nodelist);
-	/* debug("enqueue %u.%u %u", job_id, step_id, wait_time); */
-	list_enqueue(ckpt_timeout_list, rec);
-	slurm_mutex_unlock(&ckpt_agent_mutex);
-}
-
-static void _ckpt_timeout_free(void *rec)
-{
-	struct ckpt_timeout_info *ckpt_rec = (struct ckpt_timeout_info *)rec;
-
-	if (ckpt_rec) {
-		xfree(ckpt_rec->nodelist);
-		xfree(ckpt_rec);
-	}
-}
-
-/* De-queue a checkpoint timeout request. The operation completed */
-static void _ckpt_dequeue_timeout(uint32_t job_id, uint32_t step_id,
-		time_t start_time)
-{
-	ListIterator iter;
-	struct ckpt_timeout_info *rec;
-
-	slurm_mutex_lock(&ckpt_agent_mutex);
-	if (!ckpt_timeout_list)
-		goto fini;
-	iter = list_iterator_create(ckpt_timeout_list);
-	while ((rec = list_next(iter))) {
-		if ((rec->job_id != job_id) || (rec->step_id != step_id)
-		    ||  (start_time && (rec->start_time != start_time)))
-			continue;
-		/* debug("dequeue %u.%u", job_id, step_id); */
-		list_delete_item(iter);
-		break;
-	}
-	list_iterator_destroy(iter);
- fini:
-	slurm_mutex_unlock(&ckpt_agent_mutex);
-}
-
-
-/* a checkpoint completed, process the images files */
-static int _on_ckpt_complete(struct step_record *step_ptr, uint32_t error_code)
-{
-	int status;
-	pid_t cpid;
-
-	if (access(scch_path, R_OK | X_OK) < 0) {
-		info("Access denied for %s: %m", scch_path);
-		return SLURM_ERROR;
-	}
-
-	if ((cpid = fork()) < 0) {
-		error ("_on_ckpt_complete: fork: %m");
-		return SLURM_ERROR;
-	}
-
-	if (cpid == 0) {
-		/*
-		 * We don't fork and wait the child process because the job
-		 * read lock is held. It could take minutes to delete/move
-		 * the checkpoint image files. So there is a race condition
-		 * of the user requesting another checkpoint before SCCH
-		 * finishes.
-		 */
-		/* fork twice to avoid zombies */
-		if ((cpid = fork()) < 0) {
-			error ("_on_ckpt_complete: second fork: %m");
-			exit(127);
-		}
-		/* grand child execs */
-		if (cpid == 0) {
-			char *args[6];
-			char str_job[11];
-			char str_step[11];
-			char str_err[11];
-
-			/*
-			 * XXX: if slurmctld is running as root, we must setuid here.
-			 * But what if slurmctld is running as SlurmUser?
-			 * How about we make scch setuid and pass the user/group to it?
-			 */
-			if (geteuid() == 0) { /* root */
-				if (setgid(step_ptr->job_ptr->group_id) < 0) {
-					error ("_on_ckpt_complete: failed to "
-						"setgid: %m");
-					exit(127);
-				}
-				if (setuid(step_ptr->job_ptr->user_id) < 0) {
-					error ("_on_ckpt_complete: failed to "
-						"setuid: %m");
-					exit(127);
-				}
-			}
-			snprintf(str_job,  sizeof(str_job),  "%u",
-				 step_ptr->job_ptr->job_id);
-			snprintf(str_step, sizeof(str_step), "%hu",
-				 step_ptr->step_id);
-			snprintf(str_err,  sizeof(str_err),  "%u",
-				 error_code);
-
-			args[0] = scch_path;
-			args[1] = str_job;
-			args[2] = str_step;
-			args[3] = str_err;
-			args[4] = step_ptr->ckpt_dir;
-			args[5] = NULL;
-
-			execv(scch_path, args);
-			error("help! %m");
-			exit(127);
-		}
-		/* child just exits */
-		exit(0);
-	}
-
-	while(1) {
-		if (waitpid(cpid, &status, 0) < 0 && errno == EINTR)
-			continue;
-		break;
-	}
-
-	return SLURM_SUCCESS;
-}
-
-extern int slurm_ckpt_stepd_prefork(void *slurmd_job)
-{
-	return SLURM_SUCCESS;
-}
-
-extern int slurm_ckpt_signal_tasks(void *slurmd_job)
-{
-	/* send SIGCKPT to all tasks */
-	return killpg(((slurmd_job_t *)slurmd_job)->pgid, SIGCKPT);
-}
-
-extern int slurm_ckpt_restart_task(void *slurmd_job, char *image_dir, int gtid)
-{
-	char buf[256];
-
-	if (snprintf(buf, sizeof(buf), "%s/task.%d.ckpt", image_dir, gtid) >= sizeof(buf)) {
-		error("slurm buffer size too small");
-		return SLURM_FAILURE;
-	}
-	/* restart the task and update its environment */
-#if 0
-	restart(buf, ((slurmd_job_t *)slurmd_job)->env);
-#endif
-
-	error("restart() failed: rank=%d, file=%s: %m", gtid, buf);
-	return SLURM_FAILURE;
-}
diff --git a/src/plugins/crypto/Makefile.in b/src/plugins/crypto/Makefile.in
index 8f00a8f9c..64b4c852b 100644
--- a/src/plugins/crypto/Makefile.in
+++ b/src/plugins/crypto/Makefile.in
@@ -62,6 +62,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -72,6 +73,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -133,7 +135,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -170,6 +175,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -227,6 +233,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -262,6 +269,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/crypto/munge/Makefile.in b/src/plugins/crypto/munge/Makefile.in
index f3d73adbb..bca4ca5f2 100644
--- a/src/plugins/crypto/munge/Makefile.in
+++ b/src/plugins/crypto/munge/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -140,7 +142,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -177,6 +182,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -234,6 +240,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -269,6 +276,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/crypto/munge/crypto_munge.c b/src/plugins/crypto/munge/crypto_munge.c
index b81676750..12bf2b5bd 100644
--- a/src/plugins/crypto/munge/crypto_munge.c
+++ b/src/plugins/crypto/munge/crypto_munge.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -58,7 +58,8 @@
 #define GPL_LICENSED 1
 #include <munge.h>
 
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm_errno.h"
+
 #include "src/common/slurm_protocol_api.h"
 #include "src/common/xassert.h"
 #include "src/common/xmalloc.h"
diff --git a/src/plugins/crypto/openssl/Makefile.in b/src/plugins/crypto/openssl/Makefile.in
index 4ffa9cbe5..7a9a98624 100644
--- a/src/plugins/crypto/openssl/Makefile.in
+++ b/src/plugins/crypto/openssl/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -145,7 +147,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -182,6 +187,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -239,6 +245,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -274,6 +281,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/crypto/openssl/crypto_openssl.c b/src/plugins/crypto/openssl/crypto_openssl.c
index 970dd997c..fa2a2836a 100644
--- a/src/plugins/crypto/openssl/crypto_openssl.c
+++ b/src/plugins/crypto/openssl/crypto_openssl.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -59,7 +59,8 @@
 #include <openssl/pem.h>
 #include <openssl/err.h>
 
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm_errno.h"
+
 #include "src/common/xassert.h"
 #include "src/common/xmalloc.h"
 
diff --git a/src/plugins/gres/Makefile.in b/src/plugins/gres/Makefile.in
index 903fcec9f..71ab0712f 100644
--- a/src/plugins/gres/Makefile.in
+++ b/src/plugins/gres/Makefile.in
@@ -62,6 +62,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -72,6 +73,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -133,7 +135,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -170,6 +175,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -227,6 +233,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -262,6 +269,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/gres/gpu/Makefile.in b/src/plugins/gres/gpu/Makefile.in
index 1975c6b5e..2e0bcd384 100644
--- a/src/plugins/gres/gpu/Makefile.in
+++ b/src/plugins/gres/gpu/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -139,7 +141,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -176,6 +181,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -233,6 +239,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -268,6 +275,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/gres/gpu/gres_gpu.c b/src/plugins/gres/gpu/gres_gpu.c
index 56168ac99..3e60f18bb 100644
--- a/src/plugins/gres/gpu/gres_gpu.c
+++ b/src/plugins/gres/gpu/gres_gpu.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -68,8 +68,8 @@
 #include <stdio.h>
 #include <stdlib.h>
 
-#include <slurm/slurm.h>
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm.h"
+#include "slurm/slurm_errno.h"
 
 #include "src/common/slurm_xlator.h"
 #include "src/common/bitstring.h"
@@ -166,7 +166,10 @@ extern void job_set_env(char ***job_env_ptr, void *gres_ptr)
 				    dev_list);
 		xfree(dev_list);
 	} else {
-		env_array_overwrite(job_env_ptr,"CUDA_VISIBLE_DEVICES", "");
+		/* The gres.conf file must identify specific device files
+		 * in order to set the CUDA_VISIBLE_DEVICES env var */
+		error("gres/gpu unable to set CUDA_VISIBLE_DEVICES, "
+		      "no device files configured");
 	}
 }
 
@@ -200,6 +203,9 @@ extern void step_set_env(char ***job_env_ptr, void *gres_ptr)
 				    dev_list);
 		xfree(dev_list);
 	} else {
-		env_array_overwrite(job_env_ptr,"CUDA_VISIBLE_DEVICES", "");
+		/* The gres.conf file must identify specific device files
+		 * in order to set the CUDA_VISIBLE_DEVICES env var */
+		error("gres/gpu unable to set CUDA_VISIBLE_DEVICES, "
+		      "no device files configured");
 	}
 }
diff --git a/src/plugins/gres/nic/Makefile.in b/src/plugins/gres/nic/Makefile.in
index f3e0fe8d7..432036d70 100644
--- a/src/plugins/gres/nic/Makefile.in
+++ b/src/plugins/gres/nic/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -139,7 +141,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -176,6 +181,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -233,6 +239,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -268,6 +275,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/gres/nic/gres_nic.c b/src/plugins/gres/nic/gres_nic.c
index da120712d..b50af5df8 100644
--- a/src/plugins/gres/nic/gres_nic.c
+++ b/src/plugins/gres/nic/gres_nic.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -68,8 +68,8 @@
 #include <stdio.h>
 #include <stdlib.h>
 
-#include <slurm/slurm.h>
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm.h"
+#include "slurm/slurm_errno.h"
 
 #include "src/common/slurm_xlator.h"
 #include "src/common/bitstring.h"
diff --git a/src/plugins/job_submit/Makefile.in b/src/plugins/job_submit/Makefile.in
index 96825d9cd..efd2a9510 100644
--- a/src/plugins/job_submit/Makefile.in
+++ b/src/plugins/job_submit/Makefile.in
@@ -62,6 +62,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -72,6 +73,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -133,7 +135,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -170,6 +175,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -227,6 +233,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -262,6 +269,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/job_submit/cnode/Makefile.in b/src/plugins/job_submit/cnode/Makefile.in
index 33101d4ba..23fc9f482 100644
--- a/src/plugins/job_submit/cnode/Makefile.in
+++ b/src/plugins/job_submit/cnode/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -138,7 +140,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -175,6 +180,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -232,6 +238,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -267,6 +274,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/job_submit/cnode/job_submit_cnode.c b/src/plugins/job_submit/cnode/job_submit_cnode.c
index ed48f3ae2..42a78d421 100644
--- a/src/plugins/job_submit/cnode/job_submit_cnode.c
+++ b/src/plugins/job_submit/cnode/job_submit_cnode.c
@@ -12,7 +12,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -68,7 +68,7 @@
 
 #include <stdio.h>
 
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm_errno.h"
 #include "src/common/slurm_xlator.h"
 #include "src/slurmctld/slurmctld.h"
 
diff --git a/src/plugins/job_submit/defaults/Makefile.in b/src/plugins/job_submit/defaults/Makefile.in
index ca0552350..8b27d8875 100644
--- a/src/plugins/job_submit/defaults/Makefile.in
+++ b/src/plugins/job_submit/defaults/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -138,7 +140,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -175,6 +180,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -232,6 +238,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -267,6 +274,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/job_submit/defaults/job_submit_defaults.c b/src/plugins/job_submit/defaults/job_submit_defaults.c
index f41366431..49a2fd0ee 100644
--- a/src/plugins/job_submit/defaults/job_submit_defaults.c
+++ b/src/plugins/job_submit/defaults/job_submit_defaults.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -63,7 +63,7 @@
 
 #include <stdio.h>
 
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm_errno.h"
 #include "src/common/slurm_xlator.h"
 #include "src/slurmctld/slurmctld.h"
 
diff --git a/src/plugins/job_submit/logging/Makefile.in b/src/plugins/job_submit/logging/Makefile.in
index 9835dc5a1..2cb05bfb0 100644
--- a/src/plugins/job_submit/logging/Makefile.in
+++ b/src/plugins/job_submit/logging/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -138,7 +140,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -175,6 +180,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -232,6 +238,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -267,6 +274,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/job_submit/logging/job_submit_logging.c b/src/plugins/job_submit/logging/job_submit_logging.c
index b48f5ca85..5a8e09d4a 100644
--- a/src/plugins/job_submit/logging/job_submit_logging.c
+++ b/src/plugins/job_submit/logging/job_submit_logging.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -63,8 +63,9 @@
 
 #include <stdio.h>
 
-#include <slurm/slurm.h>
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm.h"
+#include "slurm/slurm_errno.h"
+
 #include "src/common/slurm_xlator.h"
 #include "src/slurmctld/slurmctld.h"
 
diff --git a/src/plugins/job_submit/lua/Makefile.in b/src/plugins/job_submit/lua/Makefile.in
index a8b0debeb..0f5f1764b 100644
--- a/src/plugins/job_submit/lua/Makefile.in
+++ b/src/plugins/job_submit/lua/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -138,7 +140,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -175,6 +180,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -232,6 +238,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -267,6 +274,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/job_submit/lua/job_submit_lua.c b/src/plugins/job_submit/lua/job_submit_lua.c
index 0a7d51b81..91dbb12ed 100644
--- a/src/plugins/job_submit/lua/job_submit_lua.c
+++ b/src/plugins/job_submit/lua/job_submit_lua.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -56,14 +56,13 @@
 #include <dlfcn.h>
 #include <pthread.h>
 
-#include <slurm/slurm.h>
-#include <slurm/slurm_errno.h>
-
 #include <lua.h>
 #include <lauxlib.h>
 #include <lualib.h>
 
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm.h"
+#include "slurm/slurm_errno.h"
+
 #include "src/common/slurm_xlator.h"
 #include "src/slurmctld/slurmctld.h"
 
@@ -259,6 +258,8 @@ static int _get_job_rec_field (lua_State *L)
 		lua_pushstring (L, job_ptr->account);
 	} else if (!strcmp(name, "comment")) {
 		lua_pushstring (L, job_ptr->comment);
+	} else if (!strcmp(name, "direct_set_prio")) {
+		lua_pushnumber (L, job_ptr->direct_set_prio);
 	} else if (!strcmp(name, "gres")) {
 		lua_pushstring (L, job_ptr->gres);
 	} else if (!strcmp(name, "job_id")) {
@@ -287,8 +288,15 @@ static int _get_job_rec_field (lua_State *L)
 			lua_pushnumber (L, job_ptr->details->min_nodes);
 		else
 			lua_pushnumber (L, 0);
+	} else if (!strcmp(name, "nice")) {
+		if (job_ptr->details)
+			lua_pushnumber (L, job_ptr->details->nice);
+		else
+			lua_pushnumber (L, (uint16_t)NO_VAL);
 	} else if (!strcmp(name, "partition")) {
 		lua_pushstring (L, job_ptr->partition);
+	} else if (!strcmp(name, "priority")) {
+		lua_pushnumber (L, job_ptr->priority);
 	} else if (!strcmp(name, "time_limit")) {
 		lua_pushnumber (L, job_ptr->time_limit);
 	} else if (!strcmp(name, "time_min")) {
diff --git a/src/plugins/job_submit/partition/Makefile.in b/src/plugins/job_submit/partition/Makefile.in
index e4d292126..71c5b55d7 100644
--- a/src/plugins/job_submit/partition/Makefile.in
+++ b/src/plugins/job_submit/partition/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -139,7 +141,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -176,6 +181,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -233,6 +239,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -268,6 +275,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/job_submit/partition/job_submit_partition.c b/src/plugins/job_submit/partition/job_submit_partition.c
index d98a1e443..e4a25ee2c 100644
--- a/src/plugins/job_submit/partition/job_submit_partition.c
+++ b/src/plugins/job_submit/partition/job_submit_partition.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -64,7 +64,7 @@
 
 #include <stdio.h>
 
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm_errno.h"
 #include "src/common/slurm_xlator.h"
 #include "src/slurmctld/slurmctld.h"
 
@@ -131,7 +131,7 @@ static bool _user_access(uid_t run_uid, uint32_t submit_uid,
 	return false;		/* User not in AllowGroups */
 }
 
-/* This exampe code will set a job's default partition to the highest
+/* This example code will set a job's default partition to the highest
  * priority partition that is available to this user. This is only an
  * example and tremendous flexibility is available. */
 extern int job_submit(struct job_descriptor *job_desc, uint32_t submit_uid)
diff --git a/src/plugins/jobacct_gather/Makefile.in b/src/plugins/jobacct_gather/Makefile.in
index cfd863fc7..c0e4d2539 100644
--- a/src/plugins/jobacct_gather/Makefile.in
+++ b/src/plugins/jobacct_gather/Makefile.in
@@ -62,6 +62,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -72,6 +73,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -133,7 +135,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -170,6 +175,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -227,6 +233,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -262,6 +269,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/jobacct_gather/aix/Makefile.in b/src/plugins/jobacct_gather/aix/Makefile.in
index f2cc60ae8..ad5a2f69c 100644
--- a/src/plugins/jobacct_gather/aix/Makefile.in
+++ b/src/plugins/jobacct_gather/aix/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -138,7 +140,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -175,6 +180,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -232,6 +238,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -267,6 +274,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/jobacct_gather/aix/jobacct_gather_aix.c b/src/plugins/jobacct_gather/aix/jobacct_gather_aix.c
index ac0156613..13c66b9a1 100644
--- a/src/plugins/jobacct_gather/aix/jobacct_gather_aix.c
+++ b/src/plugins/jobacct_gather/aix/jobacct_gather_aix.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -100,7 +100,7 @@ const uint32_t plugin_version = 100;
 static bool jobacct_shutdown = 0;
 static bool jobacct_suspended = 0;
 static List task_list = NULL;
-static uint32_t cont_id = (uint32_t)NO_VAL;
+static uint64_t cont_id = (uint64_t)NO_VAL;
 static bool pgid_plugin = false;
 
 #ifdef HAVE_AIX
@@ -207,7 +207,7 @@ static void _get_process_data(void)
 	ListIterator itr;
 	ListIterator itr2;
 
-	if(!pgid_plugin && cont_id == (uint32_t)NO_VAL) {
+	if (!pgid_plugin && (cont_id == (uint64_t)NO_VAL)) {
 		debug("cont_id hasn't been set yet not running poll");
 		return;
 	}
@@ -223,8 +223,8 @@ static void _get_process_data(void)
 	if(!pgid_plugin) {
 		/* get only the processes in the proctrack container */
 		slurm_container_get_pids(cont_id, &pids, &npids);
-		if(!npids) {
-			debug4("no pids in this container %d", cont_id);
+		if (!npids) {
+			debug4("no pids in this container %"PRIu64"", cont_id);
 			goto finished;
 		}
 		for (i = 0; i < npids; i++) {
@@ -570,18 +570,18 @@ extern void jobacct_gather_p_resume_poll()
 	jobacct_suspended = false;
 }
 
-extern int jobacct_gather_p_set_proctrack_container_id(uint32_t id)
+extern int jobacct_gather_p_set_proctrack_container_id(uint64_t id)
 {
-	if(pgid_plugin)
+	if (pgid_plugin)
 		return SLURM_SUCCESS;
 
-	if(cont_id != (uint32_t)NO_VAL)
-		info("Warning: jobacct: set_proctrack_container_id: "
-		     "cont_id is already set to %d you are setting it to %d",
-		     cont_id, id);
-	if(id <= 0) {
+	if (cont_id != (uint64_t)NO_VAL)
+		info("Warning: jobacct: set_proctrack_container_id: cont_id "
+		     "is already set to %"PRIu64" you are setting it to "
+		     "%"PRIu64"", cont_id, id);
+	if (id <= 0) {
 		error("jobacct: set_proctrack_container_id: "
-		      "I was given most likely an unset cont_id %d",
+		      "I was given most likely an unset cont_id %"PRIu64"",
 		      id);
 		return SLURM_ERROR;
 	}
diff --git a/src/plugins/jobacct_gather/linux/Makefile.in b/src/plugins/jobacct_gather/linux/Makefile.in
index 5c4ac82e5..e1b0521f5 100644
--- a/src/plugins/jobacct_gather/linux/Makefile.in
+++ b/src/plugins/jobacct_gather/linux/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -139,7 +141,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -176,6 +181,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -233,6 +239,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -268,6 +275,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/jobacct_gather/linux/jobacct_gather_linux.c b/src/plugins/jobacct_gather/linux/jobacct_gather_linux.c
index 0d988073a..811d44aa6 100644
--- a/src/plugins/jobacct_gather/linux/jobacct_gather_linux.c
+++ b/src/plugins/jobacct_gather/linux/jobacct_gather_linux.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -118,7 +118,7 @@ static pthread_mutex_t reading_mutex = PTHREAD_MUTEX_INITIALIZER;
 static bool jobacct_shutdown = 0;
 static bool jobacct_suspended = 0;
 static List task_list = NULL;
-static uint32_t cont_id = (uint32_t)NO_VAL;
+static uint64_t cont_id = (uint64_t)NO_VAL;
 static bool pgid_plugin = false;
 
 /* Finally, pre-define all local routines. */
@@ -211,7 +211,7 @@ static void _get_process_data(void)
 	static int processing = 0;
 	long		hertz;
 
-	if(!pgid_plugin && cont_id == (uint32_t)NO_VAL) {
+	if (!pgid_plugin && (cont_id == (uint64_t)NO_VAL)) {
 		debug("cont_id hasn't been set yet not running poll");
 		return;
 	}
@@ -233,7 +233,7 @@ static void _get_process_data(void)
 		/* get only the processes in the proctrack container */
 		slurm_container_get_pids(cont_id, &pids, &npids);
 		if(!npids) {
-			debug4("no pids in this container %d", cont_id);
+			debug4("no pids in this container %"PRIu64"", cont_id);
 			goto finished;
 		}
 		for (i = 0; i < npids; i++) {
@@ -766,18 +766,18 @@ extern void jobacct_gather_p_resume_poll()
 	jobacct_suspended = false;
 }
 
-extern int jobacct_gather_p_set_proctrack_container_id(uint32_t id)
+extern int jobacct_gather_p_set_proctrack_container_id(uint64_t id)
 {
 	if(pgid_plugin)
 		return SLURM_SUCCESS;
 
-	if(cont_id != (uint32_t)NO_VAL)
-		info("Warning: jobacct: set_proctrack_container_id: "
-		     "cont_id is already set to %d you are setting it to %d",
-		     cont_id, id);
-	if(id <= 0) {
+	if (cont_id != (uint64_t)NO_VAL)
+		info("Warning: jobacct: set_proctrack_container_id: cont_id "
+		     "is already set to %"PRIu64" you are setting it to "
+		     "%"PRIu64"", cont_id, id);
+	if (id <= 0) {
 		error("jobacct: set_proctrack_container_id: "
-		      "I was given most likely an unset cont_id %d",
+		      "I was given most likely an unset cont_id %"PRIu64"",
 		      id);
 		return SLURM_ERROR;
 	}
diff --git a/src/plugins/jobacct_gather/none/Makefile.in b/src/plugins/jobacct_gather/none/Makefile.in
index 447023d4e..40d0f2a2e 100644
--- a/src/plugins/jobacct_gather/none/Makefile.in
+++ b/src/plugins/jobacct_gather/none/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -138,7 +140,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -175,6 +180,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -232,6 +238,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -267,6 +274,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/jobacct_gather/none/jobacct_gather_none.c b/src/plugins/jobacct_gather/none/jobacct_gather_none.c
index e169faa85..def7c8dda 100644
--- a/src/plugins/jobacct_gather/none/jobacct_gather_none.c
+++ b/src/plugins/jobacct_gather/none/jobacct_gather_none.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -155,7 +155,7 @@ extern void jobacct_gather_p_resume_poll()
 	return;
 }
 
-extern int jobacct_gather_p_set_proctrack_container_id(uint32_t id)
+extern int jobacct_gather_p_set_proctrack_container_id(uint64_t id)
 {
 	return SLURM_SUCCESS;
 }
diff --git a/src/plugins/jobcomp/Makefile.in b/src/plugins/jobcomp/Makefile.in
index 05a38d2c0..7468aa587 100644
--- a/src/plugins/jobcomp/Makefile.in
+++ b/src/plugins/jobcomp/Makefile.in
@@ -62,6 +62,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -72,6 +73,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -133,7 +135,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -170,6 +175,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -227,6 +233,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -262,6 +269,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/jobcomp/filetxt/Makefile.in b/src/plugins/jobcomp/filetxt/Makefile.in
index 44a01e8d0..4dd1ee48b 100644
--- a/src/plugins/jobcomp/filetxt/Makefile.in
+++ b/src/plugins/jobcomp/filetxt/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -139,7 +141,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -176,6 +181,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -233,6 +239,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -268,6 +275,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/jobcomp/filetxt/filetxt_jobcomp_process.c b/src/plugins/jobcomp/filetxt/filetxt_jobcomp_process.c
index 014decb0f..184283df2 100644
--- a/src/plugins/jobcomp/filetxt/filetxt_jobcomp_process.c
+++ b/src/plugins/jobcomp/filetxt/filetxt_jobcomp_process.c
@@ -9,7 +9,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/jobcomp/filetxt/filetxt_jobcomp_process.h b/src/plugins/jobcomp/filetxt/filetxt_jobcomp_process.h
index e680af875..cebc5c6e7 100644
--- a/src/plugins/jobcomp/filetxt/filetxt_jobcomp_process.h
+++ b/src/plugins/jobcomp/filetxt/filetxt_jobcomp_process.h
@@ -9,7 +9,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/jobcomp/filetxt/jobcomp_filetxt.c b/src/plugins/jobcomp/filetxt/jobcomp_filetxt.c
index e50dd5d09..014fac7b0 100644
--- a/src/plugins/jobcomp/filetxt/jobcomp_filetxt.c
+++ b/src/plugins/jobcomp/filetxt/jobcomp_filetxt.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/jobcomp/mysql/Makefile.in b/src/plugins/jobcomp/mysql/Makefile.in
index 69a0798d2..3c8bc1cc6 100644
--- a/src/plugins/jobcomp/mysql/Makefile.in
+++ b/src/plugins/jobcomp/mysql/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -147,7 +149,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -184,6 +189,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -241,6 +247,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -276,6 +283,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/jobcomp/mysql/jobcomp_mysql.c b/src/plugins/jobcomp/mysql/jobcomp_mysql.c
index 9d8755075..ca04612bd 100644
--- a/src/plugins/jobcomp/mysql/jobcomp_mysql.c
+++ b/src/plugins/jobcomp/mysql/jobcomp_mysql.c
@@ -9,7 +9,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/jobcomp/mysql/mysql_jobcomp_process.c b/src/plugins/jobcomp/mysql/mysql_jobcomp_process.c
index 04c754a16..f15980632 100644
--- a/src/plugins/jobcomp/mysql/mysql_jobcomp_process.c
+++ b/src/plugins/jobcomp/mysql/mysql_jobcomp_process.c
@@ -9,7 +9,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/jobcomp/mysql/mysql_jobcomp_process.h b/src/plugins/jobcomp/mysql/mysql_jobcomp_process.h
index bb8499a48..14a920845 100644
--- a/src/plugins/jobcomp/mysql/mysql_jobcomp_process.h
+++ b/src/plugins/jobcomp/mysql/mysql_jobcomp_process.h
@@ -9,7 +9,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/jobcomp/none/Makefile.in b/src/plugins/jobcomp/none/Makefile.in
index 32fdc1eec..08fac9f22 100644
--- a/src/plugins/jobcomp/none/Makefile.in
+++ b/src/plugins/jobcomp/none/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -138,7 +140,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -175,6 +180,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -232,6 +238,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -267,6 +274,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/jobcomp/none/jobcomp_none.c b/src/plugins/jobcomp/none/jobcomp_none.c
index 65b108bc0..6994d4f8f 100644
--- a/src/plugins/jobcomp/none/jobcomp_none.c
+++ b/src/plugins/jobcomp/none/jobcomp_none.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -48,7 +48,8 @@
 #endif
 
 #include <stdio.h>
-#include <slurm/slurm_errno.h>
+
+#include "slurm/slurm_errno.h"
 
 #include "src/common/slurm_jobcomp.h"
 #include "src/slurmctld/slurmctld.h"
diff --git a/src/plugins/jobcomp/pgsql/Makefile.in b/src/plugins/jobcomp/pgsql/Makefile.in
index 691f86c1e..b61cdd7d2 100644
--- a/src/plugins/jobcomp/pgsql/Makefile.in
+++ b/src/plugins/jobcomp/pgsql/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -147,7 +149,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -184,6 +189,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -241,6 +247,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -276,6 +283,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/jobcomp/pgsql/jobcomp_pgsql.c b/src/plugins/jobcomp/pgsql/jobcomp_pgsql.c
index ff7d3bb48..c9a215d9a 100644
--- a/src/plugins/jobcomp/pgsql/jobcomp_pgsql.c
+++ b/src/plugins/jobcomp/pgsql/jobcomp_pgsql.c
@@ -9,7 +9,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/jobcomp/pgsql/pgsql_jobcomp_process.c b/src/plugins/jobcomp/pgsql/pgsql_jobcomp_process.c
index 73256531a..faffe6933 100644
--- a/src/plugins/jobcomp/pgsql/pgsql_jobcomp_process.c
+++ b/src/plugins/jobcomp/pgsql/pgsql_jobcomp_process.c
@@ -9,7 +9,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/jobcomp/pgsql/pgsql_jobcomp_process.h b/src/plugins/jobcomp/pgsql/pgsql_jobcomp_process.h
index ab5a67eb6..9079bd0b3 100644
--- a/src/plugins/jobcomp/pgsql/pgsql_jobcomp_process.h
+++ b/src/plugins/jobcomp/pgsql/pgsql_jobcomp_process.h
@@ -9,7 +9,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/jobcomp/script/Makefile.in b/src/plugins/jobcomp/script/Makefile.in
index 0c3a09713..d2ce5dfa1 100644
--- a/src/plugins/jobcomp/script/Makefile.in
+++ b/src/plugins/jobcomp/script/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -138,7 +140,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -175,6 +180,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -232,6 +238,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -267,6 +274,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/jobcomp/script/jobcomp_script.c b/src/plugins/jobcomp/script/jobcomp_script.c
index c55d35b20..12ed94b40 100644
--- a/src/plugins/jobcomp/script/jobcomp_script.c
+++ b/src/plugins/jobcomp/script/jobcomp_script.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -85,8 +85,9 @@
 #include <sys/wait.h>
 #include <unistd.h>
 #include <pthread.h>
-#include <slurm/slurm.h>
-#include <slurm/slurm_errno.h>
+
+#include "slurm/slurm.h"
+#include "slurm/slurm_errno.h"
 
 #include "src/common/slurm_jobcomp.h"
 #include "src/common/slurm_protocol_defs.h"
diff --git a/src/plugins/mpi/Makefile.in b/src/plugins/mpi/Makefile.in
index d8582e7ae..ece688255 100644
--- a/src/plugins/mpi/Makefile.in
+++ b/src/plugins/mpi/Makefile.in
@@ -62,6 +62,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -72,6 +73,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -133,7 +135,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -170,6 +175,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -227,6 +233,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -262,6 +269,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/mpi/lam/Makefile.in b/src/plugins/mpi/lam/Makefile.in
index a01d5b18d..9ed14264e 100644
--- a/src/plugins/mpi/lam/Makefile.in
+++ b/src/plugins/mpi/lam/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -138,7 +140,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -175,6 +180,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -232,6 +238,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -267,6 +274,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/mpi/lam/lam.h b/src/plugins/mpi/lam/lam.h
index 837fdba45..d1863802b 100644
--- a/src/plugins/mpi/lam/lam.h
+++ b/src/plugins/mpi/lam/lam.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/mpi/lam/mpi_lam.c b/src/plugins/mpi/lam/mpi_lam.c
index 66f7ff10e..835377d2b 100644
--- a/src/plugins/mpi/lam/mpi_lam.c
+++ b/src/plugins/mpi/lam/mpi_lam.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -45,7 +45,7 @@
 #include <signal.h>
 #include <sys/types.h>
 
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm_errno.h"
 #include "src/common/slurm_xlator.h"
 #include "src/plugins/mpi/lam/lam.h"
 
diff --git a/src/plugins/mpi/mpich1_p4/Makefile.in b/src/plugins/mpi/mpich1_p4/Makefile.in
index 3e11d18b1..a3f3ba9f3 100644
--- a/src/plugins/mpi/mpich1_p4/Makefile.in
+++ b/src/plugins/mpi/mpich1_p4/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -138,7 +140,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -175,6 +180,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -232,6 +238,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -267,6 +274,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/mpi/mpich1_p4/mpich1_p4.c b/src/plugins/mpi/mpich1_p4/mpich1_p4.c
index f2da54e56..201960f73 100644
--- a/src/plugins/mpi/mpich1_p4/mpich1_p4.c
+++ b/src/plugins/mpi/mpich1_p4/mpich1_p4.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -47,7 +47,7 @@
 #include <sys/types.h>
 #include <sys/socket.h>
 
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm_errno.h"
 #include "src/common/env.h"
 #include "src/common/fd.h"
 #include "src/common/hostlist.h"
diff --git a/src/plugins/mpi/mpich1_shmem/Makefile.in b/src/plugins/mpi/mpich1_shmem/Makefile.in
index 87e61b9cb..0bd91cbca 100644
--- a/src/plugins/mpi/mpich1_shmem/Makefile.in
+++ b/src/plugins/mpi/mpich1_shmem/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -138,7 +140,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -175,6 +180,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -232,6 +238,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -267,6 +274,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/mpi/mpich1_shmem/mpich1_shmem.c b/src/plugins/mpi/mpich1_shmem/mpich1_shmem.c
index 3ea264525..bc1e2f89c 100644
--- a/src/plugins/mpi/mpich1_shmem/mpich1_shmem.c
+++ b/src/plugins/mpi/mpich1_shmem/mpich1_shmem.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -45,7 +45,7 @@
 #include <signal.h>
 #include <sys/types.h>
 
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm_errno.h"
 #include "src/common/slurm_xlator.h"
 #include "src/plugins/mpi/lam/lam.h"
 
diff --git a/src/plugins/mpi/mpichgm/Makefile.in b/src/plugins/mpi/mpichgm/Makefile.in
index 7a812f021..f912bc1cf 100644
--- a/src/plugins/mpi/mpichgm/Makefile.in
+++ b/src/plugins/mpi/mpichgm/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -138,7 +140,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -175,6 +180,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -232,6 +238,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -267,6 +274,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/mpi/mpichgm/mpi_mpichgm.c b/src/plugins/mpi/mpichgm/mpi_mpichgm.c
index 5bc3b92ef..422718a36 100644
--- a/src/plugins/mpi/mpichgm/mpi_mpichgm.c
+++ b/src/plugins/mpi/mpichgm/mpi_mpichgm.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -46,7 +46,7 @@
 #include <stdlib.h>
 #include <sys/types.h>
 
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm_errno.h"
 #include "src/common/slurm_xlator.h"
 #include "src/plugins/mpi/mpichgm/mpichgm.h"
 
diff --git a/src/plugins/mpi/mpichgm/mpichgm.c b/src/plugins/mpi/mpichgm/mpichgm.c
index dd777cd37..5067a5fe5 100644
--- a/src/plugins/mpi/mpichgm/mpichgm.c
+++ b/src/plugins/mpi/mpichgm/mpichgm.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -297,10 +297,8 @@ static void _gmpi_wait_abort(gmpi_state_t *st)
 static void *_gmpi_thr(void *arg)
 {
 	gmpi_state_t *st;
-	mpi_plugin_client_info_t *job;
 
 	st = (gmpi_state_t *) arg;
-	job = st->job;
 
 	debug3("GMPI master thread pid=%lu", (unsigned long) getpid());
 	_gmpi_establish_map(st);
diff --git a/src/plugins/mpi/mpichgm/mpichgm.h b/src/plugins/mpi/mpichgm/mpichgm.h
index 19e0cbe55..1cce496b3 100644
--- a/src/plugins/mpi/mpichgm/mpichgm.h
+++ b/src/plugins/mpi/mpichgm/mpichgm.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/mpi/mpichmx/Makefile.in b/src/plugins/mpi/mpichmx/Makefile.in
index 8985aec84..e4754aeac 100644
--- a/src/plugins/mpi/mpichmx/Makefile.in
+++ b/src/plugins/mpi/mpichmx/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -138,7 +140,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -175,6 +180,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -232,6 +238,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -267,6 +274,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/mpi/mpichmx/mpi_mpichmx.c b/src/plugins/mpi/mpichmx/mpi_mpichmx.c
index 0dc6954b1..f13b69816 100644
--- a/src/plugins/mpi/mpichmx/mpi_mpichmx.c
+++ b/src/plugins/mpi/mpichmx/mpi_mpichmx.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -45,7 +45,7 @@
 #include <stdlib.h>
 #include <sys/types.h>
 
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm_errno.h"
 #include "src/common/slurm_xlator.h"
 #include "src/plugins/mpi/mpichmx/mpichmx.h"
 
diff --git a/src/plugins/mpi/mpichmx/mpichmx.c b/src/plugins/mpi/mpichmx/mpichmx.c
index b0fa09895..0d4ae97ad 100644
--- a/src/plugins/mpi/mpichmx/mpichmx.c
+++ b/src/plugins/mpi/mpichmx/mpichmx.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -300,10 +300,8 @@ static void _gmpi_wait_abort(gmpi_state_t *st)
 static void *_gmpi_thr(void *arg)
 {
 	gmpi_state_t *st;
-	mpi_plugin_client_info_t *job;
 
 	st = (gmpi_state_t *) arg;
-	job = st->job;
 
 	debug3("GMPI master thread pid=%lu", (unsigned long) getpid());
 	_gmpi_establish_map(st);
diff --git a/src/plugins/mpi/mpichmx/mpichmx.h b/src/plugins/mpi/mpichmx/mpichmx.h
index 087ab3a72..dee9fcf97 100644
--- a/src/plugins/mpi/mpichmx/mpichmx.h
+++ b/src/plugins/mpi/mpichmx/mpichmx.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/mpi/mvapich/Makefile.in b/src/plugins/mpi/mvapich/Makefile.in
index 152587960..19e6d3300 100644
--- a/src/plugins/mpi/mvapich/Makefile.in
+++ b/src/plugins/mpi/mvapich/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -138,7 +140,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -175,6 +180,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -232,6 +238,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -267,6 +274,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/mpi/mvapich/mpi_mvapich.c b/src/plugins/mpi/mvapich/mpi_mvapich.c
index c3fdea957..0b33ead35 100644
--- a/src/plugins/mpi/mvapich/mpi_mvapich.c
+++ b/src/plugins/mpi/mvapich/mpi_mvapich.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -46,7 +46,7 @@
 #include <signal.h>
 #include <sys/types.h>
 
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm_errno.h"
 #include "src/common/slurm_xlator.h"
 #include "src/plugins/mpi/mvapich/mvapich.h"
 
diff --git a/src/plugins/mpi/mvapich/mvapich.c b/src/plugins/mpi/mvapich/mvapich.c
index d9805db35..ec426bcd6 100644
--- a/src/plugins/mpi/mvapich/mvapich.c
+++ b/src/plugins/mpi/mvapich/mvapich.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -425,7 +425,7 @@ static int mvapich_poll_internal (struct mvapich_poll *mp)
 {
 	int n;
 	while ((n = poll (mp->fds, mp->nfds, startup_timeout (mp->st))) < 0) {
-		if (errno != EINTR && errno != EAGAIN)
+		if ((errno != EINTR) && (errno != EAGAIN))
 			return (-1);
 	}
 	return (n);
@@ -480,9 +480,10 @@ again:
 
 		mvapich_debug3 ("mvapich_poll_next (nfds=%d, timeout=%d)",
 				mp->nfds, startup_timeout (st));
-		if ((rc = mvapich_poll_internal (mp)) < 0)
+		if ((rc = mvapich_poll_internal (mp)) < 0) {
 			mvapich_terminate_job (st, "mvapich_poll_next: %m");
-		else if (rc == 0) {
+			return (NULL);
+		} else if (rc == 0) {
 			/*
 			 *  If we timed out, then report all tasks that we were
 			 *   still waiting for.
diff --git a/src/plugins/mpi/mvapich/mvapich.h b/src/plugins/mpi/mvapich/mvapich.h
index e0a115f0d..63ba7d3fd 100644
--- a/src/plugins/mpi/mvapich/mvapich.h
+++ b/src/plugins/mpi/mvapich/mvapich.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/mpi/none/Makefile.in b/src/plugins/mpi/none/Makefile.in
index 6f8e0cdd4..402267feb 100644
--- a/src/plugins/mpi/none/Makefile.in
+++ b/src/plugins/mpi/none/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -138,7 +140,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -175,6 +180,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -232,6 +238,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -267,6 +274,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/mpi/none/mpi_none.c b/src/plugins/mpi/none/mpi_none.c
index d2cb9c5f7..514d2376b 100644
--- a/src/plugins/mpi/none/mpi_none.c
+++ b/src/plugins/mpi/none/mpi_none.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -45,7 +45,7 @@
 #include <signal.h>
 #include <sys/types.h>
 
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm_errno.h"
 
 #include "src/common/slurm_xlator.h"
 #include "src/common/mpi.h"
diff --git a/src/plugins/mpi/openmpi/Makefile.in b/src/plugins/mpi/openmpi/Makefile.in
index e63c8b3c8..3b4b64227 100644
--- a/src/plugins/mpi/openmpi/Makefile.in
+++ b/src/plugins/mpi/openmpi/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -138,7 +140,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -175,6 +180,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -232,6 +238,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -267,6 +274,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/mpi/openmpi/mpi_openmpi.c b/src/plugins/mpi/openmpi/mpi_openmpi.c
index b724c6cd3..c7e9ea55f 100644
--- a/src/plugins/mpi/openmpi/mpi_openmpi.c
+++ b/src/plugins/mpi/openmpi/mpi_openmpi.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -45,7 +45,7 @@
 #include <signal.h>
 #include <sys/types.h>
 
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm_errno.h"
 
 #include "src/common/slurm_xlator.h"
 #include "src/common/mpi.h"
diff --git a/src/plugins/preempt/Makefile.in b/src/plugins/preempt/Makefile.in
index b2fe94e9a..342308abe 100644
--- a/src/plugins/preempt/Makefile.in
+++ b/src/plugins/preempt/Makefile.in
@@ -62,6 +62,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -72,6 +73,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -133,7 +135,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -170,6 +175,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -227,6 +233,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -262,6 +269,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/preempt/none/Makefile.in b/src/plugins/preempt/none/Makefile.in
index 4d9df3d61..ba5cb05c3 100644
--- a/src/plugins/preempt/none/Makefile.in
+++ b/src/plugins/preempt/none/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -138,7 +140,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -175,6 +180,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -232,6 +238,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -267,6 +274,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/preempt/none/preempt_none.c b/src/plugins/preempt/none/preempt_none.c
index 5c4a9aa11..72976f8cd 100644
--- a/src/plugins/preempt/none/preempt_none.c
+++ b/src/plugins/preempt/none/preempt_none.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -38,7 +38,8 @@
 \*****************************************************************************/
 
 #include <stdio.h>
-#include <slurm/slurm_errno.h>
+
+#include "slurm/slurm_errno.h"
 
 #include "src/common/bitstring.h"
 #include "src/common/list.h"
diff --git a/src/plugins/preempt/partition_prio/Makefile.in b/src/plugins/preempt/partition_prio/Makefile.in
index 09705174a..8f9ba0735 100644
--- a/src/plugins/preempt/partition_prio/Makefile.in
+++ b/src/plugins/preempt/partition_prio/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -140,7 +142,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -177,6 +182,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -234,6 +240,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -269,6 +276,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/preempt/partition_prio/preempt_partition_prio.c b/src/plugins/preempt/partition_prio/preempt_partition_prio.c
index 2c7238bda..b676428df 100644
--- a/src/plugins/preempt/partition_prio/preempt_partition_prio.c
+++ b/src/plugins/preempt/partition_prio/preempt_partition_prio.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -39,7 +39,8 @@
 \*****************************************************************************/
 
 #include <stdio.h>
-#include <slurm/slurm_errno.h>
+
+#include "slurm/slurm_errno.h"
 
 #include "src/common/bitstring.h"
 #include "src/common/list.h"
@@ -114,6 +115,9 @@ extern List find_preemptable_jobs(struct job_record *job_ptr)
 		    (bit_overlap(job_p->node_bitmap,
 				 job_ptr->part_ptr->node_bitmap) == 0))
 			continue;
+		if (job_ptr->details &&
+		    (job_ptr->details->expanding_jobid == job_p->job_id))
+			continue;
 
 		/* This job is a preemption candidate */
 		if (preemptee_job_list == NULL) {
diff --git a/src/plugins/preempt/qos/Makefile.in b/src/plugins/preempt/qos/Makefile.in
index 4d3fbb0f8..d2c773c91 100644
--- a/src/plugins/preempt/qos/Makefile.in
+++ b/src/plugins/preempt/qos/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -138,7 +140,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -175,6 +180,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -232,6 +238,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -267,6 +274,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/preempt/qos/preempt_qos.c b/src/plugins/preempt/qos/preempt_qos.c
index e3b01c505..94047ab02 100644
--- a/src/plugins/preempt/qos/preempt_qos.c
+++ b/src/plugins/preempt/qos/preempt_qos.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -39,7 +39,8 @@
 \*****************************************************************************/
 
 #include <stdio.h>
-#include <slurm/slurm_errno.h>
+
+#include "slurm/slurm_errno.h"
 
 #include "src/common/bitstring.h"
 #include "src/common/list.h"
@@ -112,11 +113,14 @@ extern List find_preemptable_jobs(struct job_record *job_ptr)
 			continue;
 		if (!_qos_preemptable(job_p, job_ptr))
 			continue;
-
 		if ((job_p->node_bitmap == NULL) ||
 		    (bit_overlap(job_p->node_bitmap,
 				 job_ptr->part_ptr->node_bitmap) == 0))
 			continue;
+		if (job_ptr->details &&
+		    (job_ptr->details->expanding_jobid == job_p->job_id))
+			continue;
+
 		/* This job is a preemption candidate */
 		if (preemptee_job_list == NULL) {
 			preemptee_job_list = list_create(NULL);
diff --git a/src/plugins/priority/Makefile.in b/src/plugins/priority/Makefile.in
index 2f48b66d9..34b4df605 100644
--- a/src/plugins/priority/Makefile.in
+++ b/src/plugins/priority/Makefile.in
@@ -60,6 +60,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -70,6 +71,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -131,7 +133,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -168,6 +173,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -225,6 +231,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -260,6 +267,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/priority/basic/Makefile.in b/src/plugins/priority/basic/Makefile.in
index 69f6f00a0..d49201f96 100644
--- a/src/plugins/priority/basic/Makefile.in
+++ b/src/plugins/priority/basic/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -138,7 +140,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -175,6 +180,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -232,6 +238,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -267,6 +274,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/priority/basic/priority_basic.c b/src/plugins/priority/basic/priority_basic.c
index 82bb30ed7..f82d7404d 100644
--- a/src/plugins/priority/basic/priority_basic.c
+++ b/src/plugins/priority/basic/priority_basic.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -48,9 +48,10 @@
 #endif
 
 #include <stdio.h>
-#include <slurm/slurm_errno.h>
 #include <math.h>
 
+#include "slurm/slurm_errno.h"
+
 #include "src/common/slurm_priority.h"
 
 /*
@@ -109,16 +110,16 @@ extern uint32_t priority_p_set(uint32_t last_prio, struct job_record *job_ptr)
 {
 	uint32_t new_prio = 1;
 
-	if(job_ptr->direct_set_prio)
+	if (job_ptr->direct_set_prio && (job_ptr->priority > 1))
 		return job_ptr->priority;
 
-	if(last_prio >= 2)
+	if (last_prio >= 2)
 		new_prio = (last_prio - 1);
 
-	if(job_ptr->details)
+	if (job_ptr->details)
 		new_prio -= (job_ptr->details->nice - NICE_OFFSET);
 
-	if(new_prio < 1)
+	if (new_prio < 1)
 		new_prio = 1;
 
 	return new_prio;
@@ -126,7 +127,6 @@ extern uint32_t priority_p_set(uint32_t last_prio, struct job_record *job_ptr)
 
 extern void priority_p_reconfig(void)
 {
-
 	return;
 }
 
@@ -139,15 +139,14 @@ extern double priority_p_calc_fs_factor(long double usage_efctv,
 					long double shares_norm)
 {
 	/* This calculation is needed for sshare when ran from a
-	   non-multifactor machine to a multifactor machine.  It
-	   doesn't do anything on regular systems, it should always
-	   return 0 since shares_norm will always be NO_VAL.
-	*/
+	 * non-multifactor machine to a multifactor machine.  It
+	 * doesn't do anything on regular systems, it should always
+	 * return 0 since shares_norm will always be NO_VAL. */
 	double priority_fs;
 
-	xassert(usage_efctv != (long double)NO_VAL);
+	xassert(!fuzzy_equal(usage_efctv, NO_VAL));
 
-	if ((shares_norm <= 0.0) || (shares_norm == (long double)NO_VAL))
+	if ((shares_norm <= 0.0) || fuzzy_equal(shares_norm, NO_VAL))
 		priority_fs = 0.0;
 	else
 		priority_fs = pow(2.0, -(usage_efctv / shares_norm));
diff --git a/src/plugins/priority/multifactor/Makefile.in b/src/plugins/priority/multifactor/Makefile.in
index 8a2f29cfb..710a2a769 100644
--- a/src/plugins/priority/multifactor/Makefile.in
+++ b/src/plugins/priority/multifactor/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -139,7 +141,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -176,6 +181,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -233,6 +239,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -268,6 +275,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/priority/multifactor/priority_multifactor.c b/src/plugins/priority/multifactor/priority_multifactor.c
index 5471131d7..04eaf2604 100644
--- a/src/plugins/priority/multifactor/priority_multifactor.c
+++ b/src/plugins/priority/multifactor/priority_multifactor.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -53,8 +53,9 @@
 #include <sys/stat.h>
 #include <stdio.h>
 #include <fcntl.h>
-#include <slurm/slurm_errno.h>
+
 #include <math.h>
+#include "slurm/slurm_errno.h"
 
 #include "src/common/slurm_priority.h"
 #include "src/common/xstring.h"
@@ -419,7 +420,7 @@ static double _get_fairshare_priority( struct job_record *job_ptr)
 		fs_assoc = fs_assoc->usage->parent_assoc_ptr;
 	}
 
-	if (fs_assoc->usage->usage_efctv == (long double) NO_VAL)
+	if (fuzzy_equal(fs_assoc->usage->usage_efctv, NO_VAL))
 		priority_p_set_assoc_usage(fs_assoc);
 
 	/* Priority is 0 -> 1 */
@@ -467,7 +468,7 @@ static void _get_priority_factors(time_t start_time, struct job_record *job_ptr)
 
 	if (job_ptr->assoc_ptr && weight_fs) {
 		job_ptr->prio_factors->priority_fs =
-				_get_fairshare_priority(job_ptr);
+			_get_fairshare_priority(job_ptr);
 	}
 
 	if (weight_js) {
@@ -530,7 +531,7 @@ static uint32_t _get_priority_internal(time_t start_time,
 	double priority		= 0.0;
 	priority_factors_object_t pre_factors;
 
-	if (job_ptr->direct_set_prio)
+	if (job_ptr->direct_set_prio && (job_ptr->priority > 1))
 		return job_ptr->priority;
 
 	if (!job_ptr->details) {
@@ -549,7 +550,7 @@ static uint32_t _get_priority_internal(time_t start_time,
 	/* figure out the priority */
 	_get_priority_factors(start_time, job_ptr);
 	memcpy(&pre_factors, job_ptr->prio_factors,
-	       sizeof(job_ptr->prio_factors));
+	       sizeof(priority_factors_object_t));
 
 	job_ptr->prio_factors->priority_age *= (double)weight_age;
 	job_ptr->prio_factors->priority_fs *= (double)weight_fs;
@@ -661,6 +662,245 @@ static time_t _next_reset(uint16_t reset_period, time_t last_reset)
 	return mktime(&last_tm);
 }
 
+/*
+  Remove previously used time from qos and assocs
+  grp_used_cpu_run_secs.
+
+  When restarting slurmctld acct_policy_job_begin() is called for all
+  running jobs. There every jobs total requested cputime (total_cpus *
+  time_limit) is added to grp_used_cpu_run_secs of assocs and qos.
+
+  This function will subtract all cputime that was used until the
+  decay thread last ran. This kludge is necessary as the decay thread
+  last_ran variable can't be accessed from acct_policy_job_begin().
+*/
+void _init_grp_used_cpu_run_secs(time_t last_ran)
+{
+	struct job_record *job_ptr = NULL;
+	ListIterator itr;
+	assoc_mgr_lock_t locks = { WRITE_LOCK, NO_LOCK,
+				   WRITE_LOCK, NO_LOCK, NO_LOCK };
+	slurmctld_lock_t job_read_lock =
+		{ NO_LOCK, READ_LOCK, NO_LOCK, NO_LOCK };
+	uint64_t delta;
+	slurmdb_qos_rec_t *qos;
+	slurmdb_association_rec_t *assoc;
+
+	if(priority_debug)
+		info("Initializing grp_used_cpu_run_secs");
+
+	if (!(job_list && list_count(job_list)))
+		return;
+
+	lock_slurmctld(job_read_lock);
+	itr = list_iterator_create(job_list);
+	if (itr == NULL)
+		fatal("list_iterator_create: malloc failure");
+
+	while ((job_ptr = list_next(itr))) {
+		if (priority_debug)
+			debug2("job: %u",job_ptr->job_id);
+		qos = NULL;
+		assoc = NULL;
+		delta = 0;
+
+		if (!IS_JOB_RUNNING(job_ptr))
+			continue;
+
+		if (job_ptr->start_time > last_ran)
+			continue;
+
+		delta = job_ptr->total_cpus * (last_ran - job_ptr->start_time);
+
+		assoc_mgr_lock(&locks);
+		qos = (slurmdb_qos_rec_t *) job_ptr->qos_ptr;
+		assoc = (slurmdb_association_rec_t *) job_ptr->assoc_ptr;
+
+		if(qos) {
+			if (priority_debug)
+				info("Subtracting %"PRIu64" from qos "
+				     "%u grp_used_cpu_run_secs "
+				     "%"PRIu64" = %"PRIu64"",
+				     delta,
+				     qos->id,
+				     qos->usage->grp_used_cpu_run_secs,
+				     qos->usage->grp_used_cpu_run_secs -
+				     delta);
+			qos->usage->grp_used_cpu_run_secs -= delta;
+		}
+		while (assoc) {
+			if (priority_debug)
+				info("Subtracting %"PRIu64" from assoc %u "
+				     "grp_used_cpu_run_secs "
+				     "%"PRIu64" = %"PRIu64"",
+				     delta,
+				     assoc->id,
+				     assoc->usage->grp_used_cpu_run_secs,
+				     assoc->usage->grp_used_cpu_run_secs -
+				     delta);
+			assoc->usage->grp_used_cpu_run_secs -= delta;
+			assoc = assoc->usage->parent_assoc_ptr;
+		}
+		assoc_mgr_unlock(&locks);
+	}
+	list_iterator_destroy(itr);
+	unlock_slurmctld(job_read_lock);
+}
+
+/* If the job is running then apply decay to the job.
+ *
+ * Return 0 if we don't need to process the job any further, 1 if
+ * futher processing is needed.
+ */
+static int _apply_new_usage(struct job_record *job_ptr, double decay_factor,
+			    time_t start_period, time_t end_period)
+{
+	slurmdb_qos_rec_t *qos;
+	slurmdb_association_rec_t *assoc;
+	int run_delta = 0;
+	double run_decay = 0.0, real_decay = 0.0;
+	uint64_t cpu_run_delta = 0;
+	uint64_t job_time_limit_ends = 0;
+	assoc_mgr_lock_t locks = { WRITE_LOCK, NO_LOCK,
+				   WRITE_LOCK, NO_LOCK, NO_LOCK };
+	assoc_mgr_lock_t qos_read_lock = { NO_LOCK, NO_LOCK,
+					   READ_LOCK, NO_LOCK, NO_LOCK };
+
+	/* If usage_factor is 0 just skip this
+	   since we don't add the usage.
+	*/
+	assoc_mgr_lock(&qos_read_lock);
+	qos = (slurmdb_qos_rec_t *)job_ptr->qos_ptr;
+	if (qos && !qos->usage_factor) {
+		assoc_mgr_unlock(&qos_read_lock);
+		return 0;
+	}
+	assoc_mgr_unlock(&qos_read_lock);
+
+	if (job_ptr->start_time > start_period)
+		start_period = job_ptr->start_time;
+
+	if (job_ptr->end_time
+	    && (end_period > job_ptr->end_time))
+		end_period = job_ptr->end_time;
+
+	run_delta = (int) (end_period - start_period);
+
+	/* job already has been accounted for
+	   go to next */
+	if (run_delta < 1)
+		return 0;
+
+	/* cpu_run_delta will is used to
+	   decrease qos and assocs
+	   grp_used_cpu_run_secs values. When
+	   a job is started only seconds until
+	   start_time+time_limit is added, so
+	   for jobs running over their
+	   timelimit we should only subtract
+	   the used time until the time
+	   limit. */
+	job_time_limit_ends =
+		(uint64_t)job_ptr->start_time +
+		(uint64_t)job_ptr->time_limit * 60;
+
+	if ((uint64_t)start_period  >= job_time_limit_ends)
+		cpu_run_delta = 0;
+	else if (end_period > job_time_limit_ends)
+		cpu_run_delta = job_ptr->total_cpus *
+			(job_time_limit_ends - (uint64_t)start_period);
+	else
+		cpu_run_delta = job_ptr->total_cpus * run_delta;
+
+	if (priority_debug)
+		info("job %u ran for %d seconds on %u cpus",
+		     job_ptr->job_id, run_delta, job_ptr->total_cpus);
+
+	/* get the time in decayed fashion */
+	run_decay = run_delta * pow(decay_factor, (double)run_delta);
+
+	real_decay = run_decay * (double)job_ptr->total_cpus;
+
+	assoc_mgr_lock(&locks);
+	/* Just to make sure we don't make a
+	   window where the qos_ptr could of
+	   changed make sure we get it again
+	   here.
+	*/
+	qos = (slurmdb_qos_rec_t *)job_ptr->qos_ptr;
+	assoc = (slurmdb_association_rec_t *)job_ptr->assoc_ptr;
+
+	/* now apply the usage factor for this qos */
+	if (qos) {
+		if (qos->usage_factor >= 0) {
+			real_decay *= qos->usage_factor;
+			run_decay *= qos->usage_factor;
+		}
+		qos->usage->grp_used_wall += run_decay;
+		qos->usage->usage_raw += (long double)real_decay;
+		if (qos->usage->grp_used_cpu_run_secs >= cpu_run_delta) {
+			if (priority_debug)
+				info("grp_used_cpu_run_secs is %"PRIu64", "
+				     "will subtract %"PRIu64"",
+				     qos->usage->grp_used_cpu_run_secs,
+				     cpu_run_delta);
+			qos->usage->grp_used_cpu_run_secs -= cpu_run_delta;
+		} else {
+			if (priority_debug)
+				info("jobid %u, qos %s: setting "
+				     "grp_used_cpu_run_secs "
+				     "to 0 because %"PRIu64" < %"PRIu64"",
+				     job_ptr->job_id, qos->name,
+				     qos->usage->grp_used_cpu_run_secs,
+				     cpu_run_delta);
+			qos->usage->grp_used_cpu_run_secs = 0;
+		}
+	}
+
+	/* We want to do this all the way up
+	   to and including root.  This way we
+	   can keep track of how much usage
+	   has occured on the entire system
+	   and use that to normalize against.
+	*/
+	while (assoc) {
+		if (assoc->usage->grp_used_cpu_run_secs >= cpu_run_delta) {
+			if(priority_debug)
+				info("grp_used_cpu_run_secs is %"PRIu64", "
+				     "will subtract %"PRIu64"",
+				     assoc->usage->grp_used_cpu_run_secs,
+				     cpu_run_delta);
+			assoc->usage->grp_used_cpu_run_secs -= cpu_run_delta;
+		} else {
+			if (priority_debug)
+				info("jobid %u, assoc %u: setting "
+				     "grp_used_cpu_run_secs "
+				     "to 0 because %"PRIu64" < %"PRIu64"",
+				     job_ptr->job_id, assoc->id,
+				     assoc->usage->grp_used_cpu_run_secs,
+				     cpu_run_delta);
+			assoc->usage->grp_used_cpu_run_secs = 0;
+		}
+
+		assoc->usage->grp_used_wall += run_decay;
+		assoc->usage->usage_raw += (long double)real_decay;
+		if (priority_debug)
+			info("adding %f new usage to assoc %u (user='%s' "
+			     "acct='%s') raw usage is now %Lf.  Group wall "
+			     "added %f making it %f. GrpCPURunMins is "
+			     "%"PRIu64"",
+			     real_decay, assoc->id,
+			     assoc->user, assoc->acct,
+			     assoc->usage->usage_raw,
+			     run_decay,
+			     assoc->usage->grp_used_wall,
+			     assoc->usage->grp_used_cpu_run_secs/60);
+		assoc = assoc->usage->parent_assoc_ptr;
+	}
+	assoc_mgr_unlock(&locks);
+	return 1;
+}
+
 static void *_decay_thread(void *no_data)
 {
 	struct job_record *job_ptr = NULL;
@@ -680,7 +920,7 @@ static void *_decay_thread(void *no_data)
 	slurmctld_lock_t job_write_lock =
 		{ NO_LOCK, WRITE_LOCK, READ_LOCK, READ_LOCK };
 	assoc_mgr_lock_t locks = { WRITE_LOCK, NO_LOCK,
-				   WRITE_LOCK, NO_LOCK, NO_LOCK };
+				   NO_LOCK, NO_LOCK, NO_LOCK };
 
 	if (decay_hl > 0)
 		decay_factor = 1 - (0.693 / decay_hl);
@@ -699,6 +939,8 @@ static void *_decay_thread(void *no_data)
 	if (last_reset == 0)
 		last_reset = start_time;
 
+	_init_grp_used_cpu_run_secs(last_ran);
+
 	while (1) {
 		time_t now = time(NULL);
 		int run_delta = 0;
@@ -783,98 +1025,9 @@ static void *_decay_thread(void *no_data)
 			/* apply new usage */
 			if (!IS_JOB_PENDING(job_ptr) &&
 			    job_ptr->start_time && job_ptr->assoc_ptr) {
-				slurmdb_qos_rec_t *qos;
-				slurmdb_association_rec_t *assoc;
-				time_t start_period = last_ran;
-				time_t end_period = start_time;
-				double run_decay = 0;
-				assoc_mgr_lock_t qos_read_lock =
-					{ NO_LOCK, NO_LOCK,
-					  READ_LOCK, NO_LOCK, NO_LOCK };
-
-				/* If usage_factor is 0 just skip this
-				   since we don't add the usage.
-				*/
-				assoc_mgr_lock(&qos_read_lock);
-				qos = (slurmdb_qos_rec_t *)job_ptr->qos_ptr;
-				if (qos && !qos->usage_factor) {
-					assoc_mgr_unlock(&qos_read_lock);
-					continue;
-				}
-				assoc_mgr_unlock(&qos_read_lock);
-
-				if (job_ptr->start_time > start_period)
-					start_period = job_ptr->start_time;
-
-				if (job_ptr->end_time
-				    && (end_period > job_ptr->end_time))
-					end_period = job_ptr->end_time;
-
-				run_delta = (int)end_period - (int)start_period;
-
-				/* job already has been accounted for
-				   go to next */
-				if (run_delta < 1)
+				if (!_apply_new_usage(job_ptr, decay_factor,
+						      last_ran, start_time))
 					continue;
-
-				if (priority_debug)
-					info("job %u ran for %d seconds",
-					     job_ptr->job_id, run_delta);
-
-				/* get the time in decayed fashion */
-				run_decay = run_delta
-					* pow(decay_factor, (double)run_delta);
-
-				real_decay = run_decay
-					* (double)job_ptr->total_cpus;
-
-				assoc_mgr_lock(&locks);
-				/* Just to make sure we don't make a
-				   window where the qos_ptr could of
-				   changed make sure we get it again
-				   here.
-				*/
-				qos = (slurmdb_qos_rec_t *)job_ptr->qos_ptr;
-				assoc = (slurmdb_association_rec_t *)
-					job_ptr->assoc_ptr;
-				/* now apply the usage factor for this
-				   qos */
-				if (qos) {
-					if (qos->usage_factor >= 0) {
-						real_decay *= qos->usage_factor;
-						run_decay *= qos->usage_factor;
-					}
-					qos->usage->grp_used_wall += run_decay;
-					qos->usage->usage_raw +=
-						(long double)real_decay;
-				}
-
-				/* We want to do this all the way up
-				   to and including root.  This way we
-				   can keep track of how much usage
-				   has occured on the entire system
-				   and use that to normalize against.
-				*/
-				while (assoc) {
-					assoc->usage->grp_used_wall +=
-						run_decay;
-					assoc->usage->usage_raw +=
-						(long double)real_decay;
-					if (priority_debug)
-						info("adding %f new usage to "
-						     "assoc %u (user='%s' "
-						     "acct='%s') raw usage "
-						     "is now %Lf.  Group wall "
-						     "added %f making it %f.",
-						     real_decay, assoc->id,
-						     assoc->user, assoc->acct,
-						     assoc->usage->usage_raw,
-						     run_decay,
-						     assoc->usage->
-						     grp_used_wall);
-					assoc = assoc->usage->parent_assoc_ptr;
-				}
-				assoc_mgr_unlock(&locks);
 			}
 
 			/*
@@ -898,12 +1051,10 @@ static void *_decay_thread(void *no_data)
 
 	get_usage:
 		/* now calculate all the normalized usage here */
-		locks.qos = NO_LOCK;
 		assoc_mgr_lock(&locks);
 		_set_children_usage_efctv(
 			assoc_mgr_root_assoc->usage->childern_list);
 		assoc_mgr_unlock(&locks);
-		locks.qos = WRITE_LOCK;
 
 		last_ran = start_time;
 
@@ -972,11 +1123,9 @@ static void *_cleanup_thread(void *no_data)
 	return NULL;
 }
 
-static void _internal_setup()
+static void _internal_setup(void)
 {
-	uint32_t debug_flags = slurm_get_debug_flags();
-
-	if (debug_flags & DEBUG_FLAG_PRIO)
+	if (slurm_get_debug_flags() & DEBUG_FLAG_PRIO)
 		priority_debug = 1;
 	else
 		priority_debug = 0;
@@ -1180,7 +1329,7 @@ extern double priority_p_calc_fs_factor(long double usage_efctv,
 {
 	double priority_fs;
 
-	xassert(usage_efctv != (long double)NO_VAL);
+	xassert(!fuzzy_equal(usage_efctv, NO_VAL));
 
 	if (shares_norm > 0.0)
 		priority_fs = pow(2.0, -(usage_efctv / shares_norm));
@@ -1234,6 +1383,12 @@ extern List priority_p_get_priority_factors_list(
 			if (job_ptr->priority <= 1)
 				continue;
 
+			/*
+			 * Priority has been set elsewhere (e.g. by SlurmUser)
+			 */
+			if (job_ptr->direct_set_prio)
+				continue;
+
 			if (_filter_job(job_ptr, req_job_list, req_user_list))
 				continue;
 
diff --git a/src/plugins/proctrack/Makefile.in b/src/plugins/proctrack/Makefile.in
index 9b5abf96e..1d460ad1c 100644
--- a/src/plugins/proctrack/Makefile.in
+++ b/src/plugins/proctrack/Makefile.in
@@ -62,6 +62,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -72,6 +73,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -133,7 +135,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -170,6 +175,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -227,6 +233,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -262,6 +269,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/proctrack/aix/Makefile.in b/src/plugins/proctrack/aix/Makefile.in
index 24cc5f90f..f45e13dc4 100644
--- a/src/plugins/proctrack/aix/Makefile.in
+++ b/src/plugins/proctrack/aix/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -138,7 +140,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -175,6 +180,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -232,6 +238,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -267,6 +274,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/proctrack/aix/proctrack_aix.c b/src/plugins/proctrack/aix/proctrack_aix.c
index 8027b0a5f..b186dc5a2 100644
--- a/src/plugins/proctrack/aix/proctrack_aix.c
+++ b/src/plugins/proctrack/aix/proctrack_aix.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -51,9 +51,11 @@
 #include <signal.h>
 #include <stdlib.h>
 #include <unistd.h>
-#include <slurm/slurm.h>
-#include <slurm/slurm_errno.h>
 #include <proctrack.h>
+
+#include "slurm/slurm.h"
+#include "slurm/slurm_errno.h"
+
 #include "src/common/log.h"
 #include "src/slurmd/slurmstepd/slurmstepd_job.h"
 
@@ -88,7 +90,7 @@
  */
 const char plugin_name[]      = "Process tracking via AIX kernel extension plugin";
 const char plugin_type[]      = "proctrack/aix";
-const uint32_t plugin_version = 90;
+const uint32_t plugin_version = 91;
 
 /*
  * init() is called when the plugin is loaded, before any other functions
@@ -117,7 +119,7 @@ extern int fini ( void )
 	return SLURM_SUCCESS;
 }
 
-extern int slurm_container_create ( slurmd_job_t *job )
+extern int slurm_container_plugin_create ( slurmd_job_t *job )
 {
 	return SLURM_SUCCESS;
 }
@@ -126,7 +128,7 @@ extern int slurm_container_create ( slurmd_job_t *job )
  * Uses job step process group id as a unique identifier.  Job id
  * and step id are not unique by themselves.
  */
-extern int slurm_container_add ( slurmd_job_t *job, pid_t pid )
+extern int slurm_container_plugin_add ( slurmd_job_t *job, pid_t pid )
 {
 	int pgid = (int) job->pgid;
 
@@ -138,11 +140,11 @@ extern int slurm_container_add ( slurmd_job_t *job, pid_t pid )
 		return SLURM_ERROR;
 	}
 
-	job->cont_id = (uint32_t)pgid;
+	job->cont_id = (uint64_t)pgid;
 	return SLURM_SUCCESS;
 }
 
-extern int slurm_container_signal  ( uint32_t id, int signal )
+extern int slurm_container_plugin_signal  ( uint64_t id, int signal )
 {
 	int jobid = (int) id;
 	if (!id)	/* no container ID */
@@ -151,7 +153,7 @@ extern int slurm_container_signal  ( uint32_t id, int signal )
 	return proctrack_job_kill(&jobid, &signal);
 }
 
-extern int slurm_container_destroy ( uint32_t id )
+extern int slurm_container_plugin_destroy ( uint64_t id )
 {
 	int jobid = (int) id;
 
@@ -164,53 +166,54 @@ extern int slurm_container_destroy ( uint32_t id )
 	return SLURM_ERROR;
 }
 
-extern uint32_t
-slurm_container_find(pid_t pid)
+extern uint64_t
+slurm_container_plugin_find(pid_t pid)
 {
 	int local_pid = (int) pid;
 	int cont_id = proctrack_get_job_id(&local_pid);
 	if (cont_id == -1)
-		return (uint32_t) 0;
-	return (uint32_t) cont_id;
+		return (uint64_t) 0;
+	return (uint64_t) cont_id;
 }
 
 extern bool
-slurm_container_has_pid(uint32_t cont_id, pid_t pid)
+slurm_container_plugin_has_pid(uint64_t cont_id, pid_t pid)
 {
 	int local_pid = (int) pid;
 	int found_cont_id = proctrack_get_job_id(&local_pid);
 
-	if (found_cont_id == -1 || (uint32_t)found_cont_id != cont_id)
+	if ((found_cont_id == -1) || ((uint64_t)found_cont_id != cont_id))
 		return false;
 
 	return true;
 }
 
 extern int
-slurm_container_get_pids(uint32_t cont_id, pid_t **pids, int *npids)
+slurm_container_plugin_get_pids(uint64_t cont_id, pid_t **pids, int *npids)
 {
 	int32_t *p;
 	int np;
 	int len = 64;
 
 	p = (int32_t *)xmalloc(len * sizeof(int32_t));
-	while((np = proctrack_get_pids(cont_id, len, p)) > len) {
+	while ((np = proctrack_get_pids(cont_id, len, p)) > len) {
 		/* array is too short, double its length */
 		len *= 2;
 		xrealloc(p, len);
 	}
 
 	if (np == -1) {
-		error("proctrack_get_pids(AIX) for container %u failed: %m",
-		      cont_id);
+		error("proctrack_get_pids(AIX) for container %"PRIu64" "
+		      "failed: %m", cont_id);
 		xfree(p);
 		*pids = NULL;
 		*npids = 0;
 		return SLURM_ERROR;
 	}
 
-	if (sizeof(uint32_t) == sizeof(pid_t)) {
-		debug3("slurm_container_get_pids: No need to copy pids array");
+	if (sizeof(int32_t) == sizeof(pid_t)) {
+		debug3("slurm_container_plugin_get_pids: No need to copy "
+		       "pids array");
 		*npids = np;
 		*pids = (pid_t *)p;
 	} else {
@@ -218,7 +221,7 @@ slurm_container_get_pids(uint32_t cont_id, pid_t **pids, int *npids)
 		pid_t *p_copy;
 		int i;
 
-		debug3("slurm_container_get_pids: Must copy pids array");
+		debug3("slurm_container_plugin_get_pids: Must copy pids array");
 		p_copy = (pid_t *)xmalloc(np * sizeof(pid_t));
 		for (i = 0; i < np; i++) {
 			p_copy[i] = (pid_t)p[i];
@@ -232,7 +235,7 @@ slurm_container_get_pids(uint32_t cont_id, pid_t **pids, int *npids)
 }
 
 extern int
-slurm_container_wait(uint32_t cont_id)
+slurm_container_plugin_wait(uint64_t cont_id)
 {
 	int jobid = (int) cont_id;
 	int delay = 1;
@@ -251,12 +254,12 @@ slurm_container_wait(uint32_t cont_id)
 			int i;
 			pid_t *pids = NULL;
 			int npids = 0;
-			error("Container %u is still not empty", cont_id);
+			error("Container %"PRIu64" is still not empty", cont_id);
 
-			slurm_container_get_pids(cont_id, &pids, &npids);
+			slurm_container_plugin_get_pids(cont_id, &pids, &npids);
 			if (npids > 0) {
 				for (i = 0; i < npids; i++) {
-					verbose("  Container %u has pid %d",
+					verbose("Container %"PRIu64" has pid %d",
 						cont_id, pids[i]);
 				}
 				xfree(pids);
@@ -266,4 +269,3 @@ slurm_container_wait(uint32_t cont_id)
 
 	return SLURM_SUCCESS;
 }
-
diff --git a/src/plugins/proctrack/cgroup/Changelog b/src/plugins/proctrack/cgroup/Changelog
deleted file mode 100644
index c3ebd4eac..000000000
--- a/src/plugins/proctrack/cgroup/Changelog
+++ /dev/null
@@ -1,20 +0,0 @@
-* Thu Jan 07 2010 Matthieu Hautreux <matthieu.hautreux@cea.fr>
-- release 0.2
-- fix a bug in memory limits calculation based on cgroup.conf 
-  configuration parameters ( (a / b * c) becomes (a * (float) (b/c)) 
-  which is better due to a, b and c being uint32_t values and roundness 
-  issues)
-- add new operations xcgroup_get_memlimit and xcgroup_get_memswlimit
-- add a workaround to cope with a slurm-2.1.0 and previous versions
-  limitation. job_mem field of slurmd_job_t corresponds to job steps
-  limits and not to the job mem limit. Two distinct fields should be
-  available in the future. In the meantime, we use job_mem value of
-  each launched step and extend the amount of allowed memory (both
-  ram and swap) if the the new amount is higher that the previous
-  one
-	
-* Tue Dec 01 2009 Matthieu Hautreux <matthieu.hautreux@cea.fr>
-- initial release (0.1) of proctrack/cgroup plugin
-- include a patch for jobacct_gather proper behavior when used with 
-  proctrack/cgroup (skip POSIX threads reported by the cgroup during
-  accounting)
diff --git a/src/plugins/proctrack/cgroup/Makefile.am b/src/plugins/proctrack/cgroup/Makefile.am
index 4f2cebae6..75a043e1d 100644
--- a/src/plugins/proctrack/cgroup/Makefile.am
+++ b/src/plugins/proctrack/cgroup/Makefile.am
@@ -2,19 +2,12 @@
 
 AUTOMAKE_OPTIONS = foreign
 
-PLUGIN_FLAGS = -module -avoid-version --export-dynamic 
+PLUGIN_FLAGS = -module -avoid-version --export-dynamic
 
 INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common
 
 pkglib_LTLIBRARIES = proctrack_cgroup.la
 
 # Process group ID process tracking plugin.
-proctrack_cgroup_la_SOURCES = \
-	proctrack_cgroup.c \
-	xcgroup.c xcgroup.h \
-	xcpuinfo.c xcpuinfo.h \
-	read_config.c read_config.h \
-	$(top_builddir)/src/slurmd/slurmd/get_mach_stat.c \
-	$(top_builddir)/src/slurmd/slurmd/get_mach_stat.h
-
+proctrack_cgroup_la_SOURCES = proctrack_cgroup.c
 proctrack_cgroup_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
diff --git a/src/plugins/proctrack/cgroup/Makefile.in b/src/plugins/proctrack/cgroup/Makefile.in
index 00a5076f1..f1d31681c 100644
--- a/src/plugins/proctrack/cgroup/Makefile.in
+++ b/src/plugins/proctrack/cgroup/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -105,8 +107,7 @@ am__base_list = \
 am__installdirs = "$(DESTDIR)$(pkglibdir)"
 LTLIBRARIES = $(pkglib_LTLIBRARIES)
 proctrack_cgroup_la_LIBADD =
-am_proctrack_cgroup_la_OBJECTS = proctrack_cgroup.lo xcgroup.lo \
-	xcpuinfo.lo read_config.lo get_mach_stat.lo
+am_proctrack_cgroup_la_OBJECTS = proctrack_cgroup.lo
 proctrack_cgroup_la_OBJECTS = $(am_proctrack_cgroup_la_OBJECTS)
 proctrack_cgroup_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
 	$(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
@@ -139,7 +140,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -176,6 +180,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -233,6 +238,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -268,6 +274,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
@@ -321,19 +328,12 @@ top_build_prefix = @top_build_prefix@
 top_builddir = @top_builddir@
 top_srcdir = @top_srcdir@
 AUTOMAKE_OPTIONS = foreign
-PLUGIN_FLAGS = -module -avoid-version --export-dynamic 
+PLUGIN_FLAGS = -module -avoid-version --export-dynamic
 INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common
 pkglib_LTLIBRARIES = proctrack_cgroup.la
 
 # Process group ID process tracking plugin.
-proctrack_cgroup_la_SOURCES = \
-	proctrack_cgroup.c \
-	xcgroup.c xcgroup.h \
-	xcpuinfo.c xcpuinfo.h \
-	read_config.c read_config.h \
-	$(top_builddir)/src/slurmd/slurmd/get_mach_stat.c \
-	$(top_builddir)/src/slurmd/slurmd/get_mach_stat.h
-
+proctrack_cgroup_la_SOURCES = proctrack_cgroup.c
 proctrack_cgroup_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
 all: all-am
 
@@ -409,11 +409,7 @@ mostlyclean-compile:
 distclean-compile:
 	-rm -f *.tab.c
 
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/get_mach_stat.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/proctrack_cgroup.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/read_config.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/xcgroup.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/xcpuinfo.Plo@am__quote@
 
 .c.o:
 @am__fastdepCC_TRUE@	$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
@@ -436,13 +432,6 @@ distclean-compile:
 @AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
 @am__fastdepCC_FALSE@	$(LTCOMPILE) -c -o $@ $<
 
-get_mach_stat.lo: $(top_builddir)/src/slurmd/slurmd/get_mach_stat.c
-@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT get_mach_stat.lo -MD -MP -MF $(DEPDIR)/get_mach_stat.Tpo -c -o get_mach_stat.lo `test -f '$(top_builddir)/src/slurmd/slurmd/get_mach_stat.c' || echo '$(srcdir)/'`$(top_builddir)/src/slurmd/slurmd/get_mach_stat.c
-@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/get_mach_stat.Tpo $(DEPDIR)/get_mach_stat.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$(top_builddir)/src/slurmd/slurmd/get_mach_stat.c' object='get_mach_stat.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o get_mach_stat.lo `test -f '$(top_builddir)/src/slurmd/slurmd/get_mach_stat.c' || echo '$(srcdir)/'`$(top_builddir)/src/slurmd/slurmd/get_mach_stat.c
-
 mostlyclean-libtool:
 	-rm -f *.lo
 
diff --git a/src/plugins/proctrack/cgroup/proctrack_cgroup.c b/src/plugins/proctrack/cgroup/proctrack_cgroup.c
index bb5dcbe57..5de335def 100644
--- a/src/plugins/proctrack/cgroup/proctrack_cgroup.c
+++ b/src/plugins/proctrack/cgroup/proctrack_cgroup.c
@@ -3,64 +3,65 @@
  *****************************************************************************
  *  Copyright (C) 2009 CEA/DAM/DIF
  *  Written by Matthieu Hautreux <matthieu.hautreux@cea.fr>
- *  
+ *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
- *  
+ *
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
  *  Software Foundation; either version 2 of the License, or (at your option)
  *  any later version.
  *
- *  In addition, as a special exception, the copyright holders give permission 
- *  to link the code of portions of this program with the OpenSSL library under 
- *  certain conditions as described in each individual source file, and 
- *  distribute linked combinations including the two. You must obey the GNU 
- *  General Public License in all respects for all of the code used other than 
- *  OpenSSL. If you modify file(s) with this exception, you may extend this 
- *  exception to your version of the file(s), but you are not obligated to do 
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
  *  so. If you do not wish to do so, delete this exception statement from your
- *  version.  If you delete this exception statement from all source files in 
+ *  version.  If you delete this exception statement from all source files in
  *  the program, then also delete it here.
- *  
+ *
  *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
  *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
  *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
  *  details.
- *  
+ *
  *  You should have received a copy of the GNU General Public License along
  *  with SLURM; if not, write to the Free Software Foundation, Inc.,
  *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
 \*****************************************************************************/
 
 #if HAVE_CONFIG_H
-#   include "config.h"
+#include "config.h"
 #endif
 
 #if HAVE_STDINT_H
-#  include <stdint.h>
+#include <stdint.h>
 #endif
 #if HAVE_INTTYPES_H
-#  include <inttypes.h>
+#include <inttypes.h>
 #endif
 
-#include <slurm/slurm.h>
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm.h"
+#include "slurm/slurm_errno.h"
 #include "src/common/log.h"
 #include "src/slurmd/slurmd/slurmd.h"
 
 #include "src/slurmd/slurmstepd/slurmstepd_job.h"
 
+#include "src/common/xcgroup_read_config.h"
+#include "src/common/xcgroup.h"
+#include "src/common/xstring.h"
+#include "src/common/xcpuinfo.h"
+
 #include <sys/types.h>
 #include <sys/stat.h>
 #include <fcntl.h>
 #include <stdlib.h>
 
-#include "read_config.h"
-#include "xcgroup.h"
-#include "xcpuinfo.h"
-
 /*
  * These variables are required by the generic plugin interface.  If they
  * are not found in the plugin, the plugin loader will ignore it.
@@ -80,7 +81,7 @@
  * where <application> is a description of the intended application of
  * the plugin (e.g., "jobcomp" for SLURM job completion logging) and <method>
  * is a description of how this plugin satisfies that application.  SLURM will
- * only load job completion logging plugins if the plugin_type string has a 
+ * only load job completion logging plugins if the plugin_type string has a
  * prefix of "jobcomp/".
  *
  * plugin_version - an unsigned 32-bit integer giving the version number
@@ -90,397 +91,315 @@
  * minimum version for their plugins as the job completion logging API
  * matures.
  */
-const char plugin_name[]      = "Process tracking via linux cgroup";
+const char plugin_name[]      = "Process tracking via linux "
+				"cgroup freezer subsystem";
 const char plugin_type[]      = "proctrack/cgroup";
-const uint32_t plugin_version = 10;
+const uint32_t plugin_version = 91;
 
 #ifndef PATH_MAX
 #define PATH_MAX 256
 #endif
 
-#define CGROUP_SLURMDIR CGROUP_BASEDIR "/slurm"
+static slurm_cgroup_conf_t slurm_cgroup_conf;
 
-char user_cgroup_path[PATH_MAX];
-char job_cgroup_path[PATH_MAX];
-char jobstep_cgroup_path[PATH_MAX];
+static char user_cgroup_path[PATH_MAX];
+static char job_cgroup_path[PATH_MAX];
+static char jobstep_cgroup_path[PATH_MAX];
+static char release_agent_path[PATH_MAX];
 
-int _slurm_cgroup_init()
-{
-	int fstatus;
-	xcgroup_opts_t opts;
+static xcgroup_ns_t freezer_ns;
+
+static xcgroup_t user_freezer_cg;
+static xcgroup_t job_freezer_cg;
+static xcgroup_t step_freezer_cg;
 
-	/* initialize job/jobstep cgroup path */
+int _slurm_cgroup_init(void)
+{
+	/* initialize user/job/jobstep cgroup relative paths
+	 * and release agent path */
 	user_cgroup_path[0]='\0';
 	job_cgroup_path[0]='\0';
 	jobstep_cgroup_path[0]='\0';
+	release_agent_path[0]='\0';
+
+	/* build freezer release agent path */
+	if (snprintf(release_agent_path, PATH_MAX, "%s/release_freezer",
+		      slurm_cgroup_conf.cgroup_release_agent) >= PATH_MAX) {
+		error("unable to build cgroup freezer release agent path");
+		return SLURM_ERROR;
+	}
 
-	/* we first check that cgroup is mounted */
-	if ( ! xcgroup_is_available() ) {
-		if ( slurm_cgroup_conf->cgroup_automount ) {
-			if ( xcgroup_mount(slurm_cgroup_conf->
-					   cgroup_mount_opts) ) {
-				error("unable to mount cgroup");
+	/* initialize freezer cgroup namespace */
+	if (xcgroup_ns_create(&slurm_cgroup_conf, &freezer_ns, "/freezer", "",
+			       "freezer", release_agent_path)
+	     != XCGROUP_SUCCESS) {
+		error("unable to create freezer cgroup namespace");
+		return SLURM_ERROR;
+	}
+
+	/* check that freezer cgroup namespace is available */
+	if (! xcgroup_ns_is_available(&freezer_ns)) {
+		if (slurm_cgroup_conf.cgroup_automount) {
+			if (xcgroup_ns_mount(&freezer_ns)) {
+				error("unable to mount freezer cgroup"
+				      " namespace");
 				return SLURM_ERROR;
 			}
-			info("cgroup system is now mounted");
-			/* we then set the release_agent if necessary */
-			if ( slurm_cgroup_conf->cgroup_release_agent ) {
-				xcgroup_set_release_agent(slurm_cgroup_conf->
-							  cgroup_release_agent);
-			}
+			info("cgroup namespace '%s' is now mounted", "freezer");
 		}
 		else {
-			error("cgroup is not mounted. aborting");
+			error("cgroup namespace '%s' not mounted. aborting",
+			      "freezer");
 			return SLURM_ERROR;
 		}
 	}
 
-	/* create a non releasable root cgroup for slurm usage */
-	opts.uid=getuid();
-	opts.gid=getgid();
-	opts.create_only=0;
-	opts.notify=0;
-	fstatus = xcgroup_create(CGROUP_SLURMDIR,&opts);
-	if ( fstatus != SLURM_SUCCESS ) {
-		error("unable to create SLURM cgroup directory '%s'. aborting",
-		      CGROUP_SLURMDIR);
-		return SLURM_ERROR;
-	}
-
 	return SLURM_SUCCESS;
 }
 
-int _slurm_cgroup_create(slurmd_job_t *job,uint32_t id,uid_t uid,gid_t gid)
+int _slurm_cgroup_create(slurmd_job_t *job, uint64_t id, uid_t uid, gid_t gid)
 {
-	int fstatus;
-
-	xcgroup_opts_t opts;
-	uint32_t cur_memlimit,cur_memswlimit;
+	/* we do it here as we do not have access to the conf structure */
+	/* in libslurm (src/common/xcgroup.c) */
+	xcgroup_t slurm_cg;
+	char* pre = (char*) xstrdup(slurm_cgroup_conf.cgroup_prepend);
+#ifdef MULTIPLE_SLURMD
+	if ( conf->node_name != NULL )
+		xstrsubstitute(pre,"%n", conf->node_name);
+	else {
+		xfree(pre);
+		pre = (char*) xstrdup("/slurm");
+	}
+#endif
 
-	/* build user cgroup path if no set (should not be) */
-	if ( *user_cgroup_path == '\0' ) {
-		if ( snprintf(user_cgroup_path,PATH_MAX,CGROUP_SLURMDIR 
-			      "/uid_%u",uid) >= PATH_MAX ) {
-			error("unable to build uid %u cgroup filepath : %m",
-			      uid);
+	/* create slurm cgroup in the freezer ns (it could already exist) */
+	if (xcgroup_create(&freezer_ns, &slurm_cg,pre,
+			   getuid(), getgid()) != XCGROUP_SUCCESS) {
+		return SLURM_ERROR;
+	}
+	if (xcgroup_instanciate(&slurm_cg) != XCGROUP_SUCCESS) {
+		xcgroup_destroy(&slurm_cg);
+		return SLURM_ERROR;
+	}
+	else
+		xcgroup_destroy(&slurm_cg);
+
+	/* build user cgroup relative path if not set (should not be) */
+	if (*user_cgroup_path == '\0') {
+		if (snprintf(user_cgroup_path, PATH_MAX,
+			     "%s/uid_%u", pre, uid) >= PATH_MAX) {
+			error("unable to build uid %u cgroup relative "
+			      "path : %m", uid);
+			xfree(pre);
 			return SLURM_ERROR;
 		}
 	}
-
-	/* build job cgroup path if no set (should not be) */
-	if ( *job_cgroup_path == '\0' ) {
-		if ( snprintf(job_cgroup_path,PATH_MAX,"%s/job_%u",
-			      user_cgroup_path,job->jobid) >= PATH_MAX ) {
-			error("unable to build job %u cgroup filepath : %m",
-			      job->jobid);
+	xfree(pre);
+
+	/* build job cgroup relative path if no set (should not be) */
+	if (*job_cgroup_path == '\0') {
+		if (snprintf(job_cgroup_path, PATH_MAX, "%s/job_%u",
+			      user_cgroup_path, job->jobid) >= PATH_MAX) {
+			error("unable to build job %u cgroup relative "
+			      "path : %m", job->jobid);
 			return SLURM_ERROR;
 		}
 	}
 
-	/* build job step cgroup path (should not be) */
-	if ( *jobstep_cgroup_path == '\0' ) {
-		if ( snprintf(jobstep_cgroup_path,PATH_MAX,"%s/step_%u",
-			      job_cgroup_path,job->stepid) >= PATH_MAX ) {
-			error("unable to build job step %u cgroup filepath "
-			      ": %m",job->stepid);
-			return SLURM_ERROR;
+	/* build job step cgroup relative path (should not be) */
+	if (*jobstep_cgroup_path == '\0') {
+		if (job->stepid == NO_VAL) {
+			if (snprintf(jobstep_cgroup_path, PATH_MAX,
+				     "%s/step_batch", job_cgroup_path)
+			    >= PATH_MAX) {
+				error("proctrack/cgroup unable to build job step"
+				      " %u.batch freezer cg relative path: %m",
+				      job->jobid);
+				return SLURM_ERROR;
+			}
+		} else {
+			if (snprintf(jobstep_cgroup_path, PATH_MAX, "%s/step_%u",
+				     job_cgroup_path, job->stepid) >= PATH_MAX) {
+				error("proctrack/cgroup unable to build job step"
+				      " %u.%u freezer cg relative path: %m",
+				      job->jobid, job->stepid);
+				return SLURM_ERROR;
+			}
 		}
 	}
 
-	/* create user cgroup (it could already exist) */
-	opts.uid=getuid();
-	opts.gid=getgid();
-	opts.create_only=0;
-	opts.notify=1;
-	if ( xcgroup_create(user_cgroup_path,&opts)
-	     != SLURM_SUCCESS )
+	/* create user cgroup in the freezer ns (it could already exist) */
+	if (xcgroup_create(&freezer_ns, &user_freezer_cg,
+			    user_cgroup_path,
+			    getuid(), getgid()) != XCGROUP_SUCCESS) {
 		return SLURM_ERROR;
-	if ( slurm_cgroup_conf->user_cgroup_params )
-		xcgroup_set_params(user_cgroup_path,
-				   slurm_cgroup_conf->user_cgroup_params);
-	
-	/*
-	 * if memory constraints have to be added to uid cgroup 
-	 * use_hierachy=1 must be set here, but this would result
-	 * in impossibility to configure some job memory parameters
-	 * differently, so skip this stage for now
-	 */
+	}
+	if (xcgroup_instanciate(&user_freezer_cg) != XCGROUP_SUCCESS) {
+		xcgroup_destroy(&user_freezer_cg);
 
-	/* create job cgroup (it could already exist) */
-	opts.uid=getuid();
-	opts.gid=getgid();
-	opts.create_only=0;
-	opts.notify=1;
-	if ( xcgroup_create(job_cgroup_path,&opts)
-	     != SLURM_SUCCESS )
 		return SLURM_ERROR;
-	
-	/* job cgroup parameters must be set before any sub cgroups 
-	   are created */
-	xcgroup_set_mem_use_hierarchy(job_cgroup_path,1);
-	if ( slurm_cgroup_conf->job_cgroup_params )
-		xcgroup_set_params(job_cgroup_path,
-				   slurm_cgroup_conf->job_cgroup_params);
-
-	/*
-	 *  Warning: OOM Killer must be disabled for slurmstepd
-	 *  or it would be destroyed if the application use
-	 *  more memory than permitted
-	 *
-	 *  If an env value is already set for slurmstepd
-	 *  OOM killer behavior, keep it, otherwise set the 
-	 *  -17 value, wich means do not let OOM killer kill it
-	 *  
-	 *  FYI, setting "export SLURMSTEPD_OOM_ADJ=-17" 
-	 *  in /etc/sysconfig/slurm would be the same
-	 */
-	setenv("SLURMSTEPD_OOM_ADJ","-17",0);
-
-	/* 
-	 * FIXME!
-	 * Warning, with slurm-2.1.0 job_mem more corresponds to the
-	 * missing field jobstep_mem and thus must not be
-	 * trusted to set the job mem limit constraint
-	 * Due to the lack of jobstep_mem field in slurm-2.1.0
-	 * we only allow to extend the amount of allowed memory
-	 * as a step requiring less than the max allowed amount
-	 * for the job could otherwise reduce the allowed amount of other
-	 * already running steps
-	 * Thus, as a long as a step comes with a value that is higher
-	 * than the current value, we use it as it means that the
-	 * job is at least authorized to use this amount
-	 * In the future, a jobstep_mem field should be added
-	 * to avoid this workaround and be more deterministic
-	 *
-	 * Unfortunately with this workaround comes a collateral problem ! 
-	 * As we propose to alter already fixed limits for both mem and 
-	 * mem+swap, we have to respect a certain order while doing the
-	 * modification to respect the kernel cgroup implementation
-	 * requirements : when sets, memory limit must be lower or equal
-	 * to memory+swap limit
-	 *
-	 * Notes : a limit value of -1 means that the limit was not
-	 * previously set
-	 * Notes : this whole part should be much more simpler when 
-	 * the jobstep_mem field will be added
-	 *
-	 */
-
-	/*
-	 * Get current limits for both mem and mem+swap
-	 */
-	xcgroup_get_memlimit(job_cgroup_path,&cur_memlimit);
-	xcgroup_get_memswlimit(job_cgroup_path,&cur_memswlimit);
-
-	/* 
-	 * set memory constraints according to cgroup conf
-	 */
-	if ( slurm_cgroup_conf->constrain_ram_space &&
-	     cur_memlimit == -1 ) {
-		uint32_t limit;
-		limit = (uint32_t) job->job_mem ;
-		limit = (uint32_t) limit *
-			( slurm_cgroup_conf->allowed_ram_space / 100.0 ) ;
-		xcgroup_set_memlimit(job_cgroup_path,limit);
-	}
-	if ( slurm_cgroup_conf->constrain_swap_space ) {
-		uint32_t limit,memlimit,swaplimit;
-		memlimit = (uint32_t) job->job_mem ;
-		swaplimit = memlimit ;
-		memlimit = (uint32_t) memlimit * 
-			( slurm_cgroup_conf->allowed_ram_space / 100.0 ) ;
-		swaplimit = (uint32_t) swaplimit * 
-			( slurm_cgroup_conf->allowed_swap_space / 100.0 ) ;
-		limit = memlimit + swaplimit ;
-		/* 
-		 * if memlimit was not set in the previous block, 
-		 * we have to set it here or it will not be possible 
-		 * to set mem+swap limit as the mem limit value could be
-		 * higher.
-		 * FIXME!
-		 * However, due to the restriction mentioned in the previous
-		 * block (job_mem...) if a step already set it, we will
-		 * have to skip this as if the new amount is bigger
-		 * we will not be allowed by the kernel to set it as 
-		 * the mem+swap value will certainly be lower. In such 
-		 * scenario, we will have to set memlimit after mem+swap limit
-		 * to still be clean regarding to cgroup kernel implementation
-		 * ( memlimit must be lower or equal to mem+swap limit when
-		 * set ). See stage 2 below...
-		 */
-		if ( !slurm_cgroup_conf->constrain_ram_space && 
-		     cur_memlimit == -1 )
-			xcgroup_set_memlimit(job_cgroup_path,limit);
-		/*
-		 * FIXME!
-		 * for the reason why we do this, see the previous block too
-		 */
-
-		if ( cur_memswlimit == -1 || cur_memswlimit < limit )
-			xcgroup_set_memswlimit(job_cgroup_path,limit);
-		else
-			debug3("keeping previously set mem+swap limit of %uMB"
-			       " for '%s'",cur_memswlimit,job_cgroup_path);
-		/* 
-		 * FIXME!
-		 * stage 2
-		 */
-		if ( !slurm_cgroup_conf->constrain_ram_space && 
-		     cur_memlimit != -1 ) {
-			/*
-			 * FIXME!
-			 * for the reason why we do this, see the previous 
-			 * block
-			 */
-			if ( cur_memlimit == -1 || cur_memlimit < limit ) 
-				xcgroup_set_memlimit(job_cgroup_path,limit);
-			else
-				debug3("keeping previously set mem limit of "
-				       "%uMB for '%s'",cur_memlimit,
-				       job_cgroup_path);
-		}
 	}
-	/*
-	 * FIXME!
-	 * yet an other stage 2 due to jobstep_mem lack... 
-	 * only used when ram_space constraint is enforced
-	 */
-	if ( slurm_cgroup_conf->constrain_ram_space &&
-	     cur_memlimit != -1 ) {
-		uint32_t limit;
-		limit = (uint32_t) job->job_mem ;
-		limit = (uint32_t) limit *
-			( slurm_cgroup_conf->allowed_ram_space / 100.0 ) ;
-		if ( cur_memlimit == -1 || cur_memlimit < limit )
-			xcgroup_set_memlimit(job_cgroup_path,limit);
-		else
-			debug3("keeping previously set mem limit of "
-			       "%uMB for '%s'",cur_memlimit,job_cgroup_path);
-	}
-
-	/* set cores constraints if required by conf */
-	if ( slurm_cgroup_conf->constrain_cores && 
-	     job->job_alloc_cores ) {
-		/*
-		 * abstract mapping of cores in slurm must
-		 * first be mapped into the machine one
-		 */
-		char* mach;
-		if ( xcpuinfo_abs_to_mac(job->job_alloc_cores,&mach) !=
-		     XCPUINFO_SUCCESS ) {
-			error("unable to convert abstract slurm allocated "
-			      "cores '%s' into a valid machine map",
-			      job->job_alloc_cores);
-		}
-		else {
-			debug3("allocated cores conversion done : "
-			       "%s (abstract) -> %s (machine)",
-			       job->job_alloc_cores,mach);
-			xcgroup_set_cpuset_cpus(job_cgroup_path,
-						mach);
-			xfree(mach);
-		}
+
+	/* create job cgroup in the freezer ns (it could already exist) */
+	if (xcgroup_create(&freezer_ns, &job_freezer_cg,
+			    job_cgroup_path,
+			    getuid(), getgid()) != XCGROUP_SUCCESS) {
+		xcgroup_destroy(&user_freezer_cg);
+		return SLURM_ERROR;
 	}
-	else if ( ! job->job_alloc_cores ) {
-		error("job_alloc_cores not defined for this job! ancestor's conf"
-		      " will be used instead");
+	if (xcgroup_instanciate(&job_freezer_cg) != XCGROUP_SUCCESS) {
+		xcgroup_destroy(&user_freezer_cg);
+		xcgroup_destroy(&job_freezer_cg);
+		return SLURM_ERROR;
 	}
 
-	/* create the step sub cgroup  (it sould not already exists) */
-	opts.uid=uid;
-	opts.gid=gid;
-	opts.create_only=1;
-	opts.notify=1;
-	fstatus = xcgroup_create(jobstep_cgroup_path,&opts);
-	if ( fstatus != XCGROUP_SUCCESS ) {
-		rmdir(job_cgroup_path);
-		return fstatus;
+	/* create step cgroup in the freezer ns (it should not exists) */
+	if (xcgroup_create(&freezer_ns, &step_freezer_cg,
+			    jobstep_cgroup_path,
+			    getuid(), getgid()) != XCGROUP_SUCCESS) {
+		xcgroup_destroy(&user_freezer_cg);
+		xcgroup_destroy(&job_freezer_cg);
+		return SLURM_ERROR;
+	}
+	if (xcgroup_instanciate(&step_freezer_cg) != XCGROUP_SUCCESS) {
+		xcgroup_destroy(&user_freezer_cg);
+		xcgroup_destroy(&job_freezer_cg);
+		xcgroup_destroy(&step_freezer_cg);
+		return SLURM_ERROR;
 	}
 
-	/* set jobstep cgroup parameters */
-	if ( slurm_cgroup_conf->jobstep_cgroup_params )
-		xcgroup_set_params(jobstep_cgroup_path,
-				   slurm_cgroup_conf->jobstep_cgroup_params);
+	/* inhibit release agent for the step cgroup thus letting 
+	 * slurmstepd being able to add new pids to the container 
+	 * when the job ends (TaskEpilog,...) */
+	xcgroup_set_param(&step_freezer_cg,"notify_on_release","0");
 
-	return fstatus;
+	return SLURM_SUCCESS;
 }
 
 int _slurm_cgroup_destroy(void)
 {
-	if ( jobstep_cgroup_path[0] != '\0' )
-		xcgroup_destroy(jobstep_cgroup_path);
-	
-	if ( job_cgroup_path[0] != '\0' )
-		xcgroup_destroy(job_cgroup_path);
-	
-	if ( user_cgroup_path[0] != '\0' )
-		xcgroup_destroy(user_cgroup_path);
-	
+	if (jobstep_cgroup_path[0] != '\0') {
+		if ( xcgroup_delete(&step_freezer_cg) != XCGROUP_SUCCESS )
+			return SLURM_ERROR;
+		xcgroup_destroy(&step_freezer_cg);
+	}
+
+	if (job_cgroup_path[0] != '\0') {
+		xcgroup_delete(&job_freezer_cg);
+		xcgroup_destroy(&job_freezer_cg);
+	}
+
+	if (user_cgroup_path[0] != '\0') {
+		xcgroup_delete(&user_freezer_cg);
+		xcgroup_destroy(&user_freezer_cg);
+	}
+
 	return SLURM_SUCCESS;
 }
 
-int _slurm_cgroup_add_pids(uint32_t id,pid_t* pids,int npids)
+int _slurm_cgroup_add_pids(uint64_t id, pid_t* pids, int npids)
+{
+	if (*jobstep_cgroup_path == '\0')
+		return SLURM_ERROR;
+
+	return xcgroup_add_pids(&step_freezer_cg, pids, npids);
+}
+
+int _slurm_cgroup_stick_stepd(uint64_t id, pid_t pid)
 {
-	if ( *jobstep_cgroup_path == '\0' )
+	if (*job_cgroup_path == '\0')
 		return SLURM_ERROR;
-	
-	return xcgroup_add_pids(jobstep_cgroup_path,pids,npids);
+
+	return xcgroup_add_pids(&job_freezer_cg, &pid, 1);
 }
 
 int
-_slurm_cgroup_get_pids(uint32_t id, pid_t **pids, int *npids)
+_slurm_cgroup_get_pids(uint64_t id, pid_t **pids, int *npids)
 {
-	if ( *jobstep_cgroup_path == '\0' )
+	if (*jobstep_cgroup_path == '\0')
 		return SLURM_ERROR;
-	
-	return xcgroup_get_pids(jobstep_cgroup_path,pids,npids);
+
+	return xcgroup_get_pids(&step_freezer_cg, pids, npids);
 }
 
-int _slurm_cgroup_set_memlimit(uint32_t id,uint32_t memlimit)
+int _slurm_cgroup_suspend(uint64_t id)
 {
-	if ( *jobstep_cgroup_path == '\0' )
+	if (*jobstep_cgroup_path == '\0')
 		return SLURM_ERROR;
-	
-	return xcgroup_set_memlimit(jobstep_cgroup_path,memlimit);
+
+	return xcgroup_set_param(&step_freezer_cg,
+				 "freezer.state", "FROZEN");
 }
 
-int _slurm_cgroup_set_memswlimit(uint32_t id,uint32_t memlimit)
+int _slurm_cgroup_resume(uint64_t id)
 {
-	if ( *jobstep_cgroup_path == '\0' )
+	if (*jobstep_cgroup_path == '\0')
 		return SLURM_ERROR;
-	
-	return xcgroup_set_memswlimit(jobstep_cgroup_path,memlimit);
+
+	return xcgroup_set_param(&step_freezer_cg,
+				 "freezer.state", "THAWED");
 }
 
-int
-_slurm_cgroup_find_by_pid(uint32_t* pcont_id, pid_t pid)
+bool
+_slurm_cgroup_has_pid(pid_t pid)
 {
-	int fstatus;
-	int rc;
-	uint32_t cont_id;
-	char cpath[PATH_MAX];
-	char* token;
+	bool fstatus;
+	xcgroup_t cg;
 
-	fstatus = xcgroup_find_by_pid(cpath,pid);
-	if (  fstatus != SLURM_SUCCESS )
-		return fstatus;
+	fstatus = xcgroup_ns_find_by_pid(&freezer_ns, &cg, pid);
+	if ( fstatus != XCGROUP_SUCCESS)
+		return false;
 
-	token = rindex(cpath,'/');
-	if ( token == NULL ) {
-		debug3("pid %u cgroup '%s' does not match %s cgroup pattern",
-		      pid,cpath,plugin_type);
-		return SLURM_ERROR;
+	if (strcmp(cg.path, step_freezer_cg.path)) {
+		fstatus = false;
+	}
+	else {
+		fstatus = true;
 	}
 
-	rc = sscanf(token,"/%u",&cont_id);
-	if ( rc == 1 ) {
-		if ( pcont_id != NULL )
-			*pcont_id=cont_id;
-		fstatus = SLURM_SUCCESS;
+	xcgroup_destroy(&cg);
+	return fstatus;
+}
+
+int
+_slurm_cgroup_is_pid_a_slurm_task(uint64_t id, pid_t pid)
+{
+	int fstatus = -1;
+	int fd;
+	pid_t ppid;
+	char file_path[PATH_MAX], buf[2048];
+
+	if (snprintf(file_path, PATH_MAX, "/proc/%ld/stat",
+		      (long)pid) >= PATH_MAX) {
+		debug2("unable to build pid '%d' stat file: %m ", pid);
+		return fstatus;
 	}
-	else {
-		fstatus = SLURM_ERROR;
+
+	if ((fd = open(file_path, O_RDONLY)) < 0) {
+		debug2("unable to open '%s' : %m ", file_path);
+		return fstatus;
+	}
+	if (read(fd, buf, 2048) <= 0) {
+		debug2("unable to read '%s' : %m ", file_path);
+		close(fd);
+		return fstatus;
 	}
+	close(fd);
+
+	if (sscanf(buf, "%*d %*s %*s %d", &ppid) != 1) {
+		debug2("unable to get ppid of pid '%d', %m", pid);
+		return fstatus;
+	}
+
+	/*
+	 * assume that any child of slurmstepd is a slurm task
+	 * they will get all signals, inherited processes will
+	 * only get SIGKILL
+	 */
+	if (ppid == (pid_t) id)
+		fstatus = 1;
+	else
+		fstatus = 0;
 
 	return fstatus;
 }
@@ -489,132 +408,151 @@ _slurm_cgroup_find_by_pid(uint32_t* pcont_id, pid_t pid)
  * init() is called when the plugin is loaded, before any other functions
  * are called.  Put global initialization here.
  */
-extern int init ( void )
+extern int init (void)
 {
 	/* read cgroup configuration */
-	if ( read_slurm_cgroup_conf() )
+	if (read_slurm_cgroup_conf(&slurm_cgroup_conf))
 		return SLURM_ERROR;
 
 	/* initialize cpuinfo internal data */
-	if ( xcpuinfo_init() != XCPUINFO_SUCCESS ) {
-		free_slurm_cgroup_conf();
+	if (xcpuinfo_init() != XCPUINFO_SUCCESS) {
+		free_slurm_cgroup_conf(&slurm_cgroup_conf);
 		return SLURM_ERROR;
 	}
 
 	/* initialize cgroup internal data */
-	if ( _slurm_cgroup_init() != SLURM_SUCCESS ) {
+	if (_slurm_cgroup_init() != SLURM_SUCCESS) {
 		xcpuinfo_fini();
-		free_slurm_cgroup_conf();
+		free_slurm_cgroup_conf(&slurm_cgroup_conf);
 		return SLURM_ERROR;
 	}
 
 	return SLURM_SUCCESS;
 }
 
-extern int fini ( void )
+extern int fini (void)
 {
 	_slurm_cgroup_destroy();
 	xcpuinfo_fini();
-	free_slurm_cgroup_conf();
+	free_slurm_cgroup_conf(&slurm_cgroup_conf);
 	return SLURM_SUCCESS;
 }
 
 /*
  * Uses slurmd job-step manager's pid as the unique container id.
  */
-extern int slurm_container_create ( slurmd_job_t *job )
+extern int slurm_container_plugin_create (slurmd_job_t *job)
 {
 	int fstatus;
 
-	/* create a new cgroup for that container */ 
-	fstatus = _slurm_cgroup_create(job,(uint32_t)job->jmgr_pid,
-				       job->uid,job->gid);
-	if ( fstatus )
+	/* create a new cgroup for that container */
+	fstatus = _slurm_cgroup_create(job, (uint64_t)job->jmgr_pid,
+				       job->uid, job->gid);
+	if (fstatus)
 		return SLURM_ERROR;
 
-	/* set the cgroup paths to adhoc env variables */
-	env_array_overwrite(&job->env,"SLURM_JOB_CGROUP",
-			    job_cgroup_path);
-	env_array_overwrite(&job->env,"SLURM_STEP_CGROUP",
-			    jobstep_cgroup_path);
-
-	/* add slurmstepd pid to this newly created container */
-	fstatus = _slurm_cgroup_add_pids((uint32_t)job->jmgr_pid,
-					 &(job->jmgr_pid),1);
-	if ( fstatus ) {
-		_slurm_cgroup_destroy();		
+	/* stick slurmstepd pid to the newly created job container
+	 * (Note: we do not put it in the step container because this
+	 * container could be used to suspend/resume tasks using freezer
+	 * properties so we need to let the slurmstepd outside of
+	 * this one)
+	 */
+	fstatus = _slurm_cgroup_stick_stepd((uint64_t)job->jmgr_pid,
+					    job->jmgr_pid);
+	if (fstatus) {
+		_slurm_cgroup_destroy();
 		return SLURM_ERROR;
 	}
 
-	/* we use slurmstepd pid as the identifier of the container 
+	/* we use slurmstepd pid as the identifier of the container
 	 * the corresponding cgroup could be found using
 	 * _slurm_cgroup_find_by_pid */
-	job->cont_id = (uint32_t)job->jmgr_pid;
+	job->cont_id = (uint64_t)job->jmgr_pid;
 
 	return SLURM_SUCCESS;
 }
 
-extern int slurm_container_add ( slurmd_job_t *job, pid_t pid )
+extern int slurm_container_plugin_add (slurmd_job_t *job, pid_t pid)
 {
-	return _slurm_cgroup_add_pids(job->cont_id,&pid,1);
+	return _slurm_cgroup_add_pids(job->cont_id, &pid, 1);
 }
 
-extern int slurm_container_signal ( uint32_t id, int signal )
+extern int slurm_container_plugin_signal (uint64_t id, int signal)
 {
 	pid_t* pids = NULL;
 	int npids;
 	int i;
+	int slurm_task;
+
+	/* get all the pids associated with the step */
+	if (_slurm_cgroup_get_pids(id, &pids, &npids) !=
+	     SLURM_SUCCESS) {
+		debug3("unable to get pids list for cont_id=%"PRIu64"", id);
+		/* that could mean that all the processes already exit */
+		/* the container so return success */
+		return SLURM_SUCCESS;
+	}
 
-	if ( _slurm_cgroup_get_pids(id,&pids,&npids) !=
-	     SLURM_SUCCESS ) {
-		error("unable to get pids list for cont_id=%u",id);
-		return SLURM_ERROR;
+	/* directly manage SIGSTOP using cgroup freezer subsystem */
+	if (signal == SIGSTOP) {
+		xfree(pids);
+		return _slurm_cgroup_suspend(id);
+	}
+
+	/* start by resuming in case of SIGKILL */
+	if (signal == SIGKILL) {
+		_slurm_cgroup_resume(id);
 	}
-	
-	for ( i = 0 ; i<npids ; i++ ) {
-		/* do not kill slurmstepd */
-		if ( pids[i] != id ) {
-			debug2("killing process %d with signal %d",
-			       pids[i],signal);
-			kill(pids[i],signal);
+
+	for (i = 0 ; i<npids ; i++) {
+		/* do not kill slurmstepd (it should not be part
+		 * of the list, but just to not forget about that ;))
+		 */
+		if (pids[i] == (pid_t)id)
+			continue;
+
+		/* only signal slurm tasks unless signal is SIGKILL */
+		slurm_task = _slurm_cgroup_is_pid_a_slurm_task(id, pids[i]);
+		if (slurm_task == 1 || signal == SIGKILL) {
+			debug2("killing process %d (%s) with signal %d", pids[i],
+			       (slurm_task==1)?"slurm_task":"inherited_task",
+			       signal);
+			kill(pids[i], signal);
 		}
 	}
-	
+
 	xfree(pids);
-	
+
+	/* resume tasks after signaling slurm tasks with SIGCONT to be sure */
+	/* that SIGTSTP received at suspend time is removed */
+	if (signal == SIGCONT) {
+		return _slurm_cgroup_resume(id);
+	}
+
 	return SLURM_SUCCESS;
 }
 
-extern int slurm_container_destroy ( uint32_t id )
+extern int slurm_container_plugin_destroy (uint64_t id)
 {
-	_slurm_cgroup_destroy();
-	return SLURM_SUCCESS;
+	return _slurm_cgroup_destroy();
 }
 
-extern uint32_t slurm_container_find(pid_t pid)
+extern uint64_t slurm_container_plugin_find(pid_t pid)
 {
-	uint32_t cont_id=-1;
-	_slurm_cgroup_find_by_pid(&cont_id,pid);
+	uint64_t cont_id = -1;
+
+	if (cont_id == (uint64_t) -1)
+		return 0;
+	/* not provided for now */
 	return cont_id;
 }
 
-extern bool slurm_container_has_pid(uint32_t cont_id, pid_t pid)
+extern bool slurm_container_plugin_has_pid(uint64_t cont_id, pid_t pid)
 {
-	int fstatus;
-	uint32_t lid;
-
-	fstatus = _slurm_cgroup_find_by_pid(&lid,pid);
-	if ( fstatus != SLURM_SUCCESS )
-		return false;
-
-	if ( lid == cont_id )
-		return true;
-	else
-		return false;
-
+	return _slurm_cgroup_has_pid(pid);
 }
 
-extern int slurm_container_wait(uint32_t cont_id)
+extern int slurm_container_plugin_wait(uint64_t cont_id)
 {
 	int delay = 1;
 
@@ -624,20 +562,23 @@ extern int slurm_container_wait(uint32_t cont_id)
 	}
 
 	/* Spin until the container is successfully destroyed */
-	while (slurm_container_destroy(cont_id) != SLURM_SUCCESS) {
-		slurm_container_signal(cont_id, SIGKILL);
+	/* This indicates that all tasks have exited the container */
+	while (slurm_container_plugin_destroy(cont_id) != SLURM_SUCCESS) {
+		slurm_container_plugin_signal(cont_id, SIGKILL);
 		sleep(delay);
 		if (delay < 120) {
 			delay *= 2;
 		} else {
-			error("Unable to destroy container %u", cont_id);
+			error("Unable to destroy container %"PRIu64"",
+			      cont_id);
 		}
 	}
 
 	return SLURM_SUCCESS;
 }
 
-extern int slurm_container_get_pids(uint32_t cont_id, pid_t **pids, int *npids)
+extern int slurm_container_plugin_get_pids(uint64_t cont_id,
+					   pid_t **pids, int *npids)
 {
-	return _slurm_cgroup_get_pids(cont_id,pids,npids);
+	return _slurm_cgroup_get_pids(cont_id, pids, npids);
 }
diff --git a/src/plugins/proctrack/cgroup/xcgroup.c b/src/plugins/proctrack/cgroup/xcgroup.c
deleted file mode 100644
index d238cf7d3..000000000
--- a/src/plugins/proctrack/cgroup/xcgroup.c
+++ /dev/null
@@ -1,985 +0,0 @@
-/*****************************************************************************\
- *  xcgroup.c - cgroup related primitives
- *****************************************************************************
- *  Copyright (C) 2009 CEA/DAM/DIF
- *  Written by Matthieu Hautreux <matthieu.hautreux@cea.fr>
- *
- *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
- *  Please also read the included file: DISCLAIMER.
- *
- *  SLURM is free software; you can redistribute it and/or modify it under
- *  the terms of the GNU General Public License as published by the Free
- *  Software Foundation; either version 2 of the License, or (at your option)
- *  any later version.
- *
- *  In addition, as a special exception, the copyright holders give permission
- *  to link the code of portions of this program with the OpenSSL library under
- *  certain conditions as described in each individual source file, and
- *  distribute linked combinations including the two. You must obey the GNU
- *  General Public License in all respects for all of the code used other than
- *  OpenSSL. If you modify file(s) with this exception, you may extend this
- *  exception to your version of the file(s), but you are not obligated to do
- *  so. If you do not wish to do so, delete this exception statement from your
- *  version.  If you delete this exception statement from all source files in
- *  the program, then also delete it here.
- *
- *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
- *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
- *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
- *  details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with SLURM; if not, write to the Free Software Foundation, Inc.,
- *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
-\*****************************************************************************/
-
-#if HAVE_CONFIG_H
-#   include "config.h"
-#endif
-
-#if HAVE_STDINT_H
-#  include <stdint.h>
-#endif
-#if HAVE_INTTYPES_H
-#  include <inttypes.h>
-#endif
-
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <fcntl.h>
-#include <stdlib.h>
-#include <string.h>
-#include <strings.h>
-
-#include <slurm/slurm.h>
-#include <slurm/slurm_errno.h>
-#include "src/common/log.h"
-#include "src/common/xmalloc.h"
-#include "src/common/xstring.h"
-#include "src/slurmd/slurmstepd/slurmstepd_job.h"
-
-#include "xcgroup.h"
-
-#ifndef PATH_MAX
-#define PATH_MAX 256
-#endif
-
-/* internal functions */
-size_t _file_getsize(int fd);
-int _file_read_uint32s(char* file_path,uint32_t** pvalues,int* pnb);
-int _file_write_uint32s(char* file_path,uint32_t* values,int nb);
-int _file_read_uint64s(char* file_path,uint64_t** pvalues,int* pnb);
-int _file_write_uint64s(char* file_path,uint64_t* values,int nb);
-int _file_read_content(char* file_path,char** content,size_t *csize);
-int _file_write_content(char* file_path,char* content,size_t csize);
-int _xcgroup_cpuset_init(char* file_path);
-
-/* xcgroup primitives */
-int xcgroup_is_available(void)
-{
-	char* value;
-	size_t s;
-
-	if ( xcgroup_get_param(CGROUP_BASEDIR,"release_agent",
-			       &value,&s) != XCGROUP_SUCCESS )
-		return 0;
-	else {
-		xfree(value);
-		return 1;
-	}
-
-}
-
-int xcgroup_set_release_agent(char* agent)
-{
-	int fstatus;
-	char* rag;
-	char* value;
-	size_t s;
-
-	if ( agent == NULL )
-		return XCGROUP_ERROR;
-
-	rag = (char*) xstrdup("release_agent=");
-	fstatus = xcgroup_get_param(CGROUP_BASEDIR,"release_agent",
-				    &value,&s);
-	if (  fstatus == XCGROUP_SUCCESS ) {
-		if ( strcmp(value,agent) != 0 ) {
-			xstrcat(rag,agent);
-			fstatus = xcgroup_set_params(CGROUP_BASEDIR,rag);
-		}
-		xfree(value);
-	}
-
-	xfree(rag);
-	return fstatus;
-}
-
-int xcgroup_mount(char* mount_opts)
-{
-	char* mount_cmd_fmt;
-	char mount_cmd[1024];
-
-	mode_t cmask;
-	mode_t omask;
-
-	cmask = S_IWGRP | S_IWOTH;
-	omask = umask(cmask);
-
-	if ( mkdir(CGROUP_BASEDIR,0755) && errno != EEXIST) {
-		debug("unable to create cgroup directory '%s'"
-		      " : %m",CGROUP_BASEDIR);
-		umask(omask);
-		return XCGROUP_ERROR;
-	}
-	umask(omask);
-
-	if ( mount_opts == NULL ||
-	     strlen(mount_opts) == 0 ) {
-		mount_cmd_fmt="/bin/mount -t cgroup none " CGROUP_BASEDIR;
-	}
-	else
-		mount_cmd_fmt="/bin/mount -o %s -t cgroup none " CGROUP_BASEDIR;
-
-	if ( snprintf(mount_cmd,1024,mount_cmd_fmt,
-		      mount_opts) >= 1024 ) {
-		debug2("unable to build mount cmd line");
-		return XCGROUP_ERROR;
-	}
-	else
-		debug3("cgroup mount cmd line is '%s'",mount_cmd);
-
-	if ( system(mount_cmd) )
-		return XCGROUP_ERROR;
-	else
-		return XCGROUP_SUCCESS;
-
-}
-
-int xcgroup_create(char* file_path,xcgroup_opts_t* opts)
-{
-	int fstatus;
-	uid_t uid;
-	gid_t gid;
-	int create_only;
-	int notify;
-
-	mode_t cmask;
-	mode_t omask;
-
-	uid=opts->uid;
-	gid=opts->gid;
-	create_only=opts->create_only;
-	notify=opts->notify;
-
-	fstatus = XCGROUP_ERROR;
-
-	/* save current mask and apply working one */
-	cmask = S_IWGRP | S_IWOTH;
-	omask = umask(cmask);
-
-	/* build cgroup */
-	if ( mkdir(file_path,0755) ) {
-		if ( create_only || errno != EEXIST ) {
-			debug2("unable to create cgroup '%s' : %m",
-			       file_path);
-			umask(omask);
-			return fstatus;
-		}
-	}
-	umask(omask);
-
-	/* initialize cpuset support (if enabled in cgroup ) */
-	if ( _xcgroup_cpuset_init(file_path) != XCGROUP_SUCCESS ) {
-		debug2("unable to initialize cpuset cgroup component");
-		rmdir(file_path);
-		return fstatus;
-	}
-
-	/* change cgroup ownership as requested */
-	if ( chown(file_path,uid,gid) ) {
-		debug2("unable to chown %d:%d cgroup '%s' : %m",
-		       uid,gid,file_path);
-		return fstatus;
-	}
-
-	/* following operations failure might not result in a general
-	 * failure so set output status to success */
-	fstatus = XCGROUP_SUCCESS;
-
-	/* set notify on release flag */
-	if ( notify == 1 )
-		xcgroup_set_params(file_path,"notify_on_release=1");
-	else if ( notify == 0 )
-		xcgroup_set_params(file_path,"notify_on_release=0");
-
-	return fstatus;
-}
-
-int xcgroup_destroy(char* file_path)
-{
-
-	/*
-	 * nothing to be done here, notify_on_release was set
-	 * so hope that all will works perfectly...
-	 *
-	 * with memory cgroup some pages can still be accounted
-	 * to the cgroup but no more processes are present, this results
-	 * in a directory not being removed until the pages are accounted
-	 * to an other cgroup...
-	 * echoing 1 into memory.force_empty can purge this memory but
-	 * as slurmstepd is still present in the cgroup and use pages,
-	 * this is not sufficient as it could leave some other pages too..
-	 * we should have a way to ask the cgroup to force_empty
-	 * on last process exit but I did not find any for now
-	 */
-	//xcgroup_set_params(file_path,"memory.force_empty=1");
-
-	return XCGROUP_SUCCESS;
-}
-
-int xcgroup_add_pids(char* cpath,pid_t* pids,int npids)
-{
-	int fstatus;
-	char file_path[PATH_MAX];
-
-	fstatus = XCGROUP_ERROR;
-
-	if ( snprintf(file_path,PATH_MAX,"%s/tasks",
-		      cpath) >= PATH_MAX ) {
-		debug2("unable to add pids to '%s' : %m",cpath);
-		return fstatus;
-	}
-
-	fstatus = _file_write_uint32s(file_path,(uint32_t*)pids,npids);
-	if ( fstatus != XCGROUP_SUCCESS )
-		debug2("unable to add pids to '%s'",cpath);
-	return fstatus;
-}
-
-int
-xcgroup_get_pids(char* cpath, pid_t **pids, int *npids)
-{
-	int fstatus;
-	char file_path[PATH_MAX];
-
-	fstatus = XCGROUP_ERROR;
-
-	if ( pids == NULL || npids == NULL )
-		return SLURM_ERROR;
-
-	if ( snprintf(file_path,PATH_MAX,"%s/tasks",
-		      cpath) >= PATH_MAX ) {
-		debug2("unable to get pids of '%s' : %m",cpath);
-		return fstatus;
-	}
-
-	fstatus = _file_read_uint32s(file_path,(uint32_t**)pids,npids);
-	if ( fstatus != XCGROUP_SUCCESS )
-		debug2("unable to get pids of '%s'",cpath);
-	return fstatus;
-}
-
-int
-xcgroup_find_by_pid(char* cpath, pid_t pid)
-{
-	int fstatus = SLURM_ERROR;
-	char file_path[PATH_MAX];
-	char* buf;
-	size_t fsize;
-	char* p;
-	char* e;
-	char* entry;
-
-	/* build pid cgroup meta filepath */
-	if ( snprintf(file_path,PATH_MAX,"/proc/%u/cgroup",
-		      pid) >= PATH_MAX ) {
-		debug2("unable to build cgroup meta filepath for pid=%u : %m",
-		       pid);
-		return XCGROUP_ERROR;
-	}
-
-	/* read file content */
-	fstatus = _file_read_content(file_path,&buf,&fsize);
-	if ( fstatus == XCGROUP_SUCCESS ) {
-		fstatus = XCGROUP_ERROR;
-		p = buf;
-		if ( index(p,'\n') != NULL ) {
-			e = index(p,'\n');
-			*e='\0';
-			entry = rindex(p,':');
-			if ( entry != NULL ) {
-				entry++;
-				snprintf(cpath,PATH_MAX,"%s%s",
-					 CGROUP_BASEDIR,entry);
-				fstatus = XCGROUP_SUCCESS;
-			}
-		}
-		xfree(buf);
-	}
-
-	return fstatus;
-}
-
-int xcgroup_set_memlimit(char* cpath,uint32_t memlimit)
-{
-	int fstatus;
-	char file_path[PATH_MAX];
-	uint64_t ml;
-
-	fstatus = XCGROUP_ERROR;
-
-	if ( snprintf(file_path,PATH_MAX,"%s/memory.limit_in_bytes",
-		      cpath) >= PATH_MAX ) {
-		debug2("unable to set memory limit of '%s' : %m",cpath);
-		return fstatus;
-	}
-
-	ml = (uint64_t) memlimit * 1024 * 1024;
-	fstatus = _file_write_uint64s(file_path,&ml,1);
-	if ( fstatus != XCGROUP_SUCCESS )
-		debug2("unable to set memory limit of '%s' : %m",cpath);
-	else
-		debug3("memory limit set to %uMB for '%s'",memlimit,cpath);
-
-	return fstatus;
-}
-
-int xcgroup_get_memlimit(char* cpath,uint32_t* memlimit)
-{
-	int fstatus;
-	char file_path[PATH_MAX];
-	uint64_t* ml;
-	int i;
-
-	fstatus = XCGROUP_ERROR;
-
-	if ( snprintf(file_path,PATH_MAX,"%s/memory.limit_in_bytes",
-		      cpath) >= PATH_MAX ) {
-		debug2("unable to get memory limit of '%s' : %m",cpath);
-		return fstatus;
-	}
-
-	fstatus = _file_read_uint64s(file_path,&ml,&i);
-	if ( fstatus != XCGROUP_SUCCESS ||
-	     i == 0 )
-		debug2("unable to get memory limit of '%s' : %m",cpath);
-	else {
-		if ( *ml == 0 ) {
-			*memlimit = 0;
-		}
-		else {
-			/* convert into MB */
-			*ml /= 1024 * 1024;
-			/* memlimit is stored into a uint32_t */
-			/* so cap the memlimit value to the max value */
-			/* of an uint32_t */
-			*memlimit = -1 ;
-			if ( *ml < *memlimit ) {
-				*memlimit = *ml;
-			}
-		}
-		debug3("memory limit of '%s' is %uMB",cpath,*memlimit);
-		xfree(ml);
-	}
-
-	return fstatus;
-}
-
-int xcgroup_set_memswlimit(char* cpath,uint32_t memlimit)
-{
-	int fstatus;
-	char file_path[PATH_MAX];
-	uint64_t ml;
-
-	fstatus = XCGROUP_ERROR;
-
-	if ( snprintf(file_path,PATH_MAX,"%s/memory.memsw.limit_in_bytes",
-		      cpath) >= PATH_MAX ) {
-		debug2("unable to set memsw limit of '%s' : %m",cpath);
-		return fstatus;
-	}
-
-	ml = (uint64_t) memlimit * 1024 * 1024;
-	fstatus = _file_write_uint64s(file_path,&ml,1);
-	if ( fstatus != XCGROUP_SUCCESS )
-		debug2("unable to set memsw limit of '%s' : %m",cpath);
-	else
-		debug3("mem+swap limit set to %uMB for '%s'",memlimit,cpath);
-
-	return fstatus;
-}
-
-int xcgroup_get_memswlimit(char* cpath,uint32_t* memlimit)
-{
-	int fstatus;
-	char file_path[PATH_MAX];
-	uint64_t *ml;
-	int i;
-
-	fstatus = XCGROUP_ERROR;
-
-	if ( snprintf(file_path,PATH_MAX,"%s/memory.memsw.limit_in_bytes",
-		      cpath) >= PATH_MAX ) {
-		debug2("unable to get memsw limit of '%s' : %m",cpath);
-		return fstatus;
-	}
-
-	fstatus = _file_read_uint64s(file_path,&ml,&i);
-	if ( fstatus != XCGROUP_SUCCESS ||
-	     i ==0 )
-		debug2("unable to get memsw limit of '%s' : %m",cpath);
-	else {
-		if ( *ml == 0 ) {
-			*memlimit = 0;
-		}
-		else {
-			/* convert into MB */
-			*ml /= 1024 * 1024;
-			/* memlimit is stored into a uint32_t */
-			/* so cap the memlimit value to the max value */
-			/* of an uint32_t */
-			*memlimit = -1 ;
-			if ( *ml < *memlimit ) {
-				*memlimit = *ml;
-			}
-		}
-		debug3("mem+swap limit of '%s' is %uMB",cpath,*memlimit);
-		xfree(ml);
-	}
-
-	return fstatus;
-}
-
-int xcgroup_set_mem_use_hierarchy(char* cpath,int flag)
-{
-	if ( flag )
-		return xcgroup_set_params(cpath,"memory.use_hierarchy=1");
-	else
-		return xcgroup_set_params(cpath,"memory.use_hierarchy=0");
-}
-
-int xcgroup_set_cpuset_cpus(char* cpath,char* range)
-{
-	int fstatus;
-	char file_path[PATH_MAX];
-
-	fstatus = XCGROUP_ERROR;
-
-	if ( snprintf(file_path,PATH_MAX,"%s/cpuset.cpus",
-		      cpath) >= PATH_MAX ) {
-		debug2("unable to set cpuset.cpus to '%s' for '%s' : %m",
-		       range,cpath);
-		return fstatus;
-	}
-
-	fstatus = _file_write_content(file_path,range,strlen(range));
-	if ( fstatus != XCGROUP_SUCCESS )
-		debug2("unable to set cpuset.cpus to '%s' for '%s' : %m",
-		       range,cpath);
-	else
-		debug3("cpuset.cpus set to '%s' for '%s'",range,cpath);
-
-	return fstatus;
-}
-
-int xcgroup_set_params(char* cpath,char* parameters)
-{
-	int fstatus;
-	char file_path[PATH_MAX];
-	char* params;
-	char* value;
-	char* p;
-	char* next;
-
-	fstatus = XCGROUP_ERROR;
-
-	params = (char*) xstrdup(parameters);
-
-	p = params;
-	while ( p != NULL && *p != '\0' ) {
-		next = index(p,' ');
-		if ( next ) {
-			*next='\0';
-			next++;
-			while ( *next == ' ' )
-				next++;
-		}
-		value = index(p,'=');
-		if ( value != NULL ) {
-			*value='\0';
-			value++;
-			if ( snprintf(file_path,PATH_MAX,"%s/%s",cpath,p)
-			     >= PATH_MAX ) {
-				debug2("unable to build filepath for '%s' and"
-				       " parameter '%s' : %m",cpath,p);
-				goto next_loop;
-			}
-			fstatus = _file_write_content(file_path,value,
-						      strlen(value));
-			if ( fstatus != XCGROUP_SUCCESS )
-				debug2("unable to set parameter '%s' to "
-				       "'%s' for '%s'",p,value,cpath);
-			else
-				debug3("parameter '%s' set to '%s' for '%s'",
-				       p,value,cpath);
-		}
-		else
-			debug2("bad parameters format for entry '%s'",p);
-	next_loop:
-		p = next;
-	}
-
-	xfree(params);
-
-	return fstatus;
-}
-
-int xcgroup_get_param(char* cpath,char* parameter,char **content,size_t *csize)
-{
-	int fstatus;
-	char file_path[PATH_MAX];
-
-	fstatus = XCGROUP_ERROR;
-
-	if ( snprintf(file_path,PATH_MAX,"%s/%s",cpath,parameter)
-	     >= PATH_MAX ) {
-		debug2("unable to build filepath for '%s' and"
-		       " parameter '%s' : %m",cpath,parameter);
-	}
-	else {
-		fstatus = _file_read_content(file_path,content,csize);
-		if ( fstatus != XCGROUP_SUCCESS )
-			debug2("unable to get parameter '%s'", parameter);
-	}
-
-	return fstatus;
-}
-
-
-size_t _file_getsize(int fd)
-{
-	int rc;
-	size_t fsize;
-	off_t offset;
-	char c;
-
-	/* store current position and rewind */
-	offset = lseek(fd,0,SEEK_CUR);
-	if ( offset < 0 )
-		return -1;
-	lseek(fd,0,SEEK_SET);
-
-	/* get file size */
-	fsize=0;
-	do {
-		rc = read(fd,(void*)&c,1);
-		if ( rc > 0 )
-			fsize++;
-	}
-	while ( (rc < 0 && errno == EINTR) || rc > 0 );
-
-	/* restore position */
-	lseek(fd,offset,SEEK_SET);
-
-	if ( rc < 0 )
-		return -1;
-	else
-		return fsize;
-}
-
-int
-_file_write_uint64s(char* file_path,uint64_t* values,int nb)
-{
-	int fstatus;
-	int rc;
-	int fd;
-	char tstr[256];
-	uint64_t value;
-	int i;
-
-	/* open file for writing */
-	fd = open(file_path, O_WRONLY, 0700);
-	if (fd < 0) {
-		debug2("unable to open '%s' for writing : %m",
-		       file_path);
-		return XCGROUP_ERROR;
-	}
-
-	/* add one value per line */
-	fstatus = XCGROUP_SUCCESS;
-	for ( i=0 ; i < nb ; i++ ) {
-
-		value = values[i];
-
-		rc = snprintf(tstr, sizeof(tstr), "%"PRIu64"", value);
-		if ( rc < 0 ) {
-			debug2("unable to build %"PRIu64" string value, "
-			       "skipping", value);
-			fstatus = XCGROUP_ERROR;
-			continue;
-		}
-
-		do {
-			rc = write(fd, tstr, strlen(tstr)+1);
-		}
-		while ( rc != 0 && errno == EINTR);
-		if (rc < 1) {
-			debug2("unable to add value '%s' to file '%s' : %m",
-			       tstr,file_path);
-			fstatus = XCGROUP_ERROR;
-		}
-
-	}
-
-	/* close file */
-	close(fd);
-
-	return fstatus;
-}
-
-
-int
-_file_read_uint64s(char* file_path,uint64_t** pvalues,int* pnb)
-{
-	int rc;
-	int fd;
-
-	size_t fsize;
-	char* buf;
-	char* p;
-
-	uint64_t* pa=NULL;
-	int i;
-
-	/* check input pointers */
-	if ( pvalues == NULL || pnb == NULL )
-		return XCGROUP_ERROR;
-
-	/* open file for reading */
-	fd = open(file_path, O_RDONLY, 0700);
-	if (fd < 0) {
-		debug2("unable to open '%s' for reading : %m",
-		       file_path);
-		return XCGROUP_ERROR;
-	}
-
-	/* get file size */
-	fsize=_file_getsize(fd);
-	if ( fsize == -1 ) {
-		close(fd);
-		return XCGROUP_ERROR;
-	}
-
-	/* read file contents */
-	buf = (char*) xmalloc((fsize+1)*sizeof(char));
-	do {
-		rc = read(fd,buf,fsize);
-	}
-	while ( rc < 0 && errno == EINTR );
-	close(fd);
-	buf[fsize]='\0';
-
-	/* count values (splitted by \n) */
-	i=0;
-	if ( rc > 0 ) {
-		p = buf;
-		while ( index(p,'\n') != NULL ) {
-			i++;
-			p = index(p,'\n') + 1;
-		}
-	}
-
-	/* build uint32_t list */
-	if ( i > 0 ) {
-		pa = (uint64_t*) xmalloc(sizeof(uint64_t) * i);
-		p = buf;
-		i = 0;
-		while ( index(p,'\n') != NULL ) {
-			long long unsigned int ll_tmp;
-			sscanf(p,"%llu", &ll_tmp);
-			pa[i++] = ll_tmp;
-			p = index(p,'\n') + 1;
-		}
-	}
-
-	/* free buffer */
-	xfree(buf);
-
-	/* set output values */
-	*pvalues = pa;
-	*pnb = i;
-
-	return XCGROUP_SUCCESS;
-}
-
-int
-_file_write_uint32s(char* file_path,uint32_t* values,int nb)
-{
-	int fstatus;
-	int rc;
-	int fd;
-	char tstr[256];
-	uint32_t value;
-	int i;
-
-	/* open file for writing */
-	fd = open(file_path, O_WRONLY, 0700);
-	if (fd < 0) {
-		debug2("unable to open '%s' for writing : %m",
-		       file_path);
-		return XCGROUP_ERROR;
-	}
-
-	/* add one value per line */
-	fstatus = XCGROUP_SUCCESS;
-	for ( i=0 ; i < nb ; i++ ) {
-
-		value = values[i];
-
-		rc = snprintf(tstr, sizeof(tstr), "%u",value);
-		if ( rc < 0 ) {
-			debug2("unable to build %u string value, skipping",
-			       value);
-			fstatus = XCGROUP_ERROR;
-			continue;
-		}
-
-		do {
-			rc = write(fd, tstr, strlen(tstr)+1);
-		}
-		while ( rc != 0 && errno == EINTR);
-		if (rc < 1) {
-			debug2("unable to add value '%s' to file '%s' : %m",
-			       tstr,file_path);
-			fstatus = XCGROUP_ERROR;
-		}
-
-	}
-
-	/* close file */
-	close(fd);
-
-	return fstatus;
-}
-
-
-int
-_file_read_uint32s(char* file_path,uint32_t** pvalues,int* pnb)
-{
-	int rc;
-	int fd;
-
-	size_t fsize;
-	char* buf;
-	char* p;
-
-	uint32_t* pa=NULL;
-	int i;
-
-	/* check input pointers */
-	if ( pvalues == NULL || pnb == NULL )
-		return XCGROUP_ERROR;
-
-	/* open file for reading */
-	fd = open(file_path, O_RDONLY, 0700);
-	if (fd < 0) {
-		debug2("unable to open '%s' for reading : %m",
-		       file_path);
-		return XCGROUP_ERROR;
-	}
-
-	/* get file size */
-	fsize=_file_getsize(fd);
-	if ( fsize == -1 ) {
-		close(fd);
-		return XCGROUP_ERROR;
-	}
-
-	/* read file contents */
-	buf = (char*) xmalloc((fsize+1)*sizeof(char));
-	do {
-		rc = read(fd,buf,fsize);
-	}
-	while ( rc < 0 && errno == EINTR );
-	close(fd);
-	buf[fsize]='\0';
-
-	/* count values (splitted by \n) */
-	i=0;
-	if ( rc > 0 ) {
-		p = buf;
-		while ( index(p,'\n') != NULL ) {
-			i++;
-			p = index(p,'\n') + 1;
-		}
-	}
-
-	/* build uint32_t list */
-	if ( i > 0 ) {
-		pa = (uint32_t*) xmalloc(sizeof(uint32_t) * i);
-		p = buf;
-		i = 0;
-		while ( index(p,'\n') != NULL ) {
-			sscanf(p,"%u",pa+i);
-			p = index(p,'\n') + 1;
-			i++;
-		}
-	}
-
-	/* free buffer */
-	xfree(buf);
-
-	/* set output values */
-	*pvalues = pa;
-	*pnb = i;
-
-	return XCGROUP_SUCCESS;
-}
-
-int
-_file_write_content(char* file_path, char* content,size_t csize)
-{
-	int fstatus;
-	int rc;
-	int fd;
-
-	/* open file for writing */
-	fd = open(file_path, O_WRONLY, 0700);
-	if (fd < 0) {
-		debug2("unable to open '%s' for writing : %m",
-		       file_path);
-		return XCGROUP_ERROR;
-	}
-
-	/* write content */
-	do {
-		rc = write(fd,content,csize);
-	}
-	while ( rc != 0 && errno == EINTR);
-
-	/* check read size */
-	if (rc < csize) {
-		debug2("unable to write %zd bytes to file '%s' : %m",
-		       csize, file_path);
-		fstatus = XCGROUP_ERROR;
-	}
-	else
-		fstatus = XCGROUP_SUCCESS;
-
-	/* close file */
-	close(fd);
-
-	return fstatus;
-}
-
-int
-_file_read_content(char* file_path,char** content,size_t *csize)
-{
-	int fstatus;
-	int rc;
-	int fd;
-
-	size_t fsize;
-	char* buf;
-
-	fstatus = XCGROUP_ERROR;
-
-	/* check input pointers */
-	if ( content == NULL || csize == NULL )
-		return fstatus;
-
-	/* open file for reading */
-	fd = open(file_path, O_RDONLY, 0700);
-	if (fd < 0) {
-		debug2("unable to open '%s' for reading : %m",
-		       file_path);
-		return fstatus;
-	}
-
-	/* get file size */
-	fsize=_file_getsize(fd);
-	if ( fsize == -1 ) {
-		close(fd);
-		return fstatus;
-	}
-
-	/* read file contents */
-	buf = (char*) xmalloc((fsize+1)*sizeof(char));
-	buf[fsize]='\0';
-	do {
-		rc = read(fd,buf,fsize);
-	}
-	while ( rc < 0 && errno == EINTR );
-
-	/* set output values */
-	if ( rc >= 0 ) {
-		*content = buf;
-		*csize = rc;
-		fstatus = XCGROUP_SUCCESS;
-	}
-
-	/* close file */
-	close(fd);
-
-	return fstatus;
-}
-
-
-int _xcgroup_cpuset_init(char* file_path)
-{
-	int fstatus;
-	char path[PATH_MAX];
-
-	char* cpuset_metafiles[] = {
-		"cpuset.cpus",
-		"cpuset.mems"
-	};
-	char* cpuset_meta;
-	char* cpuset_conf;
-	size_t csize;
-
-	int i;
-
-	fstatus = XCGROUP_ERROR;
-
-	/* when cgroups are configured with cpuset, at least
-	 * cpuset.cpus and cpuset.mems must be set or the cgroup
-	 * will not be available at all.
-	 * we duplicate the ancestor configuration in the init step */
-	for ( i = 0 ; i < 2 ; i++ ) {
-
-		cpuset_meta = cpuset_metafiles[i];
-
-		/* try to read ancestor configuration */
-		if ( snprintf(path,PATH_MAX,"%s/../%s",
-			      file_path,cpuset_meta) >= PATH_MAX ) {
-			debug2("unable to get ancestor %s for cgroup '%s' : %m",
-			       cpuset_meta,file_path);
-			return fstatus;
-		}
-		if ( _file_read_content(path,&cpuset_conf,&csize) !=
-		     XCGROUP_SUCCESS ) {
-			debug3("assuming no cpuset support for '%s'",path);
-			return XCGROUP_SUCCESS;
-		}
-
-		/* duplicate ancestor conf in current cgroup */
-		if ( snprintf(path,PATH_MAX,"%s/%s",
-			      file_path,cpuset_meta) >= PATH_MAX ) {
-			debug2("unable to set %s for cgroup '%s' : %m",
-			       cpuset_meta,file_path);
-			return fstatus;
-		}
-		if ( _file_write_content(path,cpuset_conf,csize) !=
-		     XCGROUP_SUCCESS ) {
-			debug2("unable to write %s configuration (%s) of '%s'",
-			       cpuset_meta,cpuset_conf,file_path);
-			return fstatus;
-		}
-
-	}
-
-	return XCGROUP_SUCCESS;
-}
diff --git a/src/plugins/proctrack/cgroup/xcgroup.h b/src/plugins/proctrack/cgroup/xcgroup.h
deleted file mode 100644
index 7f886b7b2..000000000
--- a/src/plugins/proctrack/cgroup/xcgroup.h
+++ /dev/null
@@ -1,237 +0,0 @@
-/*****************************************************************************\
- *  cgroup.h - cgroup related primitives headers
- *****************************************************************************
- *  Copyright (C) 2009 CEA/DAM/DIF
- *  Written by Matthieu Hautreux <matthieu.hautreux@cea.fr>
- *  
- *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
- *  Please also read the included file: DISCLAIMER.
- *  
- *  SLURM is free software; you can redistribute it and/or modify it under
- *  the terms of the GNU General Public License as published by the Free
- *  Software Foundation; either version 2 of the License, or (at your option)
- *  any later version.
- *
- *  In addition, as a special exception, the copyright holders give permission 
- *  to link the code of portions of this program with the OpenSSL library under 
- *  certain conditions as described in each individual source file, and 
- *  distribute linked combinations including the two. You must obey the GNU 
- *  General Public License in all respects for all of the code used other than 
- *  OpenSSL. If you modify file(s) with this exception, you may extend this 
- *  exception to your version of the file(s), but you are not obligated to do 
- *  so. If you do not wish to do so, delete this exception statement from your
- *  version.  If you delete this exception statement from all source files in 
- *  the program, then also delete it here.
- *  
- *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
- *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
- *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
- *  details.
- *  
- *  You should have received a copy of the GNU General Public License along
- *  with SLURM; if not, write to the Free Software Foundation, Inc.,
- *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
-\*****************************************************************************/
-
-#if HAVE_CONFIG_H
-#   include "config.h"
-#endif
-
-#ifndef _XCGROUP_H_
-#define _XCGROUP_H_
-
-#include <sys/types.h>
-
-#define XCGROUP_ERROR    1
-#define XCGROUP_SUCCESS  0
-
-#ifndef CGROUP_BASEDIR
-#define CGROUP_BASEDIR "/dev/cgroup"
-#endif
-
-typedef struct xcgroup_opts {
-
-	uid_t uid;        /* uid of the owner */
-	gid_t gid;        /* gid of the owner */
-
-	int create_only;  /* do nothing if the cgroup already exists */
-	int notify;       /* notify_on_release flag value (0/1) */
-
-} xcgroup_opts_t;
-
-/*
- * test if cgroup system is currently available (mounted)
- *
- * returned values:
- *  - 0 if not available
- *  - 1 if available
- */
-int xcgroup_is_available();
-
-/*
- * mount the cgroup system using given options
- *
- * returned values:
- *  - XCGROUP_ERROR
- *  - XCGROUP_SUCCESS
- */
-int xcgroup_mount(char* mount_opts);
-
-/*
- * set cgroup system release agent
- *
- * returned values:
- *  - XCGROUP_ERROR
- *  - XCGROUP_SUCCESS
- */
-int xcgroup_set_release_agent(char* agent);
-
-/*
- * create a cgroup according to input properties
- *
- * returned values:
- *  - XCGROUP_ERROR
- *  - XCGROUP_SUCCESS
- */
-int xcgroup_create(char* cpath, xcgroup_opts_t* opts);
-
-/*
- * destroy a cgroup (do nothing for now)
- *
- * returned values:
- *  - XCGROUP_ERROR
- *  - XCGROUP_SUCCESS
- */
-int xcgroup_destroy(char* cpath);
-
-/*
- * add a list of pids to a cgroup
- *
- * returned values:
- *  - XCGROUP_ERROR
- *  - XCGROUP_SUCCESS
- */
-int xcgroup_add_pids(char* cpath,pid_t* pids,int npids);
-
-/*
- * extract the pids list of a cgroup
- *
- * pids array must be freed using xfree(...)
- *
- * returned values:
- *  - XCGROUP_ERROR
- *  - XCGROUP_SUCCESS
- */
-int xcgroup_get_pids(char* cpath, pid_t **pids, int *npids);
-
-/*
- * return the cpath containing the input pid
- *
- * returned values:
- *  - XCGROUP_ERROR
- *  - XCGROUP_SUCCESS
- */
-int xcgroup_find_by_pid(char* cpath, pid_t pid);
-
-/*
- * set cgroup memory limit to the value ot memlimit
- *
- * memlimit must be expressed in MB
- *
- * returned values:
- *  - XCGROUP_ERROR
- *  - XCGROUP_SUCCESS
- */
-int xcgroup_set_memlimit(char* cpath,uint32_t memlimit);
-
-/*
- * get cgroup memory limit
- *
- * memlimit will be expressed in MB
- *
- * returned values:
- *  - XCGROUP_ERROR
- *  - XCGROUP_SUCCESS
- */
-int xcgroup_get_memlimit(char* cpath,uint32_t* memlimit);
-
-/*
- * set cgroup mem+swap limit to the value ot memlimit
- *
- * memlimit must be expressed in MB
- *
- * returned values:
- *  - XCGROUP_ERROR
- *  - XCGROUP_SUCCESS
- */
-int xcgroup_set_memswlimit(char* cpath,uint32_t memlimit);
-
-/*
- * get cgroup mem+swap limit
- *
- * memlimit will be expressed in MB
- *
- * returned values:
- *  - XCGROUP_ERROR
- *  - XCGROUP_SUCCESS
- */
-int xcgroup_get_memswlimit(char* cpath,uint32_t* memlimit);
-
-/*
- * toggle memory use hierarchy behavior using flag value
- *
- * flag values are 0/1 to disable/enable the feature
- *
- * returned values:
- *  - XCGROUP_ERROR
- *  - XCGROUP_SUCCESS
- */
-int xcgroup_set_mem_use_hierarchy(char* cpath,int flag);
-
-/*
- * set cgroup cpuset cpus configuration
- *
- * range is the ranges of cores to constrain the cgroup to
- * i.e. 0-1,4-5
- *
- * returned values:
- *  - XCGROUP_ERROR
- *  - XCGROUP_SUCCESS
- */
-int xcgroup_set_cpuset_cpus(char* cpath,char* range);
-
-/* 
- * set cgroup parameters using string of the form :
- * parameteres="param=value[ param=value]*"
- *
- * param must correspond to a file of the cgroup that
- * will be written with the value content
- *
- * i.e. xcgroup_set_params("/dev/cgroup/slurm",
- *                         "memory.swappiness=10");
- *
- * returned values:
- *  - XCGROUP_ERROR
- *  - XCGROUP_SUCCESS
- */
-int xcgroup_set_params(char* cpath,char* parameters);
-
-/* 
- * get a cgroup parameter
- *
- * param must correspond to a file of the cgroup that
- * will be read for its content
- *
- * i.e. xcgroup_get_param("/dev/cgroup/slurm",
- *                         "memory.swappiness",&value,&size);
- *
- * on success, content must be free using xfree
- *
- * returned values:
- *  - XCGROUP_ERROR
- *  - XCGROUP_SUCCESS
- */
-int xcgroup_get_param(char* cpath,char* param,char **content,size_t *csize);
-
-#endif
diff --git a/src/plugins/proctrack/cgroup/xcpuinfo.c b/src/plugins/proctrack/cgroup/xcpuinfo.c
deleted file mode 100644
index 73fc02cf8..000000000
--- a/src/plugins/proctrack/cgroup/xcpuinfo.c
+++ /dev/null
@@ -1,312 +0,0 @@
-/*****************************************************************************\
- *  xcpuinfo.c - cpuinfo related primitives
- *****************************************************************************
- *  Copyright (C) 2009 CEA/DAM/DIF
- *  Written by Matthieu Hautreux <matthieu.hautreux@cea.fr>
- *  
- *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
- *  Please also read the included file: DISCLAIMER.
- *  
- *  SLURM is free software; you can redistribute it and/or modify it under
- *  the terms of the GNU General Public License as published by the Free
- *  Software Foundation; either version 2 of the License, or (at your option)
- *  any later version.
- *
- *  In addition, as a special exception, the copyright holders give permission 
- *  to link the code of portions of this program with the OpenSSL library under 
- *  certain conditions as described in each individual source file, and 
- *  distribute linked combinations including the two. You must obey the GNU 
- *  General Public License in all respects for all of the code used other than 
- *  OpenSSL. If you modify file(s) with this exception, you may extend this 
- *  exception to your version of the file(s), but you are not obligated to do 
- *  so. If you do not wish to do so, delete this exception statement from your
- *  version.  If you delete this exception statement from all source files in 
- *  the program, then also delete it here.
- *  
- *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
- *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
- *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
- *  details.
- *  
- *  You should have received a copy of the GNU General Public License along
- *  with SLURM; if not, write to the Free Software Foundation, Inc.,
- *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
-\*****************************************************************************/
-
-#if HAVE_CONFIG_H
-#   include "config.h"
-#endif
-
-#if HAVE_STDINT_H
-#  include <stdint.h>
-#endif
-#if HAVE_INTTYPES_H
-#  include <inttypes.h>
-#endif
-
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <ctype.h>
-#include <fcntl.h>
-#include <stdlib.h>
-#include <string.h>
-#include <strings.h>
-
-#include <slurm/slurm.h>
-#include <slurm/slurm_errno.h>
-#include "src/common/log.h"
-#include "src/common/xmalloc.h"
-#include "src/common/xstring.h"
-#include "src/slurmd/slurmd/get_mach_stat.h"
-
-#include "xcpuinfo.h"
-
-bool     initialized = false;      
-uint16_t procs, sockets, cores, threads;
-uint16_t block_map_size;
-uint16_t *block_map, *block_map_inv;
-
-int _ranges_conv(char* lrange,char** prange,int mode);
-
-/* for testing purpose */
-/* uint16_t block_map_size=8; */
-/* uint16_t block_map[] = { 0, 4, 1, 5, 3, 7, 2, 6}; */
-/* uint16_t block_map_inv[] = { 0, 2, 6, 4, 1, 3, 7, 5}; */
-/* xcpuinfo_abs_to_mac("0,2,4,6",&mach); */
-/* xcpuinfo_mac_to_abs(mach,&abs); */
-
-int
-xcpuinfo_init()
-{
-	if ( initialized )
-		return XCPUINFO_SUCCESS;
-
-	if ( get_procs(&procs) )
-		return XCPUINFO_ERROR;
-	
-	if ( get_cpuinfo(procs,&sockets,&cores,&threads,
-			 &block_map_size,&block_map,&block_map_inv) )
-		return XCPUINFO_ERROR;
-
-	initialized = true ;
-
-	return XCPUINFO_SUCCESS;
-}
-
-int
-xcpuinfo_fini()
-{
-	if ( ! initialized )
-		return XCPUINFO_SUCCESS;
-
-	initialized = false ;
-	procs = sockets = cores = threads = 0;
-	block_map_size = 0;
-	xfree(block_map);
-	xfree(block_map_inv);
-
-	return XCPUINFO_SUCCESS;
-}
-
-int
-xcpuinfo_abs_to_mac(char* lrange,char** prange)
-{
-	return _ranges_conv(lrange,prange,0);
-}
-
-int
-xcpuinfo_mac_to_abs(char* lrange,char** prange)
-{
-	return _ranges_conv(lrange,prange,1);
-}
-
-
-/* 
- * set to 1 each element of already allocated map of size 
- * map_size if they are present in the input range
- */
-int
-_range_to_map(char* range,uint16_t *map,uint16_t map_size)
-{
-	int bad_nb=0;
-	int num_fl=0;
-	int con_fl=0;
-	int last=0;
-
-	char *dup;
-	char *p;
-	char *s=NULL;
-
-	uint16_t start=0,end=0,i;
-
-	/* duplicate input range */
-	dup = xstrdup(range);
-	p = dup;
-	while ( ! last ) {
-		if ( isdigit(*p) ) {
-			if ( !num_fl ) {
-				num_fl++;
-				s=p;
-			}
-		}
-		else if ( *p == '-' ) {
-			if ( s && num_fl ) {
-				*p = '\0';
-				start = (uint16_t) atoi(s);
-				con_fl=1;
-				num_fl=0;
-				s=NULL;
-			}
-		}
-		else if ( *p == ',' || *p == '\0') {
-			if ( *p == '\0' )
-				last = 1;
-			if ( s && num_fl ) {
-				*p = '\0';
-				end = (uint16_t) atoi(s);
-				if ( !con_fl )
-					start = end ;
-				con_fl=2;
-				num_fl=0;
-				s=NULL;
-			}
-		}
-		else {
-			bad_nb++;
-			break;
-		}
-		if ( con_fl == 2 ) {
-			for( i = start ; i <= end && i < map_size ; i++) {
-				map[i]=1;
-			}
-			con_fl=0;
-		}
-		p++;
-	}
-
-	xfree(dup);
-
-	if ( bad_nb > 0 ) {
-		/* bad format for input range */
-		return XCPUINFO_ERROR;
-	}
-
-	return XCPUINFO_SUCCESS;
-}
-
-
-/*
- * allocate and build a range of ids using an input map
- * having printable element set to 1
- */
-int
-_map_to_range(uint16_t *map,uint16_t map_size,char** prange)
-{
-	size_t len;
-	int num_fl=0;
-	int con_fl=0;
-
-	char id[12];
-	char *str;
-
-	uint16_t start=0,end=0,i;
-
-	str = xstrdup("");
-	for ( i = 0 ; i < map_size ; i++ ) {
-
-		if ( map[i] ) {
-			num_fl=1;
-			end=i;
-			if ( !con_fl ) {
-				start=end;
-				con_fl=1;
-			}
-		}
-		else if ( num_fl ) {
-			if ( start < end ) {
-				sprintf(id,"%u-%u,",start,end);
-				xstrcat(str,id);
-			}
-			else {
-				sprintf(id,"%u,",start);
-				xstrcat(str,id);
-			}
-			con_fl = num_fl = 0;
-		}
-	}
-	if ( num_fl ) {
-		if ( start < end ) {
-			sprintf(id,"%u-%u,",start,end);
-			xstrcat(str,id);
-		}
-		else {
-			sprintf(id,"%u,",start);
-			xstrcat(str,id);
-		}
-	}
-
-	len = strlen(str);
-	if ( len > 0 ) {
-		str[len-1]='\0';
-	}
-
-	if ( prange != NULL )
-		*prange = str;
-	else
-		xfree(str);
-
-	return XCPUINFO_SUCCESS;
-}
-
-/*
- * convert a range into an other one according to 
- * a modus operandi being 0 or 1 for abstract to machine
- * or machine to abstract representation of cores
- */
-int
-_ranges_conv(char* lrange,char** prange,int mode)
-{
-	int fstatus;
-	int i;
-	uint16_t *amap;
-	uint16_t *map;
-	uint16_t *map_out;
-
-	/* init internal data if not already done */
-	if ( xcpuinfo_init() != XCPUINFO_SUCCESS )
-		return XCPUINFO_ERROR;
-
-	if ( mode ) {
-		/* machine to abstract conversion */
-		amap = block_map_inv;
-	}
-	else {
-		/* abstract to machine conversion */
-		amap = block_map;
-	}
-
-	/* allocate map for local work */
-	map = (uint16_t*) xmalloc(block_map_size*sizeof(uint16_t));
-	map_out = (uint16_t*) xmalloc(block_map_size*sizeof(uint16_t));
-
-	/* extract the input map */
-	fstatus = _range_to_map(lrange,map,block_map_size);
-	if ( fstatus ) {
-		goto exit;
-	}
-
-	/* do the conversion (see src/slurmd/slurmd/get_mach_stat.c) */
-	for( i = 0 ; i < block_map_size ; i++) {
-		if ( map[i] )
-			map_out[amap[i]]=1;
-	}
-
-	/* build the ouput range */
-	fstatus = _map_to_range(map_out,block_map_size,prange);
-
-exit:
-	xfree(map);
-	xfree(map_out);
-	return fstatus;
-}
diff --git a/src/plugins/proctrack/linuxproc/Makefile.in b/src/plugins/proctrack/linuxproc/Makefile.in
index 64163e694..84b67e30f 100644
--- a/src/plugins/proctrack/linuxproc/Makefile.in
+++ b/src/plugins/proctrack/linuxproc/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -139,7 +141,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -176,6 +181,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -233,6 +239,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -268,6 +275,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/proctrack/linuxproc/kill_tree.c b/src/plugins/proctrack/linuxproc/kill_tree.c
index 20b8c4068..58dd3d7f2 100644
--- a/src/plugins/proctrack/linuxproc/kill_tree.c
+++ b/src/plugins/proctrack/linuxproc/kill_tree.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -151,6 +151,7 @@ static xppid_t **_build_hashtbl(void)
 	struct dirent *de;
 	char path[PATH_MAX], *endptr, *num, rbuf[1024];
 	char myname[1024], cmd[1024];
+	char state;
 	int fd;
 	long pid, ppid, ret_l;
 	xppid_t **hashtbl;
@@ -185,11 +186,17 @@ static xppid_t **_build_hashtbl(void)
 			close(fd);
 			continue;
 		}
-		if (sscanf(rbuf, "%ld %s %*s %ld", &pid, cmd, &ppid) != 3) {
+		if (sscanf(rbuf, "%ld %s %c %ld", &pid, cmd, &state, &ppid)
+		    != 4) {
 			close(fd);
 			continue;
 		}
 		close(fd);
+		if (state == 'Z') {
+			debug3("Defunct process skipped: command=%s state=%c "
+			       "pid=%ld ppid=%ld", cmd, state, pid, ppid);
+			continue;	/* Defunct, don't try to kill */
+		}
 
 		/* Record cmd for debugging purpose */
 		_push_to_hashtbl((pid_t)ppid, (pid_t)pid,
diff --git a/src/plugins/proctrack/linuxproc/kill_tree.h b/src/plugins/proctrack/linuxproc/kill_tree.h
index dbe048a12..cf8103a7b 100644
--- a/src/plugins/proctrack/linuxproc/kill_tree.h
+++ b/src/plugins/proctrack/linuxproc/kill_tree.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/proctrack/linuxproc/proctrack_linuxproc.c b/src/plugins/proctrack/linuxproc/proctrack_linuxproc.c
index 7c68191b8..283851eb4 100644
--- a/src/plugins/proctrack/linuxproc/proctrack_linuxproc.c
+++ b/src/plugins/proctrack/linuxproc/proctrack_linuxproc.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -48,8 +48,9 @@
 #endif
 
 #include <sys/types.h>
-#include <slurm/slurm.h>
-#include <slurm/slurm_errno.h>
+
+#include "slurm/slurm.h"
+#include "slurm/slurm_errno.h"
 #include "src/common/log.h"
 #include "src/slurmd/slurmstepd/slurmstepd_job.h"
 #include "kill_tree.h"
@@ -85,7 +86,7 @@
  */
 const char plugin_name[]      = "Process tracking via linux /proc";
 const char plugin_type[]      = "proctrack/linuxproc";
-const uint32_t plugin_version = 90;
+const uint32_t plugin_version = 91;
 
 
 /*
@@ -105,37 +106,37 @@ extern int fini ( void )
 /*
  * Uses slurmd job-step manager's pid as the unique container id.
  */
-extern int slurm_container_create ( slurmd_job_t *job )
+extern int slurm_container_plugin_create ( slurmd_job_t *job )
 {
-	job->cont_id = (uint32_t)job->jmgr_pid;
+	job->cont_id = (uint64_t)job->jmgr_pid;
 	return SLURM_SUCCESS;
 }
 
-extern int slurm_container_add ( slurmd_job_t *job, pid_t pid )
+extern int slurm_container_plugin_add ( slurmd_job_t *job, pid_t pid )
 {
 	return SLURM_SUCCESS;
 }
 
-extern int slurm_container_signal ( uint32_t id, int signal )
+extern int slurm_container_plugin_signal ( uint64_t id, int signal )
 {
 	return kill_proc_tree((pid_t)id, signal);
 }
 
-extern int slurm_container_destroy ( uint32_t id )
+extern int slurm_container_plugin_destroy ( uint64_t id )
 {
 	return SLURM_SUCCESS;
 }
 
-extern uint32_t slurm_container_find(pid_t pid)
+extern uint64_t slurm_container_plugin_find(pid_t pid)
 {
-	return (uint32_t) find_ancestor(pid, "slurmstepd");
+	return (uint64_t) find_ancestor(pid, "slurmstepd");
 }
 
-extern bool slurm_container_has_pid(uint32_t cont_id, pid_t pid)
+extern bool slurm_container_plugin_has_pid(uint64_t cont_id, pid_t pid)
 {
-	uint32_t cont;
+	uint64_t cont;
 
-	cont = (uint32_t) find_ancestor(pid, "slurmstepd");
+	cont = (uint64_t) find_ancestor(pid, "slurmstepd");
 	if (cont == cont_id)
 		return true;
 
@@ -143,7 +144,7 @@ extern bool slurm_container_has_pid(uint32_t cont_id, pid_t pid)
 }
 
 extern int
-slurm_container_wait(uint32_t cont_id)
+slurm_container_plugin_wait(uint64_t cont_id)
 {
 	int delay = 1;
 
@@ -153,13 +154,13 @@ slurm_container_wait(uint32_t cont_id)
 	}
 
 	/* Spin until the container is successfully destroyed */
-	while (slurm_container_destroy(cont_id) != SLURM_SUCCESS) {
-		slurm_container_signal(cont_id, SIGKILL);
+	while (slurm_container_plugin_destroy(cont_id) != SLURM_SUCCESS) {
+		slurm_container_plugin_signal(cont_id, SIGKILL);
 		sleep(delay);
 		if (delay < 120) {
 			delay *= 2;
 		} else {
-			error("Unable to destroy container %u", cont_id);
+			error("Unable to destroy container %"PRIu64"", cont_id);
 		}
 	}
 
@@ -167,7 +168,7 @@ slurm_container_wait(uint32_t cont_id)
 }
 
 extern int
-slurm_container_get_pids(uint32_t cont_id, pid_t **pids, int *npids)
+slurm_container_plugin_get_pids(uint64_t cont_id, pid_t **pids, int *npids)
 {
 	return proctrack_linuxproc_get_pids((pid_t)cont_id, pids, npids);
 }
diff --git a/src/plugins/proctrack/lua/Makefile.in b/src/plugins/proctrack/lua/Makefile.in
index 4bd9132ba..35032302e 100644
--- a/src/plugins/proctrack/lua/Makefile.in
+++ b/src/plugins/proctrack/lua/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -138,7 +140,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -175,6 +180,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -232,6 +238,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -267,6 +274,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/proctrack/lua/proctrack_lua.c b/src/plugins/proctrack/lua/proctrack_lua.c
index 7c32548c5..85c4c8fe0 100644
--- a/src/plugins/proctrack/lua/proctrack_lua.c
+++ b/src/plugins/proctrack/lua/proctrack_lua.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -54,13 +54,13 @@
 #include <dlfcn.h>
 #include <pthread.h>
 
-#include <slurm/slurm.h>
-#include <slurm/slurm_errno.h>
-
 #include <lua.h>
 #include <lauxlib.h>
 #include <lualib.h>
 
+#include "slurm/slurm.h"
+#include "slurm/slurm_errno.h"
+
 #include "src/common/log.h"
 #include "src/common/macros.h"
 #include "src/slurmd/slurmstepd/slurmstepd_job.h"
@@ -68,7 +68,7 @@
 
 const char plugin_name[]            = "LUA proctrack module";
 const char plugin_type[]            = "proctrack/lua";
-const uint32_t plugin_version       = 90;
+const uint32_t plugin_version       = 91;
 
 static const char lua_script_path[] = DEFAULT_SCRIPT_DIR "/proctrack.lua";
 static lua_State *L = NULL;
@@ -136,7 +136,7 @@ static const struct luaL_Reg slurm_functions [] = {
 	{ NULL,    NULL        }
 };
 
-static int lua_register_slurm_output_functions ()
+static int lua_register_slurm_output_functions (void)
 {
 	/*
 	 *  Register slurm output functions in a global "slurm" table
@@ -188,7 +188,7 @@ static int check_lua_script_function (const char *name)
 /*
  *   Verify all required fuctions are defined in the proctrack/lua script
  */
-static int check_lua_script_functions ()
+static int check_lua_script_functions (void)
 {
 	int rc = 0;
 	int i;
@@ -320,7 +320,7 @@ static int lua_job_table_create (slurmd_job_t *job)
 	return (0);
 }
 
-int slurm_container_create (slurmd_job_t *job)
+int slurm_container_plugin_create (slurmd_job_t *job)
 {
 	int rc = SLURM_ERROR;
 	double id;
@@ -337,8 +337,8 @@ int slurm_container_create (slurmd_job_t *job)
 
 	lua_job_table_create (job);
 	if (lua_pcall (L, 1, 1, 0) != 0) {
-		error ("proctrack/lua: %s: slurm_container_create: %s",
-				lua_script_path, lua_tostring (L, -1));
+		error ("proctrack/lua: %s: slurm_container_plugin_create: %s",
+		       lua_script_path, lua_tostring (L, -1));
 		goto out;
 	}
 
@@ -347,14 +347,14 @@ int slurm_container_create (slurmd_job_t *job)
 	 */
 	if (lua_isnil (L, -1)) {
 		error ("proctrack/lua: "
-		       "slurm_container_create did not return id");
+		       "slurm_container_plugin_create did not return id");
 		lua_pop (L, -1);
 		goto out;
 	}
 
 	id = lua_tonumber (L, -1);
-	job->cont_id = id;
-	info ("job->cont_id = %u (%.0f)", job->cont_id, id);
+	job->cont_id = (uint64_t) id;
+	info ("job->cont_id = %"PRIu64" (%.0f)", job->cont_id, id);
 	lua_pop (L, -1);
 
 	rc = SLURM_SUCCESS;
@@ -363,7 +363,7 @@ out:
 	return rc;
 }
 
-int slurm_container_add (slurmd_job_t *job, pid_t pid)
+int slurm_container_plugin_add (slurmd_job_t *job, pid_t pid)
 {
 	int rc = SLURM_ERROR;
 
@@ -378,8 +378,9 @@ int slurm_container_add (slurmd_job_t *job, pid_t pid)
 	lua_pushnumber (L, pid);
 
 	if (lua_pcall (L, 3, 1, 0) != 0) {
-		error ("running lua function 'slurm_container_add': %s",
-				lua_tostring (L, -1));
+		error ("running lua function "
+		       "'slurm_container_plugin_add': %s",
+		       lua_tostring (L, -1));
 		goto out;
 	}
 
@@ -390,7 +391,7 @@ out:
 	return (rc);
 }
 
-int slurm_container_signal (uint32_t id, int sig)
+int slurm_container_plugin_signal (uint64_t id, int sig)
 {
 	int rc = SLURM_ERROR;
 
@@ -404,8 +405,9 @@ int slurm_container_signal (uint32_t id, int sig)
 	lua_pushnumber (L, sig);
 
 	if (lua_pcall (L, 2, 1, 0) != 0) {
-		error ("running lua function 'slurm_container_signal': %s",
-				lua_tostring (L, -1));
+		error ("running lua function "
+		       "'slurm_container_plugin_signal': %s",
+		       lua_tostring (L, -1));
 		goto out;
 	}
 
@@ -416,7 +418,7 @@ out:
 	return (rc);
 }
 
-int slurm_container_destroy (uint32_t id)
+int slurm_container_plugin_destroy (uint64_t id)
 {
 	int rc = SLURM_ERROR;
 
@@ -429,8 +431,9 @@ int slurm_container_destroy (uint32_t id)
 	lua_pushnumber (L, id);
 
 	if (lua_pcall (L, 1, 1, 0) != 0) {
-		error ("running lua function 'slurm_container_destroy': %s",
-				lua_tostring (L, -1));
+		error ("running lua function "
+		       "'slurm_container_plugin_destroy': %s",
+		       lua_tostring (L, -1));
 		goto out;
 	}
 
@@ -442,9 +445,9 @@ out:
 	return (rc);
 }
 
-uint32_t slurm_container_find (pid_t pid)
+uint64_t slurm_container_plugin_find (pid_t pid)
 {
-	uint32_t id = (uint32_t) SLURM_ERROR;
+	uint64_t id = (uint64_t) SLURM_ERROR;
 
 	slurm_mutex_lock (&lua_lock);
 
@@ -455,12 +458,12 @@ uint32_t slurm_container_find (pid_t pid)
 	lua_pushnumber (L, pid);
 
 	if (lua_pcall (L, 1, 1, 0) != 0) {
-		error ("running lua function 'slurm_container_find': %s",
-				lua_tostring (L, -1));
+		error ("running lua function 'slurm_container_plugin_find': %s",
+		       lua_tostring (L, -1));
 		goto out;
 	}
 
-	id = (uint32_t) lua_tonumber (L, -1);
+	id = (uint64_t) lua_tonumber (L, -1);
 	lua_pop (L, -1);
 
 out:
@@ -468,7 +471,7 @@ out:
 	return (id);
 }
 
-bool slurm_container_has_pid (uint32_t id, pid_t pid)
+bool slurm_container_plugin_has_pid (uint64_t id, pid_t pid)
 {
 	int rc = 0;
 
@@ -482,8 +485,9 @@ bool slurm_container_has_pid (uint32_t id, pid_t pid)
 	lua_pushnumber (L, pid);
 
 	if (lua_pcall (L, 2, 1, 0) != 0) {
-		error ("running lua function 'slurm_container_has_pid': %s",
-				lua_tostring (L, -1));
+		error ("running lua function "
+		       "'slurm_container_plugin_has_pid': %s",
+		       lua_tostring (L, -1));
 		goto out;
 	}
 
@@ -495,7 +499,7 @@ out:
 	return (rc == 1);
 }
 
-int slurm_container_wait (uint32_t id)
+int slurm_container_plugin_wait (uint64_t id)
 {
 	int rc = SLURM_ERROR;
 
@@ -508,8 +512,8 @@ int slurm_container_wait (uint32_t id)
 	lua_pushnumber (L, id);
 
 	if (lua_pcall (L, 1, 1, 0) != 0) {
-		error ("running lua function 'slurm_container_wait': %s",
-			lua_tostring (L, -1));
+		error ("running lua function 'slurm_container_plugin_wait': %s",
+		       lua_tostring (L, -1));
 		goto out;
 	}
 
@@ -520,7 +524,7 @@ out:
 	return (rc);
 }
 
-int slurm_container_get_pids (uint32_t cont_id, pid_t **pids, int *npids)
+int slurm_container_plugin_get_pids (uint64_t cont_id, pid_t **pids, int *npids)
 {
 	int rc = SLURM_ERROR;
 	int i = 0;
@@ -533,16 +537,16 @@ int slurm_container_get_pids (uint32_t cont_id, pid_t **pids, int *npids)
 
 	lua_getglobal (L, "slurm_container_get_pids");
 	if (lua_isnil (L, -1))
-	    goto out;
+		goto out;
 
 	lua_pushnumber (L, cont_id);
 
 	if (lua_pcall (L, 1, 1, 0) != 0) {
-	    error ("%s: %s: %s",
-		    "proctrack/lua",
-		    __func__,
-		    lua_tostring (L, -1));
-	    goto out;
+		error ("%s: %s: %s",
+		       "proctrack/lua",
+		       __func__,
+		       lua_tostring (L, -1));
+		goto out;
 	}
 
 	/*
@@ -551,8 +555,8 @@ int slurm_container_get_pids (uint32_t cont_id, pid_t **pids, int *npids)
 	 */
 	if (!lua_istable(L, -1)) {
 		error ("%s: %s: function should return a table",
-			"proctrack/lua",
-			__func__);
+		       "proctrack/lua",
+		       __func__);
 		goto out;
 	}
 
diff --git a/src/plugins/proctrack/pgid/Makefile.in b/src/plugins/proctrack/pgid/Makefile.in
index b44bf3619..52e49db6c 100644
--- a/src/plugins/proctrack/pgid/Makefile.in
+++ b/src/plugins/proctrack/pgid/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -138,7 +140,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -175,6 +180,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -232,6 +238,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -267,6 +274,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/proctrack/pgid/proctrack_pgid.c b/src/plugins/proctrack/pgid/proctrack_pgid.c
index 4371c10db..5b1d8ea7e 100644
--- a/src/plugins/proctrack/pgid/proctrack_pgid.c
+++ b/src/plugins/proctrack/pgid/proctrack_pgid.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -55,8 +55,9 @@
 #include <sys/types.h>
 #include <signal.h>
 #include <stdlib.h>
-#include <slurm/slurm.h>
-#include <slurm/slurm_errno.h>
+
+#include "slurm/slurm.h"
+#include "slurm/slurm_errno.h"
 #include "src/common/log.h"
 #include "src/slurmd/slurmstepd/slurmstepd_job.h"
 
@@ -91,7 +92,7 @@
  */
 const char plugin_name[]      = "Process tracking via process group ID plugin";
 const char plugin_type[]      = "proctrack/pgid";
-const uint32_t plugin_version = 90;
+const uint32_t plugin_version = 91;
 
 /*
  * init() is called when the plugin is loaded, before any other functions
@@ -107,7 +108,7 @@ extern int fini ( void )
 	return SLURM_SUCCESS;
 }
 
-extern int slurm_container_create ( slurmd_job_t *job )
+extern int slurm_container_plugin_create ( slurmd_job_t *job )
 {
 	return SLURM_SUCCESS;
 }
@@ -115,71 +116,71 @@ extern int slurm_container_create ( slurmd_job_t *job )
 /*
  * Uses job step process group id.
  */
-extern int slurm_container_add ( slurmd_job_t *job, pid_t pid )
+extern int slurm_container_plugin_add ( slurmd_job_t *job, pid_t pid )
 {
-	job->cont_id = (uint32_t)job->pgid;
+	job->cont_id = (uint64_t)job->pgid;
 	return SLURM_SUCCESS;
 }
 
-extern int slurm_container_signal  ( uint32_t id, int signal )
+extern int slurm_container_plugin_signal  ( uint64_t id, int signal )
 {
 	pid_t pid = (pid_t) id;
 
-	if (!id)	/* no container ID */
-		return ESRCH;
-
-	if (id == getpid() || id == getpgid(0)) {
+	if (!id) {
+		/* no container ID */
+	} else if (pid == getpid() || pid == getpgid(0)) {
 		error("slurm_signal_container would kill caller!");
-		return ESRCH;
+	} else {
+		return killpg(pid, signal);
 	}
-
-	return (int)killpg(pid, signal);
+	slurm_seterrno(ESRCH);
+	return SLURM_ERROR;
 }
 
-extern int slurm_container_destroy ( uint32_t id )
+extern int slurm_container_plugin_destroy ( uint64_t id )
 {
 	return SLURM_SUCCESS;
 }
 
-extern uint32_t slurm_container_find(pid_t pid)
+extern uint64_t slurm_container_plugin_find(pid_t pid)
 {
 	pid_t rc = getpgid(pid);
 
 	if (rc == -1)
-		return (uint32_t) 0;
+		return (uint64_t) 0;
 	else
-		return (uint32_t) rc;
+		return (uint64_t) rc;
 }
 
-extern bool slurm_container_has_pid(uint32_t cont_id, pid_t pid)
+extern bool slurm_container_plugin_has_pid(uint64_t cont_id, pid_t pid)
 {
 	pid_t pgid = getpgid(pid);
 
-	if (pgid == -1 || (uint32_t)pgid != cont_id)
+	if ((pgid == -1) || ((uint64_t)pgid != cont_id))
 		return false;
 
 	return true;
 }
 
 extern int
-slurm_container_wait(uint32_t cont_id)
+slurm_container_plugin_wait(uint64_t cont_id)
 {
 	pid_t pgid = (pid_t)cont_id;
 	int delay = 1;
 
 	if (cont_id == 0 || cont_id == 1) {
-		errno = EINVAL;
+		slurm_seterrno(EINVAL);
 		return SLURM_ERROR;
 	}
 
 	/* Spin until the process group is gone. */
 	while (killpg(pgid, 0) == 0) {
-		slurm_container_signal(cont_id, SIGKILL);
+		slurm_container_plugin_signal(cont_id, SIGKILL);
 		sleep(delay);
 		if (delay < 120) {
 			delay *= 2;
 		} else {
-			error("Unable to destroy container %u", cont_id);
+			error("Unable to destroy container %"PRIu64"", cont_id);
 		}
 	}
 
@@ -187,8 +188,9 @@ slurm_container_wait(uint32_t cont_id)
 }
 
 extern int
-slurm_container_get_pids(uint32_t cont_id, pid_t **pids, int *npids)
+slurm_container_plugin_get_pids(uint64_t cont_id, pid_t **pids, int *npids)
 {
-	error("proctrack/pgid does not implement slurm_container_get_pids");
+	error("proctrack/pgid does not implement "
+	      "slurm_container_plugin_get_pids");
 	return SLURM_ERROR;
 }
diff --git a/src/plugins/proctrack/rms/Makefile.in b/src/plugins/proctrack/rms/Makefile.in
index ad6e6e006..b8d7b9835 100644
--- a/src/plugins/proctrack/rms/Makefile.in
+++ b/src/plugins/proctrack/rms/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -139,7 +141,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -176,6 +181,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -233,6 +239,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -268,6 +275,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/proctrack/rms/proctrack_rms.c b/src/plugins/proctrack/rms/proctrack_rms.c
index 9ff88e6ee..6a95962f1 100644
--- a/src/plugins/proctrack/rms/proctrack_rms.c
+++ b/src/plugins/proctrack/rms/proctrack_rms.c
@@ -6,7 +6,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -52,14 +52,15 @@
 #include <signal.h>
 #include <stdlib.h>
 #include <unistd.h>
-#include <slurm/slurm.h>
-#include <slurm/slurm_errno.h>
+
+#include "slurm/slurm.h"
+#include "slurm/slurm_errno.h"
 #include "src/common/log.h"
 #include "src/slurmd/common/proctrack.h"
 
 const char plugin_name[] = "Process tracking for QsNet via the rms module";
 const char plugin_type[]      = "proctrack/rms";
-const uint32_t plugin_version = 1;
+const uint32_t plugin_version = 91;
 
 static int _prg_destructor_fork(void);
 static void _prg_destructor_send(int fd, int prgid);
@@ -81,14 +82,14 @@ extern int fini (void)
 
 /*
  * When proctrack/rms is used in conjunction with switch/elan,
- * slurm_container_create will not normally create the program description.
- * It just retrieves the prgid created in switch/elan.
+ * slurm_container_plugin_create will not normally create the program
+ * description.  It just retrieves the prgid created in switch/elan.
  *
  * When the program description cannot be retrieved (switch/elan is not
  * being used, the job step is a batch script, etc.) then rms_prgcreate()
  * is called here.
  */
-extern int slurm_container_create (slurmd_job_t *job)
+extern int slurm_container_plugin_create (slurmd_job_t *job)
 {
 	int prgid;
 	/*
@@ -107,21 +108,21 @@ extern int slurm_container_create (slurmd_job_t *job)
 	}
         debug3("proctrack/rms: prgid = %d", prgid);
 
-	job->cont_id = (uint32_t)prgid;
+	job->cont_id = (uint64_t)prgid;
 	return SLURM_SUCCESS;
 }
 
-extern int slurm_container_add (slurmd_job_t *job, pid_t pid)
+extern int slurm_container_plugin_add (slurmd_job_t *job, pid_t pid)
 {
 	return SLURM_SUCCESS;
 }
 
 /*
- * slurm_container_signal assumes that the slurmd jobstep manager
+ * slurm_container_plugin_signal assumes that the slurmd jobstep manager
  * is always the last process in the rms program description.
  * No signals are sent to the last process.
  */
-extern int slurm_container_signal  (uint32_t id, int signal)
+extern int slurm_container_plugin_signal  (uint64_t id, int signal)
 {
 	pid_t *pids;
 	int nids = 0;
@@ -165,42 +166,42 @@ extern int slurm_container_signal  (uint32_t id, int signal)
  * returns SLURM_SUCCESS when the program description contains one and
  * only one process, assumed to be the slurmd jobstep manager.
  */
-extern int slurm_container_destroy (uint32_t id)
+extern int slurm_container_plugin_destroy (uint64_t id)
 {
-	debug2("proctrack/rms: destroying container %u", id);
+	debug2("proctrack/rms: destroying container %"PRIu64"", id);
 	if (id == 0)
 		return SLURM_SUCCESS;
 
-	if (slurm_container_signal(id, 0) == -1)
+	if (slurm_container_plugin_signal(id, 0) == -1)
 		return SLURM_SUCCESS;
 
 	return SLURM_ERROR;
 }
 
 
-extern uint32_t slurm_container_find (pid_t pid)
+extern uint64_t slurm_container_plugin_find (pid_t pid)
 {
 	int prgid = 0;
 
 	if (rms_getprgid ((int) pid, &prgid) < 0)
-		return (uint32_t) 0;
-	return (uint32_t) prgid;
+		return (uint64_t) 0;
+	return (uint64_t) prgid;
 }
 
-extern bool slurm_container_has_pid (uint32_t cont_id, pid_t pid)
+extern bool slurm_container_plugin_has_pid (uint64_t cont_id, pid_t pid)
 {
 	int prgid = 0;
 
 	if (rms_getprgid ((int) pid, &prgid) < 0)
 		return false;
-	if ((uint32_t)prgid != cont_id)
+	if ((uint64_t)prgid != cont_id)
 		return false;
 
 	return true;
 }
 
 extern int
-slurm_container_wait(uint32_t cont_id)
+slurm_container_plugin_wait(uint64_t cont_id)
 {
 	int delay = 1;
 
@@ -210,13 +211,13 @@ slurm_container_wait(uint32_t cont_id)
 	}
 
 	/* Spin until the container is empty */
-	while (slurm_container_signal(cont_id, 0) != -1) {
-		slurm_container_signal(cont_id, SIGKILL);
+	while (slurm_container_plugin_signal(cont_id, 0) != -1) {
+		slurm_container_plugin_signal(cont_id, SIGKILL);
 		sleep(delay);
 		if (delay < 120) {
 			delay *= 2;
 		} else {
-			error("Unable to destroy container %u", cont_id);
+			error("Unable to destroy container %"PRIu64"", cont_id);
 		}
 	}
 
@@ -229,7 +230,7 @@ slurm_container_wait(uint32_t cont_id)
  * the slurmstepd in the list of pids that we return.
  */
 extern int
-slurm_container_get_pids(uint32_t cont_id, pid_t **pids, int *npids)
+slurm_container_plugin_get_pids(uint64_t cont_id, pid_t **pids, int *npids)
 {
 	pid_t *p;
 	int np;
@@ -280,7 +281,7 @@ _close_all_fd_except(int fd)
  * parent process has exited.  Then call rms_prgdestroy.
  */
 static int
-_prg_destructor_fork()
+_prg_destructor_fork(void)
 {
 	pid_t pid;
 	int fdpair[2];
diff --git a/src/plugins/proctrack/sgi_job/Makefile.in b/src/plugins/proctrack/sgi_job/Makefile.in
index 540034e0d..dbb950e67 100644
--- a/src/plugins/proctrack/sgi_job/Makefile.in
+++ b/src/plugins/proctrack/sgi_job/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -138,7 +140,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -175,6 +180,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -232,6 +238,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -267,6 +274,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/proctrack/sgi_job/proctrack_sgi_job.c b/src/plugins/proctrack/sgi_job/proctrack_sgi_job.c
index 2295503af..6106f7b5f 100644
--- a/src/plugins/proctrack/sgi_job/proctrack_sgi_job.c
+++ b/src/plugins/proctrack/sgi_job/proctrack_sgi_job.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -53,8 +53,8 @@
 #include <unistd.h>
 #include <dlfcn.h>
 
-#include <slurm/slurm.h>
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm.h"
+#include "slurm/slurm_errno.h"
 #include "src/common/log.h"
 
 #include "src/slurmd/slurmstepd/slurmstepd_job.h"
@@ -62,7 +62,7 @@
 
 const char plugin_name[]      = "Process tracking via SGI job module";
 const char plugin_type[]      = "proctrack/sgi_job";
-const uint32_t plugin_version = 90;
+const uint32_t plugin_version = 91;
 
 /*
  * We can't include <job.h> since its prototypes conflict with some
@@ -142,7 +142,7 @@ int init (void)
 	if (!job_ops.getpidcnt)
 		error ("Unable to resolve job_getpidcnt in libjob.so");
 
-	info ("successfully loaded libjob.so");
+	debug ("successfully loaded libjob.so");
 	return SLURM_SUCCESS;
 }
 
@@ -192,10 +192,10 @@ int _job_getpidcnt (jid_t jid)
 	return ((*job_ops.getpidcnt) (jid));
 }
 
-int slurm_container_create (slurmd_job_t *job)
+int slurm_container_plugin_create (slurmd_job_t *job)
 {
 	jid_t jid;
-	job->cont_id = (uint32_t) -1;
+	job->cont_id = (uint64_t) -1;
 
 	if (!libjob_handle)
 		init();
@@ -209,10 +209,10 @@ int slurm_container_create (slurmd_job_t *job)
 	return SLURM_SUCCESS;
 }
 
-int slurm_container_add (slurmd_job_t *job, pid_t pid)
+int slurm_container_plugin_add (slurmd_job_t *job, pid_t pid)
 {
-	if (job->cont_id == (uint32_t) -1) {
-		job->cont_id = (uint32_t) _job_getjid (getpid());
+	if (job->cont_id == (uint64_t) -1) {
+		job->cont_id = (uint64_t) _job_getjid (getpid());
 		/*
 		 *  Detach ourselves from the job container now that there
 		 *   is at least one other process in it.
@@ -225,7 +225,7 @@ int slurm_container_add (slurmd_job_t *job, pid_t pid)
 	return SLURM_SUCCESS;
 }
 
-int slurm_container_signal (uint32_t id, int sig)
+int slurm_container_plugin_signal (uint64_t id, int sig)
 {
 	if ( (_job_killjid ((jid_t) id, sig) < 0)
 	   && (errno != ENODATA) && (errno != EBADF) )
@@ -233,7 +233,7 @@ int slurm_container_signal (uint32_t id, int sig)
 	return (SLURM_SUCCESS);
 }
 
-int slurm_container_destroy (uint32_t id)
+int slurm_container_plugin_destroy (uint64_t id)
 {
 	int status;
 	_job_waitjid ((jid_t) id, &status, 0);
@@ -243,29 +243,29 @@ int slurm_container_destroy (uint32_t id)
 	return SLURM_SUCCESS;
 }
 
-uint32_t slurm_container_find (pid_t pid)
+uint64_t slurm_container_plugin_find (pid_t pid)
 {
 	jid_t jid;
 
 	if ((jid = _job_getjid (pid)) == (jid_t) -1)
-		return ((uint32_t) 0);
+		return ((uint64_t) 0);
 
-	return ((uint32_t) jid);
+	return ((uint64_t) jid);
 }
 
-bool slurm_container_has_pid (uint32_t cont_id, pid_t pid)
+bool slurm_container_plugin_has_pid (uint64_t cont_id, pid_t pid)
 {
 	jid_t jid;
 
 	if ((jid = _job_getjid (pid)) == (jid_t) -1)
 		return false;
-	if ((uint32_t)jid != cont_id)
+	if ((uint64_t)jid != cont_id)
 		return false;
 
 	return true;
 }
 
-int slurm_container_wait (uint32_t id)
+int slurm_container_plugin_wait (uint64_t id)
 {
 	int status;
 	if (_job_waitjid ((jid_t) id, &status, 0) == (jid_t)-1)
@@ -274,7 +274,7 @@ int slurm_container_wait (uint32_t id)
 	return SLURM_SUCCESS;
 }
 
-int slurm_container_get_pids(uint32_t cont_id, pid_t **pids, int *npids)
+int slurm_container_plugin_get_pids(uint64_t cont_id, pid_t **pids, int *npids)
 {
 	int pidcnt, bufsize;
 	pid_t *p;
diff --git a/src/plugins/sched/Makefile.in b/src/plugins/sched/Makefile.in
index 72b321e1b..8db0b4c17 100644
--- a/src/plugins/sched/Makefile.in
+++ b/src/plugins/sched/Makefile.in
@@ -62,6 +62,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -72,6 +73,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -133,7 +135,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -170,6 +175,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -227,6 +233,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -262,6 +269,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/sched/backfill/Makefile.in b/src/plugins/sched/backfill/Makefile.in
index 03d283d29..5ee47061b 100644
--- a/src/plugins/sched/backfill/Makefile.in
+++ b/src/plugins/sched/backfill/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -138,7 +140,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -175,6 +180,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -232,6 +238,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -267,6 +274,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/sched/backfill/backfill.c b/src/plugins/sched/backfill/backfill.c
index 27532ccea..1db023970 100644
--- a/src/plugins/sched/backfill/backfill.c
+++ b/src/plugins/sched/backfill/backfill.c
@@ -21,7 +21,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -69,6 +69,7 @@
 #include "src/common/xstring.h"
 
 #include "src/slurmctld/acct_policy.h"
+#include "src/slurmctld/front_end.h"
 #include "src/slurmctld/job_scheduler.h"
 #include "src/slurmctld/licenses.h"
 #include "src/slurmctld/locks.h"
@@ -83,6 +84,10 @@
 #  define BACKFILL_INTERVAL	30
 #endif
 
+#ifndef BACKFILL_RESOLUTION
+#  define BACKFILL_RESOLUTION	60
+#endif
+
 /* Do not build job/resource/time record for more than this
  * far in the future, in seconds, currently one day */
 #ifndef BACKFILL_WINDOW
@@ -107,6 +112,7 @@ static pthread_cond_t  term_cond = PTHREAD_COND_INITIALIZER;
 static bool config_flag = false;
 static uint32_t debug_flags = 0;
 static int backfill_interval = BACKFILL_INTERVAL;
+static int backfill_resolution = BACKFILL_RESOLUTION;
 static int backfill_window = BACKFILL_WINDOW;
 static int max_backfill_job_cnt = 50;
 
@@ -116,8 +122,6 @@ static void _add_reservation(uint32_t start_time, uint32_t end_reserve,
 			     node_space_map_t *node_space,
 			     int *node_space_recs);
 static int  _attempt_backfill(void);
-static void _diff_tv_str(struct timeval *tv1,struct timeval *tv2,
-		char *tv_str, int len_tv_str);
 static bool _job_is_completing(void);
 static void _load_config(void);
 static bool _many_pending_rpcs(void);
@@ -156,22 +160,6 @@ static void _dump_node_space_table(node_space_map_t *node_space_ptr)
 	info("=========================================");
 }
 
-/*
- * _diff_tv_str - build a string showing the time difference between two times
- * IN tv1 - start of event
- * IN tv2 - end of event
- * OUT tv_str - place to put delta time in format "usec=%ld"
- * IN len_tv_str - size of tv_str in bytes
- */
-static void _diff_tv_str(struct timeval *tv1,struct timeval *tv2,
-		char *tv_str, int len_tv_str)
-{
-	long delta_t;
-	delta_t  = (tv2->tv_sec  - tv1->tv_sec) * 1000000;
-	delta_t +=  tv2->tv_usec - tv1->tv_usec;
-	snprintf(tv_str, len_tv_str, "usec=%ld", delta_t);
-}
-
 /*
  * _job_is_completing - Determine if jobs are in the process of completing.
  *	This is a variant of job_is_completing in slurmctld/job_scheduler.c.
@@ -381,6 +369,13 @@ static void _load_config(void)
 		fatal("Invalid backfill scheduler max_job_bf: %d",
 		      max_backfill_job_cnt);
 	}
+	if (sched_params && (tmp_ptr=strstr(sched_params, "bf_res=")))
+		backfill_resolution = atoi(tmp_ptr + 7);
+	if (backfill_resolution < 1) {
+		fatal("Invalid backfill scheduler resolution: %d",
+		      backfill_resolution);
+	}
+
 	xfree(sched_params);
 }
 
@@ -393,8 +388,6 @@ extern void backfill_reconfig(void)
 /* backfill_agent - detached thread periodically attempts to backfill jobs */
 extern void *backfill_agent(void *args)
 {
-	struct timeval tv1, tv2;
-	char tv_str[20];
 	time_t now;
 	double wait_time;
 	static time_t last_backfill_time = 0;
@@ -416,25 +409,20 @@ extern void *backfill_agent(void *args)
 		wait_time = difftime(now, last_backfill_time);
 		if ((wait_time < backfill_interval) ||
 		    _job_is_completing() || _many_pending_rpcs() ||
-		    !_more_work(last_backfill_time))
+		    !avail_front_end() || !_more_work(last_backfill_time))
 			continue;
 
-		gettimeofday(&tv1, NULL);
 		lock_slurmctld(all_locks);
 		while (_attempt_backfill()) ;
 		last_backfill_time = time(NULL);
 		unlock_slurmctld(all_locks);
-		gettimeofday(&tv2, NULL);
-		_diff_tv_str(&tv1, &tv2, tv_str, 20);
-		if (debug_flags & DEBUG_FLAG_BACKFILL)
-			info("backfill: completed, %s", tv_str);
 	}
 	return NULL;
 }
 
 /* Return non-zero to break the backfill loop if change in job, node or
  * partition state or the backfill scheduler needs to be stopped. */
-static int _yield_locks(void)
+static int _yield_locks(int secs)
 {
 	slurmctld_lock_t all_locks = {
 		READ_LOCK, WRITE_LOCK, WRITE_LOCK, READ_LOCK };
@@ -445,7 +433,7 @@ static int _yield_locks(void)
 	part_update = last_part_update;
 
 	unlock_slurmctld(all_locks);
-	_my_sleep(backfill_interval);
+	_my_sleep(secs);
 	lock_slurmctld(all_locks);
 
 	if ((last_job_update  == job_update)  &&
@@ -459,6 +447,7 @@ static int _yield_locks(void)
 
 static int _attempt_backfill(void)
 {
+	DEF_TIMERS;
 	bool filter_root = false;
 	List job_queue;
 	job_queue_rec_t *job_queue_rec;
@@ -470,12 +459,36 @@ static int _attempt_backfill(void)
 	uint32_t time_limit, comp_time_limit, orig_time_limit;
 	uint32_t min_nodes, max_nodes, req_nodes;
 	bitstr_t *avail_bitmap = NULL, *resv_bitmap = NULL;
-	time_t now = time(NULL), sched_start, later_start, start_res;
+	time_t now, sched_start, later_start, start_res;
 	node_space_map_t *node_space;
 	static int sched_timeout = 0;
 	int this_sched_timeout = 0, rc = 0;
+	int job_test_count = 0;
+
+#ifdef HAVE_CRAY
+	/*
+	 * Run a Basil Inventory immediately before setting up the schedule
+	 * plan, to avoid race conditions caused by ALPS node state change.
+	 * Needs to be done with the node-state lock taken.
+	 */
+	START_TIMER;
+	if (select_g_reconfigure()) {
+		debug4("backfill: not scheduling due to ALPS");
+		return SLURM_SUCCESS;
+	}
+	END_TIMER;
+	if (debug_flags & DEBUG_FLAG_BACKFILL)
+		info("backfill: ALPS inventory completed, %s", TIME_STR);
+
+	/* The Basil inventory can take a long time to complete. Process
+	 * pending RPCs before starting the backfill scheduling logic */
+	_yield_locks(1);
+#endif
 
-	sched_start = now;
+	START_TIMER;
+	if (debug_flags & DEBUG_FLAG_BACKFILL)
+		info("backfill: beginning");
+	sched_start = now = time(NULL);
 	if (sched_timeout == 0) {
 		sched_timeout = slurm_get_msg_timeout() / 2;
 		sched_timeout = MAX(sched_timeout, 1);
@@ -505,6 +518,7 @@ static int _attempt_backfill(void)
 
 	while ((job_queue_rec = (job_queue_rec_t *)
 				list_pop_bottom(job_queue, sort_job_queue2))) {
+		job_test_count++;
 		job_ptr  = job_queue_rec->job_ptr;
 		part_ptr = job_queue_rec->part_ptr;
 		xfree(job_queue_rec);
@@ -517,7 +531,11 @@ static int _attempt_backfill(void)
 
 		if ((job_ptr->state_reason == WAIT_ASSOC_JOB_LIMIT) ||
 		    (job_ptr->state_reason == WAIT_ASSOC_RESOURCE_LIMIT) ||
-		    (job_ptr->state_reason == WAIT_ASSOC_TIME_LIMIT)) {
+		    (job_ptr->state_reason == WAIT_ASSOC_TIME_LIMIT) ||
+		    (job_ptr->state_reason == WAIT_QOS_JOB_LIMIT) ||
+		    (job_ptr->state_reason == WAIT_QOS_RESOURCE_LIMIT) ||
+		    (job_ptr->state_reason == WAIT_QOS_TIME_LIMIT) ||
+		    !acct_policy_job_runnable(job_ptr)) {
 			debug2("backfill: job %u is not allowed to run now. "
 			       "Skipping it. State=%s. Reason=%s. Priority=%u",
 			       job_ptr->job_id,
@@ -570,6 +588,7 @@ static int _attempt_backfill(void)
 		}
 		comp_time_limit = time_limit;
 		orig_time_limit = job_ptr->time_limit;
+		qos_ptr = job_ptr->qos_ptr;
 		if (qos_ptr && (qos_ptr->flags & QOS_FLAG_NO_RESERVE))
 			time_limit = job_ptr->time_limit = 1;
 		else if (job_ptr->time_min && (job_ptr->time_min < time_limit))
@@ -624,10 +643,12 @@ static int _attempt_backfill(void)
 				     avail_bitmap))) ||
 		    (job_req_node_filter(job_ptr, avail_bitmap))) {
 			if (later_start) {
-				job_ptr->start_time = 0;	
+				job_ptr->start_time = 0;
 				goto TRY_LATER;
 			}
+			/* Job can not start until too far in the future */
 			job_ptr->time_limit = orig_time_limit;
+			job_ptr->start_time = sched_start + backfill_window;
 			continue;
 		}
 
@@ -637,15 +658,28 @@ static int _attempt_backfill(void)
 		bit_not(resv_bitmap);
 
 		if ((time(NULL) - sched_start) >= this_sched_timeout) {
-			debug("backfill: loop taking too long, yielding locks");
-			if (_yield_locks()) {
-				debug("backfill: system state changed, "
-				      "breaking out");
+			uint32_t save_time_limit = job_ptr->time_limit;
+			job_ptr->time_limit = orig_time_limit;
+			if (debug_flags & DEBUG_FLAG_BACKFILL) {
+				END_TIMER;
+				info("backfill: yielding locks after testing "
+				     "%d jobs, %s",
+				     job_test_count, TIME_STR);
+			}
+			if (_yield_locks(backfill_interval)) {
+				if (debug_flags & DEBUG_FLAG_BACKFILL) {
+					info("backfill: system state changed, "
+					     "breaking out after testing %d "
+					     "jobs", job_test_count);
+				}
 				rc = 1;
 				break;
-			} else {
-				this_sched_timeout += sched_timeout;
 			}
+			job_ptr->time_limit = save_time_limit;
+			/* Reset backfill scheduling timers, resume testing */
+			sched_start = time(NULL);
+			job_test_count = 0;
+			START_TIMER;
 		}
 		/* this is the time consuming operation */
 		debug2("backfill: entering _try_sched for job %u.",
@@ -667,8 +701,11 @@ static int _attempt_backfill(void)
 		}
 		if (job_ptr->start_time <= now) {
 			int rc = _start_job(job_ptr, resv_bitmap);
-			if (qos_ptr && (qos_ptr->flags & QOS_FLAG_NO_RESERVE))
+			if (qos_ptr && (qos_ptr->flags & QOS_FLAG_NO_RESERVE)){
 				job_ptr->time_limit = orig_time_limit;
+				job_ptr->end_time = job_ptr->start_time +
+						    (orig_time_limit * 60);
+			}
 			else if ((rc == SLURM_SUCCESS) && job_ptr->time_min) {
 				/* Set time limit as high as possible */
 				job_ptr->time_limit = comp_time_limit;
@@ -699,7 +736,7 @@ static int _attempt_backfill(void)
 		if (later_start && (job_ptr->start_time > later_start)) {
 			/* Try later when some nodes currently reserved for
 			 * pending jobs are free */
-			job_ptr->start_time = 0;	
+			job_ptr->start_time = 0;
 			goto TRY_LATER;
 		}
 
@@ -720,14 +757,13 @@ static int _attempt_backfill(void)
 			 * job to be backfill scheduled, which the sched
 			 * plugin does not know about. Try again later. */
 			later_start = job_ptr->start_time;
-			job_ptr->start_time = 0;	
+			job_ptr->start_time = 0;
 			goto TRY_LATER;
 		}
 
 		/*
 		 * Add reservation to scheduling table if appropriate
 		 */
-		qos_ptr = job_ptr->qos_ptr;
 		if (qos_ptr && (qos_ptr->flags & QOS_FLAG_NO_RESERVE))
 			continue;
 		bit_not(avail_bitmap);
@@ -746,6 +782,11 @@ static int _attempt_backfill(void)
 	}
 	xfree(node_space);
 	list_destroy(job_queue);
+	if (debug_flags & DEBUG_FLAG_BACKFILL) {
+		END_TIMER;
+		info("backfill: completed testing %d jobs, %s",
+		     job_test_count, TIME_STR);
+	}
 	return rc;
 }
 
@@ -763,8 +804,11 @@ static int _start_job(struct job_record *job_ptr, bitstr_t *resv_bitmap)
 		job_ptr->details->exc_node_bitmap = bit_copy(resv_bitmap);
 
 	rc = select_nodes(job_ptr, false, NULL);
-	FREE_NULL_BITMAP(job_ptr->details->exc_node_bitmap);
-	job_ptr->details->exc_node_bitmap = orig_exc_nodes;
+	if (job_ptr->details) { /* select_nodes() might cancel the job! */
+		FREE_NULL_BITMAP(job_ptr->details->exc_node_bitmap);
+		job_ptr->details->exc_node_bitmap = orig_exc_nodes;
+	} else
+		FREE_NULL_BITMAP(orig_exc_nodes);
 	if (rc == SLURM_SUCCESS) {
 		/* job initiated */
 		last_job_update = time(NULL);
@@ -858,6 +902,11 @@ static void _add_reservation(uint32_t start_time, uint32_t end_reserve,
 	bool placed = false;
 	int i, j;
 
+	/* If we decrease the resolution of our timing information, this can
+	 * decrease the number of records managed and increase performance */
+	start_time = (start_time / backfill_resolution) * backfill_resolution;
+	end_reserve = (end_reserve / backfill_resolution) * backfill_resolution;
+
 	for (j=0; ; ) {
 		if (node_space[j].end_time > start_time) {
 			/* insert start entry record */
diff --git a/src/plugins/sched/backfill/backfill.h b/src/plugins/sched/backfill/backfill.h
index dcb9db334..2060d29dc 100644
--- a/src/plugins/sched/backfill/backfill.h
+++ b/src/plugins/sched/backfill/backfill.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/sched/backfill/backfill_wrapper.c b/src/plugins/sched/backfill/backfill_wrapper.c
index b8b7a7851..cc7812161 100644
--- a/src/plugins/sched/backfill/backfill_wrapper.c
+++ b/src/plugins/sched/backfill/backfill_wrapper.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -41,7 +41,8 @@
 #include <pthread.h>
 #include <stdio.h>
 #include <unistd.h>
-#include <slurm/slurm_errno.h>
+
+#include "slurm/slurm_errno.h"
 
 #include "src/common/plugin.h"
 #include "src/common/log.h"
diff --git a/src/plugins/sched/builtin/Makefile.in b/src/plugins/sched/builtin/Makefile.in
index a790fbc0a..985bfd225 100644
--- a/src/plugins/sched/builtin/Makefile.in
+++ b/src/plugins/sched/builtin/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -138,7 +140,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -175,6 +180,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -232,6 +238,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -267,6 +274,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/sched/builtin/builtin.c b/src/plugins/sched/builtin/builtin.c
index b18ed7998..9ea5aa302 100644
--- a/src/plugins/sched/builtin/builtin.c
+++ b/src/plugins/sched/builtin/builtin.c
@@ -12,7 +12,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -190,23 +190,24 @@ static void _compute_start_times(void)
 				       min_nodes, max_nodes, req_nodes,
 				       SELECT_MODE_WILL_RUN,
 				       preemptee_candidates, NULL);
-		last_job_update = now;
-
-		if (job_ptr->time_limit == INFINITE)
-			time_limit = 365 * 24 * 60 * 60;
-		else if (job_ptr->time_limit != NO_VAL)
-			time_limit = job_ptr->time_limit * 60;
-		else if (job_ptr->part_ptr &&
-			 (job_ptr->part_ptr->max_time != INFINITE))
-			time_limit = job_ptr->part_ptr->max_time * 60;
-		else
-			time_limit = 365 * 24 * 60 * 60;
-		if (bit_overlap(alloc_bitmap, avail_bitmap) &&
-		    (job_ptr->start_time <= last_job_alloc)) {
-			job_ptr->start_time = last_job_alloc;
+		if (rc == SLURM_SUCCESS) {
+			last_job_update = now;
+			if (job_ptr->time_limit == INFINITE)
+				time_limit = 365 * 24 * 60 * 60;
+			else if (job_ptr->time_limit != NO_VAL)
+				time_limit = job_ptr->time_limit * 60;
+			else if (job_ptr->part_ptr &&
+				 (job_ptr->part_ptr->max_time != INFINITE))
+				time_limit = job_ptr->part_ptr->max_time * 60;
+			else
+				time_limit = 365 * 24 * 60 * 60;
+			if (bit_overlap(alloc_bitmap, avail_bitmap) &&
+			    (job_ptr->start_time <= last_job_alloc)) {
+				job_ptr->start_time = last_job_alloc;
+			}
+			bit_or(alloc_bitmap, avail_bitmap);
+			last_job_alloc = job_ptr->start_time + time_limit;
 		}
-		bit_or(alloc_bitmap, avail_bitmap);
-		last_job_alloc = job_ptr->start_time + time_limit;
 		FREE_NULL_BITMAP(avail_bitmap);
 
 		if ((time(NULL) - sched_start) >= sched_timeout) {
diff --git a/src/plugins/sched/builtin/builtin.h b/src/plugins/sched/builtin/builtin.h
index 8682356ad..d08740399 100644
--- a/src/plugins/sched/builtin/builtin.h
+++ b/src/plugins/sched/builtin/builtin.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/sched/builtin/builtin_wrapper.c b/src/plugins/sched/builtin/builtin_wrapper.c
index 2ea60856e..3dfe16659 100644
--- a/src/plugins/sched/builtin/builtin_wrapper.c
+++ b/src/plugins/sched/builtin/builtin_wrapper.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -38,7 +38,8 @@
 \*****************************************************************************/
 
 #include <stdio.h>
-#include <slurm/slurm_errno.h>
+
+#include "slurm/slurm_errno.h"
 
 #include "src/common/plugin.h"
 #include "src/common/log.h"
diff --git a/src/plugins/sched/hold/Makefile.in b/src/plugins/sched/hold/Makefile.in
index 1f5d3e46a..481e334f9 100644
--- a/src/plugins/sched/hold/Makefile.in
+++ b/src/plugins/sched/hold/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -138,7 +140,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -175,6 +180,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -232,6 +238,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -267,6 +274,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/sched/hold/hold_wrapper.c b/src/plugins/sched/hold/hold_wrapper.c
index 19e7dd4e5..4e83f20bf 100644
--- a/src/plugins/sched/hold/hold_wrapper.c
+++ b/src/plugins/sched/hold/hold_wrapper.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -42,7 +42,7 @@
 #include <sys/stat.h>
 #include <unistd.h>
 
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm_errno.h"
 
 #include "src/common/plugin.h"
 #include "src/common/log.h"
diff --git a/src/plugins/sched/wiki/Makefile.in b/src/plugins/sched/wiki/Makefile.in
index aa24751cb..c702e0878 100644
--- a/src/plugins/sched/wiki/Makefile.in
+++ b/src/plugins/sched/wiki/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -140,7 +142,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -177,6 +182,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -234,6 +240,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -269,6 +276,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/sched/wiki/cancel_job.c b/src/plugins/sched/wiki/cancel_job.c
index 63741d8e2..98c0a510e 100644
--- a/src/plugins/sched/wiki/cancel_job.c
+++ b/src/plugins/sched/wiki/cancel_job.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -105,7 +105,7 @@ static int	_cancel_job(uint32_t jobid, int *err_code, char **err_msg)
 		NO_LOCK, WRITE_LOCK, NO_LOCK, NO_LOCK };
 
 	lock_slurmctld(job_write_lock);
-	slurm_rc = job_signal(jobid, SIGKILL, 0, 0);
+	slurm_rc = job_signal(jobid, SIGKILL, 0, 0, false);
 	if (slurm_rc != SLURM_SUCCESS) {
 		*err_code = -700;
 		*err_msg = slurm_strerror(slurm_rc);
diff --git a/src/plugins/sched/wiki/get_jobs.c b/src/plugins/sched/wiki/get_jobs.c
index 881098c6e..f460f096d 100644
--- a/src/plugins/sched/wiki/get_jobs.c
+++ b/src/plugins/sched/wiki/get_jobs.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -501,7 +501,7 @@ static char *	_get_job_state(struct job_record *job_ptr)
 
 	if (IS_JOB_COMPLETE(job_ptr))
 		return "Completed";
-	else /* JOB_CANCELLED, JOB_FAILED, JOB_TIMEOUT, JOB_NODE_FAIL */
+	else /* JOB_CANCELLED, JOB_FAILED, JOB_TIMEOUT, JOB_NODE_FAIL, etc. */
 		return "Removed";
 }
 
diff --git a/src/plugins/sched/wiki/get_nodes.c b/src/plugins/sched/wiki/get_nodes.c
index 81902b3a2..12c9c20d7 100644
--- a/src/plugins/sched/wiki/get_nodes.c
+++ b/src/plugins/sched/wiki/get_nodes.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -38,6 +38,7 @@
 \*****************************************************************************/
 
 #include "./msg.h"
+#include "src/common/node_select.h"
 #include "src/slurmctld/locks.h"
 #include "src/slurmctld/slurmctld.h"
 
@@ -73,6 +74,28 @@ extern int	get_nodes(char *cmd_ptr, int *err_code, char **err_msg)
 		NO_LOCK, NO_LOCK, READ_LOCK, READ_LOCK };
 	int node_rec_cnt = 0, buf_size = 0;
 
+#ifdef HAVE_CRAY
+	/* Locks: write node */
+	slurmctld_lock_t node_write_lock = {
+		NO_LOCK, NO_LOCK, WRITE_LOCK, NO_LOCK };
+
+	/*
+	 * Run a Basil Inventory immediately before scheduling, to avoid
+	 * race conditions caused by ALPS node state change (caused e.g.
+	 * by the node health checker).
+	 * This relies on the above write lock for the node state.
+	 */
+	lock_slurmctld(node_write_lock);
+	if (select_g_reconfigure()) {
+		unlock_slurmctld(node_write_lock);
+		*err_code = -720;
+		*err_msg = "Unable to run ALPS inventory";
+		error("wiki: Unable to run ALPS inventory");
+		return -1;
+	}
+	unlock_slurmctld(node_write_lock);
+#endif
+
 	arg_ptr = strstr(cmd_ptr, "ARG=");
 	if (arg_ptr == NULL) {
 		*err_code = -300;
diff --git a/src/plugins/sched/wiki/hostlist.c b/src/plugins/sched/wiki/hostlist.c
index 530073829..a713dfbf9 100644
--- a/src/plugins/sched/wiki/hostlist.c
+++ b/src/plugins/sched/wiki/hostlist.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/sched/wiki/job_modify.c b/src/plugins/sched/wiki/job_modify.c
index 8e719dc8f..abf6914d7 100644
--- a/src/plugins/sched/wiki/job_modify.c
+++ b/src/plugins/sched/wiki/job_modify.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/sched/wiki/msg.c b/src/plugins/sched/wiki/msg.c
index 17f435bb2..836f21cd4 100644
--- a/src/plugins/sched/wiki/msg.c
+++ b/src/plugins/sched/wiki/msg.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -38,10 +38,10 @@
 \*****************************************************************************/
 
 #include "slurm/slurm.h"
-#include <src/common/uid.h>
-#include <src/slurmctld/locks.h>
-#include <src/plugins/sched/wiki/crypto.h>
-#include <src/plugins/sched/wiki/msg.h>
+#include "src/common/uid.h"
+#include "src/slurmctld/locks.h"
+#include "src/plugins/sched/wiki/crypto.h"
+#include "src/plugins/sched/wiki/msg.h"
 
 #define _DEBUG 0
 
@@ -285,7 +285,7 @@ extern int parse_wiki_config(void)
 
 	debug("Reading wiki.conf file (%s)",wiki_conf);
 	tbl = s_p_hashtbl_create(options);
-	if (s_p_parse_file(tbl, NULL, wiki_conf) == SLURM_ERROR)
+	if (s_p_parse_file(tbl, NULL, wiki_conf, false) == SLURM_ERROR)
 		fatal("something wrong with opening/reading wiki.conf file");
 
 	if (! s_p_get_string(&key, "AuthKey", tbl))
diff --git a/src/plugins/sched/wiki/msg.h b/src/plugins/sched/wiki/msg.h
index 0cbff35c4..b9f329c60 100644
--- a/src/plugins/sched/wiki/msg.h
+++ b/src/plugins/sched/wiki/msg.h
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -68,7 +68,8 @@
 #include <unistd.h>
 #include <sys/stat.h>
 #include <sys/types.h>
-#include <slurm/slurm_errno.h>
+
+#include "slurm/slurm_errno.h"
 
 #include "src/common/bitstring.h"
 #include "src/common/hostlist.h"
diff --git a/src/plugins/sched/wiki/resume_job.c b/src/plugins/sched/wiki/resume_job.c
index b64437913..9eff6fff3 100644
--- a/src/plugins/sched/wiki/resume_job.c
+++ b/src/plugins/sched/wiki/resume_job.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/sched/wiki/sched_wiki.c b/src/plugins/sched/wiki/sched_wiki.c
index 96a1f2c90..724848731 100644
--- a/src/plugins/sched/wiki/sched_wiki.c
+++ b/src/plugins/sched/wiki/sched_wiki.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -37,7 +37,8 @@
 \*****************************************************************************/
 
 #include <stdio.h>
-#include <slurm/slurm_errno.h>
+
+#include "slurm/slurm_errno.h"
 
 #include "src/common/plugin.h"
 #include "src/common/log.h"
@@ -170,7 +171,7 @@ char *slurm_sched_strerror( int errnum )
 /**************************************************************************/
 void slurm_sched_plugin_requeue( struct job_record *job_ptr, char *reason )
 {
-	/* Empty. */
+	job_ptr->priority = 0;
 }
 
 /**************************************************************************/
diff --git a/src/plugins/sched/wiki/start_job.c b/src/plugins/sched/wiki/start_job.c
index 3802abe49..b3f9ab165 100644
--- a/src/plugins/sched/wiki/start_job.c
+++ b/src/plugins/sched/wiki/start_job.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/sched/wiki/suspend_job.c b/src/plugins/sched/wiki/suspend_job.c
index 879b5eb09..e1ef9f64c 100644
--- a/src/plugins/sched/wiki/suspend_job.c
+++ b/src/plugins/sched/wiki/suspend_job.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/sched/wiki2/Makefile.in b/src/plugins/sched/wiki2/Makefile.in
index afedcc449..695d6d3df 100644
--- a/src/plugins/sched/wiki2/Makefile.in
+++ b/src/plugins/sched/wiki2/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -143,7 +145,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -180,6 +185,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -237,6 +243,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -272,6 +279,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/sched/wiki2/cancel_job.c b/src/plugins/sched/wiki2/cancel_job.c
index 95ff9e104..b3ef339b3 100644
--- a/src/plugins/sched/wiki2/cancel_job.c
+++ b/src/plugins/sched/wiki2/cancel_job.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -171,7 +171,7 @@ static int	_cancel_job(uint32_t jobid, char *comment_ptr,
 		job_ptr->comment = xstrdup(comment_ptr);
 	}
 
-	slurm_rc = job_signal(jobid, SIGKILL, 0, 0);
+	slurm_rc = job_signal(jobid, SIGKILL, 0, 0, false);
 	if (slurm_rc != SLURM_SUCCESS) {
 		*err_code = -700;
 		*err_msg = slurm_strerror(slurm_rc);
diff --git a/src/plugins/sched/wiki2/event.c b/src/plugins/sched/wiki2/event.c
index 3d0dc09c2..a40e725c9 100644
--- a/src/plugins/sched/wiki2/event.c
+++ b/src/plugins/sched/wiki2/event.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/sched/wiki2/get_jobs.c b/src/plugins/sched/wiki2/get_jobs.c
index ef1f24653..b3a362380 100644
--- a/src/plugins/sched/wiki2/get_jobs.c
+++ b/src/plugins/sched/wiki2/get_jobs.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -645,7 +645,7 @@ static char *	_get_job_state(struct job_record *job_ptr)
 
 	if (IS_JOB_COMPLETE(job_ptr) || IS_JOB_FAILED(job_ptr))
 		state_str = "Completed";
-	else /* JOB_CANCELLED, JOB_TIMEOUT, JOB_NODE_FAIL */
+	else /* JOB_CANCELLED, JOB_TIMEOUT, JOB_NODE_FAIL, etc. */
 		state_str = "Removed";
 	snprintf(return_msg, sizeof(return_msg), "%s;EXITCODE=%u",
 		state_str, WEXITSTATUS(job_ptr->exit_code));
@@ -703,6 +703,7 @@ extern void wiki_job_requeue(struct job_record *job_ptr, char *reason)
 {
 	int empty = -1, i;
 
+	job_ptr->priority = 0;
 	for (i=0; i<REJECT_MSG_MAX; i++) {
 		if ((reject_msgs[i].job_id == 0) && (empty == -1)) {
 			empty = i;
diff --git a/src/plugins/sched/wiki2/get_nodes.c b/src/plugins/sched/wiki2/get_nodes.c
index d1648226b..d31ce43a0 100644
--- a/src/plugins/sched/wiki2/get_nodes.c
+++ b/src/plugins/sched/wiki2/get_nodes.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -39,6 +39,7 @@
 
 #include "./msg.h"
 #include "src/common/hostlist.h"
+#include "src/common/node_select.h"
 #include "src/slurmctld/locks.h"
 #include "src/slurmctld/slurmctld.h"
 
@@ -85,6 +86,28 @@ extern int	get_nodes(char *cmd_ptr, int *err_code, char **err_msg)
 		NO_LOCK, NO_LOCK, READ_LOCK, READ_LOCK };
 	int node_rec_cnt = 0, buf_size = 0;
 
+#ifdef HAVE_CRAY
+	/* Locks: write node */
+	slurmctld_lock_t node_write_lock = {
+		NO_LOCK, NO_LOCK, WRITE_LOCK, NO_LOCK };
+
+	/*
+	 * Run a Basil Inventory immediately before scheduling, to avoid
+	 * race conditions caused by ALPS node state change (caused e.g.
+	 * by the node health checker).
+	 * This relies on the above write lock for the node state.
+	 */
+	lock_slurmctld(node_write_lock);
+	if (select_g_reconfigure()) {
+		unlock_slurmctld(node_write_lock);
+		*err_code = -720;
+		*err_msg = "Unable to run ALPS inventory";
+		error("wiki: Unable to run ALPS inventory");
+		return -1;
+	}
+	unlock_slurmctld(node_write_lock);
+#endif
+
 	arg_ptr = strstr(cmd_ptr, "ARG=");
 	if (arg_ptr == NULL) {
 		*err_code = -300;
diff --git a/src/plugins/sched/wiki2/hostlist.c b/src/plugins/sched/wiki2/hostlist.c
index 06821bd98..273cc591f 100644
--- a/src/plugins/sched/wiki2/hostlist.c
+++ b/src/plugins/sched/wiki2/hostlist.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/sched/wiki2/initialize.c b/src/plugins/sched/wiki2/initialize.c
index a89c2151d..49e01c175 100644
--- a/src/plugins/sched/wiki2/initialize.c
+++ b/src/plugins/sched/wiki2/initialize.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/sched/wiki2/job_add_task.c b/src/plugins/sched/wiki2/job_add_task.c
index 956c12f00..85710fbf7 100644
--- a/src/plugins/sched/wiki2/job_add_task.c
+++ b/src/plugins/sched/wiki2/job_add_task.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/sched/wiki2/job_modify.c b/src/plugins/sched/wiki2/job_modify.c
index 4c29b9880..2924189cb 100644
--- a/src/plugins/sched/wiki2/job_modify.c
+++ b/src/plugins/sched/wiki2/job_modify.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/sched/wiki2/job_notify.c b/src/plugins/sched/wiki2/job_notify.c
index 09b819cc9..6cd73ecde 100644
--- a/src/plugins/sched/wiki2/job_notify.c
+++ b/src/plugins/sched/wiki2/job_notify.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/sched/wiki2/job_release_task.c b/src/plugins/sched/wiki2/job_release_task.c
index e5f7fe78a..288c414ce 100644
--- a/src/plugins/sched/wiki2/job_release_task.c
+++ b/src/plugins/sched/wiki2/job_release_task.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/sched/wiki2/job_requeue.c b/src/plugins/sched/wiki2/job_requeue.c
index a9db26aad..b601b0cdb 100644
--- a/src/plugins/sched/wiki2/job_requeue.c
+++ b/src/plugins/sched/wiki2/job_requeue.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -68,7 +68,7 @@ extern int	job_requeue_wiki(char *cmd_ptr, int *err_code, char **err_msg)
 	}
 
 	lock_slurmctld(job_write_lock);
-	slurm_rc = job_requeue(0, jobid, -1, (uint16_t)NO_VAL);
+	slurm_rc = job_requeue(0, jobid, -1, (uint16_t)NO_VAL, false);
 	if (slurm_rc != SLURM_SUCCESS) {
 		unlock_slurmctld(job_write_lock);
 		*err_code = -700;
@@ -85,7 +85,6 @@ extern int	job_requeue_wiki(char *cmd_ptr, int *err_code, char **err_msg)
 		xfree(job_ptr->details->req_nodes);
 		FREE_NULL_BITMAP(job_ptr->details->req_node_bitmap);
 	}
-	job_ptr->priority = 0;
 	info("wiki: requeued job %u", jobid);
 	unlock_slurmctld(job_write_lock);
 	snprintf(reply_msg, sizeof(reply_msg),
diff --git a/src/plugins/sched/wiki2/job_signal.c b/src/plugins/sched/wiki2/job_signal.c
index 958610369..db294f7c9 100644
--- a/src/plugins/sched/wiki2/job_signal.c
+++ b/src/plugins/sched/wiki2/job_signal.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -100,9 +100,9 @@ static int	_job_signal(uint32_t jobid, uint16_t sig_num)
 		return ESLURM_ALREADY_DONE;
 
 	if (job_ptr->batch_flag)
-		rc = job_signal(jobid, sig_num, 1, 0);
+		rc = job_signal(jobid, sig_num, 1, 0, false);
 	if (rc == SLURM_SUCCESS)
-		rc = job_signal(jobid, sig_num, 0, 0);
+		rc = job_signal(jobid, sig_num, 0, 0, false);
 	return rc;
 }
 
diff --git a/src/plugins/sched/wiki2/job_will_run.c b/src/plugins/sched/wiki2/job_will_run.c
index cb858a90d..48e3fbf1e 100644
--- a/src/plugins/sched/wiki2/job_will_run.c
+++ b/src/plugins/sched/wiki2/job_will_run.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/sched/wiki2/msg.c b/src/plugins/sched/wiki2/msg.c
index efe23ac2a..3b34af0d7 100644
--- a/src/plugins/sched/wiki2/msg.c
+++ b/src/plugins/sched/wiki2/msg.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -38,10 +38,10 @@
 \*****************************************************************************/
 
 #include "slurm/slurm.h"
-#include <src/common/uid.h>
-#include <src/slurmctld/locks.h>
-#include <src/plugins/sched/wiki2/crypto.h>
-#include <src/plugins/sched/wiki2/msg.h>
+#include "src/common/uid.h"
+#include "src/slurmctld/locks.h"
+#include "src/plugins/sched/wiki2/crypto.h"
+#include "src/plugins/sched/wiki2/msg.h"
 #include <sys/poll.h>
 
 #define _DEBUG 0
@@ -292,7 +292,7 @@ extern int parse_wiki_config(void)
 
 	debug("Reading wiki.conf file (%s)",wiki_conf);
 	tbl = s_p_hashtbl_create(options);
-	if (s_p_parse_file(tbl, NULL, wiki_conf) == SLURM_ERROR)
+	if (s_p_parse_file(tbl, NULL, wiki_conf, false) == SLURM_ERROR)
 		fatal("something wrong with opening/reading wiki.conf file");
 
 	if (! s_p_get_string(&key, "AuthKey", tbl))
diff --git a/src/plugins/sched/wiki2/msg.h b/src/plugins/sched/wiki2/msg.h
index bade52ae4..75fa6dbae 100644
--- a/src/plugins/sched/wiki2/msg.h
+++ b/src/plugins/sched/wiki2/msg.h
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -68,7 +68,8 @@
 #include <unistd.h>
 #include <sys/stat.h>
 #include <sys/types.h>
-#include <slurm/slurm_errno.h>
+
+#include "slurm/slurm_errno.h"
 
 #include "src/common/bitstring.h"
 #include "src/common/hostlist.h"
diff --git a/src/plugins/sched/wiki2/resume_job.c b/src/plugins/sched/wiki2/resume_job.c
index b64437913..9eff6fff3 100644
--- a/src/plugins/sched/wiki2/resume_job.c
+++ b/src/plugins/sched/wiki2/resume_job.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/sched/wiki2/sched_wiki.c b/src/plugins/sched/wiki2/sched_wiki.c
index 229f66c80..b1cf9348e 100644
--- a/src/plugins/sched/wiki2/sched_wiki.c
+++ b/src/plugins/sched/wiki2/sched_wiki.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -174,7 +174,7 @@ char *slurm_sched_strerror( int errnum )
 /**************************************************************************/
 void slurm_sched_plugin_requeue( struct job_record *job_ptr, char *reason )
 {
-        wiki_job_requeue(job_ptr, reason);
+ 	wiki_job_requeue(job_ptr, reason);
 }
 
 /**************************************************************************/
diff --git a/src/plugins/sched/wiki2/start_job.c b/src/plugins/sched/wiki2/start_job.c
index 5f7fc3b97..9d6393a64 100644
--- a/src/plugins/sched/wiki2/start_job.c
+++ b/src/plugins/sched/wiki2/start_job.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/sched/wiki2/suspend_job.c b/src/plugins/sched/wiki2/suspend_job.c
index 879b5eb09..e1ef9f64c 100644
--- a/src/plugins/sched/wiki2/suspend_job.c
+++ b/src/plugins/sched/wiki2/suspend_job.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/select/Makefile.am b/src/plugins/select/Makefile.am
index 7757d670f..dbcc83bb8 100644
--- a/src/plugins/select/Makefile.am
+++ b/src/plugins/select/Makefile.am
@@ -1,6 +1,10 @@
 # Makefile for node selection plugins
 
-SUBDIRS = bgq bluegene cons_res cray linear
+if WITH_CXX
+BLUEGENE = bluegene
+endif
+
+SUBDIRS = $(BLUEGENE) cons_res cray linear
 
 # Each plugin here needs a plugin_id, here are the currect plug_ids
 # for each plugin.
@@ -10,4 +14,4 @@ SUBDIRS = bgq bluegene cons_res cray linear
 # linear	  = 102
 # bgq		  = 103
 # cray + linear   = 104
-# cray + cons_res = 105 ==> To be done, currently not possible on Cray.
+# cray + cons_res = 105 ==> Not current possible on Cray with ALPS/BASIL.
diff --git a/src/plugins/select/Makefile.in b/src/plugins/select/Makefile.in
index 08ca4aa48..71c4ba5d5 100644
--- a/src/plugins/select/Makefile.in
+++ b/src/plugins/select/Makefile.in
@@ -62,6 +62,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -72,6 +73,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -96,7 +98,7 @@ AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \
 	distdir
 ETAGS = etags
 CTAGS = ctags
-DIST_SUBDIRS = $(SUBDIRS)
+DIST_SUBDIRS = bluegene cons_res cray linear
 DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
 am__relativize = \
   dir0=`pwd`; \
@@ -133,7 +135,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -170,6 +175,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -227,6 +233,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -262,6 +269,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
@@ -314,7 +322,8 @@ target_vendor = @target_vendor@
 top_build_prefix = @top_build_prefix@
 top_builddir = @top_builddir@
 top_srcdir = @top_srcdir@
-SUBDIRS = bgq bluegene cons_res cray linear
+@WITH_CXX_TRUE@BLUEGENE = bluegene
+SUBDIRS = $(BLUEGENE) cons_res cray linear
 all: all-recursive
 
 .SUFFIXES:
@@ -670,7 +679,7 @@ uninstall-am:
 # linear	  = 102
 # bgq		  = 103
 # cray + linear   = 104
-# cray + cons_res = 105 ==> To be done, currently not possible on Cray.
+# cray + cons_res = 105 ==> Not current possible on Cray with ALPS/BASIL.
 
 # Tell versions [3.59,3.63) of GNU make to not export all variables.
 # Otherwise a system limit (for SysV at least) may be exceeded.
diff --git a/src/plugins/select/bgq/Makefile.am b/src/plugins/select/bgq/Makefile.am
deleted file mode 100644
index 75edd2423..000000000
--- a/src/plugins/select/bgq/Makefile.am
+++ /dev/null
@@ -1,15 +0,0 @@
-# Makefile for select/bgq plugin
-
-CPPFLAGS = -DBLUEGENE_CONFIG_FILE=\"$(sysconfdir)/bluegene.conf\"
-AUTOMAKE_OPTIONS = foreign
-
-PLUGIN_FLAGS = -module -avoid-version --export-dynamic
-
-INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common $(BG_INCLUDES)
-
-pkglib_LTLIBRARIES = select_bgq.la
-
-# Linear node selection plugin.
-select_bgq_la_SOURCES = select_bgq.cc bgq.h
-select_bgq_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
-
diff --git a/src/plugins/select/bgq/select_bgq.cc b/src/plugins/select/bgq/select_bgq.cc
deleted file mode 100644
index d017bc011..000000000
--- a/src/plugins/select/bgq/select_bgq.cc
+++ /dev/null
@@ -1,427 +0,0 @@
-/*****************************************************************************\
- *  select_bgq.cc - node selection plugin for Blue Gene/Q system.
- *****************************************************************************
- *  Copyright (C) 2010 Lawrence Livermore National Security.
- *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
- *  Written by Danny Auble <da@llnl.gov>
- *  CODE-OCEC-09-009. All rights reserved.
- *
- *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
- *  Please also read the included file: DISCLAIMER.
- *
- *  SLURM is free software; you can redistribute it and/or modify it under
- *  the terms of the GNU General Public License as published by the Free
- *  Software Foundation; either version 2 of the License, or (at your option)
- *  any later version.
- *
- *  In addition, as a special exception, the copyright holders give permission
- *  to link the code of portions of this program with the OpenSSL library under
- *  certain conditions as described in each individual source file, and
- *  distribute linked combinations including the two. You must obey the GNU
- *  General Public License in all respects for all of the code used other than
- *  OpenSSL. If you modify file(s) with this exception, you may extend this
- *  exception to your version of the file(s), but you are not obligated to do
- *  so. If you do not wish to do so, delete this exception statement from your
- *  version.  If you delete this exception statement from all source files in
- *  the program, then also delete it here.
- *
- *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
- *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
- *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
- *  details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with SLURM; if not, write to the Free Software Foundation, Inc.,
- *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
-\*****************************************************************************/
-
-
-#include "bgq.h"
-
-#define HUGE_BUF_SIZE (1024*16)
-#define NOT_FROM_CONTROLLER -2
-
-/* These are defined here so when we link with something other than
- * the slurmctld we will have these symbols defined.  They will get
- * overwritten when linking with the slurmctld.
- */
-#if defined (__APPLE__)
-slurm_ctl_conf_t slurmctld_conf __attribute__((weak_import));
-struct node_record *node_record_table_ptr  __attribute__((weak_import)) = NULL;
-int bg_recover __attribute__((weak_import)) = NOT_FROM_CONTROLLER;
-List part_list  __attribute__((weak_import)) = NULL;
-int node_record_count __attribute__((weak_import));
-time_t last_node_update __attribute__((weak_import));
-time_t last_job_update __attribute__((weak_import));
-char *alpha_num  __attribute__((weak_import)) =
-	"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
-void *acct_db_conn  __attribute__((weak_import)) = NULL;
-char *slurmctld_cluster_name  __attribute__((weak_import)) = NULL;
-slurmdb_cluster_rec_t *working_cluster_rec  __attribute__((weak_import)) = NULL;
-#else
-slurm_ctl_conf_t slurmctld_conf;
-struct node_record *node_record_table_ptr = NULL;
-int bg_recover = NOT_FROM_CONTROLLER;
-List part_list = NULL;
-int node_record_count;
-time_t last_node_update;
-time_t last_job_update;
-char *alpha_num = (char *)"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
-void *acct_db_conn = NULL;
-char *slurmctld_cluster_name = NULL;
-slurmdb_cluster_rec_t *working_cluster_rec = NULL;
-#endif
-
-/* In C++ const objects have internal linkage by default so we have to
- * do an extra extern here to make them export correctly.
- */
-extern const char plugin_name[]       	= "BG/Q node selection plugin";
-extern const char plugin_type[]       	= "select/bgq";
-extern const uint32_t plugin_id     	= 103;
-extern const uint32_t plugin_version	= 100;
-
-extern "C" {
-
-/*
- * init() is called when the plugin is loaded, before any other functions
- * are called.  Put global initialization here.
- */
-extern int init ( void )
-{
-
-#ifdef HAVE_BGQ
-	if(bg_recover != NOT_FROM_CONTROLLER) {
-#if (SYSTEM_DIMENSIONS != 4)
-		fatal("SYSTEM_DIMENSIONS value (%d) invalid for BGQ",
-		      SYSTEM_DIMENSIONS);
-#endif
-
-		verbose("%s loading...", plugin_name);
-		/* if this is coming from something other than the controller
-		   we don't want to read the config or anything like that. */
-	}
-	verbose("%s loaded", plugin_name);
-#else
-	if (bg_recover != NOT_FROM_CONTROLLER)
-		fatal("select/bgq is incompatible with a "
-		      "non BlueGene/Q system");
-#endif
-	return SLURM_SUCCESS;
-}
-
-extern int fini ( void )
-{
-	int rc = SLURM_SUCCESS;
-
-	return rc;
-}
-
-/*
- * The remainder of this file implements the standard SLURM
- * node selection API.
- */
-
-/* We rely upon DB2 to save and restore BlueGene state */
-extern int select_p_state_save(char *dir_name)
-{
-#ifdef HAVE_BGQ
-	return SLURM_SUCCESS;
-#else
-	return SLURM_ERROR;
-#endif
-}
-
-extern int select_p_state_restore(char *dir_name)
-{
-#ifdef HAVE_BGQ
-	debug("bgq: select_p_state_restore");
-
-	return SLURM_SUCCESS;
-#else
-	return SLURM_ERROR;
-#endif
-}
-
-/* Sync BG blocks to currently active jobs */
-extern int select_p_job_init(List job_list)
-{
-#ifdef HAVE_BGQ
-	int rc = SLURM_SUCCESS;
-	return rc;
-#else
-	return SLURM_ERROR;
-#endif
-}
-
-/* All initialization is performed by init() */
-extern int select_p_node_init(struct node_record *node_ptr, int node_cnt)
-{
-#ifdef HAVE_BGQ
-	return SLURM_SUCCESS;
-#else
-	return SLURM_ERROR;
-#endif
-}
-
-/*
- * Called by slurmctld when a new configuration file is loaded
- * or scontrol is used to change block configuration
- */
- extern int select_p_block_init(List part_list)
-{
-#ifdef HAVE_BGQ
-	return SLURM_SUCCESS;
-#else
-	return SLURM_ERROR;
-#endif
-}
-
-
-/*
- * select_p_job_test - Given a specification of scheduling requirements,
- *	identify the nodes which "best" satify the request. The specified
- *	nodes may be DOWN or BUSY at the time of this test as may be used
- *	to deterime if a job could ever run.
- * IN/OUT job_ptr - pointer to job being scheduled start_time is set
- *	when we can possibly start job.
- * IN/OUT bitmap - usable nodes are set on input, nodes not required to
- *	satisfy the request are cleared, other left set
- * IN min_nodes - minimum count of nodes
- * IN max_nodes - maximum count of nodes (0==don't care)
- * IN req_nodes - requested (or desired) count of nodes
- * IN mode - SELECT_MODE_RUN_NOW: try to schedule job now
- *           SELECT_MODE_TEST_ONLY: test if job can ever run
- *           SELECT_MODE_WILL_RUN: determine when and where job can run
- * IN preemptee_candidates - List of pointers to jobs which can be preempted.
- * IN/OUT preemptee_job_list - Pointer to list of job pointers. These are the
- *		jobs to be preempted to initiate the pending job. Not set
- *		if mode=SELECT_MODE_TEST_ONLY or input pointer is NULL.
- * RET zero on success, EINVAL otherwise
- * NOTE: bitmap must be a superset of req_nodes at the time that
- *	select_p_job_test is called
- */
-extern int select_p_job_test(struct job_record *job_ptr, bitstr_t *bitmap,
-			     uint32_t min_nodes, uint32_t max_nodes,
-			     uint32_t req_nodes, uint16_t mode,
-			     List preemptee_candidates,
-			     List *preemptee_job_list)
-{
-#ifdef HAVE_BGQ
-	return SLURM_SUCCESS;
-#else
-	return SLURM_ERROR;
-#endif
-}
-
-extern int select_p_job_begin(struct job_record *job_ptr)
-{
-#ifdef HAVE_BGQ
-	return SLURM_SUCCESS;
-#else
-	return SLURM_ERROR;
-#endif
-}
-
-extern int select_p_job_ready(struct job_record *job_ptr)
-{
-#ifdef HAVE_BGQ
-	return SLURM_SUCCESS;
-#else
-	return SLURM_ERROR;
-#endif
-}
-
-extern int select_p_job_resized(struct job_record *job_ptr,
-				struct node_record *node_ptr)
-{
-	return ESLURM_NOT_SUPPORTED;
-}
-
-extern int select_p_job_fini(struct job_record *job_ptr)
-{
-#ifdef HAVE_BGQ
-	return SLURM_SUCCESS;
-#else
-	return SLURM_ERROR;
-#endif
-}
-
-extern int select_p_job_suspend(struct job_record *job_ptr)
-{
-	return ESLURM_NOT_SUPPORTED;
-}
-
-extern int select_p_job_resume(struct job_record *job_ptr)
-{
-	return ESLURM_NOT_SUPPORTED;
-}
-
-extern int select_p_pack_select_info(time_t last_query_time,
-				     uint16_t show_flags, Buf *buffer_ptr,
-				     uint16_t protocol_version)
-{
-#ifdef HAVE_BGQ
-	return ESLURM_NOT_SUPPORTED;
-#else
-	return SLURM_ERROR;
-#endif
-}
-
-extern int select_p_select_nodeinfo_pack(select_nodeinfo_t *nodeinfo,
-					 Buf buffer,
-					 uint16_t protocol_version)
-{
-	return ESLURM_NOT_SUPPORTED;
-}
-
-extern int select_p_select_nodeinfo_unpack(select_nodeinfo_t **nodeinfo,
-					   Buf buffer,
-					   uint16_t protocol_version)
-{
-	return ESLURM_NOT_SUPPORTED;
-}
-
-extern select_nodeinfo_t *select_p_select_nodeinfo_alloc(uint32_t size)
-{
-	return SLURM_SUCCESS;
-}
-
-extern int select_p_select_nodeinfo_free(select_nodeinfo_t *nodeinfo)
-{
-	return SLURM_SUCCESS;
-}
-
-extern int select_p_select_nodeinfo_set_all(time_t last_query_time)
-{
-	return SLURM_SUCCESS;
-}
-
-extern int select_p_select_nodeinfo_set(struct job_record *job_ptr)
-{
-	return SLURM_SUCCESS;
-}
-
-extern int select_p_select_nodeinfo_get(select_nodeinfo_t *nodeinfo,
-					enum select_nodedata_type dinfo,
-					enum node_states state,
-					void *data)
-{
-	return SLURM_SUCCESS;
-}
-
-select_jobinfo_t *select_p_select_jobinfo_alloc(void)
-{
-	return SLURM_SUCCESS;
-}
-
-extern int select_p_select_jobinfo_set(select_jobinfo_t *jobinfo,
-				       enum select_jobdata_type data_type,
-				       void *data)
-{
-	return SLURM_SUCCESS;
-}
-
-extern int select_p_select_jobinfo_get (select_jobinfo_t *jobinfo,
-				 enum select_jobdata_type data_type, void *data)
-{
-	return SLURM_SUCCESS;
-}
-
-extern select_jobinfo_t *select_p_select_jobinfo_copy(select_jobinfo_t *jobinfo)
-{
-	return SLURM_SUCCESS;
-}
-
-extern int select_p_select_jobinfo_free  (select_jobinfo_t *jobinfo)
-{
-	return SLURM_SUCCESS;
-}
-
-extern int  select_p_select_jobinfo_pack(select_jobinfo_t *jobinfo, Buf buffer,
-					 uint16_t protocol_version)
-{
-	return SLURM_SUCCESS;
-}
-
-extern int  select_p_select_jobinfo_unpack(select_jobinfo_t **jobinfo,
-					   Buf buffer,
-					   uint16_t protocol_version)
-{
-	return SLURM_SUCCESS;
-}
-
-extern char *select_p_select_jobinfo_sprint(select_jobinfo_t *jobinfo,
-				     char *buf, size_t size, int mode)
-{
-	return SLURM_SUCCESS;
-}
-
-extern char *select_p_select_jobinfo_xstrdup(select_jobinfo_t *jobinfo,
-					     int mode)
-{
-	return SLURM_SUCCESS;
-}
-
-extern int select_p_update_block (update_block_msg_t *block_desc_ptr)
-{
-#ifdef HAVE_BGQ
-	int rc = SLURM_SUCCESS;
-	return rc;
-#else
-	return SLURM_ERROR;
-#endif
-}
-
-extern int select_p_update_sub_node (update_block_msg_t *block_desc_ptr)
-{
-#ifdef HAVE_BGQ
-	return SLURM_SUCCESS;
-#else
-	return SLURM_ERROR;
-#endif
-}
-
-extern int select_p_get_info_from_plugin (enum select_plugindata_info dinfo,
-					  struct job_record *job_ptr,
-					  void *data)
-{
-#ifdef HAVE_BGQ
-	return SLURM_SUCCESS;
-#else
-	return SLURM_ERROR;
-#endif
-}
-
-extern int select_p_update_node_config (int index)
-{
-	return SLURM_SUCCESS;
-}
-
-extern int select_p_update_node_state (int index, uint16_t state)
-{
-#ifdef HAVE_BGQ
-	return SLURM_SUCCESS;
-#endif
-	return SLURM_ERROR;
-}
-
-extern int select_p_alter_node_cnt(enum select_node_cnt type, void *data)
-{
-#ifdef HAVE_BGQ
-	return SLURM_SUCCESS;
-#else
-	return SLURM_ERROR;
-#endif
-}
-
-extern int select_p_reconfigure(void)
-{
-#ifdef HAVE_BGQ
-	return SLURM_SUCCESS;
-#else
-	return SLURM_ERROR;
-#endif
-}
-
-}
diff --git a/src/plugins/select/bluegene/Makefile.am b/src/plugins/select/bluegene/Makefile.am
index e4963a75d..6f564f52b 100644
--- a/src/plugins/select/bluegene/Makefile.am
+++ b/src/plugins/select/bluegene/Makefile.am
@@ -1,3 +1,88 @@
 # Makefile for select/bluegene plugin
 
-SUBDIRS = block_allocator plugin
+AUTOMAKE_OPTIONS = foreign
+CLEANFILES = core.*
+
+CPPFLAGS = -DBLUEGENE_CONFIG_FILE=\"$(sysconfdir)/bluegene.conf\"
+
+PLUGIN_FLAGS = -module -avoid-version --export-dynamic -lm
+
+INCLUDES = -I$(top_srcdir)  -I$(top_srcdir)/src/common $(BG_INCLUDES)
+
+pkglib_LTLIBRARIES = select_bluegene.la
+
+noinst_LTLIBRARIES = libba_common.la libconfigure_api.la
+
+convenience_libs = $(top_builddir)/src/api/libslurm.o -ldl
+
+# These are needed for pack/unpack of structures for cross-cluster stuff
+select_bluegene_la_SOURCES = select_bluegene.c \
+			bg_job_info.c bg_job_info.h \
+			bg_node_info.c bg_node_info.h
+
+libba_common_la_SOURCES = ba_common.c ba_common.h
+
+libconfigure_api_la_SOURCES = configure_api.c
+
+select_bluegene_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
+select_bluegene_la_LIBADD = libba_common.la
+
+if BLUEGENE_LOADED
+
+SUBDIRS = sfree
+
+if BG_L_P_LOADED
+SUBDIRS += bl ba
+
+if BGL_LOADED
+pkglib_LTLIBRARIES += libsched_if64.la
+libsched_if64_la_SOURCES = libsched_if64.c
+libsched_if64_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
+else
+pkglib_LTLIBRARIES += libsched_if.la
+libsched_if_la_SOURCES = libsched_if64.c
+libsched_if_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
+endif
+
+block_allocator_lib  = ba/libblock_allocator.la
+endif
+
+if BGQ_LOADED
+  SUBDIRS += bl_bgq ba_bgq
+
+  block_allocator_lib = ba_bgq/libblock_allocator.la
+
+  # force link with g++
+  nodist_EXTRA_select_bluegene_la_SOURCES = dummy.cxx
+
+  pkglib_LTLIBRARIES += runjob_plugin.la
+  runjob_plugin_la_SOURCES = runjob_plugin.cc
+  runjob_plugin_la_LDFLAGS = -export-dynamic $(SO_LDFLAGS) $(PLUGIN_FLAGS) \
+	$(convenience_libs) -lpthread
+
+endif
+
+select_bluegene_la_SOURCES += bg_core.c bg_defined_block.c \
+			bg_dynamic_block.c bg_job_place.c \
+			bg_job_run.c bg_list_functions.c \
+			bg_read_config.c bg_record_functions.c \
+			bg_status.c
+
+select_bluegene_la_LIBADD += $(block_allocator_lib) $(BG_LDFLAGS)
+
+
+sbin_PROGRAMS = slurm_prolog slurm_epilog
+
+slurm_prolog_LDADD = $(convenience_libs)
+slurm_prolog_SOURCES = slurm_prolog.c
+slurm_prolog_LDFLAGS = -export-dynamic $(CMD_LDFLAGS)
+
+slurm_epilog_LDADD = $(convenience_libs)
+slurm_epilog_SOURCES = slurm_epilog.c
+slurm_epilog_LDFLAGS = -export-dynamic $(CMD_LDFLAGS)
+
+force:
+$(block_allocator_lib) $(convenience_libs) : force
+	@cd `dirname $@` && $(MAKE) `basename $@`
+
+endif
diff --git a/src/plugins/select/bluegene/Makefile.in b/src/plugins/select/bluegene/Makefile.in
index 160f0f6aa..e8b858546 100644
--- a/src/plugins/select/bluegene/Makefile.in
+++ b/src/plugins/select/bluegene/Makefile.in
@@ -16,6 +16,8 @@
 @SET_MAKE@
 
 # Makefile for select/bluegene plugin
+
+
 VPATH = @srcdir@
 pkgdatadir = $(datadir)/@PACKAGE@
 pkgincludedir = $(includedir)/@PACKAGE@
@@ -36,6 +38,20 @@ POST_UNINSTALL = :
 build_triplet = @build@
 host_triplet = @host@
 target_triplet = @target@
+@BG_L_P_LOADED_TRUE@@BLUEGENE_LOADED_TRUE@am__append_1 = bl ba
+@BGL_LOADED_TRUE@@BG_L_P_LOADED_TRUE@@BLUEGENE_LOADED_TRUE@am__append_2 = libsched_if64.la
+@BGL_LOADED_FALSE@@BG_L_P_LOADED_TRUE@@BLUEGENE_LOADED_TRUE@am__append_3 = libsched_if.la
+@BGQ_LOADED_TRUE@@BLUEGENE_LOADED_TRUE@am__append_4 = bl_bgq ba_bgq
+@BGQ_LOADED_TRUE@@BLUEGENE_LOADED_TRUE@am__append_5 = runjob_plugin.la
+@BLUEGENE_LOADED_TRUE@am__append_6 = bg_core.c bg_defined_block.c \
+@BLUEGENE_LOADED_TRUE@			bg_dynamic_block.c bg_job_place.c \
+@BLUEGENE_LOADED_TRUE@			bg_job_run.c bg_list_functions.c \
+@BLUEGENE_LOADED_TRUE@			bg_read_config.c bg_record_functions.c \
+@BLUEGENE_LOADED_TRUE@			bg_status.c
+
+@BLUEGENE_LOADED_TRUE@am__append_7 = $(block_allocator_lib) $(BG_LDFLAGS)
+@BLUEGENE_LOADED_TRUE@sbin_PROGRAMS = slurm_prolog$(EXEEXT) \
+@BLUEGENE_LOADED_TRUE@	slurm_epilog$(EXEEXT)
 subdir = src/plugins/select/bluegene
 DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in
 ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
@@ -62,6 +78,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -72,6 +89,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -80,8 +98,139 @@ mkinstalldirs = $(install_sh) -d
 CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h
 CONFIG_CLEAN_FILES =
 CONFIG_CLEAN_VPATH_FILES =
-SOURCES =
-DIST_SOURCES =
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+    $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+    *) f=$$p;; \
+  esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+  srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+  for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+  for p in $$list; do echo "$$p $$p"; done | \
+  sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+  $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+    if (++n[$$2] == $(am__install_max)) \
+      { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+    END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+  sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+  sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+am__installdirs = "$(DESTDIR)$(pkglibdir)" "$(DESTDIR)$(sbindir)"
+LTLIBRARIES = $(noinst_LTLIBRARIES) $(pkglib_LTLIBRARIES)
+libba_common_la_LIBADD =
+am_libba_common_la_OBJECTS = ba_common.lo
+libba_common_la_OBJECTS = $(am_libba_common_la_OBJECTS)
+libconfigure_api_la_LIBADD =
+am_libconfigure_api_la_OBJECTS = configure_api.lo
+libconfigure_api_la_OBJECTS = $(am_libconfigure_api_la_OBJECTS)
+libsched_if_la_LIBADD =
+am__libsched_if_la_SOURCES_DIST = libsched_if64.c
+@BGL_LOADED_FALSE@@BG_L_P_LOADED_TRUE@@BLUEGENE_LOADED_TRUE@am_libsched_if_la_OBJECTS = libsched_if64.lo
+libsched_if_la_OBJECTS = $(am_libsched_if_la_OBJECTS)
+libsched_if_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
+	$(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
+	$(libsched_if_la_LDFLAGS) $(LDFLAGS) -o $@
+@BGL_LOADED_FALSE@@BG_L_P_LOADED_TRUE@@BLUEGENE_LOADED_TRUE@am_libsched_if_la_rpath = -rpath \
+@BGL_LOADED_FALSE@@BG_L_P_LOADED_TRUE@@BLUEGENE_LOADED_TRUE@	$(pkglibdir)
+libsched_if64_la_LIBADD =
+am__libsched_if64_la_SOURCES_DIST = libsched_if64.c
+@BGL_LOADED_TRUE@@BG_L_P_LOADED_TRUE@@BLUEGENE_LOADED_TRUE@am_libsched_if64_la_OBJECTS = libsched_if64.lo
+libsched_if64_la_OBJECTS = $(am_libsched_if64_la_OBJECTS)
+libsched_if64_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
+	$(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
+	$(libsched_if64_la_LDFLAGS) $(LDFLAGS) -o $@
+@BGL_LOADED_TRUE@@BG_L_P_LOADED_TRUE@@BLUEGENE_LOADED_TRUE@am_libsched_if64_la_rpath = -rpath \
+@BGL_LOADED_TRUE@@BG_L_P_LOADED_TRUE@@BLUEGENE_LOADED_TRUE@	$(pkglibdir)
+runjob_plugin_la_LIBADD =
+am__runjob_plugin_la_SOURCES_DIST = runjob_plugin.cc
+@BGQ_LOADED_TRUE@@BLUEGENE_LOADED_TRUE@am_runjob_plugin_la_OBJECTS =  \
+@BGQ_LOADED_TRUE@@BLUEGENE_LOADED_TRUE@	runjob_plugin.lo
+runjob_plugin_la_OBJECTS = $(am_runjob_plugin_la_OBJECTS)
+runjob_plugin_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \
+	$(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \
+	$(CXXFLAGS) $(runjob_plugin_la_LDFLAGS) $(LDFLAGS) -o $@
+@BGQ_LOADED_TRUE@@BLUEGENE_LOADED_TRUE@am_runjob_plugin_la_rpath =  \
+@BGQ_LOADED_TRUE@@BLUEGENE_LOADED_TRUE@	-rpath $(pkglibdir)
+am__DEPENDENCIES_1 =
+@BLUEGENE_LOADED_TRUE@am__DEPENDENCIES_2 = $(block_allocator_lib) \
+@BLUEGENE_LOADED_TRUE@	$(am__DEPENDENCIES_1)
+select_bluegene_la_DEPENDENCIES = libba_common.la \
+	$(am__DEPENDENCIES_2)
+am__select_bluegene_la_SOURCES_DIST = select_bluegene.c bg_job_info.c \
+	bg_job_info.h bg_node_info.c bg_node_info.h bg_core.c \
+	bg_defined_block.c bg_dynamic_block.c bg_job_place.c \
+	bg_job_run.c bg_list_functions.c bg_read_config.c \
+	bg_record_functions.c bg_status.c
+@BLUEGENE_LOADED_TRUE@am__objects_1 = bg_core.lo bg_defined_block.lo \
+@BLUEGENE_LOADED_TRUE@	bg_dynamic_block.lo bg_job_place.lo \
+@BLUEGENE_LOADED_TRUE@	bg_job_run.lo bg_list_functions.lo \
+@BLUEGENE_LOADED_TRUE@	bg_read_config.lo bg_record_functions.lo \
+@BLUEGENE_LOADED_TRUE@	bg_status.lo
+am_select_bluegene_la_OBJECTS = select_bluegene.lo bg_job_info.lo \
+	bg_node_info.lo $(am__objects_1)
+select_bluegene_la_OBJECTS = $(am_select_bluegene_la_OBJECTS)
+select_bluegene_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \
+	$(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \
+	$(CXXFLAGS) $(select_bluegene_la_LDFLAGS) $(LDFLAGS) -o $@
+PROGRAMS = $(sbin_PROGRAMS)
+am__slurm_epilog_SOURCES_DIST = slurm_epilog.c
+@BLUEGENE_LOADED_TRUE@am_slurm_epilog_OBJECTS =  \
+@BLUEGENE_LOADED_TRUE@	slurm_epilog.$(OBJEXT)
+slurm_epilog_OBJECTS = $(am_slurm_epilog_OBJECTS)
+am__DEPENDENCIES_3 = $(top_builddir)/src/api/libslurm.o
+@BLUEGENE_LOADED_TRUE@slurm_epilog_DEPENDENCIES =  \
+@BLUEGENE_LOADED_TRUE@	$(am__DEPENDENCIES_3)
+slurm_epilog_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
+	$(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
+	$(slurm_epilog_LDFLAGS) $(LDFLAGS) -o $@
+am__slurm_prolog_SOURCES_DIST = slurm_prolog.c
+@BLUEGENE_LOADED_TRUE@am_slurm_prolog_OBJECTS =  \
+@BLUEGENE_LOADED_TRUE@	slurm_prolog.$(OBJEXT)
+slurm_prolog_OBJECTS = $(am_slurm_prolog_OBJECTS)
+@BLUEGENE_LOADED_TRUE@slurm_prolog_DEPENDENCIES =  \
+@BLUEGENE_LOADED_TRUE@	$(am__DEPENDENCIES_3)
+slurm_prolog_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
+	$(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
+	$(slurm_prolog_LDFLAGS) $(LDFLAGS) -o $@
+DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm
+depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp
+am__depfiles_maybe = depfiles
+am__mv = mv -f
+COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
+	$(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+	--mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+	$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+CCLD = $(CC)
+LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+	--mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \
+	$(LDFLAGS) -o $@
+CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+	$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS)
+LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+	--mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+	$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS)
+CXXLD = $(CXX)
+CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+	--mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \
+	$(LDFLAGS) -o $@
+SOURCES = $(libba_common_la_SOURCES) $(libconfigure_api_la_SOURCES) \
+	$(libsched_if_la_SOURCES) $(libsched_if64_la_SOURCES) \
+	$(runjob_plugin_la_SOURCES) $(select_bluegene_la_SOURCES) \
+	$(nodist_EXTRA_select_bluegene_la_SOURCES) \
+	$(slurm_epilog_SOURCES) $(slurm_prolog_SOURCES)
+DIST_SOURCES = $(libba_common_la_SOURCES) \
+	$(libconfigure_api_la_SOURCES) \
+	$(am__libsched_if_la_SOURCES_DIST) \
+	$(am__libsched_if64_la_SOURCES_DIST) \
+	$(am__runjob_plugin_la_SOURCES_DIST) \
+	$(am__select_bluegene_la_SOURCES_DIST) \
+	$(am__slurm_epilog_SOURCES_DIST) \
+	$(am__slurm_prolog_SOURCES_DIST)
 RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \
 	html-recursive info-recursive install-data-recursive \
 	install-dvi-recursive install-exec-recursive \
@@ -96,7 +245,7 @@ AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \
 	distdir
 ETAGS = etags
 CTAGS = ctags
-DIST_SUBDIRS = $(SUBDIRS)
+DIST_SUBDIRS = sfree bl ba bl_bgq ba_bgq
 DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
 am__relativize = \
   dir0=`pwd`; \
@@ -133,7 +282,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -144,7 +296,7 @@ CCDEPMODE = @CCDEPMODE@
 CFLAGS = @CFLAGS@
 CMD_LDFLAGS = @CMD_LDFLAGS@
 CPP = @CPP@
-CPPFLAGS = @CPPFLAGS@
+CPPFLAGS = -DBLUEGENE_CONFIG_FILE=\"$(sysconfdir)/bluegene.conf\"
 CXX = @CXX@
 CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
@@ -170,6 +322,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -227,6 +380,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -262,6 +416,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
@@ -314,10 +469,44 @@ target_vendor = @target_vendor@
 top_build_prefix = @top_build_prefix@
 top_builddir = @top_builddir@
 top_srcdir = @top_srcdir@
-SUBDIRS = block_allocator plugin
+AUTOMAKE_OPTIONS = foreign
+CLEANFILES = core.*
+PLUGIN_FLAGS = -module -avoid-version --export-dynamic -lm
+INCLUDES = -I$(top_srcdir)  -I$(top_srcdir)/src/common $(BG_INCLUDES)
+pkglib_LTLIBRARIES = select_bluegene.la $(am__append_2) \
+	$(am__append_3) $(am__append_5)
+noinst_LTLIBRARIES = libba_common.la libconfigure_api.la
+convenience_libs = $(top_builddir)/src/api/libslurm.o -ldl
+
+# These are needed for pack/unpack of structures for cross-cluster stuff
+select_bluegene_la_SOURCES = select_bluegene.c bg_job_info.c \
+	bg_job_info.h bg_node_info.c bg_node_info.h $(am__append_6)
+libba_common_la_SOURCES = ba_common.c ba_common.h
+libconfigure_api_la_SOURCES = configure_api.c
+select_bluegene_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
+select_bluegene_la_LIBADD = libba_common.la $(am__append_7)
+@BLUEGENE_LOADED_TRUE@SUBDIRS = sfree $(am__append_1) $(am__append_4)
+@BGL_LOADED_TRUE@@BG_L_P_LOADED_TRUE@@BLUEGENE_LOADED_TRUE@libsched_if64_la_SOURCES = libsched_if64.c
+@BGL_LOADED_TRUE@@BG_L_P_LOADED_TRUE@@BLUEGENE_LOADED_TRUE@libsched_if64_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
+@BGL_LOADED_FALSE@@BG_L_P_LOADED_TRUE@@BLUEGENE_LOADED_TRUE@libsched_if_la_SOURCES = libsched_if64.c
+@BGL_LOADED_FALSE@@BG_L_P_LOADED_TRUE@@BLUEGENE_LOADED_TRUE@libsched_if_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
+@BGQ_LOADED_TRUE@@BLUEGENE_LOADED_TRUE@block_allocator_lib = ba_bgq/libblock_allocator.la
+@BG_L_P_LOADED_TRUE@@BLUEGENE_LOADED_TRUE@block_allocator_lib = ba/libblock_allocator.la
+@BGQ_LOADED_TRUE@@BLUEGENE_LOADED_TRUE@nodist_EXTRA_select_bluegene_la_SOURCES = dummy.cxx
+@BGQ_LOADED_TRUE@@BLUEGENE_LOADED_TRUE@runjob_plugin_la_SOURCES = runjob_plugin.cc
+@BGQ_LOADED_TRUE@@BLUEGENE_LOADED_TRUE@runjob_plugin_la_LDFLAGS = -export-dynamic $(SO_LDFLAGS) $(PLUGIN_FLAGS) \
+@BGQ_LOADED_TRUE@@BLUEGENE_LOADED_TRUE@	$(convenience_libs) -lpthread
+
+@BLUEGENE_LOADED_TRUE@slurm_prolog_LDADD = $(convenience_libs)
+@BLUEGENE_LOADED_TRUE@slurm_prolog_SOURCES = slurm_prolog.c
+@BLUEGENE_LOADED_TRUE@slurm_prolog_LDFLAGS = -export-dynamic $(CMD_LDFLAGS)
+@BLUEGENE_LOADED_TRUE@slurm_epilog_LDADD = $(convenience_libs)
+@BLUEGENE_LOADED_TRUE@slurm_epilog_SOURCES = slurm_epilog.c
+@BLUEGENE_LOADED_TRUE@slurm_epilog_LDFLAGS = -export-dynamic $(CMD_LDFLAGS)
 all: all-recursive
 
 .SUFFIXES:
+.SUFFIXES: .c .cc .cxx .lo .o .obj
 $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am  $(am__configure_deps)
 	@for dep in $?; do \
 	  case '$(am__configure_deps)' in \
@@ -327,9 +516,9 @@ $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am  $(am__confi
 	      exit 1;; \
 	  esac; \
 	done; \
-	echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu src/plugins/select/bluegene/Makefile'; \
+	echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/plugins/select/bluegene/Makefile'; \
 	$(am__cd) $(top_srcdir) && \
-	  $(AUTOMAKE) --gnu src/plugins/select/bluegene/Makefile
+	  $(AUTOMAKE) --foreign src/plugins/select/bluegene/Makefile
 .PRECIOUS: Makefile
 Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
 	@case '$?' in \
@@ -349,6 +538,196 @@ $(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
 	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
 $(am__aclocal_m4_deps):
 
+clean-noinstLTLIBRARIES:
+	-test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES)
+	@list='$(noinst_LTLIBRARIES)'; for p in $$list; do \
+	  dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \
+	  test "$$dir" != "$$p" || dir=.; \
+	  echo "rm -f \"$${dir}/so_locations\""; \
+	  rm -f "$${dir}/so_locations"; \
+	done
+install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES)
+	@$(NORMAL_INSTALL)
+	test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)"
+	@list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \
+	list2=; for p in $$list; do \
+	  if test -f $$p; then \
+	    list2="$$list2 $$p"; \
+	  else :; fi; \
+	done; \
+	test -z "$$list2" || { \
+	  echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pkglibdir)'"; \
+	  $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pkglibdir)"; \
+	}
+
+uninstall-pkglibLTLIBRARIES:
+	@$(NORMAL_UNINSTALL)
+	@list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \
+	for p in $$list; do \
+	  $(am__strip_dir) \
+	  echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$f'"; \
+	  $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$f"; \
+	done
+
+clean-pkglibLTLIBRARIES:
+	-test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES)
+	@list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \
+	  dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \
+	  test "$$dir" != "$$p" || dir=.; \
+	  echo "rm -f \"$${dir}/so_locations\""; \
+	  rm -f "$${dir}/so_locations"; \
+	done
+libba_common.la: $(libba_common_la_OBJECTS) $(libba_common_la_DEPENDENCIES) 
+	$(LINK)  $(libba_common_la_OBJECTS) $(libba_common_la_LIBADD) $(LIBS)
+libconfigure_api.la: $(libconfigure_api_la_OBJECTS) $(libconfigure_api_la_DEPENDENCIES) 
+	$(LINK)  $(libconfigure_api_la_OBJECTS) $(libconfigure_api_la_LIBADD) $(LIBS)
+libsched_if.la: $(libsched_if_la_OBJECTS) $(libsched_if_la_DEPENDENCIES) 
+	$(libsched_if_la_LINK) $(am_libsched_if_la_rpath) $(libsched_if_la_OBJECTS) $(libsched_if_la_LIBADD) $(LIBS)
+libsched_if64.la: $(libsched_if64_la_OBJECTS) $(libsched_if64_la_DEPENDENCIES) 
+	$(libsched_if64_la_LINK) $(am_libsched_if64_la_rpath) $(libsched_if64_la_OBJECTS) $(libsched_if64_la_LIBADD) $(LIBS)
+runjob_plugin.la: $(runjob_plugin_la_OBJECTS) $(runjob_plugin_la_DEPENDENCIES) 
+	$(runjob_plugin_la_LINK) $(am_runjob_plugin_la_rpath) $(runjob_plugin_la_OBJECTS) $(runjob_plugin_la_LIBADD) $(LIBS)
+select_bluegene.la: $(select_bluegene_la_OBJECTS) $(select_bluegene_la_DEPENDENCIES) 
+	$(select_bluegene_la_LINK) -rpath $(pkglibdir) $(select_bluegene_la_OBJECTS) $(select_bluegene_la_LIBADD) $(LIBS)
+install-sbinPROGRAMS: $(sbin_PROGRAMS)
+	@$(NORMAL_INSTALL)
+	test -z "$(sbindir)" || $(MKDIR_P) "$(DESTDIR)$(sbindir)"
+	@list='$(sbin_PROGRAMS)'; test -n "$(sbindir)" || list=; \
+	for p in $$list; do echo "$$p $$p"; done | \
+	sed 's/$(EXEEXT)$$//' | \
+	while read p p1; do if test -f $$p || test -f $$p1; \
+	  then echo "$$p"; echo "$$p"; else :; fi; \
+	done | \
+	sed -e 'p;s,.*/,,;n;h' -e 's|.*|.|' \
+	    -e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \
+	sed 'N;N;N;s,\n, ,g' | \
+	$(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \
+	  { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
+	    if ($$2 == $$4) files[d] = files[d] " " $$1; \
+	    else { print "f", $$3 "/" $$4, $$1; } } \
+	  END { for (d in files) print "f", d, files[d] }' | \
+	while read type dir files; do \
+	    if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
+	    test -z "$$files" || { \
+	    echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files '$(DESTDIR)$(sbindir)$$dir'"; \
+	    $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files "$(DESTDIR)$(sbindir)$$dir" || exit $$?; \
+	    } \
+	; done
+
+uninstall-sbinPROGRAMS:
+	@$(NORMAL_UNINSTALL)
+	@list='$(sbin_PROGRAMS)'; test -n "$(sbindir)" || list=; \
+	files=`for p in $$list; do echo "$$p"; done | \
+	  sed -e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \
+	      -e 's/$$/$(EXEEXT)/' `; \
+	test -n "$$list" || exit 0; \
+	echo " ( cd '$(DESTDIR)$(sbindir)' && rm -f" $$files ")"; \
+	cd "$(DESTDIR)$(sbindir)" && rm -f $$files
+
+clean-sbinPROGRAMS:
+	@list='$(sbin_PROGRAMS)'; test -n "$$list" || exit 0; \
+	echo " rm -f" $$list; \
+	rm -f $$list || exit $$?; \
+	test -n "$(EXEEXT)" || exit 0; \
+	list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \
+	echo " rm -f" $$list; \
+	rm -f $$list
+slurm_epilog$(EXEEXT): $(slurm_epilog_OBJECTS) $(slurm_epilog_DEPENDENCIES) 
+	@rm -f slurm_epilog$(EXEEXT)
+	$(slurm_epilog_LINK) $(slurm_epilog_OBJECTS) $(slurm_epilog_LDADD) $(LIBS)
+slurm_prolog$(EXEEXT): $(slurm_prolog_OBJECTS) $(slurm_prolog_DEPENDENCIES) 
+	@rm -f slurm_prolog$(EXEEXT)
+	$(slurm_prolog_LINK) $(slurm_prolog_OBJECTS) $(slurm_prolog_LDADD) $(LIBS)
+
+mostlyclean-compile:
+	-rm -f *.$(OBJEXT)
+
+distclean-compile:
+	-rm -f *.tab.c
+
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ba_common.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/bg_core.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/bg_defined_block.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/bg_dynamic_block.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/bg_job_info.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/bg_job_place.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/bg_job_run.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/bg_list_functions.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/bg_node_info.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/bg_read_config.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/bg_record_functions.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/bg_status.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/configure_api.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/dummy.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libsched_if64.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/runjob_plugin.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/select_bluegene.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/slurm_epilog.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/slurm_prolog.Po@am__quote@
+
+.c.o:
+@am__fastdepCC_TRUE@	$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(COMPILE) -c $<
+
+.c.obj:
+@am__fastdepCC_TRUE@	$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(COMPILE) -c `$(CYGPATH_W) '$<'`
+
+.c.lo:
+@am__fastdepCC_TRUE@	$(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LTCOMPILE) -c -o $@ $<
+
+.cc.o:
+@am__fastdepCXX_TRUE@	$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCXX_TRUE@	$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@	$(CXXCOMPILE) -c -o $@ $<
+
+.cc.obj:
+@am__fastdepCXX_TRUE@	$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+@am__fastdepCXX_TRUE@	$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@	$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'`
+
+.cc.lo:
+@am__fastdepCXX_TRUE@	$(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCXX_TRUE@	$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@	$(LTCXXCOMPILE) -c -o $@ $<
+
+.cxx.o:
+@am__fastdepCXX_TRUE@	$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCXX_TRUE@	$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@	$(CXXCOMPILE) -c -o $@ $<
+
+.cxx.obj:
+@am__fastdepCXX_TRUE@	$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+@am__fastdepCXX_TRUE@	$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@	$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'`
+
+.cxx.lo:
+@am__fastdepCXX_TRUE@	$(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCXX_TRUE@	$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@	$(LTCXXCOMPILE) -c -o $@ $<
+
 mostlyclean-libtool:
 	-rm -f *.lo
 
@@ -550,9 +929,12 @@ distdir: $(DISTFILES)
 	done
 check-am: all-am
 check: check-recursive
-all-am: Makefile
+all-am: Makefile $(LTLIBRARIES) $(PROGRAMS)
 installdirs: installdirs-recursive
 installdirs-am:
+	for dir in "$(DESTDIR)$(pkglibdir)" "$(DESTDIR)$(sbindir)"; do \
+	  test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+	done
 install: install-recursive
 install-exec: install-exec-recursive
 install-data: install-data-recursive
@@ -570,6 +952,7 @@ install-strip:
 mostlyclean-generic:
 
 clean-generic:
+	-test -z "$(CLEANFILES)" || rm -f $(CLEANFILES)
 
 distclean-generic:
 	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
@@ -580,11 +963,14 @@ maintainer-clean-generic:
 	@echo "it deletes files that may require special tools to rebuild."
 clean: clean-recursive
 
-clean-am: clean-generic clean-libtool mostlyclean-am
+clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \
+	clean-pkglibLTLIBRARIES clean-sbinPROGRAMS mostlyclean-am
 
 distclean: distclean-recursive
+	-rm -rf ./$(DEPDIR)
 	-rm -f Makefile
-distclean-am: clean-am distclean-generic distclean-tags
+distclean-am: clean-am distclean-compile distclean-generic \
+	distclean-tags
 
 dvi: dvi-recursive
 
@@ -604,7 +990,7 @@ install-dvi: install-dvi-recursive
 
 install-dvi-am:
 
-install-exec-am:
+install-exec-am: install-pkglibLTLIBRARIES install-sbinPROGRAMS
 
 install-html: install-html-recursive
 
@@ -627,12 +1013,14 @@ install-ps-am:
 installcheck-am:
 
 maintainer-clean: maintainer-clean-recursive
+	-rm -rf ./$(DEPDIR)
 	-rm -f Makefile
 maintainer-clean-am: distclean-am maintainer-clean-generic
 
 mostlyclean: mostlyclean-recursive
 
-mostlyclean-am: mostlyclean-generic mostlyclean-libtool
+mostlyclean-am: mostlyclean-compile mostlyclean-generic \
+	mostlyclean-libtool
 
 pdf: pdf-recursive
 
@@ -642,25 +1030,34 @@ ps: ps-recursive
 
 ps-am:
 
-uninstall-am:
+uninstall-am: uninstall-pkglibLTLIBRARIES uninstall-sbinPROGRAMS
 
 .MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \
 	install-am install-strip tags-recursive
 
 .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \
 	all all-am check check-am clean clean-generic clean-libtool \
-	ctags ctags-recursive distclean distclean-generic \
-	distclean-libtool distclean-tags distdir dvi dvi-am html \
-	html-am info info-am install install-am install-data \
-	install-data-am install-dvi install-dvi-am install-exec \
-	install-exec-am install-html install-html-am install-info \
-	install-info-am install-man install-pdf install-pdf-am \
-	install-ps install-ps-am install-strip installcheck \
-	installcheck-am installdirs installdirs-am maintainer-clean \
-	maintainer-clean-generic mostlyclean mostlyclean-generic \
-	mostlyclean-libtool pdf pdf-am ps ps-am tags tags-recursive \
-	uninstall uninstall-am
+	clean-noinstLTLIBRARIES clean-pkglibLTLIBRARIES \
+	clean-sbinPROGRAMS ctags ctags-recursive distclean \
+	distclean-compile distclean-generic distclean-libtool \
+	distclean-tags distdir dvi dvi-am html html-am info info-am \
+	install install-am install-data install-data-am install-dvi \
+	install-dvi-am install-exec install-exec-am install-html \
+	install-html-am install-info install-info-am install-man \
+	install-pdf install-pdf-am install-pkglibLTLIBRARIES \
+	install-ps install-ps-am install-sbinPROGRAMS install-strip \
+	installcheck installcheck-am installdirs installdirs-am \
+	maintainer-clean maintainer-clean-generic mostlyclean \
+	mostlyclean-compile mostlyclean-generic mostlyclean-libtool \
+	pdf pdf-am ps ps-am tags tags-recursive uninstall uninstall-am \
+	uninstall-pkglibLTLIBRARIES uninstall-sbinPROGRAMS
+
+
+@BGQ_LOADED_TRUE@@BLUEGENE_LOADED_TRUE@  # force link with g++
 
+@BLUEGENE_LOADED_TRUE@force:
+@BLUEGENE_LOADED_TRUE@$(block_allocator_lib) $(convenience_libs) : force
+@BLUEGENE_LOADED_TRUE@	@cd `dirname $@` && $(MAKE) `basename $@`
 
 # Tell versions [3.59,3.63) of GNU make to not export all variables.
 # Otherwise a system limit (for SysV at least) may be exceeded.
diff --git a/src/plugins/select/bluegene/ba/Makefile.am b/src/plugins/select/bluegene/ba/Makefile.am
new file mode 100644
index 000000000..481783415
--- /dev/null
+++ b/src/plugins/select/bluegene/ba/Makefile.am
@@ -0,0 +1,35 @@
+# Makefile.am for block_allocator
+
+AUTOMAKE_OPTIONS = foreign
+CLEANFILES = core.*
+
+INCLUDES = -I$(top_srcdir) $(BG_INCLUDES)
+
+# making a .la
+
+noinst_LTLIBRARIES = libblock_allocator.la
+libblock_allocator_la_SOURCES = block_allocator.c block_allocator.h
+
+libblock_allocator_la_LIBADD = ../bl/libbridge_linker.la
+
+libblock_allocator_la_LDFLAGS = $(LIB_LDFLAGS) -lm
+
+total = $(libblock_allocator_la_LDADD)
+
+#to build the debug executable
+noinst_PROGRAMS = wire_test
+
+wire_testSOURCES = wire_test.c
+
+# compile against the block_allocator.o since we don't really want to
+# link against the bridge_linker.
+wire_test_LDADD = $(top_builddir)/src/api/libslurm.o -ldl \
+		../libba_common.la $(libblock_allocator_la_OBJECTS)
+
+total += ../libba_common.la $(top_builddir)/src/api/libslurm.o
+
+wire_test_LDFLAGS = -export-dynamic -lm $(CMD_LDFLAGS)
+
+force:
+$(total) : force
+	@cd `dirname $@` && $(MAKE) `basename $@`
diff --git a/src/plugins/select/bluegene/block_allocator/Makefile.in b/src/plugins/select/bluegene/ba/Makefile.in
similarity index 90%
rename from src/plugins/select/bluegene/block_allocator/Makefile.in
rename to src/plugins/select/bluegene/ba/Makefile.in
index 98bcef56a..37eb17b95 100644
--- a/src/plugins/select/bluegene/block_allocator/Makefile.in
+++ b/src/plugins/select/bluegene/ba/Makefile.in
@@ -15,7 +15,7 @@
 
 @SET_MAKE@
 
-# Makefile.am for bluegene_block_allocator
+# Makefile.am for block_allocator
 
 
 VPATH = @srcdir@
@@ -38,8 +38,8 @@ POST_UNINSTALL = :
 build_triplet = @build@
 host_triplet = @host@
 target_triplet = @target@
-@BLUEGENE_LOADED_TRUE@noinst_PROGRAMS = wire_test$(EXEEXT)
-subdir = src/plugins/select/bluegene/block_allocator
+noinst_PROGRAMS = wire_test$(EXEEXT)
+subdir = src/plugins/select/bluegene/ba
 DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in
 ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
 am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
@@ -65,6 +65,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -75,6 +76,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -84,21 +86,17 @@ CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h
 CONFIG_CLEAN_FILES =
 CONFIG_CLEAN_VPATH_FILES =
 LTLIBRARIES = $(noinst_LTLIBRARIES)
-libbluegene_block_allocator_la_LIBADD =
-am_libbluegene_block_allocator_la_OBJECTS = block_allocator.lo \
-	bridge_linker.lo
-libbluegene_block_allocator_la_OBJECTS =  \
-	$(am_libbluegene_block_allocator_la_OBJECTS)
-libbluegene_block_allocator_la_LINK = $(LIBTOOL) --tag=CC \
-	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CCLD) \
-	$(AM_CFLAGS) $(CFLAGS) \
-	$(libbluegene_block_allocator_la_LDFLAGS) $(LDFLAGS) -o $@
+libblock_allocator_la_DEPENDENCIES = ../bl/libbridge_linker.la
+am_libblock_allocator_la_OBJECTS = block_allocator.lo
+libblock_allocator_la_OBJECTS = $(am_libblock_allocator_la_OBJECTS)
+libblock_allocator_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
+	$(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
+	$(libblock_allocator_la_LDFLAGS) $(LDFLAGS) -o $@
 PROGRAMS = $(noinst_PROGRAMS)
 wire_test_SOURCES = wire_test.c
 wire_test_OBJECTS = wire_test.$(OBJEXT)
-@BLUEGENE_LOADED_TRUE@wire_test_DEPENDENCIES =  \
-@BLUEGENE_LOADED_TRUE@	libbluegene_block_allocator.la \
-@BLUEGENE_LOADED_TRUE@	$(top_builddir)/src/api/libslurm.o
+wire_test_DEPENDENCIES = $(top_builddir)/src/api/libslurm.o \
+	../libba_common.la $(libblock_allocator_la_OBJECTS)
 wire_test_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
 	$(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
 	$(wire_test_LDFLAGS) $(LDFLAGS) -o $@
@@ -115,8 +113,8 @@ CCLD = $(CC)
 LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
 	--mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \
 	$(LDFLAGS) -o $@
-SOURCES = $(libbluegene_block_allocator_la_SOURCES) wire_test.c
-DIST_SOURCES = $(libbluegene_block_allocator_la_SOURCES) wire_test.c
+SOURCES = $(libblock_allocator_la_SOURCES) wire_test.c
+DIST_SOURCES = $(libblock_allocator_la_SOURCES) wire_test.c
 ETAGS = etags
 CTAGS = ctags
 DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
@@ -130,7 +128,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -167,6 +168,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -224,6 +226,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -259,6 +262,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
@@ -316,18 +320,20 @@ CLEANFILES = core.*
 INCLUDES = -I$(top_srcdir) $(BG_INCLUDES)
 
 # making a .la
-noinst_LTLIBRARIES = libbluegene_block_allocator.la
-libbluegene_block_allocator_la_SOURCES = \
-	block_allocator.c bridge_linker.c block_allocator.h bridge_linker.h
-
-libbluegene_block_allocator_la_LDFLAGS = \
-	$(LIB_LDFLAGS) -lm
-
-@BLUEGENE_LOADED_TRUE@wire_testSOURCES = wire_test.c block_allocator.h
-@BLUEGENE_LOADED_TRUE@wire_test_LDADD = libbluegene_block_allocator.la \
-@BLUEGENE_LOADED_TRUE@	$(top_builddir)/src/api/libslurm.o -ldl
-
-@BLUEGENE_LOADED_TRUE@wire_test_LDFLAGS = -export-dynamic -lm $(CMD_LDFLAGS)
+noinst_LTLIBRARIES = libblock_allocator.la
+libblock_allocator_la_SOURCES = block_allocator.c block_allocator.h
+libblock_allocator_la_LIBADD = ../bl/libbridge_linker.la
+libblock_allocator_la_LDFLAGS = $(LIB_LDFLAGS) -lm
+total = $(libblock_allocator_la_LDADD) ../libba_common.la \
+	$(top_builddir)/src/api/libslurm.o
+wire_testSOURCES = wire_test.c
+
+# compile against the block_allocator.o since we don't really want to
+# link against the bridge_linker.
+wire_test_LDADD = $(top_builddir)/src/api/libslurm.o -ldl \
+		../libba_common.la $(libblock_allocator_la_OBJECTS)
+
+wire_test_LDFLAGS = -export-dynamic -lm $(CMD_LDFLAGS)
 all: all-am
 
 .SUFFIXES:
@@ -341,9 +347,9 @@ $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am  $(am__confi
 	      exit 1;; \
 	  esac; \
 	done; \
-	echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/plugins/select/bluegene/block_allocator/Makefile'; \
+	echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/plugins/select/bluegene/ba/Makefile'; \
 	$(am__cd) $(top_srcdir) && \
-	  $(AUTOMAKE) --foreign src/plugins/select/bluegene/block_allocator/Makefile
+	  $(AUTOMAKE) --foreign src/plugins/select/bluegene/ba/Makefile
 .PRECIOUS: Makefile
 Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
 	@case '$?' in \
@@ -371,8 +377,8 @@ clean-noinstLTLIBRARIES:
 	  echo "rm -f \"$${dir}/so_locations\""; \
 	  rm -f "$${dir}/so_locations"; \
 	done
-libbluegene_block_allocator.la: $(libbluegene_block_allocator_la_OBJECTS) $(libbluegene_block_allocator_la_DEPENDENCIES) 
-	$(libbluegene_block_allocator_la_LINK)  $(libbluegene_block_allocator_la_OBJECTS) $(libbluegene_block_allocator_la_LIBADD) $(LIBS)
+libblock_allocator.la: $(libblock_allocator_la_OBJECTS) $(libblock_allocator_la_DEPENDENCIES) 
+	$(libblock_allocator_la_LINK)  $(libblock_allocator_la_OBJECTS) $(libblock_allocator_la_LIBADD) $(LIBS)
 
 clean-noinstPROGRAMS:
 	@list='$(noinst_PROGRAMS)'; test -n "$$list" || exit 0; \
@@ -393,7 +399,6 @@ distclean-compile:
 	-rm -f *.tab.c
 
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/block_allocator.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/bridge_linker.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/wire_test.Po@am__quote@
 
 .c.o:
@@ -623,6 +628,10 @@ uninstall-am:
 	tags uninstall uninstall-am
 
 
+force:
+$(total) : force
+	@cd `dirname $@` && $(MAKE) `basename $@`
+
 # Tell versions [3.59,3.63) of GNU make to not export all variables.
 # Otherwise a system limit (for SysV at least) may be exceeded.
 .NOEXPORT:
diff --git a/src/plugins/select/bluegene/block_allocator/block_allocator.c b/src/plugins/select/bluegene/ba/block_allocator.c
similarity index 56%
rename from src/plugins/select/bluegene/block_allocator/block_allocator.c
rename to src/plugins/select/bluegene/ba/block_allocator.c
index 1c267e547..a0af0deca 100644
--- a/src/plugins/select/bluegene/block_allocator/block_allocator.c
+++ b/src/plugins/select/bluegene/ba/block_allocator.c
@@ -9,7 +9,7 @@
  *  Written by Dan Phung <phung4@llnl.gov>, Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -46,9 +46,27 @@
 #include <stdlib.h>
 #include <math.h>
 #include "block_allocator.h"
-#include "src/common/uid.h"
-#include "src/common/timers.h"
 #include "src/common/slurmdb_defs.h"
+#include "src/common/timers.h"
+#include "src/common/uid.h"
+
+/*
+ * structure that holds switch path information for finding the wiring
+ * path without setting the configuration.
+ *
+ * - dim      - Which Axis it is on
+ * - geometry - node location
+ * - in       - ingress port.
+ * - out      - egress port.
+ *
+ */
+typedef struct {
+	int dim;
+	uint16_t geometry[HIGHEST_DIMENSIONS];
+	int in;
+	int out;
+} ba_path_switch_t;
+
 
 #define DEBUG_PA
 #define BEST_COUNT_INIT 20
@@ -56,64 +74,19 @@
 /* Global */
 bool _initialized = false;
 bool _wires_initialized = false;
-bool _bp_map_initialized = false;
+bool _mp_map_initialized = false;
 
 /* _ba_system is the "current" system that the structures will work
  *  on */
-ba_system_t *ba_system_ptr = NULL;
 List path = NULL;
 List best_path = NULL;
 int best_count;
-int color_count = 0;
 uint16_t *deny_pass = NULL;
-#if (SYSTEM_DIMENSIONS == 1)
-int cluster_dims = 1;
-int cluster_base = 10;
-#else
-int cluster_dims = 3;
-int cluster_base = 36;
-#endif
-uint32_t cluster_flags = 0;
 char *p = '\0';
-uint32_t ba_debug_flags = 0;
 
 /* extern Global */
 my_bluegene_t *bg = NULL;
-uint16_t ba_deny_pass = 0;
-List bp_map_list = NULL;
-char letters[62];
-char colors[6];
-uint16_t DIM_SIZE[HIGHEST_DIMENSIONS] = {0,0,0,0};
-uint16_t REAL_DIM_SIZE[HIGHEST_DIMENSIONS] = {0,0,0,0};
-
-s_p_options_t bg_conf_file_options[] = {
-#ifdef HAVE_BGL
-	{"BlrtsImage", S_P_STRING},
-	{"LinuxImage", S_P_STRING},
-	{"RamDiskImage", S_P_STRING},
-	{"AltBlrtsImage", S_P_ARRAY, parse_image, NULL},
-	{"AltLinuxImage", S_P_ARRAY, parse_image, NULL},
-	{"AltRamDiskImage", S_P_ARRAY, parse_image, NULL},
-#else
-	{"CnloadImage", S_P_STRING},
-	{"IoloadImage", S_P_STRING},
-	{"AltCnloadImage", S_P_ARRAY, parse_image, NULL},
-	{"AltIoloadImage", S_P_ARRAY, parse_image, NULL},
-#endif
-	{"DenyPassthrough", S_P_STRING},
-	{"LayoutMode", S_P_STRING},
-	{"MloaderImage", S_P_STRING},
-	{"BridgeAPILogFile", S_P_STRING},
-	{"BridgeAPIVerbose", S_P_UINT16},
-	{"BasePartitionNodeCnt", S_P_UINT16},
-	{"NodeCardNodeCnt", S_P_UINT16},
-	{"Numpsets", S_P_UINT16},
-	{"BPs", S_P_ARRAY, parse_blockreq, destroy_blockreq},
-	/* these are just going to be put into a list that will be
-	   freed later don't free them after reading them */
-	{"AltMloaderImage", S_P_ARRAY, parse_image, NULL},
-	{NULL}
-};
+ba_mp_t ***ba_main_grid = NULL;
 
 typedef enum {
 	BLOCK_ALGO_FIRST,
@@ -121,17 +94,9 @@ typedef enum {
 } block_algo_t;
 
 /** internal helper functions */
-#ifdef HAVE_BG_FILES
-/** */
-static void _bp_map_list_del(void *object);
-
-/** */
-static int _port_enum(int port);
-
-#endif /* HAVE_BG_FILES */
 
 /* */
-static int _check_for_options(ba_request_t* ba_request);
+static int _check_for_options(select_ba_request_t* ba_request);
 
 /* */
 static int _append_geo(uint16_t *geo, List geos, int rotate);
@@ -146,51 +111,44 @@ static int _copy_the_path(List nodes, ba_switch_t *curr_switch,
 			  int source, int dim);
 
 /* */
-static int _find_yz_path(ba_node_t *ba_node, uint16_t *first,
+static int _find_yz_path(ba_mp_t *ba_node, uint16_t *first,
 			 uint16_t *geometry, int conn_type);
 
 #ifndef HAVE_BG_FILES
 /* */
-static int _emulate_ext_wiring(ba_node_t ***grid);
+static int _emulate_ext_wiring(ba_mp_t ***grid);
 #endif
 
-/** */
-static void _new_ba_node(ba_node_t *ba_node, uint16_t *coord,
-			 bool track_down_nodes);
 /** */
 static int _reset_the_path(ba_switch_t *curr_switch, int source,
 			   int target, int dim);
-/** */
-static void _create_ba_system(void);
-/* */
-static void _delete_ba_system(void);
 /* */
 static void _delete_path_list(void *object);
 
 /* find the first block match in the system */
-static int _find_match(ba_request_t* ba_request, List results);
+static int _find_match(select_ba_request_t* ba_request, List results);
 
 /** */
-static bool _node_used(ba_node_t* ba_node, int x_size);
+static bool _node_used(ba_mp_t* ba_node, int x_size);
 
 /* */
-static void _switch_config(ba_node_t* source, ba_node_t* target, int dim,
+static void _switch_config(ba_mp_t* source, ba_mp_t* target, int dim,
 			   int port_src, int port_tar);
 
 /* */
-static int _set_external_wires(int dim, int count, ba_node_t* source,
-			       ba_node_t* target);
+static int _set_external_wires(int dim, int count, ba_mp_t* source,
+			       ba_mp_t* target);
 
 /* */
 static char *_set_internal_wires(List nodes, int size, int conn_type);
 
 /* */
-static int _find_x_path(List results, ba_node_t *ba_node, uint16_t *start,
+static int _find_x_path(List results, ba_mp_t *ba_node, uint16_t *start,
 			int x_size, int found, int conn_type,
 			block_algo_t algo);
 
 /* */
-static int _remove_node(List results, uint16_t *node_tar);
+static int _remove_node(List results, uint16_t *mp_tar);
 
 /* */
 static int _find_next_free_using_port_2(ba_switch_t *curr_switch,
@@ -214,220 +172,11 @@ static int _set_one_dim(uint16_t *start, uint16_t *end, uint16_t *coord);
 /* */
 static void _destroy_geo(void *object);
 
-static int _coord(char coord);
-
-extern char *ba_passthroughs_string(uint16_t passthrough)
-{
-	char *pass = NULL;
-	if (passthrough & PASS_FOUND_X)
-		xstrcat(pass, "X");
-	if (passthrough & PASS_FOUND_Y) {
-		if (pass)
-			xstrcat(pass, ",Y");
-		else
-			xstrcat(pass, "Y");
-	}
-	if (passthrough & PASS_FOUND_Z) {
-		if (pass)
-			xstrcat(pass, ",Z");
-		else
-			xstrcat(pass, "Z");
-	}
-
-	return pass;
-}
-
-
-extern int parse_blockreq(void **dest, slurm_parser_enum_t type,
-			  const char *key, const char *value,
-			  const char *line, char **leftover)
-{
-	s_p_options_t block_options[] = {
-		{"Type", S_P_STRING},
-		{"32CNBlocks", S_P_UINT16},
-		{"128CNBlocks", S_P_UINT16},
-#ifdef HAVE_BGL
-		{"Nodecards", S_P_UINT16},
-		{"Quarters", S_P_UINT16},
-		{"BlrtsImage", S_P_STRING},
-		{"LinuxImage", S_P_STRING},
-		{"RamDiskImage", S_P_STRING},
-#else
-		{"16CNBlocks", S_P_UINT16},
-		{"64CNBlocks", S_P_UINT16},
-		{"256CNBlocks", S_P_UINT16},
-		{"CnloadImage", S_P_STRING},
-		{"IoloadImage", S_P_STRING},
-#endif
-		{"MloaderImage", S_P_STRING},
-		{NULL}
-	};
-	s_p_hashtbl_t *tbl;
-	char *tmp = NULL;
-	blockreq_t *n = NULL;
-	hostlist_t hl = NULL;
-
-	tbl = s_p_hashtbl_create(block_options);
-	s_p_parse_line(tbl, *leftover, leftover);
-	if (!value) {
-		return 0;
-	}
-	n = xmalloc(sizeof(blockreq_t));
-	hl = hostlist_create(value);
-	n->block = hostlist_ranged_string_xmalloc(hl);
-	hostlist_destroy(hl);
-#ifdef HAVE_BGL
-	s_p_get_string(&n->blrtsimage, "BlrtsImage", tbl);
-	s_p_get_string(&n->linuximage, "LinuxImage", tbl);
-	s_p_get_string(&n->ramdiskimage, "RamDiskImage", tbl);
-#else
-	s_p_get_string(&n->linuximage, "CnloadImage", tbl);
-	s_p_get_string(&n->ramdiskimage, "IoloadImage", tbl);
-#endif
-	s_p_get_string(&n->mloaderimage, "MloaderImage", tbl);
-
-	s_p_get_string(&tmp, "Type", tbl);
-	if (!tmp || !strcasecmp(tmp,"TORUS"))
-		n->conn_type = SELECT_TORUS;
-	else if (!strcasecmp(tmp,"MESH"))
-		n->conn_type = SELECT_MESH;
-	else
-		n->conn_type = SELECT_SMALL;
-	xfree(tmp);
-
-	if (!s_p_get_uint16(&n->small32, "32CNBlocks", tbl)) {
-#ifdef HAVE_BGL
-		s_p_get_uint16(&n->small32, "Nodecards", tbl);
-#else
-		;
-#endif
-	}
-	if (!s_p_get_uint16(&n->small128, "128CNBlocks", tbl)) {
-#ifdef HAVE_BGL
-		s_p_get_uint16(&n->small128, "Quarters", tbl);
-#else
-		;
-#endif
-	}
-
-#ifndef HAVE_BGL
-	s_p_get_uint16(&n->small16, "16CNBlocks", tbl);
-	s_p_get_uint16(&n->small64, "64CNBlocks", tbl);
-	s_p_get_uint16(&n->small256, "256CNBlocks", tbl);
-#endif
-
-	s_p_hashtbl_destroy(tbl);
-
-	*dest = (void *)n;
-	return 1;
-}
-
-extern void destroy_blockreq(void *ptr)
-{
-	blockreq_t *n = (blockreq_t *)ptr;
-	if (n) {
-		xfree(n->block);
-#ifdef HAVE_BGL
-		xfree(n->blrtsimage);
-#endif
-		xfree(n->linuximage);
-		xfree(n->mloaderimage);
-		xfree(n->ramdiskimage);
-		xfree(n);
-	}
-}
-
-extern int parse_image(void **dest, slurm_parser_enum_t type,
-		       const char *key, const char *value,
-		       const char *line, char **leftover)
-{
-	s_p_options_t image_options[] = {
-		{"GROUPS", S_P_STRING},
-		{NULL}
-	};
-	s_p_hashtbl_t *tbl = NULL;
-	char *tmp = NULL;
-	image_t *n = NULL;
-	image_group_t *image_group = NULL;
-	int i = 0, j = 0;
-
-	tbl = s_p_hashtbl_create(image_options);
-	s_p_parse_line(tbl, *leftover, leftover);
-
-	n = xmalloc(sizeof(image_t));
-	n->name = xstrdup(value);
-	n->def = false;
-	if (ba_debug_flags & DEBUG_FLAG_BG_ALGO)
-		info("image %s", n->name);
-	n->groups = list_create(destroy_image_group_list);
-	s_p_get_string(&tmp, "Groups", tbl);
-	if (tmp) {
-		for(i=0; i<strlen(tmp); i++) {
-			if ((tmp[i] == ':') || (tmp[i] == ',')) {
-				image_group = xmalloc(sizeof(image_group_t));
-				image_group->name = xmalloc(i-j+2);
-				snprintf(image_group->name,
-					 (i-j)+1, "%s", tmp+j);
-				gid_from_string (image_group->name,
-						 &image_group->gid);
-				if (ba_debug_flags & DEBUG_FLAG_BG_ALGO)
-					info("adding group %s %d",
-					     image_group->name,
-					     image_group->gid);
-				list_append(n->groups, image_group);
-				j=i;
-				j++;
-			}
-		}
-		if (j != i) {
-			image_group = xmalloc(sizeof(image_group_t));
-			image_group->name = xmalloc(i-j+2);
-			snprintf(image_group->name, (i-j)+1, "%s", tmp+j);
-			if (gid_from_string (image_group->name,
-			                     &image_group->gid) < 0)
-				fatal("Invalid bluegene.conf parameter "
-				      "Groups=%s",
-				      image_group->name);
-			else if (ba_debug_flags & DEBUG_FLAG_BG_ALGO)
-				info("adding group %s %d",
-				     image_group->name,
-				     image_group->gid);
-			list_append(n->groups, image_group);
-		}
-		xfree(tmp);
-	}
-	s_p_hashtbl_destroy(tbl);
-
-	*dest = (void *)n;
-	return 1;
-}
-
-extern void destroy_image_group_list(void *ptr)
-{
-	image_group_t *image_group = (image_group_t *)ptr;
-	if (image_group) {
-		xfree(image_group->name);
-		xfree(image_group);
-	}
-}
-
-extern void destroy_image(void *ptr)
-{
-	image_t *n = (image_t *)ptr;
-	if (n) {
-		xfree(n->name);
-		if (n->groups) {
-			list_destroy(n->groups);
-			n->groups = NULL;
-		}
-		xfree(n);
-	}
-}
-
 extern void destroy_ba_node(void *ptr)
 {
-	ba_node_t *ba_node = (ba_node_t *)ptr;
+	ba_mp_t *ba_node = (ba_mp_t *)ptr;
 	if (ba_node) {
+		xfree(ba_node->loc);
 		xfree(ba_node);
 	}
 }
@@ -466,7 +215,7 @@ extern void destroy_ba_node(void *ptr)
  * IN - start_req: if set use the start variable to start at
  * return success of allocation/validation of params
  */
-extern int new_ba_request(ba_request_t* ba_request)
+extern int new_ba_request(select_ba_request_t* ba_request)
 {
 	int i=0;
 	float sz=1;
@@ -805,31 +554,10 @@ endit:
 	return 1;
 }
 
-/**
- * delete a block request
- */
-extern void delete_ba_request(void *arg)
-{
-	ba_request_t *ba_request = (ba_request_t *)arg;
-	if (ba_request) {
-		xfree(ba_request->save_name);
-		if (ba_request->elongate_geos)
-			list_destroy(ba_request->elongate_geos);
-#ifdef HAVE_BGL
-		xfree(ba_request->blrtsimage);
-#endif
-		xfree(ba_request->linuximage);
-		xfree(ba_request->mloaderimage);
-		xfree(ba_request->ramdiskimage);
-
-		xfree(ba_request);
-	}
-}
-
 /**
  * print a block request
  */
-extern void print_ba_request(ba_request_t* ba_request)
+extern void print_ba_request(select_ba_request_t* ba_request)
 {
 	int i;
 
@@ -843,296 +571,27 @@ extern void print_ba_request(ba_request_t* ba_request)
 		debug("%d", ba_request->geometry[i]);
 	}
 	debug("        size:\t%d", ba_request->size);
-	debug("   conn_type:\t%d", ba_request->conn_type);
+	debug("   conn_type:\t%d", ba_request->conn_type[X]);
 	debug("      rotate:\t%d", ba_request->rotate);
 	debug("    elongate:\t%d", ba_request->elongate);
 }
 
-/**
- * empty a list that we don't want to destroy the memory of the
- * elements always returns 1
- */
-extern int empty_null_destroy_list(void *arg, void *key)
-{
-	return 1;
-}
-
-/**
- * Initialize internal structures by either reading previous block
- * configurations from a file or by running the graph solver.
- *
- * IN: node_info_msg_t * can be null,
- *     should be from slurm_load_node().
- *
- * return: void.
- */
-extern void ba_init(node_info_msg_t *node_info_ptr, bool sanity_check)
-{
-	int x,y,z;
-	node_info_t *node_ptr = NULL;
-	int number, count;
-	char *numeric = NULL;
-	int i, j, k;
-	slurm_conf_node_t *node = NULL, **ptr_array;
-	int coords[HIGHEST_DIMENSIONS];
-
-#ifdef HAVE_BG_FILES
-	rm_size3D_t bp_size;
-	int rc = 0;
-#endif /* HAVE_BG_FILES */
-
-	/* We only need to initialize once, so return if already done so. */
-	if (_initialized)
-		return;
-
-	cluster_dims = slurmdb_setup_cluster_dims();
-	cluster_flags = slurmdb_setup_cluster_flags();
-	set_ba_debug_flags(slurm_get_debug_flags());
-
-#ifdef HAVE_BG_FILES
-	bridge_init();
-#endif
-
-	/* make the letters array only contain letters upper and lower
-	 * (62) */
-	y = 'A';
-	for (x = 0; x < 62; x++) {
-		if (y == '[')
-			y = 'a';
-		else if (y == '{')
-			y = '0';
-		else if (y == ':')
-			y = 'A';
-		letters[x] = y;
-		y++;
-	}
-
-	z=1;
-	for (x = 0; x < 6; x++) {
-		if (z == 4)
-			z++;
-		colors[x] = z;
-		z++;
-	}
-
-	best_count=BEST_COUNT_INIT;
-
-	if (ba_system_ptr)
-		_delete_ba_system();
-
-	ba_system_ptr = (ba_system_t *) xmalloc(sizeof(ba_system_t));
-
-	ba_system_ptr->num_of_proc = 0;
-
-	/* cluster_dims is already set up off of working_cluster_rec */
-	if (cluster_dims == 1) {
-		if (node_info_ptr) {
-			REAL_DIM_SIZE[X] = DIM_SIZE[X] =
-				node_info_ptr->record_count;
-			ba_system_ptr->num_of_proc =
-				node_info_ptr->record_count;
-			REAL_DIM_SIZE[Y] = DIM_SIZE[Y] = 1;
-			REAL_DIM_SIZE[Z] = DIM_SIZE[Z] = 1;
-		}
-		goto setup_done;
-	} else if (working_cluster_rec && working_cluster_rec->dim_size) {
-		for(i=0; i<working_cluster_rec->dimensions; i++) {
-			DIM_SIZE[i] = working_cluster_rec->dim_size[i];
-			REAL_DIM_SIZE[i] = DIM_SIZE[i];
-		}
-		goto setup_done;
-	}
-
-
-	if (node_info_ptr) {
-		for (i = 0; i < node_info_ptr->record_count; i++) {
-			node_ptr = &node_info_ptr->node_array[i];
-			number = 0;
-
-			if (!node_ptr->name) {
-				for (j=0; j<HIGHEST_DIMENSIONS; j++)
-					DIM_SIZE[j] = 0;
-				goto node_info_error;
-			}
-
-			numeric = node_ptr->name;
-			while (numeric) {
-				if (numeric[0] < '0' || numeric[0] > 'Z'
-				    || (numeric[0] > '9'
-					&& numeric[0] < 'A')) {
-					numeric++;
-					continue;
-				}
-				number = xstrntol(numeric, &p, cluster_dims,
-						  cluster_base);
-				break;
-			}
-			hostlist_parse_int_to_array(
-				number, coords, cluster_dims, cluster_base);
-
-			for(j=0; j<cluster_dims; j++) {
-				if (DIM_SIZE[j] < coords[j])
-					DIM_SIZE[j] = coords[j];
-			}
-		}
-		for(j=0; j<cluster_dims; j++) {
-			DIM_SIZE[j]++;
-			/* this will probably be reset below */
-			REAL_DIM_SIZE[j] = DIM_SIZE[j];
-		}
-		ba_system_ptr->num_of_proc = node_info_ptr->record_count;
-	}
-node_info_error:
-
-	if ((DIM_SIZE[X]==0) || (DIM_SIZE[Y]==0) || (DIM_SIZE[Z]==0)) {
-		debug("Setting dimensions from slurm.conf file");
-		count = slurm_conf_nodename_array(&ptr_array);
-		if (count == 0)
-			fatal("No NodeName information available!");
-
-		for (i = 0; i < count; i++) {
-			node = ptr_array[i];
-			j = 0;
-			while (node->nodenames[j] != '\0') {
-				if ((node->nodenames[j] == '['
-				     || node->nodenames[j] == ',')
-				    && (node->nodenames[j+8] == ']'
-					|| node->nodenames[j+8] == ',')
-				    && (node->nodenames[j+4] == 'x'
-					|| node->nodenames[j+4] == '-')) {
-					j+=5;
-				} else if ((node->nodenames[j] >= '0'
-					    && node->nodenames[j] <= '9')
-					   || (node->nodenames[j] >= 'A'
-					       && node->nodenames[j] <= 'Z')) {
-					/* suppose to be blank, just
-					   making sure this is the
-					   correct alpha num
-					*/
-				} else {
-					j++;
-					continue;
-				}
-				number = xstrntol(node->nodenames + j,
-						  &p, cluster_dims,
-						  cluster_base);
-				hostlist_parse_int_to_array(
-					number, coords, cluster_dims,
-					cluster_base);
-				j += 3;
-
-				for(k=0; k<cluster_dims; k++)
-					DIM_SIZE[k] = MAX(DIM_SIZE[k],
-							  coords[k]);
-
-				if (node->nodenames[j] != ',')
-					break;
-			}
-		}
-		if ((DIM_SIZE[X]==0) && (DIM_SIZE[Y]==0) && (DIM_SIZE[Z]==0))
-			info("are you sure you only have 1 midplane? %s",
-			     node->nodenames);
-		for(j=0; j<cluster_dims; j++) {
-			DIM_SIZE[j]++;
-			/* this will probably be reset below */
-			REAL_DIM_SIZE[j] = DIM_SIZE[j];
-		}
-	}
-#ifdef HAVE_BG_FILES
-	/* sanity check.  We can only request part of the system, but
-	   we don't want to allow more than we have. */
-	if (sanity_check && have_db2) {
-		verbose("Attempting to contact MMCS");
-		if ((rc = bridge_get_bg(&bg)) != STATUS_OK) {
-			fatal("bridge_get_BG() failed.  This usually means "
-			      "there is something wrong with the database.  "
-			      "You might want to run slurmctld in daemon "
-			      "mode (-D) to see what the real error from "
-			      "the api was.  The return code was %d", rc);
-			return;
-		}
-
-		if ((bg != NULL)
-		    &&  ((rc = bridge_get_data(bg, RM_Msize, &bp_size))
-			 == STATUS_OK)) {
-			verbose("BlueGene configured with "
-				"%d x %d x %d base blocks",
-				bp_size.X, bp_size.Y, bp_size.Z);
-			REAL_DIM_SIZE[X] = bp_size.X;
-			REAL_DIM_SIZE[Y] = bp_size.Y;
-			REAL_DIM_SIZE[Z] = bp_size.Z;
-			if ((DIM_SIZE[X] > bp_size.X)
-			    || (DIM_SIZE[Y] > bp_size.Y)
-			    || (DIM_SIZE[Z] > bp_size.Z)) {
-				fatal("You requested a %c%c%c system, "
-				      "but we only have a system of %c%c%c.  "
-				      "Change your slurm.conf.",
-				      alpha_num[DIM_SIZE[X]],
-				      alpha_num[DIM_SIZE[Y]],
-				      alpha_num[DIM_SIZE[Z]],
-				      alpha_num[bp_size.X],
-				      alpha_num[bp_size.Y],
-				      alpha_num[bp_size.Z]);
-			}
-		} else {
-			error("bridge_get_data(RM_Msize): %d", rc);
-		}
-	}
-#endif
-
-setup_done:
-	if (cluster_dims == 1) {
-		if (DIM_SIZE[X]==0) {
-			debug("Setting default system dimensions");
-			REAL_DIM_SIZE[X] = DIM_SIZE[X]=100;
-			REAL_DIM_SIZE[Y] = DIM_SIZE[Y]=1;
-			REAL_DIM_SIZE[Z] = DIM_SIZE[Z]=1;
-		}
-	} else {
-		debug("We are using %c x %c x %c of the system.",
-		      alpha_num[DIM_SIZE[X]],
-		      alpha_num[DIM_SIZE[Y]],
-		      alpha_num[DIM_SIZE[Z]]);
-	}
-
-	if (!ba_system_ptr->num_of_proc) {
-		ba_system_ptr->num_of_proc = 1;
-		for(i=0; i<cluster_dims; i++)
-			ba_system_ptr->num_of_proc *= DIM_SIZE[i];
-	}
-
-	_create_ba_system();
-
-
-#ifndef HAVE_BG_FILES
-	if ((cluster_flags & CLUSTER_FLAG_BGL) ||
-	    (cluster_flags & CLUSTER_FLAG_BGP))
-		_emulate_ext_wiring(ba_system_ptr->grid);
-#endif
-
-	path = list_create(_delete_path_list);
-	best_path = list_create(_delete_path_list);
-
-	_initialized = true;
-	init_grid(node_info_ptr);
-}
-
 /* If emulating a system set up a known configuration for wires in a
  * system of the size given.
  * If a real bluegene system, query the system and get all wiring
  * information of the system.
  */
-extern void init_wires()
+extern void init_wires(void)
 {
 	int x, y, z, i;
-	ba_node_t *source = NULL;
+	ba_mp_t *source = NULL;
 	if (_wires_initialized)
 		return;
 
 	for(x=0;x<DIM_SIZE[X];x++) {
 		for(y=0;y<DIM_SIZE[Y];y++) {
 			for(z=0;z<DIM_SIZE[Z];z++) {
-				source = &ba_system_ptr->grid[x][y][z];
+				source = &ba_main_grid[x][y][z];
 				for(i=0; i<NUM_PORTS_PER_NODE; i++) {
 					_switch_config(source, source,
 						       X, i, i);
@@ -1146,120 +605,19 @@ extern void init_wires()
 	}
 #ifdef HAVE_BG_FILES
 	_set_external_wires(0,0,NULL,NULL);
-	if (!bp_map_list) {
-		if (set_bp_map() == -1) {
-			return;
-		}
-	}
+	if (bridge_setup_system() == -1)
+		return;
 #endif
 
 	_wires_initialized = true;
 	return;
 }
 
-
-/**
- * destroy all the internal (global) data structs.
- */
-extern void ba_fini()
-{
-	int i = 0;
-
-	if (!_initialized){
-		return;
-	}
-
-	if (path) {
-		list_destroy(path);
-		path = NULL;
-	}
-	if (best_path) {
-		list_destroy(best_path);
-		best_path = NULL;
-	}
-#ifdef HAVE_BG_FILES
-	if (bg)
-		bridge_free_bg(bg);
-
-	if (bp_map_list) {
-		list_destroy(bp_map_list);
-		bp_map_list = NULL;
-		_bp_map_initialized = false;
-	}
-	bridge_fini();
-#endif
-	_delete_ba_system();
-	_initialized = false;
-	_bp_map_initialized = false;
-	_wires_initialized = true;
-	for (i=0; i<HIGHEST_DIMENSIONS; i++)
-		DIM_SIZE[i] = 0;
-
-//	debug3("pa system destroyed");
-}
-
-extern void set_ba_debug_flags(uint32_t debug_flags)
-{
-	ba_debug_flags = debug_flags;
-}
-
-/*
- * set the node in the internal configuration as in, or not in use,
- * along with the current state of the node.
- *
- * IN ba_node: ba_node_t to update state
- * IN state: new state of ba_node_t
- */
-extern void ba_update_node_state(ba_node_t *ba_node, uint16_t state)
-{
-	uint16_t node_base_state = state & NODE_STATE_BASE;
-	uint16_t node_flags = state & NODE_STATE_FLAGS;
-
-	if (!_initialized){
-		error("Error, configuration not initialized, "
-		      "calling ba_init(NULL, 1)");
-		ba_init(NULL, 1);
-	}
-
-#ifdef HAVE_BG_L_P
-	debug2("ba_update_node_state: new state of [%c%c%c] is %s",
-	       alpha_num[ba_node->coord[X]], alpha_num[ba_node->coord[Y]],
-	       alpha_num[ba_node->coord[Z]], node_state_string(state));
-#else
-	debug2("ba_update_node_state: new state of [%d] is %s",
-	       ba_node->coord[X],
-	       node_state_string(state));
-#endif
-
-	/* basically set the node as used */
-	if ((node_base_state == NODE_STATE_DOWN)
-	    || (node_flags & (NODE_STATE_DRAIN | NODE_STATE_FAIL)))
-		ba_node->used = true;
-	else
-		ba_node->used = false;
-
-	ba_node->state = state;
-}
-
-/*
- * copy info from a ba_node, a direct memcpy of the ba_node_t
- *
- * IN ba_node: node to be copied
- * Returned ba_node_t *: copied info must be freed with destroy_ba_node
- */
-extern ba_node_t *ba_copy_node(ba_node_t *ba_node)
-{
-	ba_node_t *new_ba_node = xmalloc(sizeof(ba_node_t));
-
-	memcpy(new_ba_node, ba_node, sizeof(ba_node_t));
-	return new_ba_node;
-}
-
 /*
  * copy the path of the nodes given
  *
- * IN nodes List of ba_node_t *'s: nodes to be copied
- * OUT dest_nodes List of ba_node_t *'s: filled in list of nodes
+ * IN nodes List of ba_mp_t *'s: nodes to be copied
+ * OUT dest_nodes List of ba_mp_t *'s: filled in list of nodes
  * wiring.
  * Return on success SLURM_SUCCESS, on error SLURM_ERROR
  */
@@ -1270,7 +628,7 @@ extern int copy_node_path(List nodes, List *dest_nodes)
 #ifdef HAVE_BG_L_P
 	ListIterator itr = NULL;
 	ListIterator itr2 = NULL;
-	ba_node_t *ba_node = NULL, *new_ba_node = NULL;
+	ba_mp_t *ba_node = NULL, *new_ba_node = NULL;
 	int dim;
 	ba_switch_t *curr_switch = NULL, *new_switch = NULL;
 
@@ -1296,8 +654,8 @@ extern int copy_node_path(List nodes, List *dest_nodes)
 				     alpha_num[ba_node->coord[X]],
 				     alpha_num[ba_node->coord[Y]],
 				     alpha_num[ba_node->coord[Z]]);
-			new_ba_node = ba_copy_node(ba_node);
-			_new_ba_node(new_ba_node, ba_node->coord, false);
+			new_ba_node = ba_copy_mp(ba_node);
+			ba_setup_mp(new_ba_node, false, false);
 			list_push(*dest_nodes, new_ba_node);
 
 		}
@@ -1322,6 +680,17 @@ extern int copy_node_path(List nodes, List *dest_nodes)
 	return rc;
 }
 
+extern ba_mp_t *coord2ba_mp(const uint16_t *coord)
+{
+	if ((coord[X] >= DIM_SIZE[X]) || (coord[Y] >= DIM_SIZE[Y]) ||
+	    (coord[Z] >= DIM_SIZE[Z])) {
+		error("Invalid coordinate %d:%d:%d",
+		      coord[X], coord[Y], coord[Z]);
+		return NULL;
+	}
+	return &ba_main_grid[coord[X]][coord[Y]][coord[Z]];
+}
+
 /*
  * Try to allocate a block.
  *
@@ -1332,9 +701,9 @@ extern int copy_node_path(List nodes, List *dest_nodes)
  *
  * return: success or error of request
  */
-extern int allocate_block(ba_request_t* ba_request, List results)
+extern int allocate_block(select_ba_request_t* ba_request, List results)
 {
-	if (!_initialized){
+	if (!ba_initialized){
 		error("Error, configuration not initialized, "
 		      "calling ba_init(NULL, 1)");
 		ba_init(NULL, 1);
@@ -1358,29 +727,27 @@ extern int allocate_block(ba_request_t* ba_request, List results)
  * Admin wants to remove a previous allocation.
  * will allow Admin to delete a previous allocation retrival by letter code.
  */
-extern int remove_block(List nodes, int new_count, int conn_type)
+extern int remove_block(List nodes, bool is_small)
 {
 	int dim;
-	ba_node_t* curr_ba_node = NULL;
-	ba_node_t* ba_node = NULL;
+	ba_mp_t* curr_ba_node = NULL;
+	ba_mp_t* ba_node = NULL;
 	ba_switch_t *curr_switch = NULL;
 	ListIterator itr;
 
 	itr = list_iterator_create(nodes);
-	while ((curr_ba_node = (ba_node_t*) list_next(itr))) {
+	while ((curr_ba_node = (ba_mp_t*) list_next(itr))) {
 		/* since the list that comes in might not be pointers
 		   to the main list we need to point to that main list */
-		ba_node = &ba_system_ptr->
-			grid[curr_ba_node->coord[X]]
+		ba_node = &ba_main_grid[curr_ba_node->coord[X]]
 			[curr_ba_node->coord[Y]]
 			[curr_ba_node->coord[Z]];
+		if (curr_ba_node->used)
+			ba_node->used &= (~BA_MP_USED_TRUE);
 
-		ba_node->used = false;
-		ba_node->color = 7;
-		ba_node->letter = '.';
 		/* Small blocks don't use wires, and only have 1 node,
 		   so just break. */
-		if (conn_type == SELECT_SMALL)
+		if (is_small)
 			break;
 		for(dim=0;dim<cluster_dims;dim++) {
 			curr_switch = &ba_node->axis_switch[dim];
@@ -1390,100 +757,30 @@ extern int remove_block(List nodes, int new_count, int conn_type)
 		}
 	}
 	list_iterator_destroy(itr);
-	if (new_count == NO_VAL) {
-	} else if (new_count == -1)
-		color_count--;
-	else
-		color_count=new_count;
-	if (color_count < 0)
-		color_count = 0;
 	return 1;
 }
 
 /*
- * Admin wants to change something about a previous allocation.
- * will allow Admin to change previous allocation by giving the
- * letter code for the allocation and the variable to alter
- * (Not currently used in the system, update this if it is)
+ * Used to set a block into a virtual system.  The system can be
+ * cleared first and this function sets all the wires and midplanes
+ * used in the nodelist given.  The nodelist is a list of ba_mp_t's
+ * that are already set up.  This is very handly to test if there are
+ * any passthroughs used by one block when adding another block that
+ * also uses those wires, and neither use any overlapping
+ * midplanes. Doing a simple bitmap & will not reveal this.
+ *
+ * Returns SLURM_SUCCESS if nodelist fits into system without
+ * conflict, and SLURM_ERROR if nodelist conflicts with something
+ * already in the system.
  */
-extern int alter_block(List nodes, int conn_type)
+extern int check_and_set_mp_list(List nodes)
 {
-	/* int dim; */
-/* 	ba_node_t* ba_node = NULL; */
-/* 	ba_switch_t *curr_switch = NULL;  */
-/* 	int size=0; */
-/* 	char *name = NULL; */
-/* 	ListIterator results_i;	 */
-
-	return SLURM_ERROR;
-	/* results_i = list_iterator_create(nodes); */
-/* 	while ((ba_node = list_next(results_i)) != NULL) { */
-/* 		ba_node->used = false; */
-
-/* 		for(dim=0;dim<cluster_dims;dim++) { */
-/* 			curr_switch = &ba_node->axis_switch[dim]; */
-/* 			if (curr_switch->int_wire[0].used) { */
-/* 				_reset_the_path(curr_switch, 0, 1, dim); */
-/* 			} */
-/* 		} */
-/* 		size++; */
-/* 	} */
-/* 	list_iterator_destroy(results_i); */
-/* 	if ((name = _set_internal_wires(nodes, size, conn_type)) == NULL) */
-/* 		return SLURM_ERROR; */
-/* 	else { */
-/* 		xfree(name); */
-/* 		return SLURM_SUCCESS; */
-/* 	} */
-}
-
-/*
- * After a block is deleted or altered following allocations must
- * be redone to make sure correct path will be used in the real system
- * (Not currently used in the system, update this if it is)
- */
-extern int redo_block(List nodes, uint16_t *geo, int conn_type, int new_count)
-{
-       	ba_node_t* ba_node;
-	char *name = NULL;
-
-	ba_node = list_peek(nodes);
-	if (!ba_node)
-		return SLURM_ERROR;
-
-	remove_block(nodes, new_count, conn_type);
-	list_delete_all(nodes, &empty_null_destroy_list, "");
-
-	name = set_bg_block(nodes, ba_node->coord, geo, conn_type);
-	if (!name)
-		return SLURM_ERROR;
-	else {
-		xfree(name);
-		return SLURM_SUCCESS;
-	}
-}
-
-/*
- * Used to set a block into a virtual system.  The system can be
- * cleared first and this function sets all the wires and midplanes
- * used in the nodelist given.  The nodelist is a list of ba_node_t's
- * that are already set up.  This is very handly to test if there are
- * any passthroughs used by one block when adding another block that
- * also uses those wires, and neither use any overlapping
- * midplanes. Doing a simple bitmap & will not reveal this.
- *
- * Returns SLURM_SUCCESS if nodelist fits into system without
- * conflict, and SLURM_ERROR if nodelist conflicts with something
- * already in the system.
- */
-extern int check_and_set_node_list(List nodes)
-{
-	int rc = SLURM_ERROR;
+	int rc = SLURM_ERROR;
 
 #ifdef HAVE_BG_L_P
 	int i, j;
 	ba_switch_t *ba_switch = NULL, *curr_ba_switch = NULL;
-	ba_node_t *ba_node = NULL, *curr_ba_node = NULL;
+	ba_mp_t *ba_node = NULL, *curr_ba_node = NULL;
 	ListIterator itr = NULL;
 
 	if (!nodes)
@@ -1496,8 +793,7 @@ extern int check_and_set_node_list(List nodes)
 /* 		     ba_node->coord[Y], */
 /* 		     ba_node->coord[Z]); */
 
-		curr_ba_node = &ba_system_ptr->
-			grid[ba_node->coord[X]]
+		curr_ba_node = &ba_main_grid[ba_node->coord[X]]
 			[ba_node->coord[Y]]
 			[ba_node->coord[Z]];
 
@@ -1525,7 +821,12 @@ extern int check_and_set_node_list(List nodes)
 		}
 
 		if (ba_node->used)
-			curr_ba_node->used = true;
+			curr_ba_node->used = ba_node->used;
+		if (ba_debug_flags & DEBUG_FLAG_BG_ALGO_DEEP)
+			info("check_and_set_mp_list: "
+			     "%s is used ?= %d %d",
+			     curr_ba_node->coord_str,
+			     curr_ba_node->used, ba_node->used);
 		for(i=0; i<cluster_dims; i++) {
 			ba_switch = &ba_node->axis_switch[i];
 			curr_ba_switch = &curr_ba_node->axis_switch[i];
@@ -1556,1208 +857,183 @@ extern int check_and_set_node_list(List nodes)
 					goto end_it;
 				}
 				if (!ba_switch->int_wire[j].used)
-					continue;
-
-				/* info("setting %c%c%c dim %d port %d -> %d",*/
-/* 				     alpha_num[ba_node->coord[X]],  */
-/* 				     alpha_num[ba_node->coord[Y]], */
-/* 				     alpha_num[ba_node->coord[Z]],  */
-/* 				     i, */
-/* 				     j, */
-/* 				     ba_switch->int_wire[j].port_tar); */
-				curr_ba_switch->int_wire[j].used = 1;
-				curr_ba_switch->int_wire[j].port_tar
-					= ba_switch->int_wire[j].port_tar;
-			}
-		}
-	}
-	rc = SLURM_SUCCESS;
-end_it:
-	list_iterator_destroy(itr);
-#endif
-	return rc;
-}
-
-/*
- * Used to find, and set up midplanes and the wires in the virtual
- * system and return them in List results
- *
- * IN/OUT results - a list with a NULL destroyer filled in with
- *        midplanes and wires set to create the block with the api. If
- *        only interested in the hostlist NULL can be excepted also.
- * IN start - where to start the allocation.
- * IN geometry - the requested geometry of the block.
- * IN conn_type - mesh, torus, or small.
- *
- * RET char * - hostlist of midplanes results represent must be
- *     xfreed.  NULL on failure
- */
-extern char *set_bg_block(List results, uint16_t *start,
-			  uint16_t *geometry, int conn_type)
-{
-	char *name = NULL;
-	ba_node_t* ba_node = NULL;
-	int size = 0;
-	int send_results = 0;
-	int found = 0;
-
-
-	if (cluster_dims == 1) {
-		if (start[X]>=DIM_SIZE[X])
-			return NULL;
-		size = geometry[X];
-		ba_node = &ba_system_ptr->grid[start[X]][0][0];
-	} else {
-		if (start[X]>=DIM_SIZE[X]
-		    || start[Y]>=DIM_SIZE[Y]
-		    || start[Z]>=DIM_SIZE[Z])
-			return NULL;
-
-		if (geometry[X] <= 0 || geometry[Y] <= 0 || geometry[Z] <= 0) {
-			error("problem with geometry %c%c%c, needs to be "
-			      "at least 111",
-			      alpha_num[geometry[X]],
-			      alpha_num[geometry[Y]],
-			      alpha_num[geometry[Z]]);
-			return NULL;
-		}
-		/* info("looking at %d%d%d", geometry[X], */
-		/*      geometry[Y], geometry[Z]); */
-		size = geometry[X] * geometry[Y] * geometry[Z];
-		ba_node = &ba_system_ptr->grid[start[X]][start[Y]][start[Z]];
-	}
-
-	if (!ba_node)
-		return NULL;
-
-	if (!results)
-		results = list_create(NULL);
-	else
-		send_results = 1;
-	/* This midplane should have already been checked if it was in
-	   use or not */
-	list_append(results, ba_node);
-	if (conn_type >= SELECT_SMALL) {
-		/* adding the ba_node and ending */
-		ba_node->used = true;
-		name = xstrdup_printf("%c%c%c",
-				      alpha_num[ba_node->coord[X]],
-				      alpha_num[ba_node->coord[Y]],
-				      alpha_num[ba_node->coord[Z]]);
-		if (ba_node->letter == '.') {
-			ba_node->letter = letters[color_count%62];
-			ba_node->color = colors[color_count%6];
-			if (ba_debug_flags & DEBUG_FLAG_BG_ALGO_DEEP)
-				info("count %d setting letter = %c "
-				     "color = %d",
-				     color_count,
-				     ba_node->letter,
-				     ba_node->color);
-			color_count++;
-		}
-		goto end_it;
-	}
-	found = _find_x_path(results, ba_node,
-			     ba_node->coord,
-			     geometry[X],
-			     1,
-			     conn_type, BLOCK_ALGO_FIRST);
-
-	if (!found) {
-		debug2("trying less efficient code");
-		remove_block(results, color_count, conn_type);
-		list_delete_all(results, &empty_null_destroy_list, "");
-		list_append(results, ba_node);
-		found = _find_x_path(results, ba_node,
-				     ba_node->coord,
-				     geometry[X],
-				     1,
-				     conn_type, BLOCK_ALGO_SECOND);
-	}
-	if (found) {
-		if (cluster_flags & CLUSTER_FLAG_BG) {
-			List start_list = NULL;
-			ListIterator itr;
-
-			start_list = list_create(NULL);
-			itr = list_iterator_create(results);
-			while ((ba_node = (ba_node_t*) list_next(itr))) {
-				list_append(start_list, ba_node);
-			}
-			list_iterator_destroy(itr);
-
-			if (!_fill_in_coords(results,
-					     start_list,
-					     geometry,
-					     conn_type)) {
-				list_destroy(start_list);
-				goto end_it;
-			}
-			list_destroy(start_list);
-		}
-	} else {
-		goto end_it;
-	}
-
-	name = _set_internal_wires(results,
-				   size,
-				   conn_type);
-end_it:
-	if (!send_results && results) {
-		list_destroy(results);
-		results = NULL;
-	}
-	if (name!=NULL) {
-		debug2("name = %s", name);
-	} else {
-		debug2("can't allocate");
-		xfree(name);
-	}
-
-	return name;
-}
-
-/*
- * Resets the virtual system to a virgin state.  If track_down_nodes is set
- * then those midplanes are not set to idle, but kept in a down state.
- */
-extern int reset_ba_system(bool track_down_nodes)
-{
-	int x, y, z;
-	uint16_t coord[cluster_dims];
-
-	for (x = 0; x < DIM_SIZE[X]; x++) {
-		for (y = 0; y < DIM_SIZE[Y]; y++)
-			for (z = 0; z < DIM_SIZE[Z]; z++) {
-				coord[X] = x;
-				coord[Y] = y;
-				coord[Z] = z;
-				_new_ba_node(&ba_system_ptr->grid[x][y][z],
-					     coord, track_down_nodes);
-			}
-	}
-
-	return 1;
-}
-
-/*
- * Used to set all midplanes in a special used state except the ones
- * we are able to use in a new allocation.
- *
- * IN: hostlist of midplanes we do not want
- * RET: SLURM_SUCCESS on success, or SLURM_ERROR on error
- *
- * Note: Need to call reset_all_removed_bps before starting another
- * allocation attempt after
- */
-extern int removable_set_bps(char *bps)
-{
-#ifdef HAVE_BG_L_P
-	int j=0, number;
-	int x;
-	int y,z;
-	int start[cluster_dims];
-        int end[cluster_dims];
-
-	if (!bps)
-		return SLURM_ERROR;
-
-	while (bps[j] != '\0') {
-		if ((bps[j] == '[' || bps[j] == ',')
-		    && (bps[j+8] == ']' || bps[j+8] == ',')
-		    && (bps[j+4] == 'x' || bps[j+4] == '-')) {
-
-			j++;
-			number = xstrntol(bps + j, &p, cluster_dims,
-					  cluster_base);
-			hostlist_parse_int_to_array(
-				number, start, cluster_dims, cluster_base);
-			j += 4;
-			number = xstrntol(bps + j, &p, cluster_dims,
-					  cluster_base);
-			hostlist_parse_int_to_array(
-				number, end, cluster_dims, cluster_base);
-			j += 3;
-			for (x = start[X]; x <= end[X]; x++) {
-				for (y = start[Y]; y <= end[Y]; y++) {
-					for (z = start[Z]; z <= end[Z]; z++) {
-						if (!ba_system_ptr->
-						    grid[x][y][z].used)
-							ba_system_ptr->
-								grid[x][y][z]
-								.used = 2;
-					}
-				}
-			}
-
-			if (bps[j] != ',')
-				break;
-			j--;
-		} else if ((bps[j] >= '0' && bps[j] <= '9')
-			   || (bps[j] >= 'A' && bps[j] <= 'Z')) {
-			number = xstrntol(bps + j, &p, cluster_dims,
-					  cluster_base);
-			hostlist_parse_int_to_array(
-				number, start, cluster_dims, cluster_base);
-			x = start[X];
-			y = start[Y];
-			z = start[Z];
-			j+=3;
-			if (!ba_system_ptr->grid[x][y][z].used)
-				ba_system_ptr->grid[x][y][z].used = 2;
-
-			if (bps[j] != ',')
-				break;
-			j--;
-		}
-		j++;
-	}
-#endif
- 	return SLURM_SUCCESS;
-}
-
-/*
- * Resets the virtual system to the pervious state before calling
- * removable_set_bps, or set_all_bps_except.
- */
-extern int reset_all_removed_bps()
-{
-	int x, y, z;
-
-	for (x = 0; x < DIM_SIZE[X]; x++) {
-		for (y = 0; y < DIM_SIZE[Y]; y++)
-			for (z = 0; z < DIM_SIZE[Z]; z++)
-				if (ba_system_ptr->grid[x][y][z].used == 2)
-					ba_system_ptr->grid[x][y][z].used = 0;
-	}
-	return SLURM_SUCCESS;
-}
-
-/*
- * IN: hostlist of midplanes we want to be able to use, mark all
- *     others as used.
- * RET: SLURM_SUCCESS on success, or SLURM_ERROR on error
- *
- * Need to call reset_all_removed_bps before starting another
- * allocation attempt if possible use removable_set_bps since it is
- * faster. It does basically the opposite of this function. If you
- * have to come up with this list though it is faster to use this
- * function than if you have to call bitmap2node_name since that is slow.
- */
-extern int set_all_bps_except(char *bps)
-{
-	int x, y, z;
-	hostlist_t hl = hostlist_create(bps);
-	char *host = NULL, *numeric = NULL;
-	int number, coords[HIGHEST_DIMENSIONS];
-
-	memset(coords, 0, sizeof(coords));
-
-	while ((host = hostlist_shift(hl))){
-		numeric = host;
-		number = 0;
-		while (numeric) {
-			if (numeric[0] < '0' || numeric[0] > 'Z'
-			    || (numeric[0] > '9'
-				&& numeric[0] < 'A')) {
-				numeric++;
-				continue;
-			}
-			number = xstrntol(numeric, &p, cluster_dims,
-					  cluster_base);
-			break;
-		}
-		hostlist_parse_int_to_array(
-			number, coords, cluster_dims, cluster_base);
-		ba_system_ptr->grid[coords[X]][coords[Y]][coords[Z]].state
-			|= NODE_RESUME;
-		free(host);
-	}
-	hostlist_destroy(hl);
-
-	for (x = 0; x < DIM_SIZE[X]; x++) {
-		for (y = 0; y < DIM_SIZE[Y]; y++)
-			for (z = 0; z < DIM_SIZE[Z]; z++) {
-				if (ba_system_ptr->grid[x][y][z].state
-				    & NODE_RESUME) {
-					/* clear the bit and mark as unused */
-					ba_system_ptr->grid[x][y][z].state &=
-						~NODE_RESUME;
-				} else if (!ba_system_ptr->grid[x][y][z].used) {
-					ba_system_ptr->grid[x][y][z].used = 2;
-				}
-			}
-	}
-
- 	return SLURM_SUCCESS;
-}
-
-/*
- * set values of every grid point (used in smap)
- */
-extern void init_grid(node_info_msg_t * node_info_ptr)
-{
-	int i = 0, j, x, y, z;
-	ba_node_t *ba_node = NULL;
-	char *host;
-
-	if (!node_info_ptr) {
-		for (x = 0; x < DIM_SIZE[X]; x++) {
-			for (y = 0; y < DIM_SIZE[Y]; y++) {
-				for (z = 0; z < DIM_SIZE[Z]; z++) {
-					ba_node = &ba_system_ptr->grid[x][y][z];
-					ba_node->color = 7;
-					ba_node->letter = '.';
-					ba_node->state = NODE_STATE_IDLE;
-					ba_node->index = i++;
-				}
-			}
-		}
-		return;
-	}
-
-	for (j = 0; j < node_info_ptr->record_count; j++) {
-		node_info_t *node_ptr = &node_info_ptr->node_array[j];
-		host = node_ptr->name;
-		if (!host)
-			continue;
-		if (cluster_dims == 1) {
-			x = j;
-			y = 0;
-			z = 0;
-		} else {
-			if ((i = strlen(host)) < 3)
-				continue;
-			x = _coord(host[i-3]);
-			y = _coord(host[i-2]);
-			z = _coord(host[i-1]);
-		}
-
-		if ((x < 0) || (y < 0) || (z < 0))
-			continue;
-
-		ba_node = &ba_system_ptr->grid[x][y][z];
-		ba_node->index = j;
-		if (IS_NODE_DOWN(node_ptr) || IS_NODE_DRAIN(node_ptr)) {
-			ba_node->color = 0;
-			ba_node->letter = '#';
-			if (_initialized)
-				ba_update_node_state(
-					ba_node, node_ptr->node_state);
-		} else {
-			ba_node->color = 7;
-			ba_node->letter = '.';
-		}
-		ba_node->state = node_ptr->node_state;
-	}
-}
-
-/*
- * Convert a BG API error code to a string
- * IN inx - error code from any of the BG Bridge APIs
- * RET - string describing the error condition
- */
-extern char *bg_err_str(status_t inx)
-{
-#ifdef HAVE_BG_FILES
-	switch (inx) {
-	case STATUS_OK:
-		return "Status OK";
-	case PARTITION_NOT_FOUND:
-		return "Partition not found";
-	case JOB_NOT_FOUND:
-		return "Job not found";
-	case BP_NOT_FOUND:
-		return "Base partition not found";
-	case SWITCH_NOT_FOUND:
-		return "Switch not found";
-#ifndef HAVE_BGL
-	case PARTITION_ALREADY_DEFINED:
-		return "Partition already defined";
-#endif
-	case JOB_ALREADY_DEFINED:
-		return "Job already defined";
-	case CONNECTION_ERROR:
-		return "Connection error";
-	case INTERNAL_ERROR:
-		return "Internal error";
-	case INVALID_INPUT:
-		return "Invalid input";
-	case INCOMPATIBLE_STATE:
-		return "Incompatible state";
-	case INCONSISTENT_DATA:
-		return "Inconsistent data";
-	}
-#endif
-
-	return "?";
-}
-
-/*
- * Set up the map for resolving
- */
-extern int set_bp_map(void)
-{
-#ifdef HAVE_BG_FILES
-	int rc;
-	rm_BP_t *my_bp = NULL;
-	ba_bp_map_t *bp_map = NULL;
-	int bp_num, i;
-	char *bp_id = NULL;
-	rm_location_t bp_loc;
-
-	if (_bp_map_initialized)
-		return 1;
-
-	bp_map_list = list_create(_bp_map_list_del);
-
-	if (!have_db2) {
-		error("Can't access DB2 library, run from service node");
-		return -1;
-	}
-
-#ifdef HAVE_BGL
-	if (!getenv("DB2INSTANCE") || !getenv("VWSPATH")) {
-		error("Missing DB2INSTANCE or VWSPATH env var.  "
-		      "Execute 'db2profile'");
-		return -1;
-	}
-#endif
-
-	if (!bg) {
-		if ((rc = bridge_get_bg(&bg)) != STATUS_OK) {
-			error("bridge_get_BG(): %d", rc);
-			return -1;
-		}
-	}
-
-	if ((rc = bridge_get_data(bg, RM_BPNum, &bp_num)) != STATUS_OK) {
-		error("bridge_get_data(RM_BPNum): %d", rc);
-		bp_num = 0;
-	}
-
-	for (i=0; i<bp_num; i++) {
-
-		if (i) {
-			if ((rc = bridge_get_data(bg, RM_NextBP, &my_bp))
-			    != STATUS_OK) {
-				error("bridge_get_data(RM_NextBP): %d", rc);
-				break;
-			}
-		} else {
-			if ((rc = bridge_get_data(bg, RM_FirstBP, &my_bp))
-			    != STATUS_OK) {
-				error("bridge_get_data(RM_FirstBP): %d", rc);
-				break;
-			}
-		}
-
-		bp_map = (ba_bp_map_t *) xmalloc(sizeof(ba_bp_map_t));
-
-		if ((rc = bridge_get_data(my_bp, RM_BPID, &bp_id))
-		    != STATUS_OK) {
-			xfree(bp_map);
-			error("bridge_get_data(RM_BPID): %d", rc);
-			continue;
-		}
-
-		if (!bp_id) {
-			error("No BP ID was returned from database");
-			continue;
-		}
-
-		if ((rc = bridge_get_data(my_bp, RM_BPLoc, &bp_loc))
-		    != STATUS_OK) {
-			xfree(bp_map);
-			error("bridge_get_data(RM_BPLoc): %d", rc);
-			continue;
-		}
-
-		bp_map->bp_id = xstrdup(bp_id);
-		bp_map->coord[X] = bp_loc.X;
-		bp_map->coord[Y] = bp_loc.Y;
-		bp_map->coord[Z] = bp_loc.Z;
-
-		list_push(bp_map_list, bp_map);
-
-		free(bp_id);
-	}
-#endif
-	_bp_map_initialized = true;
-	return 1;
-
-}
-
-/*
- * find a base blocks bg location
- */
-extern uint16_t *find_bp_loc(char* bp_id)
-{
-#ifdef HAVE_BG_FILES
-	ba_bp_map_t *bp_map = NULL;
-	ListIterator itr;
-	char *check = NULL;
-
-	if (!bp_map_list) {
-		if (set_bp_map() == -1)
-			return NULL;
-	}
-
-	check = xstrdup(bp_id);
-	/* with BGP they changed the names of the rack midplane action from
-	 * R000 to R00-M0 so we now support both formats for each of the
-	 * systems */
-#ifdef HAVE_BGL
-	if (check[3] == '-') {
-		if (check[5]) {
-			check[3] = check[5];
-			check[4] = '\0';
-		}
-	}
-
-	if ((check[1] < '0' || check[1] > '9')
-	    || (check[2] < '0' || check[2] > '9')
-	    || (check[3] < '0' || check[3] > '9')) {
-		error("%s is not a valid Rack-Midplane (i.e. R000)", bp_id);
-		goto cleanup;
-	}
-
-#else
-	if (check[3] != '-') {
-		xfree(check);
-		check = xstrdup_printf("R%c%c-M%c",
-				       bp_id[1], bp_id[2], bp_id[3]);
-	}
-
-	if ((check[1] < '0' || check[1] > '9')
-	    || (check[2] < '0' || check[2] > '9')
-	    || (check[5] < '0' || check[5] > '9')) {
-		error("%s is not a valid Rack-Midplane (i.e. R00-M0)", bp_id);
-		goto cleanup;
-	}
-#endif
-
-	itr = list_iterator_create(bp_map_list);
-	while ((bp_map = list_next(itr)))
-		if (!strcasecmp(bp_map->bp_id, check))
-			break;	/* we found it */
-	list_iterator_destroy(itr);
-
-cleanup:
-	xfree(check);
-
-	if (bp_map != NULL)
-		return bp_map->coord;
-	else
-		return NULL;
-
-#else
-	return NULL;
-#endif
-}
-
-/*
- * find a rack/midplace location
- */
-extern char *find_bp_rack_mid(char* xyz)
-{
-#ifdef HAVE_BG_FILES
-	ba_bp_map_t *bp_map = NULL;
-	ListIterator itr;
-	int number;
-	int coord[cluster_dims];
-	int len = strlen(xyz);
-
-	len -= 3;
-	if (len<0)
-		return NULL;
-
-	if ((xyz[len] < '0' || xyz[len] > '9')
-	    || (xyz[len+1] < '0' || xyz[len+1] > '9')
-	    || (xyz[len+2] < '0' || xyz[len+2] > '9')) {
-		error("%s is not a valid Location (i.e. 000)", xyz);
-		return NULL;
-	}
-
-	number = xstrntol(xyz + len, &p, cluster_dims, cluster_base);
-	hostlist_parse_int_to_array(number, coord, cluster_dims, cluster_base);
-
-	if (!bp_map_list) {
-		if (set_bp_map() == -1)
-			return NULL;
-	}
-
-	itr = list_iterator_create(bp_map_list);
-	while ((bp_map = list_next(itr)) != NULL)
-		if (bp_map->coord[X] == coord[X] &&
-		    bp_map->coord[Y] == coord[Y] &&
-		    bp_map->coord[Z] == coord[Z])
-			break;	/* we found it */
-
-	list_iterator_destroy(itr);
-	if (bp_map != NULL)
-		return bp_map->bp_id;
-	else
-		return NULL;
-
-#else
-	return NULL;
-#endif
-}
-
-/*
- * set the used wires in the virtual system for a block from the real system
- */
-extern int load_block_wiring(char *bg_block_id)
-{
-#ifdef HAVE_BG_FILES
-	int rc, i, j;
-	rm_partition_t *block_ptr = NULL;
-	int cnt = 0;
-	int switch_cnt = 0;
-	rm_switch_t *curr_switch = NULL;
-	rm_BP_t *curr_bp = NULL;
-	char *switchid = NULL;
-	rm_connection_t curr_conn;
-	int dim;
-	ba_switch_t *ba_switch = NULL;
-	uint16_t *geo = NULL;
-
-	if (ba_debug_flags & DEBUG_FLAG_BG_ALGO)
-		info("getting info for block %s", bg_block_id);
-
-	if ((rc = bridge_get_block(bg_block_id,  &block_ptr)) != STATUS_OK) {
-		error("bridge_get_block(%s): %s",
-		      bg_block_id,
-		      bg_err_str(rc));
-		return SLURM_ERROR;
-	}
-
-	if ((rc = bridge_get_data(block_ptr, RM_PartitionSwitchNum,
-				  &switch_cnt)) != STATUS_OK) {
-		error("bridge_get_data(RM_PartitionSwitchNum): %s",
-		      bg_err_str(rc));
-		return SLURM_ERROR;
-	}
-	if (!switch_cnt) {
-		if (ba_debug_flags & DEBUG_FLAG_BG_ALGO_DEEP)
-			info("no switch_cnt");
-		if ((rc = bridge_get_data(block_ptr,
-					  RM_PartitionFirstBP,
-					  &curr_bp))
-		    != STATUS_OK) {
-			error("bridge_get_data: "
-			      "RM_PartitionFirstBP: %s",
-			      bg_err_str(rc));
-			return SLURM_ERROR;
-		}
-		if ((rc = bridge_get_data(curr_bp, RM_BPID, &switchid))
-		    != STATUS_OK) {
-			error("bridge_get_data: RM_SwitchBPID: %s",
-			      bg_err_str(rc));
-			return SLURM_ERROR;
-		}
-
-		geo = find_bp_loc(switchid);
-		if (!geo) {
-			error("find_bp_loc: bpid %s not known", switchid);
-			return SLURM_ERROR;
-		}
-		ba_system_ptr->grid[geo[X]][geo[Y]][geo[Z]].used = true;
-		return SLURM_SUCCESS;
-	}
-	for (i=0; i<switch_cnt; i++) {
-		if (i) {
-			if ((rc = bridge_get_data(block_ptr,
-						  RM_PartitionNextSwitch,
-						  &curr_switch))
-			    != STATUS_OK) {
-				error("bridge_get_data: "
-				      "RM_PartitionNextSwitch: %s",
-				      bg_err_str(rc));
-				return SLURM_ERROR;
-			}
-		} else {
-			if ((rc = bridge_get_data(block_ptr,
-						  RM_PartitionFirstSwitch,
-						  &curr_switch))
-			    != STATUS_OK) {
-				error("bridge_get_data: "
-				      "RM_PartitionFirstSwitch: %s",
-				      bg_err_str(rc));
-				return SLURM_ERROR;
-			}
-		}
-		if ((rc = bridge_get_data(curr_switch, RM_SwitchDim, &dim))
-		    != STATUS_OK) {
-			error("bridge_get_data: RM_SwitchDim: %s",
-			      bg_err_str(rc));
-			return SLURM_ERROR;
-		}
-		if ((rc = bridge_get_data(curr_switch, RM_SwitchBPID,
-					  &switchid))
-		    != STATUS_OK) {
-			error("bridge_get_data: RM_SwitchBPID: %s",
-			      bg_err_str(rc));
-			return SLURM_ERROR;
-		}
-
-		geo = find_bp_loc(switchid);
-		if (!geo) {
-			error("find_bp_loc: bpid %s not known", switchid);
-			return SLURM_ERROR;
-		}
-
-		if ((rc = bridge_get_data(curr_switch, RM_SwitchConnNum, &cnt))
-		    != STATUS_OK) {
-			error("bridge_get_data: RM_SwitchBPID: %s",
-			      bg_err_str(rc));
-			return SLURM_ERROR;
-		}
-		if (ba_debug_flags & DEBUG_FLAG_BG_ALGO)
-			info("switch id = %s dim %d conns = %d",
-			     switchid, dim, cnt);
-		ba_switch = &ba_system_ptr->
-			grid[geo[X]][geo[Y]][geo[Z]].axis_switch[dim];
-		for (j=0; j<cnt; j++) {
-			if (j) {
-				if ((rc = bridge_get_data(
-					     curr_switch,
-					     RM_SwitchNextConnection,
-					     &curr_conn))
-				    != STATUS_OK) {
-					error("bridge_get_data: "
-					      "RM_SwitchNextConnection: %s",
-					      bg_err_str(rc));
-					return SLURM_ERROR;
-				}
-			} else {
-				if ((rc = bridge_get_data(
-					     curr_switch,
-					     RM_SwitchFirstConnection,
-					     &curr_conn))
-				    != STATUS_OK) {
-					error("bridge_get_data: "
-					      "RM_SwitchFirstConnection: %s",
-					      bg_err_str(rc));
-					return SLURM_ERROR;
-				}
-			}
-			switch(curr_conn.p1) {
-			case RM_PORT_S1:
-				curr_conn.p1 = 1;
-				break;
-			case RM_PORT_S2:
-				curr_conn.p1 = 2;
-				break;
-			case RM_PORT_S4:
-				curr_conn.p1 = 4;
-				break;
-			default:
-				error("1 unknown port %d",
-				      _port_enum(curr_conn.p1));
-				return SLURM_ERROR;
-			}
-
-			switch(curr_conn.p2) {
-			case RM_PORT_S0:
-				curr_conn.p2 = 0;
-				break;
-			case RM_PORT_S3:
-				curr_conn.p2 = 3;
-				break;
-			case RM_PORT_S5:
-				curr_conn.p2 = 5;
-				break;
-			default:
-				error("2 unknown port %d",
-				      _port_enum(curr_conn.p2));
-				return SLURM_ERROR;
-			}
-
-			if (curr_conn.p1 == 1 && dim == X) {
-				if (ba_system_ptr->
-				    grid[geo[X]][geo[Y]][geo[Z]].used) {
-					debug("I have already been to "
-					      "this node %c%c%c",
-					      alpha_num[geo[X]],
-					      alpha_num[geo[Y]],
-					      alpha_num[geo[Z]]);
-					return SLURM_ERROR;
-				}
-				ba_system_ptr->grid[geo[X]][geo[Y]][geo[Z]].
-					used = true;
-			}
-			if (ba_debug_flags & DEBUG_FLAG_BG_ALGO_DEEP)
-				info("connection going from %d -> %d",
-				     curr_conn.p1, curr_conn.p2);
-
-			if (ba_switch->int_wire[curr_conn.p1].used) {
-				debug("%c%c%c dim %d port %d "
-				      "is already in use",
-				      alpha_num[geo[X]],
-				      alpha_num[geo[Y]],
-				      alpha_num[geo[Z]],
-				      dim,
-				      curr_conn.p1);
-				return SLURM_ERROR;
-			}
-			ba_switch->int_wire[curr_conn.p1].used = 1;
-			ba_switch->int_wire[curr_conn.p1].port_tar
-				= curr_conn.p2;
-
-			if (ba_switch->int_wire[curr_conn.p2].used) {
-				debug("%c%c%c dim %d port %d "
-				      "is already in use",
-				      alpha_num[geo[X]],
-				      alpha_num[geo[Y]],
-				      alpha_num[geo[Z]],
-				      dim,
-				      curr_conn.p2);
-				return SLURM_ERROR;
+					continue;
+
+				/* info("setting %c%c%c dim %d port %d -> %d",*/
+/* 				     alpha_num[ba_node->coord[X]],  */
+/* 				     alpha_num[ba_node->coord[Y]], */
+/* 				     alpha_num[ba_node->coord[Z]],  */
+/* 				     i, */
+/* 				     j, */
+/* 				     ba_switch->int_wire[j].port_tar); */
+				curr_ba_switch->int_wire[j].used = 1;
+				curr_ba_switch->int_wire[j].port_tar
+					= ba_switch->int_wire[j].port_tar;
 			}
-			ba_switch->int_wire[curr_conn.p2].used = 1;
-			ba_switch->int_wire[curr_conn.p2].port_tar
-				= curr_conn.p1;
 		}
 	}
-	return SLURM_SUCCESS;
-
-#else
-	return SLURM_ERROR;
+	rc = SLURM_SUCCESS;
+end_it:
+	list_iterator_destroy(itr);
 #endif
-
+	return rc;
 }
 
 /*
- * get the used wires for a block out of the database and return the
- * node list.  The block_ptr here must be gotten with bridge_get_block
- * not bridge_get_block_info, if you are looking to recover from
- * before.  If you are looking to start clean it doesn't matter.
+ * Used to find, and set up midplanes and the wires in the virtual
+ * system and return them in List results
+ *
+ * IN/OUT results - a list with a NULL destroyer filled in with
+ *        midplanes and wires set to create the block with the api. If
+ *        only interested in the hostlist NULL can be excepted also.
+ * IN start - where to start the allocation.
+ * IN geometry - the requested geometry of the block.
+ * IN conn_type - mesh, torus, or small.
+ *
+ * RET char * - hostlist of midplanes results represent must be
+ *     xfreed.  NULL on failure
  */
-extern List get_and_set_block_wiring(char *bg_block_id,
-				     rm_partition_t *block_ptr)
+extern char *set_bg_block(List results, uint16_t *start,
+			  uint16_t *geometry, uint16_t *conn_type)
 {
-#ifdef HAVE_BG_FILES
-	int rc, i, j;
-	int cnt = 0;
-	int switch_cnt = 0;
-	rm_switch_t *curr_switch = NULL;
-	rm_BP_t *curr_bp = NULL;
-	char *switchid = NULL;
-	rm_connection_t curr_conn;
-	int dim;
-	ba_node_t *ba_node = NULL;
-	ba_switch_t *ba_switch = NULL;
-	uint16_t *geo = NULL;
-	List results = list_create(destroy_ba_node);
-	ListIterator itr = NULL;
+	char *name = NULL;
+	ba_mp_t* ba_node = NULL;
+	int size = 0;
+	int send_results = 0;
+	int found = 0;
 
-	if (ba_debug_flags & DEBUG_FLAG_BG_ALGO)
-		info("getting info for block %s", bg_block_id);
 
-	if ((rc = bridge_get_data(block_ptr, RM_PartitionSwitchNum,
-				  &switch_cnt)) != STATUS_OK) {
-		error("bridge_get_data(RM_PartitionSwitchNum): %s",
-		      bg_err_str(rc));
-		goto end_it;
-	}
-	if (!switch_cnt) {
-		if (ba_debug_flags & DEBUG_FLAG_BG_ALGO_DEEP)
-			info("no switch_cnt");
-		if ((rc = bridge_get_data(block_ptr,
-					  RM_PartitionFirstBP,
-					  &curr_bp))
-		    != STATUS_OK) {
-			error("bridge_get_data: "
-			      "RM_PartitionFirstBP: %s",
-			      bg_err_str(rc));
-			goto end_it;
-		}
-		if ((rc = bridge_get_data(curr_bp, RM_BPID, &switchid))
-		    != STATUS_OK) {
-			error("bridge_get_data: RM_SwitchBPID: %s",
-			      bg_err_str(rc));
-			goto end_it;
-		}
+	if (cluster_dims == 1) {
+		if (start[X]>=DIM_SIZE[X])
+			return NULL;
+		size = geometry[X];
+		ba_node = &ba_main_grid[start[X]][0][0];
+	} else {
+		if (start[X]>=DIM_SIZE[X]
+		    || start[Y]>=DIM_SIZE[Y]
+		    || start[Z]>=DIM_SIZE[Z])
+			return NULL;
 
-		geo = find_bp_loc(switchid);
-		if (!geo) {
-			error("find_bp_loc: bpid %s not known", switchid);
-			goto end_it;
+		if (geometry[X] <= 0 || geometry[Y] <= 0 || geometry[Z] <= 0) {
+			error("problem with geometry %c%c%c, needs to be "
+			      "at least 111",
+			      alpha_num[geometry[X]],
+			      alpha_num[geometry[Y]],
+			      alpha_num[geometry[Z]]);
+			return NULL;
 		}
-		ba_node = xmalloc(sizeof(ba_node_t));
-		list_push(results, ba_node);
-		ba_node->coord[X] = geo[X];
-		ba_node->coord[Y] = geo[Y];
-		ba_node->coord[Z] = geo[Z];
-
-		ba_node->used = TRUE;
-		return results;
+		/* info("looking at %d%d%d", geometry[X], */
+		/*      geometry[Y], geometry[Z]); */
+		size = geometry[X] * geometry[Y] * geometry[Z];
+		ba_node = &ba_main_grid[start[X]][start[Y]][start[Z]];
 	}
-	for (i=0; i<switch_cnt; i++) {
-		if (i) {
-			if ((rc = bridge_get_data(block_ptr,
-						  RM_PartitionNextSwitch,
-						  &curr_switch))
-			    != STATUS_OK) {
-				error("bridge_get_data: "
-				      "RM_PartitionNextSwitch: %s",
-				      bg_err_str(rc));
-				goto end_it;
-			}
-		} else {
-			if ((rc = bridge_get_data(block_ptr,
-						  RM_PartitionFirstSwitch,
-						  &curr_switch))
-			    != STATUS_OK) {
-				error("bridge_get_data: "
-				      "RM_PartitionFirstSwitch: %s",
-				      bg_err_str(rc));
-				goto end_it;
-			}
-		}
-		if ((rc = bridge_get_data(curr_switch, RM_SwitchDim, &dim))
-		    != STATUS_OK) {
-			error("bridge_get_data: RM_SwitchDim: %s",
-			      bg_err_str(rc));
-			goto end_it;
-		}
-		if ((rc = bridge_get_data(curr_switch, RM_SwitchBPID,
-					  &switchid))
-		    != STATUS_OK) {
-			error("bridge_get_data: RM_SwitchBPID: %s",
-			      bg_err_str(rc));
-			goto end_it;
-		}
 
-		geo = find_bp_loc(switchid);
-		if (!geo) {
-			error("find_bp_loc: bpid %s not known", switchid);
-			goto end_it;
-		}
+	if (!ba_node)
+		return NULL;
 
-		if ((rc = bridge_get_data(curr_switch, RM_SwitchConnNum, &cnt))
-		    != STATUS_OK) {
-			error("bridge_get_data: RM_SwitchBPID: %s",
-			      bg_err_str(rc));
-			goto end_it;
-		}
-		if (ba_debug_flags & DEBUG_FLAG_BG_ALGO)
-			info("switch id = %s dim %d conns = %d",
-			     switchid, dim, cnt);
+	if (!results)
+		results = list_create(NULL);
+	else
+		send_results = 1;
 
-		itr = list_iterator_create(results);
-		while ((ba_node = list_next(itr))) {
-			if (ba_node->coord[X] == geo[X] &&
-			    ba_node->coord[Y] == geo[Y] &&
-			    ba_node->coord[Z] == geo[Z])
-				break;	/* we found it */
-		}
-		list_iterator_destroy(itr);
-		if (!ba_node) {
-			ba_node = xmalloc(sizeof(ba_node_t));
+	/* This midplane should have already been checked if it was in
+	   use or not */
+	list_append(results, ba_node);
+	if (conn_type[0] >= SELECT_SMALL) {
+		/* adding the ba_node and ending */
+		ba_node->used |= BA_MP_USED_TRUE;
+		name = xstrdup_printf("%s", ba_node->coord_str);
+		goto end_it;
+	}
+	found = _find_x_path(results, ba_node,
+			     ba_node->coord,
+			     geometry[X],
+			     1,
+			     conn_type[0], BLOCK_ALGO_FIRST);
 
-			list_push(results, ba_node);
-			ba_node->coord[X] = geo[X];
-			ba_node->coord[Y] = geo[Y];
-			ba_node->coord[Z] = geo[Z];
-		}
-		ba_switch = &ba_node->axis_switch[dim];
-		for (j=0; j<cnt; j++) {
-			if (j) {
-				if ((rc = bridge_get_data(
-					     curr_switch,
-					     RM_SwitchNextConnection,
-					     &curr_conn))
-				    != STATUS_OK) {
-					error("bridge_get_data: "
-					      "RM_SwitchNextConnection: %s",
-					      bg_err_str(rc));
-					goto end_it;
-				}
-			} else {
-				if ((rc = bridge_get_data(
-					     curr_switch,
-					     RM_SwitchFirstConnection,
-					     &curr_conn))
-				    != STATUS_OK) {
-					error("bridge_get_data: "
-					      "RM_SwitchFirstConnection: %s",
-					      bg_err_str(rc));
-					goto end_it;
-				}
-			}
-			switch(curr_conn.p1) {
-			case RM_PORT_S1:
-				curr_conn.p1 = 1;
-				break;
-			case RM_PORT_S2:
-				curr_conn.p1 = 2;
-				break;
-			case RM_PORT_S4:
-				curr_conn.p1 = 4;
-				break;
-			default:
-				error("1 unknown port %d",
-				      _port_enum(curr_conn.p1));
-				goto end_it;
-			}
+	if (!found) {
+		bool is_small = 0;
+		if (conn_type[0] == SELECT_SMALL)
+			is_small = 1;
+		debug2("trying less efficient code");
+		remove_block(results, is_small);
+		list_flush(results);
+		list_append(results, ba_node);
+		found = _find_x_path(results, ba_node,
+				     ba_node->coord,
+				     geometry[X],
+				     1,
+				     conn_type[0], BLOCK_ALGO_SECOND);
+	}
+	if (found) {
+		if (cluster_flags & CLUSTER_FLAG_BG) {
+			List start_list = NULL;
+			ListIterator itr;
 
-			switch(curr_conn.p2) {
-			case RM_PORT_S0:
-				curr_conn.p2 = 0;
-				break;
-			case RM_PORT_S3:
-				curr_conn.p2 = 3;
-				break;
-			case RM_PORT_S5:
-				curr_conn.p2 = 5;
-				break;
-			default:
-				error("2 unknown port %d",
-				      _port_enum(curr_conn.p2));
-				goto end_it;
+			start_list = list_create(NULL);
+			itr = list_iterator_create(results);
+			while ((ba_node = (ba_mp_t*) list_next(itr))) {
+				list_append(start_list, ba_node);
 			}
+			list_iterator_destroy(itr);
 
-			if (curr_conn.p1 == 1 && dim == X) {
-				if (ba_node->used) {
-					debug("I have already been to "
-					      "this node %c%c%c",
-					      alpha_num[geo[X]],
-					      alpha_num[geo[Y]],
-					      alpha_num[geo[Z]]);
-					goto end_it;
-				}
-				ba_node->used = true;
-			}
-			if (ba_debug_flags & DEBUG_FLAG_BG_ALGO_DEEP)
-				info("connection going from %d -> %d",
-				     curr_conn.p1, curr_conn.p2);
-
-			if (ba_switch->int_wire[curr_conn.p1].used) {
-				debug("%c%c%c dim %d port %d "
-				      "is already in use",
-				      alpha_num[geo[X]],
-				      alpha_num[geo[Y]],
-				      alpha_num[geo[Z]],
-				      dim,
-				      curr_conn.p1);
-				goto end_it;
-			}
-			ba_switch->int_wire[curr_conn.p1].used = 1;
-			ba_switch->int_wire[curr_conn.p1].port_tar
-				= curr_conn.p2;
-
-			if (ba_switch->int_wire[curr_conn.p2].used) {
-				debug("%c%c%c dim %d port %d "
-				      "is already in use",
-				      alpha_num[geo[X]],
-				      alpha_num[geo[Y]],
-				      alpha_num[geo[Z]],
-				      dim,
-				      curr_conn.p2);
+			if (!_fill_in_coords(results,
+					     start_list,
+					     geometry,
+					     conn_type[0])) {
+				list_destroy(start_list);
 				goto end_it;
 			}
-			ba_switch->int_wire[curr_conn.p2].used = 1;
-			ba_switch->int_wire[curr_conn.p2].port_tar
-				= curr_conn.p1;
+			list_destroy(start_list);
 		}
+	} else {
+		goto end_it;
 	}
-	return results;
-end_it:
-	list_destroy(results);
-	return NULL;
-#else
-	return NULL;
-#endif
 
-}
-
-/* */
-extern int validate_coord(uint16_t *coord)
-{
-#ifdef HAVE_BG_FILES
-	if (coord[X]>=REAL_DIM_SIZE[X]
-	    || coord[Y]>=REAL_DIM_SIZE[Y]
-	    || coord[Z]>=REAL_DIM_SIZE[Z]) {
-		error("got coord %c%c%c greater than system dims "
-		      "%c%c%c",
-		      alpha_num[coord[X]],
-		      alpha_num[coord[Y]],
-		      alpha_num[coord[Z]],
-		      alpha_num[REAL_DIM_SIZE[X]],
-		      alpha_num[REAL_DIM_SIZE[Y]],
-		      alpha_num[REAL_DIM_SIZE[Z]]);
-		return 0;
+	name = _set_internal_wires(results,
+				   size,
+				   conn_type[0]);
+end_it:
+	if (!send_results && results) {
+		list_destroy(results);
+		results = NULL;
 	}
-
-	if (coord[X]>=DIM_SIZE[X]
-	    || coord[Y]>=DIM_SIZE[Y]
-	    || coord[Z]>=DIM_SIZE[Z]) {
-		if (ba_debug_flags & DEBUG_FLAG_BG_ALGO_DEEP)
-			info("got coord %c%c%c greater than what we are using "
-			     "%c%c%c",
-			     alpha_num[coord[X]],
-			     alpha_num[coord[Y]],
-			     alpha_num[coord[Z]],
-			     alpha_num[DIM_SIZE[X]],
-			     alpha_num[DIM_SIZE[Y]],
-			     alpha_num[DIM_SIZE[Z]]);
-		return 0;
+	if (name!=NULL) {
+		debug2("name = %s", name);
+	} else {
+		debug2("can't allocate");
+		xfree(name);
 	}
-#endif
-	return 1;
-}
-
-
-/********************* Local Functions *********************/
-
-#ifdef HAVE_BG_FILES
-static void _bp_map_list_del(void *object)
-{
-	ba_bp_map_t *bp_map = (ba_bp_map_t *)object;
 
-	if (bp_map) {
-		xfree(bp_map->bp_id);
-		xfree(bp_map);
-	}
+	return name;
 }
 
-/* translation from the enum to the actual port number */
-static int _port_enum(int port)
+/* Rotate a 3-D geometry array through its six permutations */
+extern void ba_rotate_geo(uint16_t *req_geometry, int rot_cnt)
 {
-	switch(port) {
-	case RM_PORT_S0:
-		return 0;
-		break;
-	case RM_PORT_S1:
-		return 1;
-		break;
-	case RM_PORT_S2:
-		return 2;
-		break;
-	case RM_PORT_S3:
-		return 3;
-		break;
-	case RM_PORT_S4:
-		return 4;
+	uint16_t tmp;
+
+	switch (rot_cnt) {
+	case 0:		/* ABC -> ACB */
+	case 2:		/* CAB -> CBA */
+	case 4:		/* BCA -> BAC */
+		SWAP(req_geometry[Y], req_geometry[Z], tmp);
 		break;
-	case RM_PORT_S5:
-		return 5;
+	case 1:		/* ACB -> CAB */
+	case 3:		/* CBA -> BCA */
+	case 5:		/* BAC -> ABC */
+		SWAP(req_geometry[X], req_geometry[Y], tmp);
 		break;
-	default:
-		return -1;
 	}
 }
 
-#endif
+/********************* Local Functions *********************/
 
 /*
  * This function is here to check options for rotating and elongating
  * and set up the request based on the count of each option
  */
-static int _check_for_options(ba_request_t* ba_request)
+static int _check_for_options(select_ba_request_t* ba_request)
 {
 	int temp;
 	int set=0;
@@ -2892,8 +1168,8 @@ static int _append_geo(uint16_t *geometry, List geos, int rotate)
 static int _fill_in_coords(List results, List start_list,
 			   uint16_t *geometry, int conn_type)
 {
-	ba_node_t *ba_node = NULL;
-	ba_node_t *check_node = NULL;
+	ba_mp_t *ba_node = NULL;
+	ba_mp_t *check_node = NULL;
 	int rc = 1;
 	ListIterator itr = NULL;
 	int y=0, z=0;
@@ -2904,7 +1180,7 @@ static int _fill_in_coords(List results, List start_list,
 		return 0;
 	/* go through the start_list and add all the midplanes */
 	itr = list_iterator_create(start_list);
-	while ((check_node = (ba_node_t*) list_next(itr))) {
+	while ((check_node = (ba_mp_t*) list_next(itr))) {
 		curr_switch = &check_node->axis_switch[X];
 
 		for(y=0; y<geometry[Y]; y++) {
@@ -2917,7 +1193,7 @@ static int _fill_in_coords(List results, List start_list,
 					rc = 0;
 					goto failed;
 				}
-				ba_node = &ba_system_ptr->grid
+				ba_node = &ba_main_grid
 					[check_node->coord[X]]
 					[check_node->coord[Y]+y]
 					[check_node->coord[Z]+z];
@@ -2957,11 +1233,11 @@ static int _fill_in_coords(List results, List start_list,
 	}
 	list_iterator_destroy(itr);
 	itr = list_iterator_create(start_list);
-	check_node = (ba_node_t*) list_next(itr);
+	check_node = (ba_mp_t*) list_next(itr);
 	list_iterator_destroy(itr);
 
 	itr = list_iterator_create(results);
-	while ((ba_node = (ba_node_t*) list_next(itr))) {
+	while ((ba_node = (ba_mp_t*) list_next(itr))) {
 		if (!_find_yz_path(ba_node,
 				   check_node->coord,
 				   geometry,
@@ -2994,9 +1270,9 @@ failed:
  * starting port on a dimension.
  *
  * IN/OUT: nodes - Local list of midplanes you are keeping track of.  If
- *         you visit any new midplanes a copy from ba_system_grid
+ *         you visit any new midplanes a copy from ba_main_grid
  *         will be added to the list.  If NULL the path will be
- *         set in mark_switch of the main virtual system (ba_system_grid).
+ *         set in mark_switch of the main virtual system (ba_main_grid).
  * IN: curr_switch - The switch you want to copy the path of
  * IN/OUT: mark_switch - The switch you want to fill in.  On success
  *         this switch will contain a complete path from the curr_switch
@@ -3011,10 +1287,10 @@ static int _copy_the_path(List nodes, ba_switch_t *curr_switch,
 			  ba_switch_t *mark_switch,
 			  int source, int dim)
 {
-	uint16_t *node_tar;
-	uint16_t *mark_node_tar;
+	uint16_t *mp_tar;
+	uint16_t *mark_mp_tar;
 	uint16_t *node_curr;
-	int port_tar, port_tar1;
+	uint16_t port_tar, port_tar1;
 	ba_switch_t *next_switch = NULL;
 	ba_switch_t *next_mark_switch = NULL;
 
@@ -3034,8 +1310,8 @@ static int _copy_the_path(List nodes, ba_switch_t *curr_switch,
 	port_tar1 = port_tar;
 
 	/* follow the path */
-	node_curr = curr_switch->ext_wire[0].node_tar;
-	node_tar = curr_switch->ext_wire[port_tar].node_tar;
+	node_curr = curr_switch->ext_wire[0].mp_tar;
+	mp_tar = curr_switch->ext_wire[port_tar].mp_tar;
 	if (mark_switch->int_wire[source].used)
 		if (ba_debug_flags & DEBUG_FLAG_BG_ALGO)
 			info("setting dim %d %c%c%c %d-> %c%c%c %d",
@@ -3044,9 +1320,9 @@ static int _copy_the_path(List nodes, ba_switch_t *curr_switch,
 			     alpha_num[node_curr[Y]],
 			     alpha_num[node_curr[Z]],
 			     source,
-			     alpha_num[node_tar[X]],
-			     alpha_num[node_tar[Y]],
-			     alpha_num[node_tar[Z]],
+			     alpha_num[mp_tar[X]],
+			     alpha_num[mp_tar[Y]],
+			     alpha_num[mp_tar[Z]],
 			     port_tar);
 
 	if (port_tar == 1) {
@@ -3058,12 +1334,12 @@ static int _copy_the_path(List nodes, ba_switch_t *curr_switch,
 		return 1;
 	}
 
-	mark_node_tar = mark_switch->ext_wire[port_tar].node_tar;
+	mark_mp_tar = mark_switch->ext_wire[port_tar].mp_tar;
 	port_tar = curr_switch->ext_wire[port_tar].port_tar;
 
-	if (node_curr[X] == node_tar[X]
-	    && node_curr[Y] == node_tar[Y]
-	    && node_curr[Z] == node_tar[Z]) {
+	if (node_curr[X] == mp_tar[X]
+	    && node_curr[Y] == mp_tar[Y]
+	    && node_curr[Z] == mp_tar[Z]) {
 		/* We are going to the same node! this should never
 		   happen */
 		debug5("something bad happened!! "
@@ -3077,34 +1353,32 @@ static int _copy_the_path(List nodes, ba_switch_t *curr_switch,
 	}
 
 	/* see what the next switch is going to be */
-	next_switch = &ba_system_ptr->
-		grid[node_tar[X]][node_tar[Y]][node_tar[Z]].axis_switch[dim];
+	next_switch = &ba_main_grid[mp_tar[X]][mp_tar[Y]][mp_tar[Z]].
+		axis_switch[dim];
 	if (!nodes) {
 		/* If no nodes then just get the next switch to fill
 		   in from the main system */
-		next_mark_switch = &ba_system_ptr->
-			grid[mark_node_tar[X]]
-			[mark_node_tar[Y]]
-			[mark_node_tar[Z]]
+		next_mark_switch = &ba_main_grid[mark_mp_tar[X]]
+			[mark_mp_tar[Y]]
+			[mark_mp_tar[Z]]
 			.axis_switch[dim];
 	} else {
-		ba_node_t *ba_node = NULL;
+		ba_mp_t *ba_node = NULL;
 		ListIterator itr = list_iterator_create(nodes);
 		/* see if we have already been to this node */
 		while ((ba_node = list_next(itr))) {
-			if (ba_node->coord[X] == mark_node_tar[X] &&
-			    ba_node->coord[Y] == mark_node_tar[Y] &&
-			    ba_node->coord[Z] == mark_node_tar[Z])
+			if (ba_node->coord[X] == mark_mp_tar[X] &&
+			    ba_node->coord[Y] == mark_mp_tar[Y] &&
+			    ba_node->coord[Z] == mark_mp_tar[Z])
 				break;	/* we found it */
 		}
 		list_iterator_destroy(itr);
 		if (!ba_node) {
 			/* If node grab a copy and add it to the list */
-			ba_node = ba_copy_node(&ba_system_ptr->
-					       grid[mark_node_tar[X]]
-					       [mark_node_tar[Y]]
-					       [mark_node_tar[Z]]);
-			_new_ba_node(ba_node, mark_node_tar, false);
+			ba_node = ba_copy_mp(&ba_main_grid[mark_mp_tar[X]]
+					     [mark_mp_tar[Y]]
+					     [mark_mp_tar[Z]]);
+			ba_setup_mp(ba_node, false, false);
 			list_push(nodes, ba_node);
 			if (ba_debug_flags & DEBUG_FLAG_BG_ALGO_DEEP)
 				info("haven't seen %c%c%c adding it",
@@ -3121,11 +1395,11 @@ static int _copy_the_path(List nodes, ba_switch_t *curr_switch,
 			      port_tar, dim);
 }
 
-static int _find_yz_path(ba_node_t *ba_node, uint16_t *first,
+static int _find_yz_path(ba_mp_t *ba_node, uint16_t *first,
 			 uint16_t *geometry, int conn_type)
 {
-	ba_node_t *next_node = NULL;
-	uint16_t *node_tar = NULL;
+	ba_mp_t *next_node = NULL;
+	uint16_t *mp_tar = NULL;
 	ba_switch_t *dim_curr_switch = NULL;
 	ba_switch_t *dim_next_switch = NULL;
 	int i2;
@@ -3146,10 +1420,10 @@ static int _find_yz_path(ba_node_t *ba_node, uint16_t *first,
 				return 0;
 			}
 
-			node_tar = dim_curr_switch->ext_wire[2].node_tar;
+			mp_tar = dim_curr_switch->ext_wire[2].mp_tar;
 
-			next_node = &ba_system_ptr->
-				grid[node_tar[X]][node_tar[Y]][node_tar[Z]];
+			next_node =
+				&ba_main_grid[mp_tar[X]][mp_tar[Y]][mp_tar[Z]];
 			dim_next_switch = &next_node->axis_switch[i2];
 			if (ba_debug_flags & DEBUG_FLAG_BG_ALGO_DEEP)
 				info("%c%c%c port 5",
@@ -3162,7 +1436,7 @@ static int _find_yz_path(ba_node_t *ba_node, uint16_t *first,
 					info("returning here 2");
 				return 0;
 			}
-			debug5("%d %d %d %d",i2, node_tar[i2],
+			debug5("%d %d %d %d",i2, mp_tar[i2],
 			       first[i2], geometry[i2]);
 
 			/* Here we need to see where we are in
@@ -3173,16 +1447,16 @@ static int _find_yz_path(ba_node_t *ba_node, uint16_t *first,
 			 * we need then we go through and finish the
 			 * torus if needed
 			 */
-			if (node_tar[i2] < first[i2])
-				count = node_tar[i2]+(DIM_SIZE[i2]-first[i2]);
+			if (mp_tar[i2] < first[i2])
+				count = mp_tar[i2]+(DIM_SIZE[i2]-first[i2]);
 			else
-				count = (node_tar[i2]-first[i2]);
+				count = (mp_tar[i2]-first[i2]);
 
 			if (count == geometry[i2]) {
 				debug5("found end of me %c%c%c",
-				       alpha_num[node_tar[X]],
-				       alpha_num[node_tar[Y]],
-				       alpha_num[node_tar[Z]]);
+				       alpha_num[mp_tar[X]],
+				       alpha_num[mp_tar[Y]],
+				       alpha_num[mp_tar[Z]]);
 				if (conn_type == SELECT_TORUS) {
 					dim_curr_switch->int_wire[0].used = 1;
 					dim_curr_switch->int_wire[0].port_tar
@@ -3193,7 +1467,7 @@ static int _find_yz_path(ba_node_t *ba_node, uint16_t *first,
 					dim_curr_switch = dim_next_switch;
 
 					if (deny_pass
-					    && (node_tar[i2] != first[i2])) {
+					    && (mp_tar[i2] != first[i2])) {
 						if (i2 == 1)
 							*deny_pass |=
 								PASS_FOUND_Y;
@@ -3201,13 +1475,13 @@ static int _find_yz_path(ba_node_t *ba_node, uint16_t *first,
 							*deny_pass |=
 								PASS_FOUND_Z;
 					}
-					while (node_tar[i2] != first[i2]) {
+					while (mp_tar[i2] != first[i2]) {
 						if (ba_debug_flags
 						    & DEBUG_FLAG_BG_ALGO_DEEP)
 							info("on dim %d at %d "
 							     "looking for %d",
 							     i2,
-							     node_tar[i2],
+							     mp_tar[i2],
 							     first[i2]);
 
 						if (dim_curr_switch->
@@ -3232,13 +1506,12 @@ static int _find_yz_path(ba_node_t *ba_node, uint16_t *first,
 							port_tar = 2;
 
 
-						node_tar = dim_curr_switch->
-							ext_wire[2].node_tar;
-						next_node = &ba_system_ptr->
-							grid
-							[node_tar[X]]
-							[node_tar[Y]]
-							[node_tar[Z]];
+						mp_tar = dim_curr_switch->
+							ext_wire[2].mp_tar;
+						next_node = &ba_main_grid
+							[mp_tar[X]]
+							[mp_tar[Y]]
+							[mp_tar[Z]];
 						dim_curr_switch =
 							&next_node->
 							axis_switch[i2];
@@ -3249,7 +1522,7 @@ static int _find_yz_path(ba_node_t *ba_node, uint16_t *first,
 						info("back to first on dim %d "
 						     "at %d looking for %d",
 						     i2,
-						     node_tar[i2],
+						     mp_tar[i2],
 						     first[i2]);
 
 					dim_curr_switch->
@@ -3268,7 +1541,7 @@ static int _find_yz_path(ba_node_t *ba_node, uint16_t *first,
 			} else if (count < geometry[i2]) {
 				if (conn_type == SELECT_TORUS ||
 				    (conn_type == SELECT_MESH &&
-				     (node_tar[i2] != first[i2]))) {
+				     (mp_tar[i2] != first[i2]))) {
 					dim_curr_switch->
 						int_wire[0].used = 1;
 					dim_curr_switch->
@@ -3306,14 +1579,14 @@ static int _find_yz_path(ba_node_t *ba_node, uint16_t *first,
 			   find out why this was happening in the
 			   first place though.  A reproducer was to
 			   have
-			   BPs=[310x323] Type=TORUS
-			   BPs=[200x233] Type=TORUS
-			   BPs=[300x303] Type=TORUS
-			   BPs=[100x133] Type=TORUS
-			   BPs=[000x033] Type=TORUS
-			   BPs=[400x433] Type=TORUS
+			   MPs=[310x323] Type=TORUS
+			   MPs=[200x233] Type=TORUS
+			   MPs=[300x303] Type=TORUS
+			   MPs=[100x133] Type=TORUS
+			   MPs=[000x033] Type=TORUS
+			   MPs=[400x433] Type=TORUS
 			   and then add
-			   BPs=[330x333] Type=TORUS
+			   MPs=[330x333] Type=TORUS
 			*/
 
 			dim_curr_switch = &ba_node->axis_switch[i2];
@@ -3334,10 +1607,10 @@ static int _find_yz_path(ba_node_t *ba_node, uint16_t *first,
 
 #ifndef HAVE_BG_FILES
 /** */
-static int _emulate_ext_wiring(ba_node_t ***grid)
+static int _emulate_ext_wiring(ba_mp_t ***grid)
 {
 	int x;
-	ba_node_t *source = NULL, *target = NULL;
+	ba_mp_t *source = NULL, *target = NULL;
 	if (cluster_dims == 1) {
 		for(x=0;x<DIM_SIZE[X];x++) {
 			source = &grid[x][0][0];
@@ -3349,8 +1622,6 @@ static int _emulate_ext_wiring(ba_node_t ***grid)
 		}
 	} else {
 		int y,z;
-		init_wires();
-
 		for(x=0;x<DIM_SIZE[X];x++) {
 			for(y=0;y<DIM_SIZE[Y];y++) {
 				for(z=0;z<DIM_SIZE[Z];z++) {
@@ -3390,16 +1661,16 @@ static int _emulate_ext_wiring(ba_node_t ***grid)
 static int _reset_the_path(ba_switch_t *curr_switch, int source,
 			   int target, int dim)
 {
-	uint16_t *node_tar;
+	uint16_t *mp_tar;
 	uint16_t *node_curr;
 	int port_tar, port_tar1;
 	ba_switch_t *next_switch = NULL;
 
-	if (source < 0 || source > NUM_PORTS_PER_NODE) {
+	if (source < 0 || source >= NUM_PORTS_PER_NODE) {
 		fatal("source port was %d can only be 0->%d",
 		      source, NUM_PORTS_PER_NODE);
 	}
-	if (target < 0 || target > NUM_PORTS_PER_NODE) {
+	if (target < 0 || target >= NUM_PORTS_PER_NODE) {
 		fatal("target port was %d can only be 0->%d",
 		      target, NUM_PORTS_PER_NODE);
 	}
@@ -3414,21 +1685,22 @@ static int _reset_the_path(ba_switch_t *curr_switch, int source,
 	}
 	curr_switch->int_wire[source].used = 0;
 	port_tar = curr_switch->int_wire[source].port_tar;
-	if (port_tar < 0 || port_tar > NUM_PORTS_PER_NODE) {
+	if (port_tar < 0 || port_tar >= NUM_PORTS_PER_NODE) {
 		fatal("port_tar port was %d can only be 0->%d",
 		      source, NUM_PORTS_PER_NODE);
+		return 1;
 	}
 
 	port_tar1 = port_tar;
 	curr_switch->int_wire[source].port_tar = source;
 	curr_switch->int_wire[port_tar].used = 0;
 	curr_switch->int_wire[port_tar].port_tar = port_tar;
-	if (port_tar==target) {
+	if (port_tar == target) {
 		return 1;
 	}
 	/* follow the path */
-	node_curr = curr_switch->ext_wire[0].node_tar;
-	node_tar = curr_switch->ext_wire[port_tar].node_tar;
+	node_curr = curr_switch->ext_wire[0].mp_tar;
+	mp_tar = curr_switch->ext_wire[port_tar].mp_tar;
 	port_tar = curr_switch->ext_wire[port_tar].port_tar;
 	if (source == port_tar1) {
 		debug("got this bad one %c%c%c %d %d -> %c%c%c %d",
@@ -3437,9 +1709,9 @@ static int _reset_the_path(ba_switch_t *curr_switch, int source,
 		      alpha_num[node_curr[Z]],
 		      source,
 		      port_tar1,
-		      alpha_num[node_tar[X]],
-		      alpha_num[node_tar[Y]],
-		      alpha_num[node_tar[Z]],
+		      alpha_num[mp_tar[X]],
+		      alpha_num[mp_tar[Y]],
+		      alpha_num[mp_tar[Z]],
 		      port_tar);
 		return 0;
 	}
@@ -3449,92 +1721,135 @@ static int _reset_the_path(ba_switch_t *curr_switch, int source,
 	       alpha_num[node_curr[Z]],
 	       source,
 	       port_tar1,
-	       alpha_num[node_tar[X]],
-	       alpha_num[node_tar[Y]],
-	       alpha_num[node_tar[Z]],
+	       alpha_num[mp_tar[X]],
+	       alpha_num[mp_tar[Y]],
+	       alpha_num[mp_tar[Z]],
 	       port_tar);
-	if (node_curr[X] == node_tar[X]
-	    && node_curr[Y] == node_tar[Y]
-	    && node_curr[Z] == node_tar[Z]) {
+	if (node_curr[X] == mp_tar[X]
+	    && node_curr[Y] == mp_tar[Y]
+	    && node_curr[Z] == mp_tar[Z]) {
 		debug5("%d something bad happened!!", dim);
 		return 0;
 	}
-	next_switch = &ba_system_ptr->
-		grid[node_tar[X]][node_tar[Y]][node_tar[Z]].axis_switch[dim];
+	next_switch =
+		&ba_main_grid[mp_tar[X]][mp_tar[Y]][mp_tar[Z]].axis_switch[dim];
 
 	return _reset_the_path(next_switch, port_tar, target, dim);
 //	return 1;
 }
 
-static void _new_ba_node(ba_node_t *ba_node, uint16_t *coord,
-			 bool track_down_nodes)
+extern void ba_create_system()
 {
-	int i,j;
-	uint16_t node_base_state = ba_node->state & NODE_STATE_BASE;
-
-	if (((node_base_state != NODE_STATE_DOWN)
-	     && !(ba_node->state & NODE_STATE_DRAIN)) || !track_down_nodes)
-		ba_node->used = false;
-
-	for (i=0; i<cluster_dims; i++){
-		ba_node->coord[i] = coord[i];
+	int x,y,z, i = 0;
 
-		for(j=0;j<NUM_PORTS_PER_NODE;j++) {
-			ba_node->axis_switch[i].int_wire[j].used = 0;
-			if (i!=X) {
-				if (j==3 || j==4)
-					ba_node->axis_switch[i].int_wire[j].
-						used = 1;
-			}
-			ba_node->axis_switch[i].int_wire[j].port_tar = j;
-		}
-	}
-}
+	if (ba_main_grid)
+		ba_destroy_system();
 
-static void _create_ba_system(void)
-{
-	int x,y,z;
-	uint16_t coord[cluster_dims];
+	best_count=BEST_COUNT_INIT;
 
-	ba_system_ptr->grid = (ba_node_t***)
-		xmalloc(sizeof(ba_node_t**) * DIM_SIZE[X]);
+	ba_main_grid = (ba_mp_t***)
+		xmalloc(sizeof(ba_mp_t**) * DIM_SIZE[X]);
 	for (x=0; x<DIM_SIZE[X]; x++) {
-		ba_system_ptr->grid[x] = (ba_node_t**)
-			xmalloc(sizeof(ba_node_t*) * DIM_SIZE[Y]);
+		ba_main_grid[x] = (ba_mp_t**)
+			xmalloc(sizeof(ba_mp_t*) * DIM_SIZE[Y]);
 		for (y=0; y<DIM_SIZE[Y]; y++) {
-			ba_system_ptr->grid[x][y] = (ba_node_t*)
-				xmalloc(sizeof(ba_node_t)
+			ba_main_grid[x][y] = (ba_mp_t*)
+				xmalloc(sizeof(ba_mp_t)
 					* DIM_SIZE[Z]);
 			for (z=0; z<DIM_SIZE[Z]; z++){
-				coord[X] = x;
-				coord[Y] = y;
-				coord[Z] = z;
-				_new_ba_node(&ba_system_ptr->grid[x][y][z],
-					     coord, true);
+				ba_mp_t *ba_mp = &ba_main_grid[x][y][z];
+				ba_mp->coord[X] = x;
+				ba_mp->coord[Y] = y;
+				ba_mp->coord[Z] = z;
+				snprintf(ba_mp->coord_str,
+					 sizeof(ba_mp->coord_str),
+					 "%c%c%c",
+					 alpha_num[ba_mp->coord[X]],
+					 alpha_num[ba_mp->coord[Y]],
+					 alpha_num[ba_mp->coord[Z]]);
+				ba_setup_mp(ba_mp, true, false);
+				ba_mp->state = NODE_STATE_IDLE;
+				/* This might get changed
+				   later, but just incase set
+				   it up here.
+				*/
+				ba_mp->index = i++;
 			}
 		}
 	}
+	if ((cluster_flags & CLUSTER_FLAG_BGL) ||
+	    (cluster_flags & CLUSTER_FLAG_BGP)) {
+		init_wires();
+#ifndef HAVE_BG_FILES
+		_emulate_ext_wiring(ba_main_grid);
+#endif
+	}
+
+	path = list_create(_delete_path_list);
+	best_path = list_create(_delete_path_list);
 }
 
 /** */
-static void _delete_ba_system(void)
+extern void ba_destroy_system(void)
 {
 	int x, y;
 
-	if (!ba_system_ptr){
-		return;
+	if (path) {
+		list_destroy(path);
+		path = NULL;
 	}
+	if (best_path) {
+		list_destroy(best_path);
+		best_path = NULL;
+	}
+
+#ifdef HAVE_BG_FILES
+	if (bg)
+		bridge_free_bg(bg);
+#endif
+	_mp_map_initialized = false;
+	_wires_initialized = true;
 
-	if (ba_system_ptr->grid) {
+	if (ba_main_grid) {
 		for (x=0; x<DIM_SIZE[X]; x++) {
 			for (y=0; y<DIM_SIZE[Y]; y++)
-				xfree(ba_system_ptr->grid[x][y]);
+				xfree(ba_main_grid[x][y]);
 
-			xfree(ba_system_ptr->grid[x]);
+			xfree(ba_main_grid[x]);
 		}
-		xfree(ba_system_ptr->grid);
+		xfree(ba_main_grid);
+		ba_main_grid = NULL;
+	}
+}
+
+extern ba_mp_t *ba_pick_sub_block_cnodes(
+	bg_record_t *bg_record, uint32_t *node_count, select_jobinfo_t *jobinfo)
+{
+	/* This shouldn't be called. */
+	xassert(0);
+	return NULL;
+}
+
+extern int ba_clear_sub_block_cnodes(
+	bg_record_t *bg_record, struct step_record *step_ptr)
+{
+	/* this doesn't do anything since above doesn't. */
+	return SLURM_SUCCESS;
+}
+
+extern bitstr_t *ba_create_ba_mp_cnode_bitmap(bg_record_t *bg_record)
+{
+	return NULL;
+}
+
+extern char *ba_set_ionode_str(bitstr_t *bitmap)
+{
+	char bitstring[BITSIZE];
+        if (bitmap) {
+		bit_fmt(bitstring, BITSIZE, bitmap);
+		return xstrdup(bitstring);
 	}
-	xfree(ba_system_ptr);
+	return NULL;
 }
 
 static void _delete_path_list(void *object)
@@ -3550,11 +1865,11 @@ static void _delete_path_list(void *object)
 /**
  * algorithm for finding match
  */
-static int _find_match(ba_request_t *ba_request, List results)
+static int _find_match(select_ba_request_t *ba_request, List results)
 {
 	int x=0;
 	uint16_t start[cluster_dims];
-	ba_node_t *ba_node = NULL;
+	ba_mp_t *ba_node = NULL;
 	char *name=NULL;
 	int startx;
 	uint16_t *geo_ptr;
@@ -3610,7 +1925,7 @@ start_again:
 			     alpha_num[start[Y]],
 			     alpha_num[start[Z]]);
 
-		ba_node = &ba_system_ptr->grid[start[X]][start[Y]][start[Z]];
+		ba_node = &ba_main_grid[start[X]][start[Y]][start[Z]];
 
 		if (!_node_used(ba_node, ba_request->geometry[X])) {
 			if (ba_debug_flags & DEBUG_FLAG_BG_ALGO_DEEP)
@@ -3621,7 +1936,7 @@ start_again:
 				     alpha_num[ba_request->geometry[X]],
 				     alpha_num[ba_request->geometry[Y]],
 				     alpha_num[ba_request->geometry[Z]],
-				     ba_request->conn_type);
+				     ba_request->conn_type[X]);
 			name = set_bg_block(results,
 					    start,
 					    ba_request->geometry,
@@ -3633,10 +1948,11 @@ start_again:
 			}
 
 			if (results) {
-				remove_block(results, color_count,
-					     ba_request->conn_type);
-				list_delete_all(results,
-						&empty_null_destroy_list, "");
+				bool is_small = 0;
+				if (ba_request->conn_type[0] == SELECT_SMALL)
+					is_small = 1;
+				remove_block(results, is_small);
+				list_flush(results);
 			}
 			if (ba_request->start_req)
 				goto requested_end;
@@ -3687,16 +2003,13 @@ requested_end:
  * IN: x_size - How big is the block in the X dim used to see if the
  *     wires are full hence making this midplane unusable.
  */
-static bool _node_used(ba_node_t* ba_node, int x_size)
+static bool _node_used(ba_mp_t* ba_node, int x_size)
 {
 	ba_switch_t* ba_switch = NULL;
 	/* if we've used this node in another block already */
 	if (!ba_node || ba_node->used) {
 		if (ba_debug_flags & DEBUG_FLAG_BG_ALGO_DEEP)
-			info("node %c%c%c used",
-			     alpha_num[ba_node->coord[X]],
-			     alpha_num[ba_node->coord[Y]],
-			     alpha_num[ba_node->coord[Z]]);
+			info("node %s used", ba_node->coord_str);
 		return true;
 	}
 	/* Check If we've used this node's switches completely in another
@@ -3717,10 +2030,8 @@ static bool _node_used(ba_node_t* ba_node, int x_size)
 		if (ba_switch->int_wire[3].used
 		    && ba_switch->int_wire[5].used) {
 			if (ba_debug_flags & DEBUG_FLAG_BG_ALGO_DEEP)
-				info("switch full in the X dim on node %c%c%c!",
-				     alpha_num[ba_node->coord[X]],
-				     alpha_num[ba_node->coord[Y]],
-				     alpha_num[ba_node->coord[Z]]);
+				info("switch full in the X dim on node %s!",
+				     ba_node->coord_str);
 			return true;
 		}
 	}
@@ -3730,7 +2041,7 @@ static bool _node_used(ba_node_t* ba_node, int x_size)
 }
 
 
-static void _switch_config(ba_node_t* source, ba_node_t* target, int dim,
+static void _switch_config(ba_mp_t* source, ba_mp_t* target, int dim,
 			   int port_src, int port_tar)
 {
 	ba_switch_t* config = NULL, *config_tar = NULL;
@@ -3743,10 +2054,10 @@ static void _switch_config(ba_node_t* source, ba_node_t* target, int dim,
 	config_tar = &target->axis_switch[dim];
 	for(i=0;i<cluster_dims;i++) {
 		/* Set the coord of the source target node to the target */
-		config->ext_wire[port_src].node_tar[i] = target->coord[i];
+		config->ext_wire[port_src].mp_tar[i] = target->coord[i];
 
 		/* Set the coord of the target back to the source */
-		config_tar->ext_wire[port_tar].node_tar[i] = source->coord[i];
+		config_tar->ext_wire[port_tar].mp_tar[i] = source->coord[i];
 	}
 
 	/* Set the port of the source target node to the target */
@@ -3756,8 +2067,8 @@ static void _switch_config(ba_node_t* source, ba_node_t* target, int dim,
 	config_tar->ext_wire[port_tar].port_tar = port_src;
 }
 
-static int _set_external_wires(int dim, int count, ba_node_t* source,
-			       ba_node_t* target)
+static int _set_external_wires(int dim, int count, ba_mp_t* source,
+			       ba_mp_t* target)
 {
 
 #ifdef HAVE_BG_FILES
@@ -3781,7 +2092,6 @@ static int _set_external_wires(int dim, int count, ba_node_t* source,
 	char *wire_id = NULL;
 	int from_port, to_port;
 	int wire_num;
-	uint16_t *coord;
 	char from_node[NODE_LEN];
 	char to_node[NODE_LEN];
 
@@ -3795,7 +2105,7 @@ static int _set_external_wires(int dim, int count, ba_node_t* source,
 	}
 
 	if (!bg) {
-		if ((rc = bridge_get_bg(&bg)) != STATUS_OK) {
+		if ((rc = bridge_get_bg(&bg)) != SLURM_SUCCESS) {
 			error("bridge_get_BG(): %d", rc);
 			return -1;
 		}
@@ -3804,29 +2114,30 @@ static int _set_external_wires(int dim, int count, ba_node_t* source,
 	if (bg == NULL)
 		return -1;
 
-	if ((rc = bridge_get_data(bg, RM_WireNum, &wire_num)) != STATUS_OK) {
+	if ((rc = bridge_get_data(bg, RM_WireNum, &wire_num))
+	    != SLURM_SUCCESS) {
 		error("bridge_get_data(RM_BPNum): %d", rc);
 		wire_num = 0;
 	}
-	/* find out system wires on each bp */
+	/* find out system wires on each mp */
 
 	for (i=0; i<wire_num; i++) {
 
 		if (i) {
 			if ((rc = bridge_get_data(bg, RM_NextWire, &my_wire))
-			    != STATUS_OK) {
+			    != SLURM_SUCCESS) {
 				error("bridge_get_data(RM_NextWire): %d", rc);
 				break;
 			}
 		} else {
 			if ((rc = bridge_get_data(bg, RM_FirstWire, &my_wire))
-			    != STATUS_OK) {
+			    != SLURM_SUCCESS) {
 				error("bridge_get_data(RM_FirstWire): %d", rc);
 				break;
 			}
 		}
 		if ((rc = bridge_get_data(my_wire, RM_WireID, &wire_id))
-		    != STATUS_OK) {
+		    != SLURM_SUCCESS) {
 			error("bridge_get_data(RM_FirstWire): %d", rc);
 			break;
 		}
@@ -3861,63 +2172,51 @@ static int _set_external_wires(int dim, int count, ba_node_t* source,
 		free(wire_id);
 
 		if ((rc = bridge_get_data(my_wire, RM_WireFromPort, &my_port))
-		    != STATUS_OK) {
+		    != SLURM_SUCCESS) {
 			error("bridge_get_data(RM_FirstWire): %d", rc);
 			break;
 		}
 		if ((rc = bridge_get_data(my_port, RM_PortID, &from_port))
-		    != STATUS_OK) {
+		    != SLURM_SUCCESS) {
 			error("bridge_get_data(RM_PortID): %d", rc);
 			break;
 		}
 		if ((rc = bridge_get_data(my_wire, RM_WireToPort, &my_port))
-		    != STATUS_OK) {
+		    != SLURM_SUCCESS) {
 			error("bridge_get_data(RM_WireToPort): %d", rc);
 			break;
 		}
 		if ((rc = bridge_get_data(my_port, RM_PortID, &to_port))
-		    != STATUS_OK) {
+		    != SLURM_SUCCESS) {
 			error("bridge_get_data(RM_PortID): %d", rc);
 			break;
 		}
 
-		coord = find_bp_loc(from_node);
-		if (!coord) {
-			error("1 find_bp_loc: bpid %s not known", from_node);
+		source = loc2ba_mp(from_node);
+		if (!source) {
+			error("1 loc2ba_mp: mpid %s not known", from_node);
 			continue;
 		}
-		if (!validate_coord(coord))
+		if (!validate_coord(source->coord))
 			continue;
 
-		source = &ba_system_ptr->
-			grid[coord[X]][coord[Y]][coord[Z]];
-		coord = find_bp_loc(to_node);
-		if (!coord) {
-			error("2 find_bp_loc: bpid %s not known", to_node);
+		target = loc2ba_mp(to_node);
+		if (!target) {
+			error("2 loc2ba_mp: mpid %s not known", to_node);
 			continue;
 		}
-		if (!validate_coord(coord))
+		if (!validate_coord(target->coord))
 			continue;
 
-		target = &ba_system_ptr->
-			grid[coord[X]][coord[Y]][coord[Z]];
-		_switch_config(source,
-			       target,
-			       dim,
-			       _port_enum(from_port),
-			       _port_enum(to_port));
+		_switch_config(source, target, dim, from_port, to_port);
 
 		if (ba_debug_flags & DEBUG_FLAG_BG_ALGO)
-			info("dim %d from %c%c%c %d -> %c%c%c %d",
+			info("dim %d from %s %d -> %s %d",
 			     dim,
-			     alpha_num[source->coord[X]],
-			     alpha_num[source->coord[Y]],
-			     alpha_num[source->coord[Z]],
-			     _port_enum(from_port),
-			     alpha_num[target->coord[X]],
-			     alpha_num[target->coord[Y]],
-			     alpha_num[target->coord[Z]],
-			     _port_enum(to_port));
+			     source->coord_str,
+			     from_port,
+			     target->coord_str,
+			     to_port);
 	}
 #else
 	_switch_config(source, source, dim, 0, 0);
@@ -3956,7 +2255,7 @@ static int _set_external_wires(int dim, int count, ba_node_t* source,
 			break;
 		case 1:
 			/* 1st Node */
-			target = &ba_system_ptr->grid[0]
+			target = &ba_main_grid[0]
 				[source->coord[Y]]
 				[source->coord[Z]];
 			/* 4->3 of 0th */
@@ -3964,7 +2263,7 @@ static int _set_external_wires(int dim, int count, ba_node_t* source,
 			break;
 		case 2:
 			/* 2nd Node */
-			target = &ba_system_ptr->grid[3]
+			target = &ba_main_grid[3]
 				[source->coord[Y]]
 				[source->coord[Z]];
 			/* 4->3 of 3rd and back */
@@ -3988,7 +2287,7 @@ static int _set_external_wires(int dim, int count, ba_node_t* source,
 		case 1:
 			/* 1st node */
 			/* change target to 4th node */
-			target = &ba_system_ptr->grid[4]
+			target = &ba_main_grid[4]
 				[source->coord[Y]]
 				[source->coord[Z]];
 			/* 4->3 of 4th */
@@ -3997,7 +2296,7 @@ static int _set_external_wires(int dim, int count, ba_node_t* source,
 		case 3:
 			/* 3rd node */
 			/* change target to 2th node */
-			target = &ba_system_ptr->grid[2]
+			target = &ba_main_grid[2]
 				[source->coord[Y]]
 				[source->coord[Z]];
 			/* 4->3 of 2nd */
@@ -4006,7 +2305,7 @@ static int _set_external_wires(int dim, int count, ba_node_t* source,
 		case 4:
 			/* 4th node */
 			/* change target to 1st node */
-			target = &ba_system_ptr->grid[1]
+			target = &ba_main_grid[1]
 				[source->coord[Y]]
 				[source->coord[Z]];
 			/* 4->3 of 1st */
@@ -4028,7 +2327,7 @@ static int _set_external_wires(int dim, int count, ba_node_t* source,
 		case 1:
 		case 5:
 			/* 1st Node */
-			target = &ba_system_ptr->grid[count-1]
+			target = &ba_main_grid[count-1]
 				[source->coord[Y]]
 				[source->coord[Z]];
 			/* 4->3 of previous */
@@ -4036,7 +2335,7 @@ static int _set_external_wires(int dim, int count, ba_node_t* source,
 			break;
 		case 2:
 			/* 2nd Node */
-			target = &ba_system_ptr->grid[7]
+			target = &ba_main_grid[7]
 				[source->coord[Y]]
 				[source->coord[Z]];
 			/* 4->3 of last */
@@ -4044,7 +2343,7 @@ static int _set_external_wires(int dim, int count, ba_node_t* source,
 			break;
 		case 3:
 			/* 3rd Node */
-			target = &ba_system_ptr->grid[6]
+			target = &ba_main_grid[6]
 				[source->coord[Y]]
 				[source->coord[Z]];
 			/* 4->3 of 6th */
@@ -4052,7 +2351,7 @@ static int _set_external_wires(int dim, int count, ba_node_t* source,
 			break;
 		case 6:
 			/* 6th Node */
-			target = &ba_system_ptr->grid[3]
+			target = &ba_main_grid[3]
 				[source->coord[Y]]
 				[source->coord[Z]];
 			/* 4->3 of 3rd */
@@ -4060,7 +2359,7 @@ static int _set_external_wires(int dim, int count, ba_node_t* source,
 			break;
 		case 7:
 			/* 7th Node */
-			target = &ba_system_ptr->grid[2]
+			target = &ba_main_grid[2]
 				[source->coord[Y]]
 				[source->coord[Z]];
 			/* 4->3 of 2nd */
@@ -4084,7 +2383,7 @@ static int _set_external_wires(int dim, int count, ba_node_t* source,
 			break;
 		case 1:
 			/* 1st Node */
-			target = &ba_system_ptr->grid[7]
+			target = &ba_main_grid[7]
 				[source->coord[Y]]
 				[source->coord[Z]];
 			/* 4->3 of 7th and back */
@@ -4093,7 +2392,7 @@ static int _set_external_wires(int dim, int count, ba_node_t* source,
 			break;
 		case 2:
 			/* 2nd Node */
-			target = &ba_system_ptr->grid[6]
+			target = &ba_main_grid[6]
 				[source->coord[Y]]
 				[source->coord[Z]];
 			/* 4->3 of 6th and back */
@@ -4102,7 +2401,7 @@ static int _set_external_wires(int dim, int count, ba_node_t* source,
 			break;
 		case 3:
 			/* 3rd Node */
-			target = &ba_system_ptr->grid[5]
+			target = &ba_main_grid[5]
 				[source->coord[Y]]
 				[source->coord[Z]];
 			/* 4->3 of 5th and back */
@@ -4111,7 +2410,7 @@ static int _set_external_wires(int dim, int count, ba_node_t* source,
 			break;
 		case 8:
 			/* 8th Node */
-			target = &ba_system_ptr->grid[0]
+			target = &ba_main_grid[0]
 				[source->coord[Y]]
 				[source->coord[Z]];
 			/* 4->3 of 0th */
@@ -4152,7 +2451,7 @@ static int _set_external_wires(int dim, int count, ba_node_t* source,
 				fatal("node %d shouldn't go to %d",
 				      count, temp_num);
 
-			target = &ba_system_ptr->grid[temp_num]
+			target = &ba_main_grid[temp_num]
 				[source->coord[Y]]
 				[source->coord[Z]];
 			/* 4->3 */
@@ -4162,7 +2461,7 @@ static int _set_external_wires(int dim, int count, ba_node_t* source,
 			break;
 		case 7:
 			/* 7th Node */
-			target = &ba_system_ptr->grid[count-1]
+			target = &ba_main_grid[count-1]
 				[source->coord[Y]]
 				[source->coord[Z]];
 			/* 4->3 of previous */
@@ -4183,14 +2482,13 @@ static int _set_external_wires(int dim, int count, ba_node_t* source,
 
 static char *_set_internal_wires(List nodes, int size, int conn_type)
 {
-	ba_node_t* ba_node[size+1];
-	int count=0, i, set=0;
+	ba_mp_t* ba_node[size+1];
+	int count=0, i;
 	uint16_t *start = NULL;
 	uint16_t *end = NULL;
 	char *name = NULL;
 	ListIterator itr;
 	hostlist_t hostlist;
-	char temp_name[4];
 
 	if (!nodes)
 		return NULL;
@@ -4198,14 +2496,10 @@ static char *_set_internal_wires(List nodes, int size, int conn_type)
 	hostlist = hostlist_create(NULL);
 	itr = list_iterator_create(nodes);
 	while ((ba_node[count] = list_next(itr))) {
-		snprintf(temp_name, sizeof(temp_name), "%c%c%c",
-			 alpha_num[ba_node[count]->coord[X]],
-			 alpha_num[ba_node[count]->coord[Y]],
-			 alpha_num[ba_node[count]->coord[Z]]);
 		if (ba_debug_flags & DEBUG_FLAG_BG_ALGO_DEEP)
-			info("name = %s", temp_name);
+			info("name = %s", ba_node[count]->coord_str);
+		hostlist_push(hostlist, ba_node[count]->coord_str);
 		count++;
-		hostlist_push(hostlist, temp_name);
 	}
 	list_iterator_destroy(itr);
 
@@ -4216,18 +2510,7 @@ static char *_set_internal_wires(List nodes, int size, int conn_type)
 
 	for (i=0;i<count;i++) {
 		if (!ba_node[i]->used) {
-			ba_node[i]->used=1;
-			if (ba_node[i]->letter == '.') {
-				ba_node[i]->letter = letters[color_count%62];
-				ba_node[i]->color = colors[color_count%6];
-				if (ba_debug_flags & DEBUG_FLAG_BG_ALGO_DEEP)
-					info("count %d setting letter = %c "
-					     "color = %d",
-					     color_count,
-					     ba_node[i]->letter,
-					     ba_node[i]->color);
-				set=1;
-			}
+			ba_node[i]->used |= BA_MP_USED_TRUE;
 		} else {
 			debug("No network connection to create "
 			      "bgblock containing %s", name);
@@ -4243,9 +2526,6 @@ static char *_set_internal_wires(List nodes, int size, int conn_type)
 			_set_one_dim(start, end, ba_node[i]->coord);
 		}
 
-	if (set)
-		color_count++;
-
 	return name;
 }
 
@@ -4266,7 +2546,7 @@ static char *_set_internal_wires(List nodes, int size, int conn_type)
  *
  * RET: 0 on failure, 1 on success
  */
-static int _find_x_path(List results, ba_node_t *ba_node,
+static int _find_x_path(List results, ba_mp_t *ba_node,
 			uint16_t *start, int x_size,
 			int found, int conn_type, block_algo_t algo)
 {
@@ -4278,10 +2558,10 @@ static int _find_x_path(List results, ba_node_t *ba_node,
 	int target_port=1;
 	int broke = 0, not_first = 0;
 	int ports_to_try[2] = {4, 2};
-	uint16_t *node_tar = NULL;
+	uint16_t *mp_tar = NULL;
 	int i = 0;
-	ba_node_t *next_node = NULL;
-	ba_node_t *check_node = NULL;
+	ba_mp_t *next_node = NULL;
+	ba_mp_t *check_node = NULL;
 /* 	int highest_phys_x = x_size - start[X]; */
 /* 	info("highest_phys_x is %d", highest_phys_x); */
 
@@ -4328,8 +2608,8 @@ static int _find_x_path(List results, ba_node_t *ba_node,
 		if (!curr_switch->int_wire[ports_to_try[i]].used) {
 			/* looking at the next node on the switch
 			   and it's port we are going to */
-			node_tar = curr_switch->
-				ext_wire[ports_to_try[i]].node_tar;
+			mp_tar = curr_switch->
+				ext_wire[ports_to_try[i]].mp_tar;
 			port_tar = curr_switch->
 				ext_wire[ports_to_try[i]].port_tar;
 /* 			info("%c%c%c port %d goes to %c%c%c port %d", */
@@ -4337,22 +2617,22 @@ static int _find_x_path(List results, ba_node_t *ba_node,
 /* 			     alpha_num[ba_node->coord[Y]], */
 /* 			     alpha_num[ba_node->coord[Z]], */
 /* 			     ports_to_try[i], */
-/* 			     alpha_num[node_tar[X]], */
-/* 			     alpha_num[node_tar[Y]], */
-/* 			     alpha_num[node_tar[Z]], */
+/* 			     alpha_num[mp_tar[X]], */
+/* 			     alpha_num[mp_tar[Y]], */
+/* 			     alpha_num[mp_tar[Z]], */
 /* 			     port_tar); */
 			/* check to see if we are back at the start of the
 			   block */
-			if ((node_tar[X] == start[X]
-			     && node_tar[Y] == start[Y]
-			     && node_tar[Z] == start[Z])) {
+			if ((mp_tar[X] == start[X]
+			     && mp_tar[Y] == start[Y]
+			     && mp_tar[Z] == start[Z])) {
 				broke = 1;
 				goto broke_it;
 			}
 			/* check to see if the port points to itself */
-			if ((node_tar[X] == ba_node->coord[X]
-			     && node_tar[Y] == ba_node->coord[Y]
-			     && node_tar[Z] == ba_node->coord[Z])) {
+			if ((mp_tar[X] == ba_node->coord[X]
+			     && mp_tar[Y] == ba_node->coord[Y]
+			     && mp_tar[Z] == ba_node->coord[Z])) {
 				continue;
 			}
 			/* check to see if I am going to a place I have
@@ -4366,12 +2646,12 @@ static int _find_x_path(List results, ba_node_t *ba_node,
 					     alpha_num[next_node->coord[X]],
 					     alpha_num[next_node->coord[Y]],
 					     alpha_num[next_node->coord[Z]],
-					     alpha_num[node_tar[X]],
-					     alpha_num[node_tar[Y]],
-					     alpha_num[node_tar[Z]]);
-				if ((node_tar[X] == next_node->coord[X]
-				     && node_tar[Y] == next_node->coord[Y]
-				     && node_tar[Z] == next_node->coord[Z])) {
+					     alpha_num[mp_tar[X]],
+					     alpha_num[mp_tar[Y]],
+					     alpha_num[mp_tar[Z]]);
+				if ((mp_tar[X] == next_node->coord[X]
+				     && mp_tar[Y] == next_node->coord[Y]
+				     && mp_tar[Z] == next_node->coord[Z])) {
 					not_first = 1;
 					break;
 				}
@@ -4387,9 +2667,9 @@ static int _find_x_path(List results, ba_node_t *ba_node,
 			not_first = 0;
 
 		broke_it:
-			next_node = &ba_system_ptr->grid[node_tar[X]]
-				[node_tar[Y]]
-				[node_tar[Z]];
+			next_node = &ba_main_grid[mp_tar[X]]
+				[mp_tar[Y]]
+				[mp_tar[Z]];
 			next_switch = &next_node->axis_switch[X];
 
  			if ((conn_type == SELECT_MESH) && (found == (x_size))) {
@@ -4454,16 +2734,16 @@ static int _find_x_path(List results, ba_node_t *ba_node,
 					     alpha_num[ba_node->coord[Y]],
 					     alpha_num[ba_node->coord[Z]],
 					     ports_to_try[i],
-					     alpha_num[node_tar[X]],
-					     alpha_num[node_tar[Y]],
-					     alpha_num[node_tar[Z]],
+					     alpha_num[mp_tar[X]],
+					     alpha_num[mp_tar[Y]],
+					     alpha_num[mp_tar[Z]],
 					     port_tar);
 				itr = list_iterator_create(results);
 				while ((check_node = list_next(itr))) {
-					if ((node_tar[X] == check_node->coord[X]
-					     && node_tar[Y] ==
+					if ((mp_tar[X] == check_node->coord[X]
+					     && mp_tar[Y] ==
 					     check_node->coord[Y]
-					     && node_tar[Z] ==
+					     && mp_tar[Z] ==
 					     check_node->coord[Z])) {
 						break;
 					}
@@ -4485,9 +2765,9 @@ static int _find_x_path(List results, ba_node_t *ba_node,
 						info("Algo(%d) Hey this is "
 						     "already added %c%c%c",
 						     algo,
-						     alpha_num[node_tar[X]],
-						     alpha_num[node_tar[Y]],
-						     alpha_num[node_tar[Z]]);
+						     alpha_num[mp_tar[X]],
+						     alpha_num[mp_tar[Y]],
+						     alpha_num[mp_tar[Z]]);
 					continue;
 				}
 				found++;
@@ -4514,9 +2794,9 @@ static int _find_x_path(List results, ba_node_t *ba_node,
 							       coord[Z]],
 						     source_port,
 						     ports_to_try[i],
-						     alpha_num[node_tar[X]],
-						     alpha_num[node_tar[Y]],
-						     alpha_num[node_tar[Z]],
+						     alpha_num[mp_tar[X]],
+						     alpha_num[mp_tar[Y]],
+						     alpha_num[mp_tar[Z]],
 						     port_tar,
 						     target_port);
 					curr_switch->int_wire[source_port].used
@@ -4574,17 +2854,20 @@ static int _find_x_path(List results, ba_node_t *ba_node,
 			if (ba_debug_flags & DEBUG_FLAG_BG_ALGO)
 				info("Algo(%d) yes found next free %d", algo,
 				     best_count);
-			node_tar = _set_best_path();
+			mp_tar = _set_best_path();
 
 			if (deny_pass && (*deny_pass & PASS_DENY_X)
 			    && (*deny_pass & PASS_FOUND_X)) {
 				debug("We don't allow X passthoughs.");
 				return 0;
 			}
-
-			next_node = &ba_system_ptr->grid[node_tar[X]]
-				[node_tar[Y]]
-				[node_tar[Z]];
+			/* info("got here with %c%c%c", */
+			/*      alpha_num[mp_tar[X]], */
+			/*      alpha_num[mp_tar[Y]], */
+			/*      alpha_num[mp_tar[Z]]); */
+			next_node = &ba_main_grid[mp_tar[X]]
+				[mp_tar[Y]]
+				[mp_tar[Z]];
 
 			next_switch = &next_node->axis_switch[X];
 
@@ -4595,9 +2878,9 @@ static int _find_x_path(List results, ba_node_t *ba_node,
 				     alpha_num[ba_node->coord[X]],
 				     alpha_num[ba_node->coord[Y]],
 				     alpha_num[ba_node->coord[Z]],
-				     alpha_num[node_tar[X]],
-				     alpha_num[node_tar[Y]],
-				     alpha_num[node_tar[Z]],
+				     alpha_num[mp_tar[X]],
+				     alpha_num[mp_tar[Y]],
+				     alpha_num[mp_tar[Z]],
 				     port_tar);
 
 			list_append(results, next_node);
@@ -4626,31 +2909,31 @@ static int _find_x_path(List results, ba_node_t *ba_node,
 	return 0;
 }
 
-static int _remove_node(List results, uint16_t *node_tar)
+static int _remove_node(List results, uint16_t *mp_tar)
 {
 	ListIterator itr;
-	ba_node_t *ba_node = NULL;
+	ba_mp_t *ba_node = NULL;
 
 	itr = list_iterator_create(results);
-	while ((ba_node = (ba_node_t*) list_next(itr))) {
+	while ((ba_node = (ba_mp_t*) list_next(itr))) {
 
 #ifdef HAVE_BG_L_P
-		if (node_tar[X] == ba_node->coord[X]
-		    && node_tar[Y] == ba_node->coord[Y]
-		    && node_tar[Z] == ba_node->coord[Z]) {
+		if (mp_tar[X] == ba_node->coord[X]
+		    && mp_tar[Y] == ba_node->coord[Y]
+		    && mp_tar[Z] == ba_node->coord[Z]) {
 			if (ba_debug_flags & DEBUG_FLAG_BG_ALGO)
 				info("removing %c%c%c from list",
-				     alpha_num[node_tar[X]],
-				     alpha_num[node_tar[Y]],
-				     alpha_num[node_tar[Z]]);
+				     alpha_num[mp_tar[X]],
+				     alpha_num[mp_tar[Y]],
+				     alpha_num[mp_tar[Z]]);
 			list_remove (itr);
 			break;
 		}
 #else
-		if (node_tar[X] == ba_node->coord[X]) {
+		if (mp_tar[X] == ba_node->coord[X]) {
 			if (ba_debug_flags & DEBUG_FLAG_BG_ALGO)
 				info("removing %d from list",
-				     node_tar[X]);
+				     mp_tar[X]);
 			list_remove (itr);
 			break;
 		}
@@ -4671,17 +2954,16 @@ static int _find_next_free_using_port_2(ba_switch_t *curr_switch,
 		(ba_path_switch_t *) xmalloc(sizeof(ba_path_switch_t));
 	ba_path_switch_t *path_switch = NULL;
 	ba_path_switch_t *temp_switch = NULL;
-	int port_tar;
+	uint16_t port_tar;
 	int target_port = 0;
 	int port_to_try = 2;
-	uint16_t *node_tar= curr_switch->ext_wire[0].node_tar;
-	uint16_t *node_src = curr_switch->ext_wire[0].node_tar;
+	uint16_t *mp_tar= curr_switch->ext_wire[0].mp_tar;
+	uint16_t *node_src = curr_switch->ext_wire[0].mp_tar;
 	int used = 0;
 	int broke = 0;
-	ba_node_t *ba_node = NULL;
+	ba_mp_t *ba_node = NULL;
 
 	ListIterator itr;
-	static bool found = false;
 
 	path_add->geometry[X] = node_src[X];
 #ifdef HAVE_3D
@@ -4695,10 +2977,10 @@ static int _find_next_free_using_port_2(ba_switch_t *curr_switch,
 		goto return_0;
 
 	itr = list_iterator_create(nodes);
-	while ((ba_node = (ba_node_t*) list_next(itr))) {
-		if (node_tar[X] == ba_node->coord[X]
-		    && node_tar[Y] == ba_node->coord[Y]
-		    && node_tar[Z] == ba_node->coord[Z]) {
+	while ((ba_node = (ba_mp_t*) list_next(itr))) {
+		if (mp_tar[X] == ba_node->coord[X]
+		    && mp_tar[Y] == ba_node->coord[Y]
+		    && mp_tar[Z] == ba_node->coord[Z]) {
 			broke = 1;
 			break;
 		}
@@ -4706,12 +2988,12 @@ static int _find_next_free_using_port_2(ba_switch_t *curr_switch,
 	list_iterator_destroy(itr);
 
 	if (!broke && count>0 &&
-	    !ba_system_ptr->grid[node_tar[X]][node_tar[Y]][node_tar[Z]].used) {
+	    !ba_main_grid[mp_tar[X]][mp_tar[Y]][mp_tar[Z]].used) {
 		if (ba_debug_flags & DEBUG_FLAG_BG_ALGO)
 			info("this one not found %c%c%c",
-			     alpha_num[node_tar[X]],
-			     alpha_num[node_tar[Y]],
-			     alpha_num[node_tar[Z]]);
+			     alpha_num[mp_tar[X]],
+			     alpha_num[mp_tar[Y]],
+			     alpha_num[mp_tar[Z]]);
 		broke = 0;
 
 		if ((source_port%2))
@@ -4719,7 +3001,6 @@ static int _find_next_free_using_port_2(ba_switch_t *curr_switch,
 
 		list_flush(best_path);
 
-		found = true;
 		path_add->out = target_port;
 		list_push(path, path_add);
 
@@ -4750,7 +3031,7 @@ static int _find_next_free_using_port_2(ba_switch_t *curr_switch,
 
 			if (((path_switch->geometry[X] == node_src[X])
 			     && (path_switch->geometry[Y] == node_src[Y])
-			     && (path_switch->geometry[Z] == node_tar[Z]))) {
+			     && (path_switch->geometry[Z] == mp_tar[Z]))) {
 				if ( path_switch->out
 				     == port_to_try) {
 					used = 1;
@@ -4762,23 +3043,23 @@ static int _find_next_free_using_port_2(ba_switch_t *curr_switch,
 
 		/* check to see if wire 0 is used with this port */
 		if (curr_switch->
-		    ext_wire[port_to_try].node_tar[X]
-		    == curr_switch->ext_wire[0].node_tar[X]
-		    && curr_switch->ext_wire[port_to_try].node_tar[Y]
-		    == curr_switch->ext_wire[0].node_tar[Y]
-		    && curr_switch->ext_wire[port_to_try].node_tar[Z]
-		    == curr_switch->ext_wire[0].node_tar[Z]) {
+		    ext_wire[port_to_try].mp_tar[X]
+		    == curr_switch->ext_wire[0].mp_tar[X]
+		    && curr_switch->ext_wire[port_to_try].mp_tar[Y]
+		    == curr_switch->ext_wire[0].mp_tar[Y]
+		    && curr_switch->ext_wire[port_to_try].mp_tar[Z]
+		    == curr_switch->ext_wire[0].mp_tar[Z]) {
 			used = 1;
 		}
 
 		if (!used) {
 			port_tar = curr_switch->
 				ext_wire[port_to_try].port_tar;
-			node_tar = curr_switch->
-				ext_wire[port_to_try].node_tar;
+			mp_tar = curr_switch->
+				ext_wire[port_to_try].mp_tar;
 
-			next_switch = &ba_system_ptr->
-				grid[node_tar[X]][node_tar[Y]][node_tar[Z]]
+			next_switch = &ba_main_grid
+				[mp_tar[X]][mp_tar[Y]][mp_tar[Z]]
 				.axis_switch[X];
 
 			count++;
@@ -4813,7 +3094,7 @@ return_0:
  *
  * Sets up global variable best_path, and best_count.  On success
  * best_count will be >= BEST_COUNT_INIT you can call _set_best_path
- * to apply this path to the main system (ba_system_ptr)
+ * to apply this path to the main system (ba_main_grid)
  */
 
 static int _finish_torus(List results,
@@ -4824,15 +3105,14 @@ static int _finish_torus(List results,
 	ba_path_switch_t *path_add = xmalloc(sizeof(ba_path_switch_t));
 	ba_path_switch_t *path_switch = NULL;
 	ba_path_switch_t *temp_switch = NULL;
-	int port_tar;
+	uint16_t port_tar;
 	int target_port=0;
 	int ports_to_try[2] = {3,5};
-	uint16_t *node_tar= curr_switch->ext_wire[0].node_tar;
-	uint16_t *node_src = curr_switch->ext_wire[0].node_tar;
+	uint16_t *mp_tar= curr_switch->ext_wire[0].mp_tar;
+	uint16_t *node_src = curr_switch->ext_wire[0].mp_tar;
 	int i;
 	int used=0;
 	ListIterator itr;
-	static bool found = false;
 
 	path_add->geometry[X] = node_src[X];
 	path_add->geometry[Y] = node_src[Y];
@@ -4845,9 +3125,9 @@ static int _finish_torus(List results,
 		xfree(path_add);
 		return 0;
 	}
-	if (node_tar[X] == start[X]
-	    && node_tar[Y] == start[Y]
-	    && node_tar[Z] == start[Z]) {
+	if (mp_tar[X] == start[X]
+	    && mp_tar[Y] == start[Y]
+	    && mp_tar[Z] == start[Z]) {
 
 		if ((source_port%2))
 			target_port=1;
@@ -4855,7 +3135,6 @@ static int _finish_torus(List results,
 
 			list_flush(best_path);
 
-			found = true;
 			path_add->out = target_port;
 			list_push(path, path_add);
 
@@ -4896,7 +3175,7 @@ static int _finish_torus(List results,
 				     && (path_switch->geometry[Y]
 					 == node_src[Y])
 				     && (path_switch->geometry[Z]
-					 == node_tar[Z]))) {
+					 == mp_tar[Z]))) {
 					if ( path_switch->out
 					     == ports_to_try[i]) {
 						used = 1;
@@ -4908,24 +3187,24 @@ static int _finish_torus(List results,
 
 			/* check to see if wire 0 is used with this port */
 			if ((curr_switch->
-			     ext_wire[ports_to_try[i]].node_tar[X] ==
-			     curr_switch->ext_wire[0].node_tar[X] &&
+			     ext_wire[ports_to_try[i]].mp_tar[X] ==
+			     curr_switch->ext_wire[0].mp_tar[X] &&
 			     curr_switch->
-			     ext_wire[ports_to_try[i]].node_tar[Y] ==
-			     curr_switch->ext_wire[0].node_tar[Y] &&
+			     ext_wire[ports_to_try[i]].mp_tar[Y] ==
+			     curr_switch->ext_wire[0].mp_tar[Y] &&
 			     curr_switch->
-			     ext_wire[ports_to_try[i]].node_tar[Z] ==
-			     curr_switch->ext_wire[0].node_tar[Z])) {
+			     ext_wire[ports_to_try[i]].mp_tar[Z] ==
+			     curr_switch->ext_wire[0].mp_tar[Z])) {
 				continue;
 			}
 
 
 			if (!used) {
-				ba_node_t *next_node = NULL;
+				ba_mp_t *next_node = NULL;
 				port_tar = curr_switch->
 					ext_wire[ports_to_try[i]].port_tar;
-				node_tar = curr_switch->
-					ext_wire[ports_to_try[i]].node_tar;
+				mp_tar = curr_switch->
+					ext_wire[ports_to_try[i]].mp_tar;
 
 				/* Check to see if I am going to a place I have
 				   already been before, because even
@@ -4946,13 +3225,13 @@ static int _finish_torus(List results,
 							       coord[Y]],
 						     alpha_num[next_node->
 							       coord[Z]],
-						     alpha_num[node_tar[X]],
-						     alpha_num[node_tar[Y]],
-						     alpha_num[node_tar[Z]]);
-					if ((node_tar[X] == next_node->coord[X])
-					    && (node_tar[Y]
+						     alpha_num[mp_tar[X]],
+						     alpha_num[mp_tar[Y]],
+						     alpha_num[mp_tar[Z]]);
+					if ((mp_tar[X] == next_node->coord[X])
+					    && (mp_tar[Y]
 						== next_node->coord[Y])
-					    && (node_tar[Z]
+					    && (mp_tar[Z]
 						== next_node->coord[Z])) {
 						break;
 					}
@@ -4973,8 +3252,8 @@ static int _finish_torus(List results,
 					continue;
 				}
 
-				next_switch = &ba_system_ptr->grid
-					[node_tar[X]][node_tar[Y]][node_tar[Z]]
+				next_switch = &ba_main_grid
+					[mp_tar[X]][mp_tar[Y]][mp_tar[Z]]
 					.axis_switch[dim];
 
 
@@ -5027,7 +3306,7 @@ static uint16_t *_set_best_path()
 			     path_switch->in, path_switch->out);
 		if (!geo)
 			geo = path_switch->geometry;
-		curr_switch = &ba_system_ptr->grid
+		curr_switch = &ba_main_grid
 			[path_switch->geometry[X]]
 			[path_switch->geometry[Y]]
 			[path_switch->geometry[Z]].
@@ -5053,7 +3332,7 @@ static int _set_one_dim(uint16_t *start, uint16_t *end, uint16_t *coord)
 
 	for(dim=0;dim<cluster_dims;dim++) {
 		if (start[dim]==end[dim]) {
-			curr_switch = &ba_system_ptr->grid
+			curr_switch = &ba_main_grid
 				[coord[X]][coord[Y]][coord[Z]].axis_switch[dim];
 
 			if (!curr_switch->int_wire[0].used
@@ -5074,12 +3353,3 @@ static void _destroy_geo(void *object)
 	xfree(geo_ptr);
 }
 
-static int _coord(char coord)
-{
-	if ((coord >= '0') && (coord <= '9'))
-		return (coord - '0');
-	if ((coord >= 'A') && (coord <= 'Z'))
-		return (coord - 'A');
-	return -1;
-}
-
diff --git a/src/plugins/select/bluegene/plugin/state_test.h b/src/plugins/select/bluegene/ba/block_allocator.h
similarity index 64%
rename from src/plugins/select/bluegene/plugin/state_test.h
rename to src/plugins/select/bluegene/ba/block_allocator.h
index 3b2b5acbf..d9a9f2ed0 100644
--- a/src/plugins/select/bluegene/plugin/state_test.h
+++ b/src/plugins/select/bluegene/ba/block_allocator.h
@@ -1,13 +1,13 @@
 /*****************************************************************************\
- *  state_test.h - header for Blue Gene node and switch state test.
- *  $Id$
+ *  block_allocator.h
+ *
  *****************************************************************************
  *  Copyright (C) 2004 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
- *  Written by Dan Phung <phung4@llnl.gov> et. al.
+ *  Written by Dan Phung <phung4@llnl.gov>, Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -36,24 +36,40 @@
  *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
 \*****************************************************************************/
 
-#ifndef _STATE_TEST_H_
-#define _STATE_TEST_H_
+#ifndef _BLOCK_ALLOCATOR_H_
+#define _BLOCK_ALLOCATOR_H_
 
-/* Determine if specific slurm node is already in DOWN or DRAIN ret (1) or
- * FAIL ret (2) state idle ret (0) */
-extern int node_already_down(char *node_name);
+#include "src/common/node_select.h"
+#include "../bridge_linker.h"
+#include "../ba_common.h"
 
-/*
- * Search MMCS for failed switches and nodes. Failed resources are DRAINED in
- * SLURM. This relies upon rm_get_BGL(), which is slow (10+ seconds) so run
- * this test infrequently.
+// #define DEBUG_PA
+#define BIG_MAX 9999
+#define BUFSIZE 4096
+
+#define NUM_PORTS_PER_NODE 6
+
+enum {X, Y, Z};
+
+/* Global */
+extern my_bluegene_t *bg;
+
+extern ba_mp_t ***ba_main_grid;
+
+/* If emulating a system set up a known configuration for wires in a
+ * system of the size given.
+ * If a real bluegene system, query the system and get all wiring
+ * information of the system.
  */
-extern void test_mmcs_failures(void);
+extern void init_wires(void);
 
 /*
- * Search MMCS for failed switches and nodes inside of block.
- * Failed resources are DRAINED in SLURM. This relies upon rm_get_partition(),
+ * get the used wires for a block out of the database and return the
+ * node list.  The block_ptr here must be gotten with bridge_get_block
+ * not bridge_get_block_info, if you are looking to recover from
+ * before.  If you are looking to start clean it doesn't matter.
  */
-extern int check_block_bp_states(char *bg_block_id, bool slurmctld_locked);
+extern List get_and_set_block_wiring(char *bg_block_id,
+				     void *block_ptr);
 
-#endif /* _STATE_TEST_H_ */
+#endif /* _BLOCK_ALLOCATOR_H_ */
diff --git a/src/plugins/select/bluegene/block_allocator/wire_test.c b/src/plugins/select/bluegene/ba/wire_test.c
similarity index 81%
rename from src/plugins/select/bluegene/block_allocator/wire_test.c
rename to src/plugins/select/bluegene/ba/wire_test.c
index a444a11ec..b477388fd 100644
--- a/src/plugins/select/bluegene/block_allocator/wire_test.c
+++ b/src/plugins/select/bluegene/ba/wire_test.c
@@ -8,7 +8,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -44,14 +44,71 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <math.h>
+#include "../ba_common.h"
 #include "block_allocator.h"
 #include "src/common/uid.h"
 #include "src/common/timers.h"
 
+/* These are here to avoid linking issues with the bridge for
+ * unresolved symbols.
+ */
+time_t last_job_update;
+time_t last_bg_update;
+bg_config_t *bg_conf = NULL;
+bg_lists_t *bg_lists = NULL;
+pthread_mutex_t block_state_mutex = PTHREAD_MUTEX_INITIALIZER;
+int blocks_are_created = 0;
+int bg_recover = 1;
+bool have_db2 = false;
+
+extern int bridge_init(char *properties_file)
+{
+	return SLURM_ERROR;
+}
+
+extern int bridge_fini()
+{
+	return SLURM_ERROR;
+}
+
+extern int bridge_get_size(int *size)
+{
+	return SLURM_ERROR;
+}
+
+extern int bridge_setup_system()
+{
+	return SLURM_ERROR;
+}
+
+extern int bridge_free_bg(my_bluegene_t *bg)
+{
+	return SLURM_ERROR;
+}
+
+extern int bridge_get_bg(my_bluegene_t **bg)
+{
+	return SLURM_ERROR;
+}
+
+#ifdef HAVE_BG_FILES
+extern int bridge_get_data(rm_element_t* element,
+			   enum rm_specification field, void *data)
+{
+	return SLURM_ERROR;
+}
+#else
+extern int bridge_get_data(void* element,
+			   int field, void *data)
+{
+	return SLURM_ERROR;
+}
+#endif
+
 /** */
 int main(int argc, char** argv)
 {
-	ba_request_t *request = (ba_request_t*) xmalloc(sizeof(ba_request_t));
+	select_ba_request_t *request = xmalloc(sizeof(select_ba_request_t));
 	log_options_t log_opts = LOG_OPTS_INITIALIZER;
 	int debug_level = 5;
 
@@ -70,7 +127,6 @@ int main(int argc, char** argv)
 
 	slurm_conf_reinit(NULL);
 	ba_init(NULL, 1);
-	init_wires(NULL);
 
 	/* [010x831] */
 /* 	results = list_create(NULL); */
@@ -130,7 +186,7 @@ int main(int argc, char** argv)
 //	request->size = 1;
 	request->rotate = 1;
 	request->elongate = 1;
-	request->conn_type = SELECT_TORUS;
+	request->conn_type[0] = SELECT_TORUS;
 	new_ba_request(request);
 	print_ba_request(request);
 	if (!allocate_block(request, results)) {
@@ -154,12 +210,10 @@ int main(int argc, char** argv)
 	for(x=startx;x<endx;x++) {
 		for(y=starty;y<endy;y++) {
 			for(z=startz;z<endz;z++) {
-				ba_node_t *curr_node =
-					&(ba_system_ptr->grid[x][y][z]);
-				info("Node %c%c%c Used = %d Letter = %c",
+				ba_mp_t *curr_node = &(ba_main_grid[x][y][z]);
+				info("Node %c%c%c Used = %d",
 				     alpha_num[x],alpha_num[y],alpha_num[z],
-				     curr_node->used,
-				     curr_node->letter);
+				     curr_node->used);
 				for(dim=0;dim<1;dim++) {
 					info("Dim %d",dim);
 					ba_switch_t *wire =
@@ -172,15 +226,15 @@ int main(int argc, char** argv)
 						     alpha_num[wire->ext_wire[
 							     wire->int_wire[j].
 							     port_tar].
-							       node_tar[X]],
+							       mp_tar[X]],
 						     alpha_num[wire->ext_wire[
 							     wire->int_wire[j].
 							     port_tar].
-						     node_tar[Y]],
+						     mp_tar[Y]],
 						     alpha_num[wire->ext_wire[
 							     wire->int_wire[j].
 							     port_tar].
-						     node_tar[Z]],
+						     mp_tar[Z]],
 						     wire->ext_wire[
 							     wire->int_wire[j].
 							     port_tar].
diff --git a/src/plugins/select/bluegene/ba_bgq/Makefile.am b/src/plugins/select/bluegene/ba_bgq/Makefile.am
new file mode 100644
index 000000000..f8da9f495
--- /dev/null
+++ b/src/plugins/select/bluegene/ba_bgq/Makefile.am
@@ -0,0 +1,38 @@
+# Makefile.am for block_allocator
+
+AUTOMAKE_OPTIONS = foreign
+CLEANFILES = core.*
+
+INCLUDES = -I$(top_srcdir) $(BG_INCLUDES)
+
+# making a .la
+
+noinst_LTLIBRARIES = libblock_allocator.la
+libblock_allocator_la_SOURCES = block_allocator.c block_allocator.h
+
+libblock_allocator_la_LIBADD  = ../bl_bgq/libbridge_linker.la
+
+libblock_allocator_la_LDFLAGS = $(LIB_LDFLAGS)
+
+total = $(libblock_allocator_la_LDADD)
+
+#to build the debug executable
+noinst_PROGRAMS = wire_test
+
+wire_test_SOURCES = wire_test.c
+
+# compile against the block_allocator.o since we don't really want to
+# link against the bridge_linker.
+wire_test_LDADD = $(top_builddir)/src/api/libslurm.o -ldl \
+	../libba_common.la  $(libblock_allocator_la_OBJECTS)
+
+total += ../libba_common.la $(top_builddir)/src/api/libslurm.o
+
+wire_test_LDFLAGS = -export-dynamic $(CMD_LDFLAGS) $(BG_LDFLAGS)
+
+# force link with g++
+nodist_EXTRA_wire_test_SOURCES = dummy.cxx
+
+force:
+$(total) : force
+	@cd `dirname $@` && $(MAKE) `basename $@`
diff --git a/src/plugins/select/bluegene/ba_bgq/Makefile.in b/src/plugins/select/bluegene/ba_bgq/Makefile.in
new file mode 100644
index 000000000..771134d76
--- /dev/null
+++ b/src/plugins/select/bluegene/ba_bgq/Makefile.in
@@ -0,0 +1,672 @@
+# Makefile.in generated by automake 1.11.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+# 2003, 2004, 2005, 2006, 2007, 2008, 2009  Free Software Foundation,
+# Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# Makefile.am for block_allocator
+
+
+VPATH = @srcdir@
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+target_triplet = @target@
+noinst_PROGRAMS = wire_test$(EXEEXT)
+subdir = src/plugins/select/bluegene/ba_bgq
+DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
+	$(top_srcdir)/auxdir/libtool.m4 \
+	$(top_srcdir)/auxdir/ltoptions.m4 \
+	$(top_srcdir)/auxdir/ltsugar.m4 \
+	$(top_srcdir)/auxdir/ltversion.m4 \
+	$(top_srcdir)/auxdir/lt~obsolete.m4 \
+	$(top_srcdir)/auxdir/slurm.m4 \
+	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
+	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
+	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
+	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
+	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_databases.m4 \
+	$(top_srcdir)/auxdir/x_ac_debug.m4 \
+	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
+	$(top_srcdir)/auxdir/x_ac_federation.m4 \
+	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
+	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
+	$(top_srcdir)/auxdir/x_ac_munge.m4 \
+	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_pam.m4 \
+	$(top_srcdir)/auxdir/x_ac_printf_null.m4 \
+	$(top_srcdir)/auxdir/x_ac_ptrace.m4 \
+	$(top_srcdir)/auxdir/x_ac_readline.m4 \
+	$(top_srcdir)/auxdir/x_ac_setpgrp.m4 \
+	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
+	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
+	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
+	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
+	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+	$(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+LTLIBRARIES = $(noinst_LTLIBRARIES)
+libblock_allocator_la_DEPENDENCIES = ../bl_bgq/libbridge_linker.la
+am_libblock_allocator_la_OBJECTS = block_allocator.lo
+libblock_allocator_la_OBJECTS = $(am_libblock_allocator_la_OBJECTS)
+libblock_allocator_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
+	$(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
+	$(libblock_allocator_la_LDFLAGS) $(LDFLAGS) -o $@
+PROGRAMS = $(noinst_PROGRAMS)
+am_wire_test_OBJECTS = wire_test.$(OBJEXT)
+wire_test_OBJECTS = $(am_wire_test_OBJECTS)
+wire_test_DEPENDENCIES = $(top_builddir)/src/api/libslurm.o \
+	../libba_common.la $(libblock_allocator_la_OBJECTS)
+wire_test_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \
+	$(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \
+	$(CXXFLAGS) $(wire_test_LDFLAGS) $(LDFLAGS) -o $@
+DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm
+depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp
+am__depfiles_maybe = depfiles
+am__mv = mv -f
+COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
+	$(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+	--mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+	$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+CCLD = $(CC)
+LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+	--mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \
+	$(LDFLAGS) -o $@
+CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+	$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS)
+LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+	--mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+	$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS)
+CXXLD = $(CXX)
+CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+	--mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \
+	$(LDFLAGS) -o $@
+SOURCES = $(libblock_allocator_la_SOURCES) $(wire_test_SOURCES) \
+	$(nodist_EXTRA_wire_test_SOURCES)
+DIST_SOURCES = $(libblock_allocator_la_SOURCES) $(wire_test_SOURCES)
+ETAGS = etags
+CTAGS = ctags
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AR = @AR@
+AUTHD_CFLAGS = @AUTHD_CFLAGS@
+AUTHD_LIBS = @AUTHD_LIBS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
+BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
+BLUEGENE_LOADED = @BLUEGENE_LOADED@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CMD_LDFLAGS = @CMD_LDFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DSYMUTIL = @DSYMUTIL@
+DUMPBIN = @DUMPBIN@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+ELAN_LIBS = @ELAN_LIBS@
+EXEEXT = @EXEEXT@
+FEDERATION_LDFLAGS = @FEDERATION_LDFLAGS@
+FGREP = @FGREP@
+GREP = @GREP@
+GTK_CFLAGS = @GTK_CFLAGS@
+GTK_LIBS = @GTK_LIBS@
+HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@
+HAVEPGCONFIG = @HAVEPGCONFIG@
+HAVE_AIX = @HAVE_AIX@
+HAVE_ELAN = @HAVE_ELAN@
+HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
+HAVE_OPENSSL = @HAVE_OPENSSL@
+HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
+HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
+HWLOC_LDFLAGS = @HWLOC_LDFLAGS@
+HWLOC_LIBS = @HWLOC_LIBS@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+LD = @LD@
+LDFLAGS = @LDFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIB_LDFLAGS = @LIB_LDFLAGS@
+LIPO = @LIPO@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MKDIR_P = @MKDIR_P@
+MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@
+MUNGE_LDFLAGS = @MUNGE_LDFLAGS@
+MUNGE_LIBS = @MUNGE_LIBS@
+MYSQL_CFLAGS = @MYSQL_CFLAGS@
+MYSQL_LIBS = @MYSQL_LIBS@
+NCURSES = @NCURSES@
+NM = @NM@
+NMEDIT = @NMEDIT@
+NUMA_LIBS = @NUMA_LIBS@
+OBJDUMP = @OBJDUMP@
+OBJEXT = @OBJEXT@
+OTOOL = @OTOOL@
+OTOOL64 = @OTOOL64@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PAM_DIR = @PAM_DIR@
+PAM_LIBS = @PAM_LIBS@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PGSQL_CFLAGS = @PGSQL_CFLAGS@
+PGSQL_LIBS = @PGSQL_LIBS@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROCTRACKDIR = @PROCTRACKDIR@
+PROJECT = @PROJECT@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+RANLIB = @RANLIB@
+READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
+RELEASE = @RELEASE@
+SED = @SED@
+SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
+SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SLURMCTLD_PORT = @SLURMCTLD_PORT@
+SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
+SLURMDBD_PORT = @SLURMDBD_PORT@
+SLURMD_PORT = @SLURMD_PORT@
+SLURM_API_AGE = @SLURM_API_AGE@
+SLURM_API_CURRENT = @SLURM_API_CURRENT@
+SLURM_API_MAJOR = @SLURM_API_MAJOR@
+SLURM_API_REVISION = @SLURM_API_REVISION@
+SLURM_API_VERSION = @SLURM_API_VERSION@
+SLURM_MAJOR = @SLURM_MAJOR@
+SLURM_MICRO = @SLURM_MICRO@
+SLURM_MINOR = @SLURM_MINOR@
+SLURM_PREFIX = @SLURM_PREFIX@
+SLURM_VERSION_NUMBER = @SLURM_VERSION_NUMBER@
+SLURM_VERSION_STRING = @SLURM_VERSION_STRING@
+SO_LDFLAGS = @SO_LDFLAGS@
+SSL_CPPFLAGS = @SSL_CPPFLAGS@
+SSL_LDFLAGS = @SSL_LDFLAGS@
+SSL_LIBS = @SSL_LIBS@
+STRIP = @STRIP@
+UTIL_LIBS = @UTIL_LIBS@
+VERSION = @VERSION@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+lt_ECHO = @lt_ECHO@
+lua_CFLAGS = @lua_CFLAGS@
+lua_LIBS = @lua_LIBS@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target = @target@
+target_alias = @target_alias@
+target_cpu = @target_cpu@
+target_os = @target_os@
+target_vendor = @target_vendor@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+AUTOMAKE_OPTIONS = foreign
+CLEANFILES = core.*
+INCLUDES = -I$(top_srcdir) $(BG_INCLUDES)
+
+# making a .la
+noinst_LTLIBRARIES = libblock_allocator.la
+libblock_allocator_la_SOURCES = block_allocator.c block_allocator.h
+libblock_allocator_la_LIBADD = ../bl_bgq/libbridge_linker.la
+libblock_allocator_la_LDFLAGS = $(LIB_LDFLAGS)
+total = $(libblock_allocator_la_LDADD) ../libba_common.la \
+	$(top_builddir)/src/api/libslurm.o
+wire_test_SOURCES = wire_test.c
+
+# compile against the block_allocator.o since we don't really want to
+# link against the bridge_linker.
+wire_test_LDADD = $(top_builddir)/src/api/libslurm.o -ldl \
+	../libba_common.la  $(libblock_allocator_la_OBJECTS)
+
+wire_test_LDFLAGS = -export-dynamic $(CMD_LDFLAGS) $(BG_LDFLAGS)
+
+# force link with g++
+nodist_EXTRA_wire_test_SOURCES = dummy.cxx
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .c .cxx .lo .o .obj
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am  $(am__configure_deps)
+	@for dep in $?; do \
+	  case '$(am__configure_deps)' in \
+	    *$$dep*) \
+	      ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+	        && { if test -f $@; then exit 0; else break; fi; }; \
+	      exit 1;; \
+	  esac; \
+	done; \
+	echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/plugins/select/bluegene/ba_bgq/Makefile'; \
+	$(am__cd) $(top_srcdir) && \
+	  $(AUTOMAKE) --foreign src/plugins/select/bluegene/ba_bgq/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+	@case '$?' in \
+	  *config.status*) \
+	    cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+	  *) \
+	    echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+	    cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+	esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+clean-noinstLTLIBRARIES:
+	-test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES)
+	@list='$(noinst_LTLIBRARIES)'; for p in $$list; do \
+	  dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \
+	  test "$$dir" != "$$p" || dir=.; \
+	  echo "rm -f \"$${dir}/so_locations\""; \
+	  rm -f "$${dir}/so_locations"; \
+	done
+libblock_allocator.la: $(libblock_allocator_la_OBJECTS) $(libblock_allocator_la_DEPENDENCIES) 
+	$(libblock_allocator_la_LINK)  $(libblock_allocator_la_OBJECTS) $(libblock_allocator_la_LIBADD) $(LIBS)
+
+clean-noinstPROGRAMS:
+	@list='$(noinst_PROGRAMS)'; test -n "$$list" || exit 0; \
+	echo " rm -f" $$list; \
+	rm -f $$list || exit $$?; \
+	test -n "$(EXEEXT)" || exit 0; \
+	list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \
+	echo " rm -f" $$list; \
+	rm -f $$list
+wire_test$(EXEEXT): $(wire_test_OBJECTS) $(wire_test_DEPENDENCIES) 
+	@rm -f wire_test$(EXEEXT)
+	$(wire_test_LINK) $(wire_test_OBJECTS) $(wire_test_LDADD) $(LIBS)
+
+mostlyclean-compile:
+	-rm -f *.$(OBJEXT)
+
+distclean-compile:
+	-rm -f *.tab.c
+
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/block_allocator.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/dummy.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/wire_test.Po@am__quote@
+
+.c.o:
+@am__fastdepCC_TRUE@	$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(COMPILE) -c $<
+
+.c.obj:
+@am__fastdepCC_TRUE@	$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(COMPILE) -c `$(CYGPATH_W) '$<'`
+
+.c.lo:
+@am__fastdepCC_TRUE@	$(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LTCOMPILE) -c -o $@ $<
+
+.cxx.o:
+@am__fastdepCXX_TRUE@	$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCXX_TRUE@	$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@	$(CXXCOMPILE) -c -o $@ $<
+
+.cxx.obj:
+@am__fastdepCXX_TRUE@	$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+@am__fastdepCXX_TRUE@	$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@	$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'`
+
+.cxx.lo:
+@am__fastdepCXX_TRUE@	$(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCXX_TRUE@	$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@	$(LTCXXCOMPILE) -c -o $@ $<
+
+mostlyclean-libtool:
+	-rm -f *.lo
+
+clean-libtool:
+	-rm -rf .libs _libs
+
+ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
+	list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+	unique=`for i in $$list; do \
+	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+	  done | \
+	  $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+	      END { if (nonempty) { for (i in files) print i; }; }'`; \
+	mkid -fID $$unique
+tags: TAGS
+
+TAGS:  $(HEADERS) $(SOURCES)  $(TAGS_DEPENDENCIES) \
+		$(TAGS_FILES) $(LISP)
+	set x; \
+	here=`pwd`; \
+	list='$(SOURCES) $(HEADERS)  $(LISP) $(TAGS_FILES)'; \
+	unique=`for i in $$list; do \
+	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+	  done | \
+	  $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+	      END { if (nonempty) { for (i in files) print i; }; }'`; \
+	shift; \
+	if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+	  test -n "$$unique" || unique=$$empty_fix; \
+	  if test $$# -gt 0; then \
+	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	      "$$@" $$unique; \
+	  else \
+	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	      $$unique; \
+	  fi; \
+	fi
+ctags: CTAGS
+CTAGS:  $(HEADERS) $(SOURCES)  $(TAGS_DEPENDENCIES) \
+		$(TAGS_FILES) $(LISP)
+	list='$(SOURCES) $(HEADERS)  $(LISP) $(TAGS_FILES)'; \
+	unique=`for i in $$list; do \
+	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+	  done | \
+	  $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+	      END { if (nonempty) { for (i in files) print i; }; }'`; \
+	test -z "$(CTAGS_ARGS)$$unique" \
+	  || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+	     $$unique
+
+GTAGS:
+	here=`$(am__cd) $(top_builddir) && pwd` \
+	  && $(am__cd) $(top_srcdir) \
+	  && gtags -i $(GTAGS_ARGS) "$$here"
+
+distclean-tags:
+	-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+	@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	list='$(DISTFILES)'; \
+	  dist_files=`for file in $$list; do echo $$file; done | \
+	  sed -e "s|^$$srcdirstrip/||;t" \
+	      -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+	case $$dist_files in \
+	  */*) $(MKDIR_P) `echo "$$dist_files" | \
+			   sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+			   sort -u` ;; \
+	esac; \
+	for file in $$dist_files; do \
+	  if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+	  if test -d $$d/$$file; then \
+	    dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+	    if test -d "$(distdir)/$$file"; then \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+	      cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+	  else \
+	    test -f "$(distdir)/$$file" \
+	    || cp -p $$d/$$file "$(distdir)/$$file" \
+	    || exit 1; \
+	  fi; \
+	done
+check-am: all-am
+check: check-am
+all-am: Makefile $(LTLIBRARIES) $(PROGRAMS)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+	$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	  install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	  `test -z '$(STRIP)' || \
+	    echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
+mostlyclean-generic:
+
+clean-generic:
+	-test -z "$(CLEANFILES)" || rm -f $(CLEANFILES)
+
+distclean-generic:
+	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+	-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+	@echo "This command is intended for maintainers to use"
+	@echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \
+	clean-noinstPROGRAMS mostlyclean-am
+
+distclean: distclean-am
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+distclean-am: clean-am distclean-compile distclean-generic \
+	distclean-tags
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-compile mostlyclean-generic \
+	mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \
+	clean-libtool clean-noinstLTLIBRARIES clean-noinstPROGRAMS \
+	ctags distclean distclean-compile distclean-generic \
+	distclean-libtool distclean-tags distdir dvi dvi-am html \
+	html-am info info-am install install-am install-data \
+	install-data-am install-dvi install-dvi-am install-exec \
+	install-exec-am install-html install-html-am install-info \
+	install-info-am install-man install-pdf install-pdf-am \
+	install-ps install-ps-am install-strip installcheck \
+	installcheck-am installdirs maintainer-clean \
+	maintainer-clean-generic mostlyclean mostlyclean-compile \
+	mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \
+	tags uninstall uninstall-am
+
+
+force:
+$(total) : force
+	@cd `dirname $@` && $(MAKE) `basename $@`
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/src/plugins/select/bluegene/ba_bgq/block_allocator.c b/src/plugins/select/bluegene/ba_bgq/block_allocator.c
new file mode 100644
index 000000000..09bdb43f2
--- /dev/null
+++ b/src/plugins/select/bluegene/ba_bgq/block_allocator.c
@@ -0,0 +1,2082 @@
+/*****************************************************************************\
+ *  block_allocator.c - Assorted functions for layout of bgq blocks,
+ *	 wiring, mapping for smap, etc.
+ *  $Id$
+ *****************************************************************************
+ *  Copyright (C) 2004-2007 The Regents of the University of California.
+ *  Copyright (C) 2008-2011 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Danny Auble <da@llnl.gov>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#if HAVE_CONFIG_H
+#  include "config.h"
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <math.h>
+#include "block_allocator.h"
+#include "src/common/uid.h"
+#include "src/common/timers.h"
+#include "src/common/slurmdb_defs.h"
+
+#define DEBUG_PA
+#define BEST_COUNT_INIT 20
+
+/* in Q there are always 5 dimensions in a nodecard/board */
+typedef struct {
+	int start[5];
+	int end[5];
+} ba_nc_coords_t;
+
+#define mp_strip_unaltered(__mp) (__mp & ~BA_MP_USED_ALTERED_PASS)
+
+/* _ba_system is the "current" system that the structures will work
+ *  on */
+ba_mp_t ****ba_main_grid = NULL;
+
+static ba_geo_system_t *ba_main_geo_system = NULL;
+static ba_geo_system_t *ba_mp_geo_system = NULL;
+static uint16_t *deny_pass = NULL;
+static ba_nc_coords_t g_nc_coords[16];
+
+/* increment Y -> Z -> A -> X -> E
+ * used for doing nodecard coords */
+static int ba_nc_dim_order[5] = {Y, Z, A, X, E};
+
+/** internal helper functions */
+/* */
+static int _check_for_options(select_ba_request_t* ba_request);
+
+/* */
+static int _fill_in_coords(List results, int level, ba_mp_t *start_mp,
+			   ba_mp_t **check_mp, int *block_start,
+			   int *block_end, int *coords);
+
+static int _finish_torus(List results, int level, int *block_start,
+			 int *block_end, uint16_t *conn_type, int *coords);
+
+/* */
+static char *_copy_from_main(List main_mps, List ret_list);
+
+/* */
+static char *_reset_altered_mps(List main_mps, bool get_name);
+
+/* */
+static int _copy_ba_switch(ba_mp_t *ba_mp, ba_mp_t *orig_mp, int dim);
+
+/* */
+static int _check_deny_pass(int dim);
+
+/* */
+static int _find_path(List mps, ba_mp_t *start_mp, int dim,
+		      uint16_t geometry, uint16_t conn_type,
+		      int *block_start, int *block_end);
+
+/* */
+static void _setup_next_mps(int level, uint16_t *coords);
+
+/* */
+static void _increment_nc_coords(int dim, int *mp_coords, int *dim_size);
+
+/** */
+static bool _mp_used(ba_mp_t* ba_mp, int dim);
+
+/** */
+static bool _mp_out_used(ba_mp_t* ba_mp, int dim);
+
+extern void ba_create_system()
+{
+	int a,x,y,z, i = 0, dim;
+	uint16_t coords[SYSTEM_DIMENSIONS];
+	int mp_coords[5];
+
+	if (ba_main_grid)
+		ba_destroy_system();
+
+	ba_main_grid = (ba_mp_t****)
+		xmalloc(sizeof(ba_mp_t***) * DIM_SIZE[A]);
+	for (a = 0; a < DIM_SIZE[A]; a++) {
+		ba_main_grid[a] = (ba_mp_t***)
+			xmalloc(sizeof(ba_mp_t**) * DIM_SIZE[X]);
+		for (x = 0; x < DIM_SIZE[X]; x++) {
+			ba_main_grid[a][x] = (ba_mp_t**)
+				xmalloc(sizeof(ba_mp_t*) * DIM_SIZE[Y]);
+			for (y = 0; y < DIM_SIZE[Y]; y++) {
+				ba_main_grid[a][x][y] = (ba_mp_t*)
+					xmalloc(sizeof(ba_mp_t) * DIM_SIZE[Z]);
+				for (z = 0; z < DIM_SIZE[Z]; z++) {
+					ba_mp_t *ba_mp = &ba_main_grid
+						[a][x][y][z];
+					ba_mp->coord[A] = a;
+					ba_mp->coord[X] = x;
+					ba_mp->coord[Y] = y;
+					ba_mp->coord[Z] = z;
+
+					snprintf(ba_mp->coord_str,
+						 sizeof(ba_mp->coord_str),
+						 "%c%c%c%c",
+						 alpha_num[ba_mp->coord[A]],
+						 alpha_num[ba_mp->coord[X]],
+						 alpha_num[ba_mp->coord[Y]],
+						 alpha_num[ba_mp->coord[Z]]);
+					ba_setup_mp(ba_mp, true, false);
+					ba_mp->state = NODE_STATE_IDLE;
+					/* This might get changed
+					   later, but just incase set
+					   it up here.
+					*/
+					ba_mp->index = i++;
+				}
+			}
+		}
+	}
+
+	/* build all the possible geos for the mid planes */
+	ba_main_geo_system =  xmalloc(sizeof(ba_geo_system_t));
+	ba_main_geo_system->dim_count = SYSTEM_DIMENSIONS;
+	ba_main_geo_system->dim_size =
+		xmalloc(sizeof(int) * ba_main_geo_system->dim_count);
+
+	for (dim = 0; dim < SYSTEM_DIMENSIONS; dim++)
+		ba_main_geo_system->dim_size[dim] = DIM_SIZE[dim];
+
+	ba_create_geo_table(ba_main_geo_system);
+	//ba_print_geo_table(ba_main_geo_system);
+
+	/* build all the possible geos for a sub block inside a mid plane */
+	ba_mp_geo_system =  xmalloc(sizeof(ba_geo_system_t));
+	ba_mp_geo_system->dim_count = 5;
+	ba_mp_geo_system->dim_size =
+		xmalloc(sizeof(int) * ba_mp_geo_system->dim_count);
+	/* These will never change. */
+	ba_mp_geo_system->dim_size[0] = 4;
+	ba_mp_geo_system->dim_size[1] = 4;
+	ba_mp_geo_system->dim_size[2] = 4;
+	ba_mp_geo_system->dim_size[3] = 4;
+	ba_mp_geo_system->dim_size[4] = 2;
+	ba_create_geo_table(ba_mp_geo_system);
+	//ba_print_geo_table(ba_mp_geo_system);
+
+	_setup_next_mps(A, coords);
+
+	/* Now set it up to mark the corners of each nodecard.  This
+	   is used if running a sub-block job on a small block later.
+	*/
+	/* This is the basic idea for each small block size origin 00000
+	   32  = 2x2x2x2x2
+	   64  = 2x2x4x2x2
+	   128 = 2x2x4x4x2
+	   256 = 4x2x4x4x2
+	   512 = 4x4x4x4x2
+	*/
+
+	/* 32node boundaries (this is what the following code generates)
+	   N00 - 32  = 00000x11111
+	   N01 - 64  = 00200x11311
+	   N02 - 96  = 00020x11131
+	   N03 - 128 = 00220x11331
+	   N04 - 160 = 20000x31111
+	   N05 - 192 = 20200x31311
+	   N06 - 224 = 20020x31131
+	   N07 - 256 = 20220x31331
+	   N08 - 288 = 02000x13111
+	   N09 - 320 = 02200x13311
+	   N10 - 352 = 02020x13131
+	   N11 - 384 = 02220x13331
+	   N12 - 416 = 22000x33111
+	   N13 - 448 = 22200x33311
+	   N14 - 480 = 22020x33131
+	   N15 - 512 = 22220x33331
+	*/
+	memset(&mp_coords, 0, sizeof(mp_coords));
+	for (i=0; i<16; i++) {
+		/*
+		 * increment Y -> Z -> A -> X
+		 * E always goes from 0->1
+		 */
+		for (dim = 0; dim < 5; dim++) {
+			g_nc_coords[i].start[dim] =
+				g_nc_coords[i].end[dim] = mp_coords[dim];
+			g_nc_coords[i].end[dim]++;
+		}
+		/* info("%d\tgot %c%c%c%c%cx%c%c%c%c%c", */
+		/*      i, */
+		/*      alpha_num[g_nc_coords[i].start[A]], */
+		/*      alpha_num[g_nc_coords[i].start[X]], */
+		/*      alpha_num[g_nc_coords[i].start[Y]], */
+		/*      alpha_num[g_nc_coords[i].start[Z]], */
+		/*      alpha_num[g_nc_coords[i].start[E]], */
+		/*      alpha_num[g_nc_coords[i].end[A]], */
+		/*      alpha_num[g_nc_coords[i].end[X]], */
+		/*      alpha_num[g_nc_coords[i].end[Y]], */
+		/*      alpha_num[g_nc_coords[i].end[Z]], */
+		/*      alpha_num[g_nc_coords[i].end[E]]); */
+		_increment_nc_coords(0, mp_coords, ba_mp_geo_system->dim_size);
+	}
+}
+
+/*
+ * Tear down all global block-allocator state: free every midplane of
+ * the main A/X/Y/Z grid, then both geometry tables, and finally zero
+ * the recorded dimension sizes.  Safe to call when nothing was
+ * initialized (every section is guarded by a NULL check).
+ */
+extern void ba_destroy_system(void)
+{
+	int da, dx, dy;
+
+	if (ba_main_grid) {
+		/* Free the innermost (Z) rows first, then walk back out. */
+		for (da = 0; da < DIM_SIZE[A]; da++) {
+			for (dx = 0; dx < DIM_SIZE[X]; dx++) {
+				for (dy = 0; dy < DIM_SIZE[Y]; dy++)
+					xfree(ba_main_grid[da][dx][dy]);
+				xfree(ba_main_grid[da][dx]);
+			}
+			xfree(ba_main_grid[da]);
+		}
+		xfree(ba_main_grid);
+		ba_main_grid = NULL;
+	}
+
+	if (ba_main_geo_system) {
+		/* The table entries must be released before the system. */
+		ba_free_geo_table(ba_main_geo_system);
+		xfree(ba_main_geo_system->dim_size);
+		xfree(ba_main_geo_system);
+	}
+
+	if (ba_mp_geo_system) {
+		ba_free_geo_table(ba_mp_geo_system);
+		xfree(ba_mp_geo_system->dim_size);
+		xfree(ba_mp_geo_system);
+	}
+
+	memset(DIM_SIZE, 0, sizeof(DIM_SIZE));
+}
+
+/*
+ * create a block request.  Note that if the geometry is given,
+ * then size is ignored.  If elongate is true, the algorithm will try
+ * to fit that a block of cubic shape and then it will try other
+ * elongated geometries.  (ie, 2x2x2 -> 4x2x1 -> 8x1x1).
+ *
+ * IN/OUT - ba_request: structure to allocate and fill in.
+ *
+ * ALL below IN's need to be set within the ba_request before the call
+ * if you want them to be used.
+ * ALL below OUT's are set and returned within the ba_request.
+ * IN - avail_mp_bitmap: bitmap of usable midplanes.
+ * IN - blrtsimage: BlrtsImage for this block if not default
+ * IN - conn_type: connection type of request (TORUS or MESH or SMALL)
+ * IN - elongate: if true, will try to fit different geometries of
+ *      same size requests
+ * IN/OUT - geometry: requested/returned geometry of block
+ * IN - linuximage: LinuxImage for this block if not default
+ * IN - mloaderimage: MLoaderImage for this block if not default
+ * IN - nodecards: Number of nodecards in each block in request only
+ *      used of small block allocations.
+ * OUT - passthroughs: if there were passthroughs used in the
+ *       generation of the block.
+ * IN - procs: Number of real processors requested
+ * IN - quarters: Number of midplane quarters in each block in request only
+ *      used of small block allocations.
+ * IN - RamDiskimage: RamDiskImage for this block if not default
+ * IN - rotate: if true, allows rotation of block during fit
+ * OUT - save_name: hostlist of midplanes used in block
+ * IN/OUT - size: requested/returned count of midplanes in block
+ * IN - start: geo location of where to start the allocation
+ * IN - start_req: if set use the start variable to start at
+ * return success of allocation/validation of params
+ */
+extern int new_ba_request(select_ba_request_t* ba_request)
+{
+	int i=0;
+
+	/* drop any name left over from a previous request */
+	xfree(ba_request->save_name);
+
+	/* If an explicit geometry was supplied, validate every dimension
+	   against the machine size and derive size from their product. */
+	if (ba_request->geometry[0] != (uint16_t)NO_VAL) {
+		for (i=0; i<cluster_dims; i++){
+			if ((ba_request->geometry[i] < 1)
+			    || (ba_request->geometry[i] > DIM_SIZE[i])) {
+				error("new_ba_request Error, "
+				      "request geometry is invalid dim %d "
+				      "can't be %c, largest is %c",
+				      i,
+				      alpha_num[ba_request->geometry[i]],
+				      alpha_num[DIM_SIZE[i]]);
+				return 0;
+			}
+		}
+		ba_request->size = 1;
+		for (i=0; i<cluster_dims; i++)
+			ba_request->size *= ba_request->geometry[i];
+	}
+
+	/* Non-BGQ systems are handled one-dimensionally here: size
+	   simply becomes the first geometry dimension.
+	   NOTE(review): when a geometry WAS given above, geometry[0] is
+	   no longer NO_VAL, so such a request falls into the else branch
+	   and is rejected as "No size or geometry given" -- confirm this
+	   path is never reached with a geometry set on non-BGQ systems. */
+	if (!(cluster_flags & CLUSTER_FLAG_BGQ)) {
+		if (ba_request->size
+		    && (ba_request->geometry[0] == (uint16_t)NO_VAL)) {
+			ba_request->geometry[0] = ba_request->size;
+		} else {
+			error("new_ba_request: "
+			      "No size or geometry given");
+			return 0;
+		}
+		return 1;
+	}
+
+	/* Fall back to the system-wide passthrough policy when the
+	   request did not specify one. */
+	if (ba_request->deny_pass == (uint16_t)NO_VAL)
+		ba_request->deny_pass = ba_deny_pass;
+
+	/* deny_pass is a file-scope pointer consulted by the
+	   path-finding code while this request is being placed. */
+	deny_pass = &ba_request->deny_pass;
+	return 1;
+}
+
+/*
+ * Dump every field of a block request at debug log level.
+ * A NULL request is reported as an error and ignored.
+ */
+extern void print_ba_request(select_ba_request_t* ba_request)
+{
+	int dim;
+
+	if (!ba_request) {
+		error("print_ba_request Error, request is NULL");
+		return;
+	}
+	debug("  ba_request:");
+	debug("    geometry:\t");
+	for (dim = 0; dim < cluster_dims; dim++)
+		debug("%d", ba_request->geometry[dim]);
+	debug("   conn_type:\t");
+	for (dim = 0; dim < cluster_dims; dim++)
+		debug("%d", ba_request->conn_type[dim]);
+	debug("        size:\t%d", ba_request->size);
+	debug("      rotate:\t%d", ba_request->rotate);
+	debug("    elongate:\t%d", ba_request->elongate);
+}
+
+/*
+ * Translate an A/X/Y/Z coordinate into the matching midplane entry of
+ * the main grid.  Returns NULL (after logging) when any coordinate is
+ * outside the configured machine size.
+ */
+extern ba_mp_t *coord2ba_mp(const uint16_t *coord)
+{
+	bool out_of_range = (coord[A] >= DIM_SIZE[A])
+		|| (coord[X] >= DIM_SIZE[X])
+		|| (coord[Y] >= DIM_SIZE[Y])
+		|| (coord[Z] >= DIM_SIZE[Z]);
+
+	if (out_of_range) {
+		error("Invalid coordinate %d:%d:%d:%d",
+		      coord[A], coord[X], coord[Y], coord[Z]);
+		return NULL;
+	}
+	return &ba_main_grid[coord[A]][coord[X]][coord[Y]][coord[Z]];
+}
+
+
+/*
+ * Try to allocate a block.
+ *
+ * IN - ba_request: allocation request
+ * OUT - results: List of results of the allocation request.  Each
+ * list entry will be a coordinate.  allocate_block will create the
+ * list, but the caller must destroy it.
+ *
+ * return: success or error of request
+ */
+extern int allocate_block(select_ba_request_t* ba_request, List results)
+{
+	uint16_t start[cluster_dims];
+	char *name=NULL;
+	int i, dim, startx;
+	ba_geo_table_t *ba_geo_table;
+	bool found = false;
+
+	/* Lazily bring the allocator up so callers need not care about
+	   initialization order. */
+	if (!ba_initialized){
+		error("Error, configuration not initialized, "
+		      "calling ba_init(NULL, 1)");
+		ba_init(NULL, 1);
+	}
+
+	if (!ba_request){
+		error("allocate_block Error, request not initialized");
+		return 0;
+	}
+
+	if (!(cluster_flags & CLUSTER_FLAG_BG))
+		return 0;
+
+	memset(start, 0, sizeof(start));
+	/* startx only bounds the retry loop below; since start[X] is 0
+	   here, startx always ends up being DIM_SIZE[X]-1. */
+	startx = (start[X]-1);
+
+	if (startx == -1)
+		startx = DIM_SIZE[X]-1;
+	if (ba_request->start_req) {
+		/* Caller pinned the starting midplane; reject it when it
+		   lies outside the machine. */
+		for (dim = 0; dim < cluster_dims; dim++) {
+			if (ba_request->start[dim] >= DIM_SIZE[dim])
+				return 0;
+			start[dim] = ba_request->start[dim];
+		}
+	}
+
+	/* set up the geo_table */
+	if (ba_request->geometry[0] == (uint16_t)NO_VAL) {
+		/* No explicit geometry given: take the first candidate
+		   shape recorded for the requested midplane count. */
+		if (!(ba_request->geo_table =
+		      ba_main_geo_system->geo_table_ptr[ba_request->size])) {
+			error("allocate_block: "
+			      "No geometries for %d midplanes",
+			      ba_request->size);
+			return 0;
+		}
+		ba_geo_table = (ba_geo_table_t *)ba_request->geo_table;
+		if (!ba_geo_table || !ba_geo_table->geometry) {
+			error("allocate_block: no geo table");
+			return 0;
+		}
+
+		memcpy(ba_request->geometry, ba_geo_table->geometry,
+		       sizeof(ba_request->geometry));
+	} else
+		ba_request->geo_table = NULL;
+
+start_again:
+	/* i merely counts placement attempts for the current geometry;
+	   each failed attempt shifts the start coordinate (odometer
+	   style, below) and jumps back to new_mp. */
+	i = 0;
+	if (i == startx)
+		i = startx-1;
+	while (i != startx) {
+		i++;
+		if (ba_debug_flags & DEBUG_FLAG_BG_ALGO)
+			info("allocate_block: finding %c%c%c%c try %d",
+			     alpha_num[ba_request->geometry[A]],
+			     alpha_num[ba_request->geometry[X]],
+			     alpha_num[ba_request->geometry[Y]],
+			     alpha_num[ba_request->geometry[Z]],
+			     i);
+	new_mp:
+		if (ba_debug_flags & DEBUG_FLAG_BG_ALGO)
+			info("allocate_block: starting at %c%c%c%c",
+			     alpha_num[start[A]],
+			     alpha_num[start[X]],
+			     alpha_num[start[Y]],
+			     alpha_num[start[Z]]);
+
+		/* On success, hand ownership of the name string over to
+		   the request. */
+		if ((name = set_bg_block(results, start,
+					 ba_request->geometry,
+					 ba_request->conn_type))) {
+			ba_request->save_name = name;
+			name = NULL;
+			return 1;
+		}
+
+		/* If there was an error set_bg_block resets the
+		   results list */
+		/* if (results && list_count(results)) { */
+		/* 	bool is_small = 0; */
+		/* 	if (ba_request->conn_type[0] == SELECT_SMALL) */
+		/* 		is_small = 1; */
+		/* 	remove_block(results, is_small); */
+		/* 	list_flush(results); */
+		/* } */
+
+		/* A pinned start location gets exactly one attempt. */
+		if (ba_request->start_req) {
+			info("start asked for ");
+			goto requested_end;
+		}
+		//exit(0);
+		debug2("allocate_block: trying something else");
+
+		/* Odometer-advance start to the next midplane; found
+		   remains false once every coordinate has wrapped back
+		   to zero, i.e. the whole machine has been swept. */
+		found = false;
+		for (dim = cluster_dims-1; dim >= 0; dim--) {
+			start[dim]++;
+			if (start[dim] < DIM_SIZE[dim]) {
+				found = true;
+				break;
+			}
+			start[dim] = 0;
+		}
+		if (!found) {
+			if (ba_request->size == 1)
+				goto requested_end;
+			/* Whole machine swept for this shape: advance to
+			   the next candidate geometry, if any remain. */
+			if (!_check_for_options(ba_request))
+				return 0;
+			else {
+				memset(start, 0, sizeof(start));
+				goto start_again;
+			}
+		}
+		goto new_mp;
+	}
+
+requested_end:
+	debug2("allocate_block: can't allocate");
+
+	return 0;
+}
+
+
+/*
+ * Admin wants to remove a previous allocation.
+ * will allow Admin to delete a previous allocation retrieval by letter code.
+ *
+ * NOTE(review): the is_small argument is never referenced in this
+ * function body -- presumably kept for API compatibility with callers;
+ * verify before removing it.
+ */
+extern int remove_block(List mps, bool is_small)
+{
+	int dim;
+	ba_mp_t* curr_ba_mp = NULL;
+	ba_mp_t* ba_mp = NULL;
+	ListIterator itr;
+
+	itr = list_iterator_create(mps);
+	while ((curr_ba_mp = (ba_mp_t*) list_next(itr))) {
+		/* since the list that comes in might not be pointers
+		   to the main list we need to point to that main list */
+		ba_mp = &ba_main_grid
+			[curr_ba_mp->coord[A]]
+			[curr_ba_mp->coord[X]]
+			[curr_ba_mp->coord[Y]]
+			[curr_ba_mp->coord[Z]];
+		/* Drop the "used" bit; only clear the midplane from the
+		   global bitmap when no other use-flags remain set. */
+		if (curr_ba_mp->used) {
+			ba_mp->used &= (~BA_MP_USED_TRUE);
+			if (ba_mp->used == BA_MP_USED_FALSE)
+				bit_clear(ba_main_mp_bitmap, ba_mp->index);
+		}
+		ba_mp->used &= (~BA_MP_USED_ALTERED_PASS);
+
+		/* Small blocks don't use wires, and only have 1 mp,
+		   so just break. */
+		if (ba_debug_flags & DEBUG_FLAG_BG_ALGO)
+			info("remove_block: %s state now %d",
+			     ba_mp->coord_str, ba_mp->used);
+
+		/* Release the per-dimension switch wiring this block held. */
+		for (dim=0; dim<cluster_dims; dim++) {
+			if (curr_ba_mp == ba_mp) {
+				/* Remove the usage that was altered */
+				/* info("remove_block: %s(%d) %s removing %s", */
+				/*      ba_mp->coord_str, dim, */
+				/*      ba_switch_usage_str( */
+				/* 	     ba_mp->axis_switch[dim].usage), */
+				/*      ba_switch_usage_str( */
+				/* 	     ba_mp->alter_switch[dim].usage)); */
+				ba_mp->axis_switch[dim].usage &=
+					(~ba_mp->alter_switch[dim].usage);
+				/* info("remove_block: %s(%d) is now at %s", */
+				/*      ba_mp->coord_str, dim, */
+				/*      ba_switch_usage_str( */
+				/* 	     ba_mp->axis_switch[dim].usage)); */
+			} else if (curr_ba_mp->axis_switch[dim].usage
+				   != BG_SWITCH_NONE) {
+				if (ba_debug_flags & DEBUG_FLAG_BG_ALGO)
+					info("remove_block: 2 %s(%d) %s %s "
+					     "removing %s",
+					     ba_mp->coord_str, dim,
+					     curr_ba_mp->coord_str,
+					     ba_switch_usage_str(
+						     ba_mp->axis_switch
+						     [dim].usage),
+					     ba_switch_usage_str(
+						     curr_ba_mp->axis_switch
+						     [dim].usage));
+				/* Just remove the usage set here */
+				ba_mp->axis_switch[dim].usage &=
+					(~curr_ba_mp->axis_switch[dim].usage);
+				if (ba_debug_flags & DEBUG_FLAG_BG_ALGO)
+					info("remove_block: 2 %s(%d) is "
+					     "now at %s",
+					     ba_mp->coord_str, dim,
+					     ba_switch_usage_str(
+						     ba_mp->axis_switch[dim].
+						     usage));
+			}
+			//ba_mp->alter_switch[dim].usage = BG_SWITCH_NONE;
+		}
+	}
+	list_iterator_destroy(itr);
+
+	return 1;
+}
+
+/*
+ * Used to set a block into a virtual system.  The system can be
+ * cleared first and this function sets all the wires and midplanes
+ * used in the mplist given.  The mplist is a list of ba_mp_t's
+ * that are already set up.  This is very handly to test if there are
+ * any passthroughs used by one block when adding another block that
+ * also uses those wires, and neither use any overlapping
+ * midplanes. Doing a simple bitmap & will not reveal this.
+ *
+ * Returns SLURM_SUCCESS if mplist fits into system without
+ * conflict, and SLURM_ERROR if mplist conflicts with something
+ * already in the system.
+ */
+extern int check_and_set_mp_list(List mps)
+{
+	int rc = SLURM_ERROR;
+	int i;
+	ba_switch_t *ba_switch = NULL, *curr_ba_switch = NULL;
+	ba_mp_t *ba_mp = NULL, *curr_ba_mp = NULL;
+	ListIterator itr = NULL;
+
+	if (!mps)
+		return rc;
+
+	itr = list_iterator_create(mps);
+	while ((ba_mp = list_next(itr))) {
+		/* info("checking %c%c%c", */
+/* 		     ba_mp->coord[X],  */
+/* 		     ba_mp->coord[Y], */
+/* 		     ba_mp->coord[Z]); */
+
+		/* Map the (possibly copied) list entry back onto the
+		   authoritative midplane in the main grid. */
+		curr_ba_mp = &ba_main_grid
+			[ba_mp->coord[A]]
+			[ba_mp->coord[X]]
+			[ba_mp->coord[Y]]
+			[ba_mp->coord[Z]];
+
+		if (ba_mp->used && curr_ba_mp->used) {
+			/* Only error if the midplane isn't already
+			 * marked down or in a error state outside of
+			 * the bluegene block.
+			 */
+			uint16_t base_state, mp_flags;
+			base_state = curr_ba_mp->state & NODE_STATE_BASE;
+			mp_flags = curr_ba_mp->state & NODE_STATE_FLAGS;
+			if (!(mp_flags & (NODE_STATE_DRAIN | NODE_STATE_FAIL))
+			    && (base_state != NODE_STATE_DOWN)) {
+				if (ba_debug_flags & DEBUG_FLAG_BG_ALGO_DEEP)
+					info("check_and_set_mp_list: "
+					     "I have already been to "
+					     "this mp %s %s %d %d",
+					     ba_mp->coord_str,
+					     node_state_string(
+						     curr_ba_mp->state),
+					     ba_mp->used, curr_ba_mp->used);
+				rc = SLURM_ERROR;
+				goto end_it;
+			}
+		}
+
+		/* Commit the midplane usage into the live system. */
+		if (ba_mp->used) {
+			curr_ba_mp->used = ba_mp->used;
+			xassert(!bit_test(ba_main_mp_bitmap, ba_mp->index));
+			bit_set(ba_main_mp_bitmap, ba_mp->index);
+		}
+
+		if (ba_debug_flags & DEBUG_FLAG_BG_ALGO_DEEP)
+			info("check_and_set_mp_list: "
+			     "%s is used ?= %d %d",
+			     curr_ba_mp->coord_str,
+			     curr_ba_mp->used, ba_mp->used);
+		/* Merge the per-dimension switch usage, failing on any
+		   overlap with wiring already committed to the system. */
+		for(i=0; i<cluster_dims; i++) {
+			ba_switch = &ba_mp->axis_switch[i];
+			curr_ba_switch = &curr_ba_mp->axis_switch[i];
+			//info("checking dim %d", i);
+
+			if (ba_switch->usage == BG_SWITCH_NONE)
+				continue;
+
+			if (ba_switch->usage & curr_ba_switch->usage) {
+				if (ba_debug_flags & DEBUG_FLAG_BG_ALGO_DEEP)
+					info("check_and_set_mp_list: "
+					     "%s(%d) is already in "
+					     "use the way we want to use it.  "
+					     "%s already at %s",
+					     ba_mp->coord_str, i,
+					     ba_switch_usage_str(
+						     ba_switch->usage),
+					     ba_switch_usage_str(
+						     curr_ba_switch->usage));
+				rc = SLURM_ERROR;
+				goto end_it;
+			}
+
+			if (ba_debug_flags & DEBUG_FLAG_BG_ALGO_DEEP)
+				info("check_and_set_mp_list: "
+				     "setting %s(%d) to from %s to %s",
+				     ba_mp->coord_str, i,
+				     ba_switch_usage_str(curr_ba_switch->usage),
+				     ba_switch_usage_str(curr_ba_switch->usage
+							 | ba_switch->usage));
+			curr_ba_switch->usage |= ba_switch->usage;
+		}
+	}
+	rc = SLURM_SUCCESS;
+end_it:
+	list_iterator_destroy(itr);
+	return rc;
+}
+
+/*
+ * Used to find, and set up midplanes and the wires in the virtual
+ * system and return them in List results
+ *
+ * IN/OUT results - a list with a NULL destroyer filled in with
+ *        midplanes and wires set to create the block with the api. If
+ *        only interested in the hostlist NULL can be excepted also.
+ * IN start - where to start the allocation.
+ * IN geometry - the requested geometry of the block.
+ * IN conn_type - mesh, torus, or small.
+ *
+ * RET char * - hostlist of midplanes results represent must be
+ *     xfreed.  NULL on failure
+ */
+extern char *set_bg_block(List results, uint16_t *start,
+			  uint16_t *geometry, uint16_t *conn_type)
+{
+	List main_mps = NULL;
+	char *name = NULL;
+	ba_mp_t* ba_mp = NULL;
+	ba_mp_t *check_mp[cluster_dims];
+	int size = 1, dim;
+	int block_start[cluster_dims];
+	int block_end[cluster_dims];
+	int coords[cluster_dims];
+	uint16_t local_deny_pass = ba_deny_pass;
+
+	/* Lazily bring the allocator up so callers need not care about
+	   initialization order. */
+	if (!ba_initialized){
+		error("Error, configuration not initialized, "
+		      "calling ba_init(NULL, 1)");
+		ba_init(NULL, 1);
+	}
+
+	if (cluster_dims == 1) {
+		/* 1-D system: size comes from geometry[X] and the start
+		   midplane is addressed along A only. */
+		if (start[A] >= DIM_SIZE[A])
+			return NULL;
+		size = geometry[X];
+		ba_mp = &ba_main_grid[start[A]][0][0][0];
+	} else {
+		/* Validate start and geometry per dimension; size is the
+		   product of the geometry. */
+		for (dim=0; dim<cluster_dims; dim++) {
+			if (start[dim] >= DIM_SIZE[dim])
+				return NULL;
+			if (geometry[dim] <= 0) {
+				error("problem with geometry of %c in dim %d, "
+				      "needs to be at least 1",
+				      alpha_num[geometry[dim]], dim);
+				return NULL;
+			}
+			size *= geometry[dim];
+		}
+
+		ba_mp = &ba_main_grid[start[A]][start[X]][start[Y]][start[Z]];
+		/* info("looking at %s", ba_mp->coord_str); */
+	}
+
+	if (!ba_mp)
+		goto end_it;
+
+	if (ba_debug_flags & DEBUG_FLAG_BG_ALGO_DEEP)
+		info("trying mp %s %c%c%c%c %d",
+		     ba_mp->coord_str,
+		     alpha_num[geometry[A]],
+		     alpha_num[geometry[X]],
+		     alpha_num[geometry[Y]],
+		     alpha_num[geometry[Z]],
+		     conn_type[A]);
+
+	/* check just the first dim to see if this node is used for
+	   anything just yet. */
+	if (_mp_used(ba_mp, 0))
+		goto end_it;
+
+	if (conn_type[A] >= SELECT_SMALL) {
+		/* adding the ba_mp and end, we could go through the
+		 * regular logic here, but this is just faster. */
+		if (results) {
+			ba_mp = ba_copy_mp(ba_mp);
+			/* We need to have this node wrapped in Q to handle
+			   wires correctly when creating around the midplane.
+			*/
+			ba_setup_mp(ba_mp, false, true);
+			ba_mp->used = BA_MP_USED_TRUE;
+			list_append(results, ba_mp);
+		}
+		name = xstrdup(ba_mp->coord_str);
+		goto end_it;
+	}
+
+	/* main_mps tracks every midplane we alter so a failure can be
+	   rolled back in end_it below. */
+	main_mps = list_create(NULL);
+
+	ba_mp->used |= BA_MP_USED_ALTERED;
+	list_append(main_mps, ba_mp);
+
+	if (!deny_pass)
+		deny_pass = &local_deny_pass;
+
+	/* set the end to the start and the _find_path will increase each dim.*/
+	for (dim=0; dim<cluster_dims; dim++) {
+		block_start[dim] = start[dim];
+		block_end[dim] = start[dim];
+		if (!_find_path(main_mps, ba_mp, dim, geometry[dim],
+				conn_type[dim], &block_start[dim],
+				&block_end[dim])) {
+			goto end_it;
+		}
+	}
+
+	if (ba_debug_flags & DEBUG_FLAG_BG_ALGO)
+		info("complete box is %c%c%c%c x %c%c%c%c",
+		     alpha_num[block_start[A]],
+		     alpha_num[block_start[X]],
+		     alpha_num[block_start[Y]],
+		     alpha_num[block_start[Z]],
+		     alpha_num[block_end[A]],
+		     alpha_num[block_end[X]],
+		     alpha_num[block_end[Y]],
+		     alpha_num[block_end[Z]]);
+
+	/* Fill in the interior of the box spanned by the paths above. */
+	if (_fill_in_coords(main_mps, A, ba_mp, check_mp,
+			    block_start, block_end, coords) == -1)
+		goto end_it;
+
+	/* Close any torus connections through passthrough midplanes. */
+	if (_finish_torus(main_mps, A, block_start,
+			  block_end, conn_type, coords) == -1)
+		goto end_it;
+
+	/* Success */
+	if (results)
+		name = _copy_from_main(main_mps, results);
+	else
+		name = _reset_altered_mps(main_mps, 1);
+
+end_it:
+
+	if (main_mps) {
+		/* handle failure */
+		if (!name)
+			_reset_altered_mps(main_mps, 0);
+		list_destroy(main_mps);
+		main_mps = NULL;
+	}
+
+	if (name)
+		debug2("name = %s", name);
+	else
+		debug2("can't allocate");
+
+	/* Restore the global only if we pointed it at our local. */
+	if (deny_pass == &local_deny_pass)
+		deny_pass = NULL;
+
+	return name;
+}
+
+/*
+ * Apply one step of the geometry-rotation sequence used when trying
+ * alternate orientations of a block.  rot_cnt selects which dimension
+ * pair(s) of the 4-D geometry to swap in place; calling with
+ * rot_cnt = 0, 1, 2, ... steps through permutations of ABCD (the
+ * per-case comments show the permutation before -> after each step).
+ * Values of rot_cnt with no matching case leave req_geo unchanged.
+ *
+ * IN/OUT req_geo - geometry to permute in place
+ * IN rot_cnt - index of the rotation step to apply
+ */
+extern void ba_rotate_geo(uint16_t *req_geo, int rot_cnt)
+{
+	uint16_t tmp;
+
+	switch (rot_cnt) {
+	case 0:		/* ABCD -> ABDC */
+	case 3:		/* DABC -> DACB */
+	case 6:		/* CDAB -> CDBA */
+	case 9:		/* CADB -> CABD */
+	case 14:	/* DBAC -> DBCA */
+	case 17:	/* ACBD -> ACDB */
+	case 20:	/* BDCA -> BCDA */
+	case 21:	/* BCDA -> BCAD */
+		SWAP(req_geo[Y], req_geo[Z], tmp);
+		break;
+	case 1:		/* ABDC -> ADBC */
+	case 4:		/* DACB -> DCAB */
+	case 7:		/* CDBA -> CBDA */
+	case 10:	/* CABD -> CBAD */
+	case 12:	/* BADC -> BDAC */
+	case 15:	/* DBCA -> DCBA */
+	case 18:	/* ACDB -> ADCB */
+	case 22:	/* BCAD -> BACD */
+		SWAP(req_geo[X], req_geo[Y], tmp);
+		break;
+	case 2:		/* ADBC -> DABC */
+	case 5:		/* DCAB -> CDAB */
+	case 13:	/* BDAC -> DBAC */
+	case 23:	/* BACD -> ABCD */
+		SWAP(req_geo[A], req_geo[X], tmp);
+		break;
+	case 16:	/* DCBA -> ACBD */
+	case 19:	/* ADCB -> BDCA */
+		SWAP(req_geo[A], req_geo[Z], tmp);
+		break;
+	case 8:		/* CBDA -> CADB */
+		SWAP(req_geo[X], req_geo[Z], tmp);
+		break;
+	case 11:	/* CBAD -> BCAD -> BACD -> BADC */
+		/* this step needs three swaps to reach the next
+		   permutation in the sequence */
+		SWAP(req_geo[A], req_geo[X], tmp);
+		SWAP(req_geo[X], req_geo[Y], tmp);
+		SWAP(req_geo[Y], req_geo[Z], tmp);
+		break;
+
+	}
+
+}
+
+/*
+ * Pick a midplane inside bg_record with enough free cnodes to hold a
+ * sub-block job of *node_count cnodes.  *node_count may be increased
+ * when no geometry exists for the requested count.  On success
+ * jobinfo->units_used, geometry, start_loc and ionode_str describe
+ * the chosen placement.
+ *
+ * IN bg_record - block to search for free cnodes
+ * IN/OUT node_count - requested cnode count; may be bumped up to the
+ *        next count that has a usable geometry
+ * IN/OUT jobinfo - select-plugin job info to fill in
+ * RET the midplane used, or NULL if nothing fit
+ */
+extern ba_mp_t *ba_pick_sub_block_cnodes(
+	bg_record_t *bg_record, uint32_t *node_count, select_jobinfo_t *jobinfo)
+{
+	ListIterator itr = NULL;
+	ba_mp_t *ba_mp = NULL;
+	ba_geo_table_t *geo_table = NULL;
+	char *tmp_char = NULL;
+	uint32_t orig_node_count = *node_count;
+	int dim;
+	uint32_t max_clear_cnt = 0, clear_cnt;
+
+	xassert(ba_mp_geo_system);
+	xassert(bg_record->ba_mp_list);
+	xassert(jobinfo);
+	xassert(!jobinfo->units_used);
+
+	jobinfo->dim_cnt = ba_mp_geo_system->dim_count;
+
+try_again:
+	/* Walk up from the requested count until some geometry exists
+	   for it; give up once past the size of the whole block. */
+	while (!(geo_table = ba_mp_geo_system->geo_table_ptr[*node_count])) {
+		debug2("ba_pick_sub_block_cnodes: No geometries of size %u ",
+		       *node_count);
+		(*node_count)++;
+		if (*node_count > bg_record->cnode_cnt)
+			break;
+	}
+	if (*node_count > bg_record->cnode_cnt) {
+		debug("ba_pick_sub_block_cnodes: requested sub-block larger "
+		      "than block");
+		return NULL;
+	}
+
+	if (orig_node_count != *node_count)
+		debug("ba_pick_sub_block_cnodes: user requested %u nodes, "
+		      "but that can't make a block, giving them %d",
+		      orig_node_count, *node_count);
+
+	if (!geo_table) {
+		/* This should never happen */
+		error("ba_pick_sub_block_cnodes: "
+		      "Couldn't place this job size %u tried up to "
+		      "the full size of the block (%u)",
+		      orig_node_count, bg_record->cnode_cnt);
+		return NULL;
+	}
+
+	itr = list_iterator_create(bg_record->ba_mp_list);
+	while ((ba_mp = list_next(itr))) {
+		int cnt = 0;
+
+		if (!ba_mp->used)
+			continue;
+
+		/* Create the bitmap if it doesn't exist.  Since this
+		 * is a copy of the original and the cnode_bitmap is
+		 * only used for sub-block jobs we only create it
+		 * when needed. */
+		if (!ba_mp->cnode_bitmap)
+			ba_mp->cnode_bitmap =
+				ba_create_ba_mp_cnode_bitmap(bg_record);
+		clear_cnt = bit_clear_count(ba_mp->cnode_bitmap);
+		if (clear_cnt < *node_count) {
+			if (ba_debug_flags & DEBUG_FLAG_BG_ALGO_DEEP)
+				info("ba_pick_sub_block_cnodes: "
+				     "only have %d avail in %s need %d",
+				     clear_cnt,
+				     ba_mp->coord_str, *node_count);
+			continue;
+		}
+
+		/* Try each candidate geometry on this midplane until one
+		   fits into the free cnodes. */
+		while (geo_table) {
+			int scan_offset = 0;
+			uint16_t start_loc[ba_mp_geo_system->dim_count];
+
+			/* FIXME: In the current IBM API it doesn't
+			   allow wrapping inside the midplane.  In the
+			   future this will change.  When that happens
+			   there will need to be a flag that is sent
+			   here instead of always true.
+			*/
+			if (ba_geo_test_all(ba_mp->cnode_bitmap,
+					    &jobinfo->units_used,
+					    geo_table, &cnt,
+					    ba_mp_geo_system, NULL,
+					    start_loc, &scan_offset, true)
+			    != SLURM_SUCCESS) {
+				geo_table = geo_table->next_ptr;
+				continue;
+			}
+
+			if (ba_debug_flags & DEBUG_FLAG_BG_ALGO_DEEP) {
+				info("scan_offset=%d", scan_offset);
+				for (dim = 0;
+				     dim < ba_mp_geo_system->dim_count;
+				     dim++) {
+					info("start_loc[%d]=%u geometry[%d]=%u",
+					     dim, start_loc[dim], dim,
+					     geo_table->geometry[dim]);
+				}
+			}
+
+			/* Commit the placement: mark the cnodes used and
+			   record the placement details in jobinfo. */
+			bit_or(ba_mp->cnode_bitmap, jobinfo->units_used);
+			jobinfo->ionode_str = ba_node_map_ranged_hostlist(
+				jobinfo->units_used, ba_mp_geo_system);
+			if (ba_debug_flags & DEBUG_FLAG_BG_ALGO_DEEP) {
+				bit_not(ba_mp->cnode_bitmap);
+				tmp_char = ba_node_map_ranged_hostlist(
+					ba_mp->cnode_bitmap, ba_mp_geo_system);
+				bit_not(ba_mp->cnode_bitmap);
+				info("ba_pick_sub_block_cnodes: "
+				     "using cnodes %s on mp %s "
+				     "leaving '%s' usable in this block (%s)",
+				     jobinfo->ionode_str,
+				     ba_mp->coord_str, tmp_char,
+				     bg_record->bg_block_id);
+				xfree(tmp_char);
+			}
+			for (dim = 0; dim < jobinfo->dim_cnt; dim++) {
+				jobinfo->geometry[dim] =
+					geo_table->geometry[dim];
+				jobinfo->start_loc[dim] = start_loc[dim];
+			}
+			break;
+		}
+
+		/* geo_table still set means the inner loop placed the
+		   job on this midplane. */
+		if (geo_table)
+			break;
+
+		/* User asked for a bad CPU count or we can't place it
+		   here in this small allocation. */
+		if (jobinfo->cnode_cnt < bg_conf->mp_cnode_cnt) {
+			list_iterator_destroy(itr);
+			if (ba_debug_flags & DEBUG_FLAG_BG_ALGO_DEEP)
+				info("We couldn't place a sub block of %d",
+				     *node_count);
+			(*node_count)++;
+			goto try_again;
+		}
+
+		/* Grab the most empty midplane to be used later if we
+		   can't find a spot.
+		*/
+		/* NOTE(review): only the free-cnode count is remembered
+		   here; it is consulted after the loop to decide whether
+		   retrying with a larger size could help. */
+		if (max_clear_cnt < clear_cnt) {
+			max_clear_cnt = clear_cnt;
+		}
+
+		if (ba_debug_flags & DEBUG_FLAG_BG_ALGO_DEEP)
+			info("couldn't place it on %s", ba_mp->coord_str);
+		geo_table = ba_mp_geo_system->geo_table_ptr[*node_count];
+	}
+	list_iterator_destroy(itr);
+
+	/* This is to vet we have a good geo on this request.  So if a
+	   person asks for 12 and the only reason they can't get it is
+	   because they can't get that geo and if they would of asked
+	   for 16 then they could run we do that for them.
+	*/
+	if (!ba_mp && (max_clear_cnt > (*node_count)+1)) {
+		if (ba_debug_flags & DEBUG_FLAG_BG_ALGO_DEEP)
+			info("trying with a larger size");
+		(*node_count)++;
+		goto try_again;
+	}
+
+	return ba_mp;
+}
+
+/*
+ * Release the cnodes a finished job step was using on its midplane,
+ * making them available to future sub-block jobs in this block.
+ *
+ * IN bg_record - block the step ran in
+ * IN step_ptr  - completed step whose cnodes should be cleared
+ * RET SLURM_SUCCESS, or SLURM_ERROR when the step's node bitmap has
+ *     no bits set
+ */
+extern int ba_clear_sub_block_cnodes(
+	bg_record_t *bg_record, struct step_record *step_ptr)
+{
+	bitoff_t bit;
+	ListIterator itr = NULL;
+	ba_mp_t *ba_mp = NULL;
+	select_jobinfo_t *jobinfo = NULL;
+	char *tmp_char = NULL, *tmp_char2 = NULL;
+
+	xassert(bg_record);
+	xassert(step_ptr);
+
+	jobinfo = step_ptr->select_jobinfo->data;
+	xassert(jobinfo);
+
+	/* If we are using the entire block and the block is larger
+	 * than 1 midplane we don't need to do anything. */
+	if ((jobinfo->cnode_cnt == bg_record->cnode_cnt)
+	    && (bg_record->mp_count != 1))
+		return SLURM_SUCCESS;
+
+	if ((bit = bit_ffs(step_ptr->step_node_bitmap)) == -1) {
+		error("ba_clear_sub_block_cnodes: "
+		      "we couldn't find any bits set");
+		return SLURM_ERROR;
+	}
+
+	itr = list_iterator_create(bg_record->ba_mp_list);
+	while ((ba_mp = list_next(itr))) {
+		if (ba_mp->index != bit)
+			continue;
+		if (!jobinfo->units_used) {
+			/* from older version of slurm */
+			error("ba_clear_sub_block_cnodes: "
+			      "didn't have the units_used bitmap "
+			      "for some reason?");
+			continue;
+		}
+
+		/* Clear the step's cnodes from the midplane's in-use map.
+		 * units_used is inverted only for the bit_and() and is
+		 * restored immediately afterwards -- previously it was
+		 * only restored when the debug flag happened to be set,
+		 * leaving the bitmap inverted otherwise. */
+		bit_not(jobinfo->units_used);
+		bit_and(ba_mp->cnode_bitmap, jobinfo->units_used);
+		bit_not(jobinfo->units_used);
+
+		if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_ALGO_DEEP) {
+			tmp_char = ba_node_map_ranged_hostlist(
+				jobinfo->units_used, ba_mp_geo_system);
+			/* invert briefly so the hostlist shows the
+			 * cnodes that are now free */
+			bit_not(ba_mp->cnode_bitmap);
+			tmp_char2 = ba_node_map_ranged_hostlist(
+				ba_mp->cnode_bitmap, ba_mp_geo_system);
+			bit_not(ba_mp->cnode_bitmap);
+			info("ba_clear_sub_block_cnodes: "
+			     "cleared cnodes %s on mp %s, making '%s' usable "
+			     "in this block (%s)",
+			     tmp_char, ba_mp->coord_str, tmp_char2,
+			     bg_record->bg_block_id);
+			xfree(tmp_char);
+			xfree(tmp_char2);
+		}
+	}
+	list_iterator_destroy(itr);
+
+	return SLURM_SUCCESS;
+}
+
+/*
+ * Build the per-midplane cnode bitmap for bg_record: first mark the
+ * cnodes of every nodecard covered by the block's ionode bitmap, then
+ * invert so that SET bits mean "not usable by this block".
+ *
+ * IN bg_record - block whose ionode_bitmap describes the usable part
+ * RET newly allocated bitmap (all clear when the whole midplane is
+ *     usable); caller must free it
+ */
+extern bitstr_t *ba_create_ba_mp_cnode_bitmap(bg_record_t *bg_record)
+{
+	int start, end, ionode_num;
+	char *tmp_char = NULL, *tmp_char2 = NULL;
+	/* Read the debug flag once: the original read it before and
+	 * after the bit_not(), so a flag change in between could use
+	 * tmp_char uninitialized or leak it. */
+	bool log_deep = (ba_debug_flags & DEBUG_FLAG_BG_ALGO_DEEP);
+	bitstr_t *cnode_bitmap = bit_alloc(bg_conf->mp_cnode_cnt);
+
+	/* No ionodes selected: nothing usable, return the empty map
+	 * (still all clear, i.e. everything "usable" pre-inversion). */
+	if (!bg_record->ionode_bitmap
+	    || ((start = bit_ffs(bg_record->ionode_bitmap)) == -1))
+		return cnode_bitmap;
+
+	end = bit_fls(bg_record->ionode_bitmap);
+	for (ionode_num = start; ionode_num <= end; ionode_num++) {
+		int nc_num, nc_start, nc_end;
+		if (!bit_test(bg_record->ionode_bitmap, ionode_num))
+			continue;
+
+		/* Each ionode maps to nc_ratio nodecards; mark the cnode
+		 * range of every one of them. */
+		nc_start = ionode_num * (int)bg_conf->nc_ratio;
+		nc_end = nc_start + (int)bg_conf->nc_ratio;
+		for (nc_num = nc_start; nc_num < nc_end; nc_num++)
+			ba_node_map_set_range(cnode_bitmap,
+					      g_nc_coords[nc_num].start,
+					      g_nc_coords[nc_num].end,
+					      ba_mp_geo_system);
+	}
+
+	/* Capture the usable set before inverting, for the log below. */
+	if (log_deep)
+		tmp_char = ba_node_map_ranged_hostlist(cnode_bitmap,
+						       ba_mp_geo_system);
+
+	/* Flip the sense: set bits now mean unusable cnodes. */
+	bit_not(cnode_bitmap);
+
+	if (log_deep) {
+		tmp_char2 = ba_node_map_ranged_hostlist(cnode_bitmap,
+							ba_mp_geo_system);
+		info("ba_create_ba_mp_cnode_bitmap: can only use %s cnodes of "
+		     "this midplane leaving %s unusable", tmp_char, tmp_char2);
+		xfree(tmp_char);
+		xfree(tmp_char2);
+	}
+
+	return cnode_bitmap;
+}
+
+
+/*
+ * Recursively walk the 5-D coordinate box [start_offset, end_offset]
+ * and push one 5-character host name per coordinate onto hl.
+ *
+ * IN level - current recursion depth (call with 0 at the top)
+ * IN/OUT coords - scratch array holding the coordinate being built
+ * IN start_offset / end_offset - inclusive bounds per dimension
+ * IN/OUT hl - hostlist receiving the generated names
+ * RET 1 on success, -1 if level is out of range
+ */
+static int _ba_set_ionode_str_internal(int level, int *coords,
+				       int *start_offset, int *end_offset,
+				       hostlist_t hl)
+{
+	char name[6];
+
+	xassert(hl);
+
+	if (level > 5)
+		return -1;
+
+	if (level == 5) {
+		/* Innermost level: all five coordinates are fixed now,
+		   so emit the host name. */
+		snprintf(name, sizeof(name), "%c%c%c%c%c",
+			 alpha_num[coords[0]],
+			 alpha_num[coords[1]],
+			 alpha_num[coords[2]],
+			 alpha_num[coords[3]],
+			 alpha_num[coords[4]]);
+		hostlist_push_host_dims(hl, name, 5);
+		return 1;
+	}
+
+	/* Outer level: iterate this dimension and recurse inward. */
+	for (coords[level] = start_offset[level];
+	     coords[level] <= end_offset[level];
+	     coords[level]++) {
+		if (_ba_set_ionode_str_internal(level + 1, coords,
+						start_offset, end_offset,
+						hl) == -1)
+			return -1;
+	}
+	return 1;
+}
+
+/*
+ * Render an ionode bitmap as a ranged hostlist string of 5-D cnode
+ * names (using the g_nc_coords nodecard boundaries).
+ *
+ * IN ionode_bitmap - ionodes to render; may be NULL
+ * RET xmalloc'd string, or NULL when the bitmap is NULL, empty, or
+ *     the coordinate walk fails; caller must xfree
+ */
+extern char *ba_set_ionode_str(bitstr_t *ionode_bitmap)
+{
+	/* bit_fmt(bitstring, BITSIZE, ionode_bitmap); */
+	/* return xstrdup(bitstring); */
+	char *ionode_str = NULL;
+	int first, last, ionode_num;
+	hostlist_t hl;
+	int coords[5];
+
+	if (!ionode_bitmap)
+		return NULL;
+
+	/* Guard the empty bitmap: previously bit_ffs() == -1 still ran
+	 * one loop iteration and called bit_test(bitmap, -1). */
+	if ((first = bit_ffs(ionode_bitmap)) == -1)
+		return NULL;
+	/* hoisted out of the loop condition: the bitmap is not
+	 * modified while we iterate */
+	last = bit_fls(ionode_bitmap);
+
+	hl = hostlist_create_dims("", 5);
+
+	for (ionode_num = first; ionode_num <= last; ionode_num++) {
+		int nc_num, nc_start, nc_end;
+		if (!bit_test(ionode_bitmap, ionode_num))
+			continue;
+
+		/* Each ionode covers nc_ratio nodecards worth of cnodes. */
+		nc_start = ionode_num * (int)bg_conf->nc_ratio;
+		nc_end = nc_start + (int)bg_conf->nc_ratio;
+
+		for (nc_num = nc_start; nc_num < nc_end; nc_num++) {
+			if (_ba_set_ionode_str_internal(
+				    0, coords,
+				    g_nc_coords[nc_num].start,
+				    g_nc_coords[nc_num].end,
+				    hl)
+			    == -1) {
+				hostlist_destroy(hl);
+				hl = NULL;
+				break;
+			}
+		}
+		/* Stop the outer loop too: previously iteration went on
+		 * and handed the NULL hostlist back into the walker. */
+		if (!hl)
+			break;
+	}
+	if (hl) {
+		ionode_str = hostlist_ranged_string_xmalloc_dims(
+			hl, 5, 0);
+		//info("iostring is %s", ionode_str);
+		hostlist_destroy(hl);
+		hl = NULL;
+	}
+	return ionode_str;
+}
+
+/*
+ * Advance the request to its next candidate geometry (the next entry
+ * in the geo table chain) when rotation/elongation options allow it.
+ * Returns 1 when a new geometry was installed into ba_request,
+ * 0 when the chain is exhausted (or was never set).
+ */
+static int _check_for_options(select_ba_request_t* ba_request)
+{
+	ba_geo_table_t *next_table;
+
+	if (!ba_request->geo_table)
+		return 0;
+
+	/* Step to the next permutation in the chain. */
+	next_table = ((ba_geo_table_t *)ba_request->geo_table)->next_ptr;
+	ba_request->geo_table = next_table;
+	if (!next_table)
+		return 0;
+
+	/* NOTE(review): sizeof(next_table->geometry) is used as the copy
+	   length, matching the original code -- confirm it equals the
+	   size of ba_request->geometry (allocate_block() copies with
+	   sizeof(ba_request->geometry) instead). */
+	memcpy(ba_request->geometry, next_table->geometry,
+	       sizeof(next_table->geometry));
+	/* info("now trying %c%c%c%c", */
+	/*      alpha_num[ba_request->geometry[A]], */
+	/*      alpha_num[ba_request->geometry[X]], */
+	/*      alpha_num[ba_request->geometry[Y]], */
+	/*      alpha_num[ba_request->geometry[Z]]); */
+	return 1;
+}
+
+/*
+ * Fill in the paths and extra midplanes we need for the block.
+ * Basically copy the starting coords sent in starting at block_start
+ * ending with block_end in every midplane for the block.  This
+ * function does not finish torus' (use _finish_torus for that).
+ *
+ * IN/OUT results - total list of midplanes after this function
+ *        returns successfully.
+ * IN level - which dimension we are on.  Since this is a recursive
+ *        function calls to this function should always be 'A' when
+ *        starting.
+ * IN start_mp - starting location of the block, should be the ba_mp
+ *        from the block_start.
+ * IN block_start - starting point of the block.
+ * IN block_end - ending point of the block.
+ * IN coords - Where we are recursively. So this should just be an
+ *        uninitialized int [SYSTEM_DIMENSIONS]
+ *
+ * RET: -1 on failure 1 on success
+ */
+static int _fill_in_coords(List results, int level, ba_mp_t *start_mp,
+			   ba_mp_t **check_mp, int *block_start,
+			   int *block_end, int *coords)
+{
+	int dim;
+	int count_outside = 0;
+	uint16_t used = 0;
+	ba_mp_t *curr_mp;
+
+	if (level > cluster_dims)
+		return -1;
+
+	if (level < cluster_dims) {
+		check_mp[level] = start_mp;
+		coords[level] = start_mp->coord[level];
+		do {
+			/* handle the outter dims here */
+			if (_fill_in_coords(
+				    results, level+1, start_mp,
+				    check_mp, block_start, block_end,
+				    coords) == -1)
+				return -1;
+			if (check_mp[level]->alter_switch[level].usage
+			    & BG_SWITCH_OUT_PASS)
+				check_mp[level] =
+					check_mp[level]->next_mp[level];
+			else {
+				if (ba_debug_flags & DEBUG_FLAG_BG_ALGO_DEEP)
+					info("mp %s(%d) isn't connected "
+					     "anymore, we found the end.",
+					     check_mp[level]->coord_str, level);
+				return 0;
+			}
+			if (coords[level] < (DIM_SIZE[level]-1))
+				coords[level]++;
+			else
+				coords[level] = 0;
+		} while (coords[level] != start_mp->coord[level]);
+		return 1;
+	}
+
+	curr_mp = &ba_main_grid[coords[A]][coords[X]][coords[Y]][coords[Z]];
+
+	/* info("looking at %s", curr_mp->coord_str); */
+	for (dim=0; dim<cluster_dims; dim++) {
+		/* If this is only used for passthrough, skip since
+		   the _finish_torus code will catch things there.
+		*/
+		if (check_mp[dim]->used & BA_MP_USED_PASS_BIT) {
+			used = check_mp[dim]->used;
+			break;
+		}
+
+		/* info("inside at %s %d %d %d", check_mp[dim]->coord_str, */
+		/*      dim, check_mp[dim]->used, used); */
+
+		/* If this midplane lies outside the block's bounds in
+		   more than one dimension, it is past anything we
+		   could need for passthrough, so break.
+		*/
+
+		/* info("passthrough %d used %d %d %d %d", dim, used, */
+		/*      curr_mp->coord[dim], block_start[dim], */
+		/*      block_end[dim]); */
+		if ((curr_mp->coord[dim] < block_start[dim])
+		    || (curr_mp->coord[dim] > block_end[dim])) {
+			count_outside++;
+			/* info("yes under %d", count_outside); */
+			if (count_outside > 1)
+				break;
+		}
+	}
+
+	/* info("got used of %d %d", used, count_outside); */
+	if (dim < cluster_dims) {
+		if (ba_debug_flags & DEBUG_FLAG_BG_ALGO_DEEP)
+			info("skipping non-used %s if needed for "
+			     "passthrough it should be handled in "
+			     "_finish_torus",
+			     curr_mp->coord_str);
+		return 1;
+	}
+
+	for (dim=0; dim<cluster_dims; dim++) {
+		int rc;
+
+		/* If we are passing though skip all except the
+		   actual passthrough dim.
+		*/
+		if ((used & BA_MP_USED_PASS_BIT)
+		    && (check_mp[dim]->used != used)) {
+			if (ba_debug_flags & DEBUG_FLAG_BG_ALGO_DEEP)
+				info("skipping here %s(%d)",
+				     curr_mp->coord_str, dim);
+			continue;
+		}
+
+		/* ba_mp_t *orig_mp = check_mp[dim]; */
+		/* ba_mp_t *ba_mp = curr_mp; */
+		/* info("looking to put " */
+		/*      "mp %s(%d) %s onto mp %s(%d) %s", */
+		/*      orig_mp->coord_str, dim, */
+		/*      ba_switch_usage_str(orig_mp->alter_switch[dim].usage),*/
+		/*      ba_mp->coord_str, dim, */
+		/*      ba_switch_usage_str(ba_mp->alter_switch[dim].usage)); */
+
+		/* if 1 is returned we haven't visited this mp yet,
+		   and need to add it to the list
+		*/
+		if ((rc = _copy_ba_switch(curr_mp, check_mp[dim], dim)) == -1)
+			return rc;
+		else if (rc == 1)
+			list_append(results, curr_mp);
+	}
+	return 1;
+}
+
+/*
+ * Finish wiring a block together given start and end points.  All
+ * used nodes should be marked inside those points before this
+ * function is called.
+ *
+ * IN/OUT results - total list of midplanes after this function
+ *        returns successfully.
+ * IN level - which dimension we are on.  Since this is a recursive
+ *        function, the level should always be 'A' when it is first
+ *        called.
+ * IN block_start - starting point of the block.
+ * IN block_end - ending point of the block.
+ * IN conn_type - Mesh or Torus for each Dim.
+ * IN coords - Where we are recursively. So this should just be an
+ *        uninitialized int [SYSTEM_DIMENSIONS]
+ *
+ * RET: -1 on failure 1 on success
+ */
+static int _finish_torus(List results, int level, int *block_start,
+			 int *block_end, uint16_t *conn_type, int *coords)
+{
+	int dim;
+	ba_mp_t *curr_mp, *start_mp;
+
+	if (level > cluster_dims)
+		return -1;
+
+	if (level < cluster_dims) {
+		for (coords[level] = block_start[level];
+		     coords[level] <= block_end[level];
+		     coords[level]++) {
+			/* handle the outer dims here */
+			if (_finish_torus(
+				    results, level+1,
+				    block_start, block_end,
+				    conn_type, coords) == -1)
+				return -1;
+		}
+		return 1;
+	}
+
+	curr_mp = &ba_main_grid[coords[A]][coords[X]][coords[Y]][coords[Z]];
+	if (!(curr_mp->used & BA_MP_USED_ALTERED)) {
+		if (ba_debug_flags & DEBUG_FLAG_BG_ALGO_DEEP)
+			info("_finish_torus: skipping non-used %s",
+			     curr_mp->coord_str);
+		return 1;
+	}
+	start_mp = curr_mp;
+
+	/* info("_finish_torus: starting with %s", */
+	/*      curr_mp->coord_str); */
+
+	for (dim=0; dim<cluster_dims; dim++) {
+		if (conn_type[dim] != SELECT_TORUS)
+			continue;
+		if (!(start_mp->alter_switch[dim].usage & BG_SWITCH_OUT_PASS)) {
+			if (ba_debug_flags & DEBUG_FLAG_BG_ALGO_DEEP)
+				info("finish_torus: mp %s(%d) already "
+				     "terminated %s",
+				     curr_mp->coord_str, dim,
+				     ba_switch_usage_str(
+					     start_mp->alter_switch->usage));
+			continue;
+		}
+
+		curr_mp = start_mp->next_mp[dim];
+		while (curr_mp != start_mp) {
+			if (ba_debug_flags & DEBUG_FLAG_BG_ALGO_DEEP)
+				info("_finish_torus: looking at %s(%d)",
+				     curr_mp->coord_str, dim);
+			if (!(curr_mp->used & BA_MP_USED_ALTERED)) {
+				ba_switch_t *axis_switch =
+					&curr_mp->axis_switch[dim];
+				ba_switch_t *alter_switch =
+					&curr_mp->alter_switch[dim];
+				if (axis_switch->usage & BG_SWITCH_PASS_USED) {
+					info("_finish_torus: got a bad "
+					     "axis_switch at "
+					     "%s(%d) %s %s",
+					     curr_mp->coord_str, dim,
+					     ba_switch_usage_str(
+						     axis_switch->usage),
+					     ba_switch_usage_str(
+						     alter_switch->usage));
+					xassert(0);
+				}
+				alter_switch->usage |= BG_SWITCH_PASS;
+				curr_mp->used |= BA_MP_USED_ALTERED_PASS;
+				list_append(results, curr_mp);
+
+				if (ba_debug_flags & DEBUG_FLAG_BG_ALGO_DEEP)
+					info("_finish_torus: using mp %s(%d) "
+					     "to finish torus %s added %s",
+					     curr_mp->coord_str, dim,
+					     ba_switch_usage_str(
+						     axis_switch->usage),
+					     ba_switch_usage_str(
+						     alter_switch->usage));
+			} else if (ba_debug_flags & DEBUG_FLAG_BG_ALGO_DEEP)
+				info("_finish_torus: skipping already "
+				     "set %s(%d) %s",
+				     curr_mp->coord_str, dim,
+				     ba_switch_usage_str(
+					     curr_mp->alter_switch[dim].usage));
+			curr_mp = curr_mp->next_mp[dim];
+		}
+		/* info("_finish_torus: ended with %s(%d)", */
+		/*      curr_mp->coord_str, dim); */
+	}
+
+	return 1;
+}
+
+static char *_copy_from_main(List main_mps, List ret_list)
+{
+	ListIterator itr;
+	ba_mp_t *ba_mp;
+	ba_mp_t *new_mp;
+	int dim;
+	char *name = NULL;
+	hostlist_t hostlist = NULL;
+
+	if (!main_mps || !ret_list)
+		return NULL;
+
+	if (!(itr = list_iterator_create(main_mps)))
+		fatal("NULL itr returned");
+	while ((ba_mp = list_next(itr))) {
+		if (!(ba_mp->used & BA_MP_USED_ALTERED)) {
+			error("_copy_from_main: it appears we "
+			      "have a mp %s added that wasn't altered %d",
+			      ba_mp->coord_str, ba_mp->used);
+			continue;
+		}
+
+		new_mp = ba_copy_mp(ba_mp);
+		list_append(ret_list, new_mp);
+		/* copy and reset the path */
+		memcpy(new_mp->axis_switch, new_mp->alter_switch,
+		       sizeof(ba_mp->axis_switch));
+		memset(new_mp->alter_switch, 0, sizeof(new_mp->alter_switch));
+		if (new_mp->used & BA_MP_USED_PASS_BIT) {
+			if (ba_debug_flags & DEBUG_FLAG_BG_ALGO)
+				info("_copy_from_main: "
+				     "mp %s is used for passthrough",
+				     new_mp->coord_str);
+			new_mp->used = BA_MP_USED_FALSE;
+		} else {
+			if (ba_debug_flags & DEBUG_FLAG_BG_ALGO)
+				info("_copy_from_main: "
+				     "mp %s is used", new_mp->coord_str);
+			new_mp->used = BA_MP_USED_TRUE;
+			if (hostlist)
+				hostlist_push(hostlist, new_mp->coord_str);
+			else
+				hostlist = hostlist_create(new_mp->coord_str);
+		}
+
+		/* reset the main mp */
+		ba_mp->used &= (~BA_MP_USED_ALTERED_PASS);
+		memset(ba_mp->alter_switch, 0, sizeof(ba_mp->alter_switch));
+		/* Take this away if we decide we don't want
+		   this to setup the main list.
+		*/
+		/* info("got usage of %s %d %d", new_mp->coord_str, */
+		/*      new_mp->used, ba_mp->used); */
+		for (dim=0; dim<cluster_dims; dim++) {
+			ba_mp->axis_switch[dim].usage |=
+				new_mp->axis_switch[dim].usage;
+			if (ba_debug_flags & DEBUG_FLAG_BG_ALGO)
+				debug("_copy_from_main: dim %d is %s added %s",
+				      dim,
+				      ba_switch_usage_str(
+					      ba_mp->axis_switch[dim].usage),
+				      ba_switch_usage_str(
+					      new_mp->axis_switch[dim].usage));
+		}
+	}
+	list_iterator_destroy(itr);
+
+	if (hostlist) {
+		name = hostlist_ranged_string_xmalloc(hostlist);
+		hostlist_destroy(hostlist);
+	}
+
+	return name;
+}
+
+static char *_reset_altered_mps(List main_mps, bool get_name)
+{
+	ListIterator itr = NULL;
+	ba_mp_t *ba_mp;
+	char *name = NULL;
+	hostlist_t hostlist = NULL;
+
+	xassert(main_mps);
+
+	if (!(itr = list_iterator_create(main_mps)))
+		fatal("got NULL list iterator");
+	while ((ba_mp = list_next(itr))) {
+		if (!(ba_mp->used & BA_MP_USED_ALTERED)) {
+			error("_reset_altered_mps: it appears we "
+			      "have a mp %s added that wasn't altered",
+			      ba_mp->coord_str);
+			continue;
+		}
+
+		if (ba_mp->used & BA_MP_USED_PASS_BIT) {
+			if (ba_debug_flags & DEBUG_FLAG_BG_ALGO_DEEP)
+				info("_reset_altered_mps: "
+				     "mp %s is used for passthrough %d",
+				     ba_mp->coord_str, ba_mp->used);
+		} else {
+			if (ba_debug_flags & DEBUG_FLAG_BG_ALGO_DEEP)
+				info("_reset_altered_mps: "
+				     "mp %s is used %d", ba_mp->coord_str,
+				     ba_mp->used);
+			if (get_name) {
+				if (hostlist)
+					hostlist_push(hostlist,
+						      ba_mp->coord_str);
+				else
+					hostlist = hostlist_create(
+						ba_mp->coord_str);
+			}
+		}
+
+		ba_mp->used &= (~BA_MP_USED_ALTERED_PASS);
+		memset(ba_mp->alter_switch, 0, sizeof(ba_mp->alter_switch));
+	}
+	list_iterator_destroy(itr);
+
+	if (hostlist) {
+		name = hostlist_ranged_string_xmalloc(hostlist);
+		hostlist_destroy(hostlist);
+	}
+
+	return name;
+}
+
+static int _copy_ba_switch(ba_mp_t *ba_mp, ba_mp_t *orig_mp, int dim)
+{
+	int rc = 0;
+	if (ba_mp->alter_switch[dim].usage != BG_SWITCH_NONE) {
+		if (ba_debug_flags & DEBUG_FLAG_BG_ALGO_DEEP)
+			info("_copy_ba_switch: "
+			     "switch already set %s(%d)",
+			     ba_mp->coord_str, dim);
+		return 0;
+	}
+
+	if (orig_mp->alter_switch[dim].usage == BG_SWITCH_NONE) {
+		if (ba_debug_flags & DEBUG_FLAG_BG_ALGO_DEEP)
+			info("_copy_ba_switch: "
+			     "switch not needed %s(%d)",
+			     ba_mp->coord_str, dim);
+		return 0;
+	}
+
+	if ((orig_mp->used & BA_MP_USED_PASS_BIT)
+	    || (ba_mp->used & BA_MP_USED_PASS_BIT)) {
+		if (ba_debug_flags & DEBUG_FLAG_BG_ALGO_DEEP)
+			info("_copy_ba_switch: "
+			     "pass bit set %d %d",
+			     orig_mp->alter_switch[dim].usage
+			     & BG_SWITCH_PASS_FLAG,
+			     ba_mp->alter_switch[dim].usage
+			     & BG_SWITCH_PASS_FLAG);
+		if (!(orig_mp->alter_switch[dim].usage & BG_SWITCH_PASS_FLAG)) {
+			if (ba_debug_flags & DEBUG_FLAG_BG_ALGO_DEEP)
+				info("_copy_ba_switch: "
+				     "skipping %s(%d)", ba_mp->coord_str, dim);
+			return 0;
+		}
+	} else if (_mp_used(ba_mp, dim)) {
+		if (ba_debug_flags & DEBUG_FLAG_BG_ALGO_DEEP)
+			info("_copy_ba_switch: "
+			     "%s is already used", ba_mp->coord_str);
+		return -1;
+	}
+
+	if (ba_debug_flags & DEBUG_FLAG_BG_ALGO_DEEP)
+		info("_copy_ba_switch: "
+		     "mapping %s(%d) %s to %s(%d) %s",
+		     orig_mp->coord_str, dim,
+		     ba_switch_usage_str(orig_mp->alter_switch[dim].usage),
+		     ba_mp->coord_str, dim,
+		     ba_switch_usage_str(ba_mp->alter_switch[dim].usage));
+
+	if (ba_mp->axis_switch[dim].usage & orig_mp->alter_switch[dim].usage) {
+		if (ba_debug_flags & DEBUG_FLAG_BG_ALGO_DEEP)
+			info("_copy_ba_switch: "
+			     "can't use %s(%d) switch %s "
+			     "overlapped with request %s",
+			     ba_mp->coord_str, dim,
+			     ba_switch_usage_str(
+				     ba_mp->axis_switch[dim].usage),
+			     ba_switch_usage_str(
+				     orig_mp->alter_switch[dim].usage));
+		return -1;
+	}
+
+	/* If we return 1 it means we haven't yet looked at this
+	 * midplane so add it to the list */
+	if (!(ba_mp->used & BA_MP_USED_ALTERED))
+		rc = 1;
+
+	/* set up the usage of the midplane */
+	if (orig_mp->used & BA_MP_USED_PASS_BIT)
+		ba_mp->used |= BA_MP_USED_ALTERED_PASS;
+	else
+		ba_mp->used |= BA_MP_USED_ALTERED;
+
+	if (ba_debug_flags & DEBUG_FLAG_BG_ALGO_DEEP)
+		info("_copy_ba_switch: "
+		     "mp %s(%d) adds %s to mp %s(%d) %s %d",
+		     orig_mp->coord_str, dim,
+		     ba_switch_usage_str(orig_mp->alter_switch[dim].usage),
+		     ba_mp->coord_str, dim,
+		     ba_switch_usage_str(ba_mp->alter_switch[dim].usage),
+		     ba_mp->used);
+	ba_mp->alter_switch[dim].usage |= orig_mp->alter_switch[dim].usage;
+
+	return rc;
+}
+
+static int _check_deny_pass(int dim)
+{
+	if (!deny_pass || !*deny_pass)
+		return 0;
+
+	switch (dim) {
+	case A:
+		*deny_pass |= PASS_FOUND_A;
+		if (*deny_pass & PASS_DENY_A) {
+			debug("We don't allow A passthoughs");
+			return 1;
+		}
+		break;
+	case X:
+		*deny_pass |= PASS_FOUND_X;
+		if (*deny_pass & PASS_DENY_X) {
+			debug("We don't allow X passthoughs");
+			return 1;
+		}
+		break;
+	case Y:
+		*deny_pass |= PASS_FOUND_Y;
+		if (*deny_pass & PASS_DENY_Y) {
+			debug("We don't allow Y passthoughs");
+			return 1;
+		}
+		break;
+	case Z:
+		*deny_pass |= PASS_FOUND_Z;
+		if (*deny_pass & PASS_DENY_Z) {
+			debug("We don't allow Z passthoughs");
+			return 1;
+		}
+		break;
+	default:
+		error("unknown dim %d", dim);
+		return 1;
+		break;
+	}
+	return 0;
+}
+
+static int _find_path(List mps, ba_mp_t *start_mp, int dim,
+		      uint16_t geometry, uint16_t conn_type,
+		      int *block_start, int *block_end)
+{
+	ba_mp_t *curr_mp = start_mp->next_mp[dim];
+	ba_switch_t *axis_switch = NULL;
+	ba_switch_t *alter_switch = NULL;
+	int count = 1;
+	int add = 0;
+
+	if (ba_debug_flags & DEBUG_FLAG_BG_ALGO_DEEP)
+		info("_find_path: at mp %s(%d) geo %d switches at %s and %s",
+		     start_mp->coord_str, dim, geometry,
+		     ba_switch_usage_str(start_mp->axis_switch[dim].usage),
+		     ba_switch_usage_str(start_mp->alter_switch[dim].usage));
+
+	if (_mp_used(start_mp, dim))
+		return 0;
+
+	axis_switch = &start_mp->axis_switch[dim];
+	alter_switch = &start_mp->alter_switch[dim];
+	if (geometry == 1) {
+		/* Always check MESH here since we only care about the
+		   IN/OUT ports.
+		*/
+		start_mp->used |= BA_MP_USED_ALTERED;
+		/* all 1 dimensions need a TORUS */
+		alter_switch->usage |= BG_SWITCH_WRAPPED;
+		if (ba_debug_flags & DEBUG_FLAG_BG_ALGO_DEEP)
+			info("_find_path: using mp %s(%d) in 1 geo %s added %s",
+			     start_mp->coord_str, dim,
+			     ba_switch_usage_str(axis_switch->usage),
+			     ba_switch_usage_str(alter_switch->usage));
+		return 1;
+	}
+	if (_mp_out_used(start_mp, dim))
+		return 0;
+	start_mp->used |= BA_MP_USED_ALTERED;
+	alter_switch->usage |= BG_SWITCH_OUT;
+	alter_switch->usage |= BG_SWITCH_OUT_PASS;
+
+	while (curr_mp != start_mp) {
+		add = 0;
+		xassert(curr_mp);
+		axis_switch = &curr_mp->axis_switch[dim];
+		alter_switch = &curr_mp->alter_switch[dim];
+
+		/* This should never happen since we got here
+		   from an unused mp */
+		if (axis_switch->usage & BG_SWITCH_IN_PASS) {
+			info("_find_path: got a bad axis_switch at %s %d %s %s",
+			     curr_mp->coord_str, dim,
+			     ba_switch_usage_str(axis_switch->usage),
+			     ba_switch_usage_str(alter_switch->usage));
+			xassert(0);
+		}
+
+		if ((count < geometry) && !_mp_used(curr_mp, dim)) {
+			/* if (curr_mp->coord[dim] < start_mp->coord[dim]) { */
+			/* 	if (ba_debug_flags & DEBUG_FLAG_BG_ALGO_DEEP) */
+			/* 		info("Available mp %s(%d) is less " */
+			/* 		     "than our starting point " */
+			/* 		     "of %s(%d) since we already " */
+			/* 		     "looked at this return.", */
+			/* 		     curr_mp->coord_str, dim, */
+			/* 		     start_mp->coord_str, dim); */
+			/* 	return 0; */
+			/* } */
+			if (curr_mp->coord[dim] < *block_start)
+				*block_start = curr_mp->coord[dim];
+
+			if (curr_mp->coord[dim] > *block_end)
+				*block_end = curr_mp->coord[dim];
+			count++;
+			if (!(curr_mp->used & BA_MP_USED_ALTERED)) {
+				add = 1;
+				curr_mp->used |= BA_MP_USED_ALTERED;
+			}
+			alter_switch->usage |= BG_SWITCH_IN_PASS;
+			alter_switch->usage |= BG_SWITCH_IN;
+			if ((count < geometry) || (conn_type == SELECT_TORUS)) {
+				alter_switch->usage |= BG_SWITCH_OUT;
+				alter_switch->usage |= BG_SWITCH_OUT_PASS;
+				if (ba_debug_flags & DEBUG_FLAG_BG_ALGO_DEEP)
+					info("_find_path: using mp %s(%d) "
+					     "%d(%d) %s added %s",
+					     curr_mp->coord_str, dim,
+					     count, geometry,
+					     ba_switch_usage_str(
+						     axis_switch->usage),
+					     ba_switch_usage_str(
+						     alter_switch->usage));
+			} else if (conn_type == SELECT_MESH) {
+				if (ba_debug_flags & DEBUG_FLAG_BG_ALGO_DEEP)
+					info("_find_path: using mp %s(%d) "
+					     "%d(%d) %s added %s",
+					     curr_mp->coord_str, dim,
+					     count, geometry,
+					     ba_switch_usage_str(
+						     axis_switch->usage),
+					     ba_switch_usage_str(
+						     alter_switch->usage));
+				if (add)
+					list_append(mps, curr_mp);
+				return 1;
+			}
+		} else if (!_mp_out_used(curr_mp, dim)
+			   && !_check_deny_pass(dim)) {
+			if (!(curr_mp->used & BA_MP_USED_ALTERED)) {
+				add = 1;
+				curr_mp->used |= BA_MP_USED_ALTERED_PASS;
+			}
+			alter_switch->usage |= BG_SWITCH_PASS;
+			if (ba_debug_flags & DEBUG_FLAG_BG_ALGO_DEEP) {
+				if (count == geometry) {
+					info("_find_path: using mp %s(%d) to "
+					     "finish torus %s added %s",
+					     curr_mp->coord_str, dim,
+					     ba_switch_usage_str(
+						     axis_switch->usage),
+					     ba_switch_usage_str(
+						     alter_switch->usage));
+				} else {
+					info("_find_path: using mp %s(%d) as "
+					     "passthrough %s added %s",
+					     curr_mp->coord_str, dim,
+					     ba_switch_usage_str(
+						     axis_switch->usage),
+					     ba_switch_usage_str(
+						     alter_switch->usage));
+				}
+			}
+		} else {
+			/* we can't use this so return with a nice 0 */
+			info("_find_path: we can't use this so return");
+			return 0;
+		}
+
+		if (add)
+			list_append(mps, curr_mp);
+		curr_mp = curr_mp->next_mp[dim];
+	}
+
+	if (count != geometry)
+		return 0;
+
+	if (curr_mp == start_mp) {
+		axis_switch = &curr_mp->axis_switch[dim];
+		alter_switch = &curr_mp->alter_switch[dim];
+		/* This should never happen since we got here
+		   from an unused mp */
+		if (axis_switch->usage & BG_SWITCH_IN_PASS) {
+			info("_find_path: 2 got a bad axis_switch at %s %d %s",
+			     curr_mp->coord_str, dim,
+			     ba_switch_usage_str(axis_switch->usage));
+			xassert(0);
+		}
+
+		alter_switch->usage |= BG_SWITCH_IN_PASS;
+		alter_switch->usage |= BG_SWITCH_IN;
+	}
+
+	return 1;
+}
+
+static void _setup_next_mps(int level, uint16_t *coords)
+{
+	ba_mp_t *curr_mp;
+	uint16_t next_coords[SYSTEM_DIMENSIONS];
+	uint16_t prev_coords[SYSTEM_DIMENSIONS];
+	int dim;
+
+	if (level > cluster_dims)
+		return;
+
+	if (level < cluster_dims) {
+		for (coords[level] = 0;
+		     coords[level] < DIM_SIZE[level];
+		     coords[level]++) {
+			/* handle the outer dims here */
+			_setup_next_mps(level+1, coords);
+		}
+		return;
+	}
+	curr_mp = coord2ba_mp(coords);
+	if (!curr_mp)
+		return;
+	for (dim = 0; dim < cluster_dims; dim++) {
+		memcpy(next_coords, coords, sizeof(next_coords));
+		memcpy(prev_coords, coords, sizeof(prev_coords));
+		if (next_coords[dim] < (DIM_SIZE[dim]-1))
+			next_coords[dim]++;
+		else
+			next_coords[dim] = 0;
+
+		if (prev_coords[dim] > 0)
+			prev_coords[dim]--;
+		else
+			prev_coords[dim] = DIM_SIZE[dim]-1;
+		curr_mp->next_mp[dim] = coord2ba_mp(next_coords);
+		curr_mp->prev_mp[dim] = coord2ba_mp(prev_coords);
+	}
+}
+
+/* Used to set up the next nodecard we are going to look at.  Start
+ * with mp_coords set to all zeros; each subsequent call advances
+ * mp_coords to the starting point of the next nodecard.
+ */
+static void _increment_nc_coords(int dim, int *mp_coords, int *dim_size)
+{
+	if (dim >= 5)
+		return;
+
+	mp_coords[ba_nc_dim_order[dim]]+=2;
+	if (mp_coords[ba_nc_dim_order[dim]] >= dim_size[ba_nc_dim_order[dim]]) {
+		mp_coords[ba_nc_dim_order[dim]] = 0;
+		_increment_nc_coords(dim+1, mp_coords, dim_size);
+	}
+}
+
+/*
+ * Used to check if a midplane is usable in the block we are creating.
+ *
+ * IN: ba_mp - mp to check if it is used
+ * IN: dim - dimension we are checking.
+ */
+static bool _mp_used(ba_mp_t* ba_mp, int dim)
+{
+	xassert(ba_mp);
+
+	/* if we've used this mp in another block already */
+	if (mp_strip_unaltered(ba_mp->used)
+	    || (ba_mp->axis_switch[dim].usage & BG_SWITCH_WRAPPED)
+	    || (ba_mp->alter_switch[dim].usage & BG_SWITCH_WRAPPED)) {
+		if (ba_debug_flags & DEBUG_FLAG_BG_ALGO_DEEP)
+			info("mp %s(%d) used (%d, %s/%s)",
+			     ba_mp->coord_str, dim,
+			     mp_strip_unaltered(ba_mp->used),
+			     ba_switch_usage_str(
+				     ba_mp->axis_switch[dim].usage),
+			     ba_switch_usage_str(
+				     ba_mp->alter_switch[dim].usage));
+		return true;
+	}
+	return false;
+}
+
+/*
+ * Used to check if we can leave a midplane
+ *
+ * IN: ba_mp - mp to check if it is used
+ * IN: dim - dimension we are checking.
+ */
+static bool _mp_out_used(ba_mp_t* ba_mp, int dim)
+{
+	xassert(ba_mp);
+
+	/* If the mp is already used just check the PASS_USED. */
+	if ((ba_mp->axis_switch[dim].usage & BG_SWITCH_PASS_USED)
+	    || (ba_mp->alter_switch[dim].usage & BG_SWITCH_PASS_USED)) {
+		if (ba_debug_flags & DEBUG_FLAG_BG_ALGO_DEEP)
+			info("mp %s(%d) has passthroughs used (%s)",
+			     ba_mp->coord_str, dim, ba_switch_usage_str(
+				     ba_mp->axis_switch[dim].usage));
+		return true;
+	}
+
+	return false;
+}
+
diff --git a/src/plugins/select/bluegene/plugin/bg_boot_time.h b/src/plugins/select/bluegene/ba_bgq/block_allocator.h
similarity index 69%
rename from src/plugins/select/bluegene/plugin/bg_boot_time.h
rename to src/plugins/select/bluegene/ba_bgq/block_allocator.h
index 5a83177a8..134e80bd0 100644
--- a/src/plugins/select/bluegene/plugin/bg_boot_time.h
+++ b/src/plugins/select/bluegene/ba_bgq/block_allocator.h
@@ -1,13 +1,14 @@
 /*****************************************************************************\
- *  bg_boot_time.h - Block boot time parameters for use by slurm_prolog
- *	and slurmctld
+ *  block_allocator.h
+ *
  *****************************************************************************
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2004-2007 The Regents of the University of California.
+ *  Copyright (C) 2008-2011 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
- *  Written by Morris Jette <jette1@llnl.gov>
+ *  Written by Dan Phung <phung4@llnl.gov>, Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -36,20 +37,19 @@
  *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
 \*****************************************************************************/
 
-#ifndef _BG_BOOT_TIME_H_
-#define _BG_BOOT_TIME_H_
+#ifndef _BLOCK_ALLOCATOR_H_
+#define _BLOCK_ALLOCATOR_H_
+
+#include "src/common/node_select.h"
+#include "../bridge_linker.h"
+#include "../ba_common.h"
+#include "../bg_job_info.h"
+
+// #define DEBUG_PA
 
-/*
- * Total time to boot a bglblock should not exceed
- * BG_FREE_PREVIOUS_BLOCK + BG_MIN_BLOCK_BOOT +
- * (BG_INCR_BLOCK_BOOT * base partition count).
- * For example, if BG_MIN_BLOCK_BOOT=300, BG_MIN_BLOCK_BOOT=200,
- * BG_INCR_BLOCK_BOOT=20 and there are 4 blocks being booted,
- * wait up to 580 seconds (300 + 200 (20 * 4)).
- */
+enum {A, X, Y, Z, E};
 
-#define BG_FREE_PREVIOUS_BLOCK 300 	/* time in seconds */
-#define BG_MIN_BLOCK_BOOT  300		/* time in seconds */
-#define BG_INCR_BLOCK_BOOT 20		/* time in seconds per BP */
+/* Global */
+extern ba_mp_t ****ba_main_grid;
 
-#endif /* _BG_BOOT_TIME_H_ */
+#endif /* _BLOCK_ALLOCATOR_H_ */
diff --git a/src/plugins/select/bluegene/ba_bgq/wire_test.c b/src/plugins/select/bluegene/ba_bgq/wire_test.c
new file mode 100644
index 000000000..04a836aa2
--- /dev/null
+++ b/src/plugins/select/bluegene/ba_bgq/wire_test.c
@@ -0,0 +1,233 @@
+/*****************************************************************************\
+ *  wire_test.c - used to debug and test wires on any given system.
+ *
+ *  $Id: block_allocator.c 17495 2009-05-14 16:49:52Z da $
+ *****************************************************************************
+ *  Copyright (C) 2004 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Danny Auble <da@llnl.gov>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#if HAVE_CONFIG_H
+#  include "config.h"
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <math.h>
+#include "../ba_common.h"
+#include "block_allocator.h"
+#include "src/common/uid.h"
+#include "src/common/timers.h"
+
+/* These are here to avoid linking issues with the bridge for
+ * unresolved symbols.
+ */
+time_t last_job_update;
+time_t last_bg_update;
+bg_config_t *bg_conf;
+bg_lists_t *bg_lists;
+pthread_mutex_t block_state_mutex = PTHREAD_MUTEX_INITIALIZER;
+int bg_recover = 1;
+
+extern int bridge_init(char *properties_file)
+{
+	return SLURM_ERROR;
+}
+
+extern int bridge_fini()
+{
+	return SLURM_ERROR;
+}
+
+extern int bridge_get_size(int *size)
+{
+	return SLURM_ERROR;
+}
+
+extern int bridge_setup_system()
+{
+	return SLURM_ERROR;
+}
+
+/** */
+int main(int argc, char** argv)
+{
+	select_ba_request_t *request = xmalloc(sizeof(select_ba_request_t));
+	log_options_t log_opts = LOG_OPTS_INITIALIZER;
+	int debug_level = 5;
+	uint16_t ba_debug_flags = 0;
+
+	List results;
+//	List results2;
+//	int i,j;
+	log_opts.stderr_level  = (log_level_t)debug_level;
+	log_opts.logfile_level = (log_level_t)debug_level;
+	log_opts.syslog_level  = (log_level_t)debug_level;
+
+	ba_debug_flags |= DEBUG_FLAG_BG_ALGO;
+	ba_debug_flags |= DEBUG_FLAG_BG_ALGO_DEEP;
+	log_alter(log_opts, (log_facility_t)LOG_DAEMON, "/dev/null");
+
+	DIM_SIZE[A]=0;
+	DIM_SIZE[X]=0;
+	DIM_SIZE[Y]=0;
+	DIM_SIZE[Z]=0;
+
+	slurm_conf_reinit(NULL);
+	ba_init(NULL, 1);
+	set_ba_debug_flags(ba_debug_flags);
+
+	/* [010x831] */
+	results = list_create(NULL);
+	request->geometry[0] = 1;
+	request->geometry[1] = 1;
+	request->geometry[2] = 1;
+	request->geometry[3] = 1;
+	request->start[0] = 0;
+	request->start[1] = 1;
+	request->start[2] = 0;
+	request->start[3] = 0;
+	request->start_req = 1;
+//	request->size = 16;
+	request->rotate = 0;
+	request->elongate = 0;
+	request->conn_type[A] = SELECT_TORUS;
+	request->conn_type[X] = SELECT_TORUS;
+	request->conn_type[Y] = SELECT_TORUS;
+	request->conn_type[Z] = SELECT_TORUS;
+	new_ba_request(request);
+	print_ba_request(request);
+	if (!allocate_block(request, results)) {
+       		debug("couldn't allocate %c%c%c",
+		       alpha_num[request->geometry[0]],
+		       alpha_num[request->geometry[1]],
+		       alpha_num[request->geometry[2]]);
+	} else
+		info("got back mps %s\n", request->save_name);
+
+	list_destroy(results);
+
+/* 	/\* [001x801] *\/ */
+/* 	results = list_create(NULL); */
+/* 	request->geometry[0] = 9; */
+/* 	request->geometry[1] = 1; */
+/* 	request->geometry[2] = 1; */
+/* 	request->start[0] = 0; */
+/* 	request->start[1] = 0; */
+/* 	request->start[2] = 1; */
+/* 	request->start_req = 1; */
+/* //	request->size = 1; */
+/* 	request->rotate = 0; */
+/* 	request->elongate = 0; */
+/* 	request->conn_type = SELECT_TORUS; */
+/* 	new_ba_request(request); */
+/* 	print_ba_request(request); */
+/* 	if (!allocate_block(request, results)) { */
+/*        		debug("couldn't allocate %c%c%c", */
+/* 		       request->geometry[0], */
+/* 		       request->geometry[1], */
+/* 		       request->geometry[2]); */
+/* 	} */
+/* 	list_destroy(results); */
+
+	/* [001x801] */
+	results = list_create(NULL);
+	request->geometry[0] = 1;
+	request->geometry[1] = 2;
+	request->geometry[2] = 4;
+	request->geometry[3] = 1;
+	request->start[0] = 0;
+	request->start[1] = 0;
+	request->start[2] = 0;
+	request->start[3] = 0;
+	request->start_req = 0;
+//	request->size = 1;
+	request->rotate = 1;
+	request->elongate = 1;
+	request->conn_type[A] = SELECT_TORUS;
+	request->conn_type[X] = SELECT_TORUS;
+	request->conn_type[Y] = SELECT_TORUS;
+	request->conn_type[Z] = SELECT_TORUS;
+	new_ba_request(request);
+	print_ba_request(request);
+	if (!allocate_block(request, results)) {
+       		debug("couldn't allocate %c%c%c%c",
+		       request->geometry[0],
+		       request->geometry[1],
+		       request->geometry[2],
+		       request->geometry[3]);
+	} else
+		info("got back mps %s\n", request->save_name);
+	list_destroy(results);
+
+	int dim;
+	int a,b,c,d;
+	int starta=0;
+	int startb=0;
+	int startc=0;
+	int startd=0;
+	int enda=1;//DIM_SIZE[A];
+	int endb=DIM_SIZE[X];
+	int endc=DIM_SIZE[Y];
+	int endd=1;//DIM_SIZE[Z];
+
+	for(a=starta;a<enda;a++) {
+		for(b=startb;b<endb;b++) {
+			for(c=startc;c<endc;c++) {
+				for(d=startd;d<endd;d++) {
+					ba_mp_t *curr_mp =
+						&ba_main_grid[a][b][c][d];
+					info("Node %c%c%c%c Used = %d",
+					     alpha_num[a],alpha_num[b],
+					     alpha_num[c],alpha_num[d],
+					     curr_mp->used);
+					for(dim=0; dim<4; dim++) {
+						info("\tDim %d usage is %s ",
+						     dim,
+						     ba_switch_usage_str(
+							     curr_mp->
+							     axis_switch[dim].
+							     usage));
+					}
+				}
+			}
+		}
+	}
+	/* list_destroy(results); */
+
+/* 	ba_fini(); */
+
+/* 	delete_ba_request(request); */
+
+	return 0;
+}
diff --git a/src/plugins/select/bluegene/ba_common.c b/src/plugins/select/bluegene/ba_common.c
new file mode 100644
index 000000000..944a0c28e
--- /dev/null
+++ b/src/plugins/select/bluegene/ba_common.c
@@ -0,0 +1,1587 @@
+/*****************************************************************************\
+ *  ba_common.c
+ *
+ *****************************************************************************
+ *  Copyright (C) 2011 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Danny Auble <da@llnl.gov>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#include "ba_common.h"
+#include "bg_node_info.h"
+
+#define DISPLAY_FULL_DIM 1
+
+#if (SYSTEM_DIMENSIONS == 1)
+int cluster_dims = 1;
+int cluster_base = 10;
+#else
+int cluster_dims = 3;
+int cluster_base = 36;
+#endif
+uint32_t cluster_flags = 0;
+uint16_t ba_deny_pass = 0;
+
+ba_geo_combos_t geo_combos[LONGEST_BGQ_DIM_LEN];
+
+bool ba_initialized = false;
+uint32_t ba_debug_flags = 0;
+int DIM_SIZE[HIGHEST_DIMENSIONS];
+bitstr_t *ba_main_mp_bitmap = NULL;
+
+static void _pack_ba_connection(ba_connection_t *ba_connection,
+				Buf buffer, uint16_t protocol_version)
+{
+	int dim;
+	for (dim=0; dim<SYSTEM_DIMENSIONS; dim++)
+		pack16(ba_connection->mp_tar[dim], buffer);
+	pack16(ba_connection->port_tar, buffer);
+	pack16(ba_connection->used, buffer);
+}
+
+static int _unpack_ba_connection(ba_connection_t *ba_connection,
+				 Buf buffer, uint16_t protocol_version)
+{
+	int dim;
+
+	for (dim=0; dim<SYSTEM_DIMENSIONS; dim++)
+		safe_unpack16(&ba_connection->mp_tar[dim], buffer);
+	safe_unpack16(&ba_connection->port_tar, buffer);
+	safe_unpack16(&ba_connection->used, buffer);
+
+	return SLURM_SUCCESS;
+unpack_error:
+	return SLURM_ERROR;
+}
+
+static void _pack_ba_switch(ba_switch_t *ba_switch,
+			    Buf buffer, uint16_t protocol_version)
+{
+	int i;
+
+	if ((cluster_flags & CLUSTER_FLAG_BGL)
+	    || (cluster_flags & CLUSTER_FLAG_BGP)) {
+		for (i=0; i< NUM_PORTS_PER_NODE; i++) {
+			_pack_ba_connection(&ba_switch->int_wire[i],
+					    buffer, protocol_version);
+			_pack_ba_connection(&ba_switch->ext_wire[i],
+					    buffer, protocol_version);
+		}
+	}
+	pack16(ba_switch->usage, buffer);
+}
+
+static int _unpack_ba_switch(ba_switch_t *ba_switch,
+			     Buf buffer, uint16_t protocol_version)
+{
+	int i;
+
+	if ((cluster_flags & CLUSTER_FLAG_BGL)
+	    || (cluster_flags & CLUSTER_FLAG_BGP)) {
+		for (i=0; i< NUM_PORTS_PER_NODE; i++) {
+			if(_unpack_ba_connection(&ba_switch->int_wire[i],
+						 buffer, protocol_version)
+			   != SLURM_SUCCESS)
+				goto unpack_error;
+			if(_unpack_ba_connection(&ba_switch->ext_wire[i],
+						 buffer, protocol_version)
+			   != SLURM_SUCCESS)
+				goto unpack_error;
+		}
+	}
+	safe_unpack16(&ba_switch->usage, buffer);
+	return SLURM_SUCCESS;
+
+unpack_error:
+	return SLURM_ERROR;
+}
+
+
+/*
+ * Increment a geometry index array, return false after reaching the last entry
+ */
+static bool _incr_geo(int *geo, ba_geo_system_t *my_geo_system)
+{
+	int dim, i;
+
+	for (dim = my_geo_system->dim_count - 1; dim >= 0; dim--) {
+		if (geo[dim] < my_geo_system->dim_size[dim]) {
+			geo[dim]++;
+			for (i = dim + 1; i < my_geo_system->dim_count; i++)
+				geo[i] = 1;
+			return true;
+		}
+	}
+
+	return false;
+}
+
+/* Translate a multi-dimension coordinate (3-D, 4-D, 5-D, etc.) into a 1-D
+ * offset in the cnode* bitmap */
+static void _ba_node_xlate_to_1d(int *offset_1d, int *full_offset,
+				 ba_geo_system_t *my_geo_system)
+{
+	int i, map_offset;
+
+	xassert(offset_1d);
+	xassert(full_offset);
+	i = my_geo_system->dim_count - 1;
+	map_offset = full_offset[i];
+	for (i-- ; i >= 0; i--) {
+		map_offset *= my_geo_system->dim_size[i];
+		map_offset += full_offset[i];
+	}
+	*offset_1d = map_offset;
+}
+
+#if DISPLAY_FULL_DIM
+/* Translate a 1-D offset in the cnode bitmap to a multi-dimension
+ * coordinate (3-D, 4-D, 5-D, etc.) */
+static void _ba_node_xlate_from_1d(int offset_1d, int *full_offset,
+				   ba_geo_system_t *my_system_geo)
+{
+	int i, map_offset;
+
+	xassert(full_offset);
+	map_offset = offset_1d;
+	for (i = 0; i < my_system_geo->dim_count; i++) {
+		full_offset[i] = map_offset % my_system_geo->dim_size[i];
+		map_offset /= my_system_geo->dim_size[i];
+	}
+}
+#endif
+
+static int _ba_node_map_set_range_internal(int level, int *coords,
+					   int *start_offset, int *end_offset,
+					   bitstr_t *node_bitmap,
+					   ba_geo_system_t *my_geo_system)
+{
+	int offset_1d;
+
+	xassert(my_geo_system);
+
+	if (level > my_geo_system->dim_count)
+		return -1;
+
+	if (level < my_geo_system->dim_count) {
+		for (coords[level] = start_offset[level];
+		     coords[level] <= end_offset[level];
+		     coords[level]++) {
+			/* handle the outer dims here */
+			if (_ba_node_map_set_range_internal(
+				    level+1, coords,
+				    start_offset, end_offset,
+				    node_bitmap, my_geo_system) == -1)
+				return -1;
+		}
+		return 1;
+	}
+
+	_ba_node_xlate_to_1d(&offset_1d, coords, my_geo_system);
+	bit_set(node_bitmap, offset_1d);
+	return 1;
+}
+
+static ba_geo_combos_t *_build_geo_bitmap_arrays(int size)
+{
+	int i, j;
+	ba_geo_combos_t *combos;
+	int gap_start, max_gap_start;
+	int gap_count, gap_len, max_gap_len;
+
+	xassert(size > 0);
+	combos = &geo_combos[size-1];
+	combos->elem_count = (1 << size) - 1;
+	combos->gap_count       = xmalloc(sizeof(int) * combos->elem_count);
+	combos->has_wrap        = xmalloc(sizeof(bool) * combos->elem_count);
+	combos->set_count_array = xmalloc(sizeof(int) * combos->elem_count);
+	combos->set_bits_array  = xmalloc(sizeof(bitstr_t *) *
+					  combos->elem_count);
+	combos->start_coord = xmalloc(sizeof(uint16_t *) * combos->elem_count);
+	combos->block_size  = xmalloc(sizeof(uint16_t *) * combos->elem_count);
+
+	for (i = 1; i <= combos->elem_count; i++) {
+		bool some_bit_set = false, some_gap_set = false;
+		combos->set_bits_array[i-1] = bit_alloc(size);
+		if (combos->set_bits_array[i-1] == NULL)
+			fatal("bit_alloc: malloc failure");
+
+		gap_count = 0;
+		gap_start = -1;
+		max_gap_start = -1;
+		gap_len = 0;
+		max_gap_len = 0;
+		for (j = 0; j < size; j++) {
+			if (((i >> j) & 0x1) == 0) {
+				if (gap_len++ == 0) {
+					gap_count++;
+					gap_start = j;
+				}
+				if (some_bit_set)  /* ignore leading gap */
+					some_gap_set = true;
+				continue;
+			}
+			if (gap_len > max_gap_len) {
+				max_gap_len = gap_len;
+				max_gap_start = gap_start;
+			}
+			gap_len = 0;
+			bit_set(combos->set_bits_array[i-1], j);
+			combos->set_count_array[i-1]++;
+			if (some_bit_set && some_gap_set)
+				combos->has_wrap[i-1] = true;
+			some_bit_set = true;
+		}
+		if (gap_len) {	/* test for wrap in gap */
+			for (j = 0; j < size; j++) {
+				if (bit_test(combos->set_bits_array[i-1], j))
+					break;
+				if (j == 0)
+					gap_count--;
+				gap_len++;
+			}
+			if (gap_len >= max_gap_len) {
+				max_gap_len = gap_len;
+				max_gap_start = gap_start;
+			}
+		}
+
+		if (max_gap_len == 0) {
+			combos->start_coord[i-1] = 0;
+		} else {
+			combos->start_coord[i-1] = (max_gap_start +
+						    max_gap_len) % size;
+		}
+		combos->block_size[i-1] = size - max_gap_len;
+		combos->gap_count[i-1]  = gap_count;
+	}
+
+#if 0
+	info("geometry size=%d", size);
+	for (i = 0; i < combos->elem_count; i++) {
+		char buf[64];
+		bit_fmt(buf, sizeof(buf), combos->set_bits_array[i]);
+		info("cnt:%d bits:%10s start_coord:%u block_size:%u "
+		     "gap_count:%d has_wrap:%d",
+		     combos->set_count_array[i], buf,
+		     combos->start_coord[i], combos->block_size[i],
+		     combos->gap_count[i], (int)combos->has_wrap[i]);
+	}
+	info("\n\n");
+#endif
+
+	return combos;
+}
+
+static void _free_geo_bitmap_arrays(void)
+{
+	int i, j;
+	ba_geo_combos_t *combos;
+
+	for (i = 1; i <= LONGEST_BGQ_DIM_LEN; i++) {
+		combos = &geo_combos[i-1];
+		for (j = 0; j < combos->elem_count; j++) {
+			if (combos->set_bits_array[j])
+				bit_free(combos->set_bits_array[j]);
+		}
+		xfree(combos->gap_count);
+		xfree(combos->has_wrap);
+		xfree(combos->set_count_array);
+		xfree(combos->set_bits_array);
+		xfree(combos->start_coord);
+		xfree(combos->block_size);
+	}
+}
+
+/* Find the next element in the geo_combinations array in a given dimension
+ * that contains req_bit_cnt elements to use. Return -1 if none found. */
+static int _find_next_geo_inx(ba_geo_combos_t *geo_combo_ptr,
+			      int last_inx, uint16_t req_bit_cnt,
+			      bool deny_pass, bool deny_wrap)
+{
+	while (++last_inx < geo_combo_ptr->elem_count) {
+		if ((req_bit_cnt == geo_combo_ptr->set_count_array[last_inx])&&
+		    (!deny_pass || (geo_combo_ptr->gap_count[last_inx] < 2)) &&
+		    (!deny_wrap || !geo_combo_ptr->has_wrap[last_inx]))
+			return last_inx;
+	}
+	return -1;
+}
+
+/* Determine if a specific set of elements in each dimension is available.
+ * Return a bitmap of that set of elements if free, NULL otherwise. */
+static bitstr_t * _test_geo(bitstr_t *node_bitmap,
+			    ba_geo_system_t *my_geo_system,
+			    ba_geo_combos_t **geo_array, int *geo_array_inx)
+{
+	int i;
+	bitstr_t *alloc_node_bitmap;
+	int offset[my_geo_system->dim_count];
+
+	alloc_node_bitmap = bit_alloc(my_geo_system->total_size);
+	memset(offset, 0, sizeof(offset));
+	while (1) {
+		/* Test if this coordinate is required in every dimension */
+		for (i = 0; i < my_geo_system->dim_count; i++) {
+			if (!bit_test(geo_array[i]->
+				      set_bits_array[geo_array_inx[i]],
+				      offset[i]))
+				break;	/* not needed */
+		}
+		/* Test if this coordinate is available for use */
+		if (i >= my_geo_system->dim_count) {
+			if (ba_node_map_test(node_bitmap,offset,my_geo_system))
+				break;	/* not available */
+			/* Set it in our bitmap for this job */
+			ba_node_map_set(alloc_node_bitmap, offset,
+					my_geo_system);
+		}
+		/* Go to next coordinate */
+		for (i = 0; i < my_geo_system->dim_count; i++) {
+			if (++offset[i] < my_geo_system->dim_size[i])
+				break;
+			offset[i] = 0;
+		}
+		if (i >= my_geo_system->dim_count) {
+			/* All bits in every dimension tested */
+			return alloc_node_bitmap;
+		}
+	}
+	bit_free(alloc_node_bitmap);
+	return NULL;
+}
+
+/* Attempt to place an allocation of a specific required geometry (geo_req)
+ * into a bitmap of available resources (node_bitmap). The resource allocation
+ * may contain gaps in multiple dimensions. */
+static int _geo_test_maps(bitstr_t *node_bitmap,
+			  bitstr_t **alloc_node_bitmap,
+			  ba_geo_table_t *geo_req, int *attempt_cnt,
+			  ba_geo_system_t *my_geo_system, uint16_t *deny_pass,
+			  uint16_t *start_pos, int *scan_offset,
+			  bool deny_wrap)
+{
+	int i, current_offset = -1;
+	ba_geo_combos_t *geo_array[my_geo_system->dim_count];
+	int geo_array_inx[my_geo_system->dim_count];
+	bool dim_deny_pass;
+
+	for (i = 0; i < my_geo_system->dim_count; i++) {
+		if (my_geo_system->dim_size[i] > LONGEST_BGQ_DIM_LEN) {
+			error("System geometry specification larger than "
+			      "configured LONGEST_BGQ_DIM_LEN. Increase "
+			      "LONGEST_BGQ_DIM_LEN (%d)", LONGEST_BGQ_DIM_LEN);
+			return SLURM_ERROR;
+		}
+		if (deny_pass)
+			dim_deny_pass = (bool) deny_pass[i];
+		else	/* No passthru allowed by default */
+			dim_deny_pass = true;
+		geo_array[i] = &geo_combos[my_geo_system->dim_size[i] - 1];
+		geo_array_inx[i] = _find_next_geo_inx(geo_array[i], -1,
+						      geo_req->geometry[i],
+						      dim_deny_pass,
+						      deny_wrap);
+		if (geo_array_inx[i] == -1) {
+			error("Request to allocate %u nodes in dimension %d, "
+			      "which only has %d elements",
+			      geo_req->geometry[i], i,
+			      my_geo_system->dim_size[i]);
+			return SLURM_ERROR;
+		}
+	}
+
+	*alloc_node_bitmap = (bitstr_t *) NULL;
+	while (1) {
+		current_offset++;
+		if (!scan_offset || (current_offset >= *scan_offset)) {
+			(*attempt_cnt)++;
+			*alloc_node_bitmap = _test_geo(node_bitmap,
+						       my_geo_system,
+						       geo_array,
+						       geo_array_inx);
+			if (*alloc_node_bitmap)
+				break;
+		}
+
+		/* Increment offsets */
+		for (i = 0; i < my_geo_system->dim_count; i++) {
+			if (deny_pass)
+				dim_deny_pass = (bool) deny_pass[i];
+			else	/* No passthru allowed by default */
+				dim_deny_pass = true;
+			geo_array_inx[i] = _find_next_geo_inx(geo_array[i],
+							geo_array_inx[i],
+						     	geo_req->geometry[i],
+						     	dim_deny_pass,
+						     	deny_wrap);
+			if (geo_array_inx[i] != -1)
+				break;
+			geo_array_inx[i] = _find_next_geo_inx(geo_array[i], -1,
+							geo_req->geometry[i],
+						     	dim_deny_pass,
+						     	deny_wrap);
+		}
+		if (i >= my_geo_system->dim_count)
+			return SLURM_ERROR;
+	}
+
+	if (start_pos) {
+		for (i = 0; i < my_geo_system->dim_count; i++) {
+			start_pos[i] = geo_array[i]->
+				       start_coord[geo_array_inx[i]];
+		}
+	}
+	if (scan_offset)
+		*scan_offset = current_offset + 1;
+	return SLURM_SUCCESS;
+}
+
+static void _internal_removable_set_mps(int level, bitstr_t *bitmap,
+					uint16_t *coords, bool mark,
+					bool except)
+{
+	ba_mp_t *curr_mp;
+	int is_set;
+
+	if (level > cluster_dims)
+		return;
+
+	if (level < cluster_dims) {
+		for (coords[level] = 0;
+		     coords[level] < DIM_SIZE[level];
+		     coords[level]++) {
+			/* handle the outer dims here */
+			_internal_removable_set_mps(
+				level+1, bitmap, coords, mark, except);
+		}
+		return;
+	}
+	curr_mp = coord2ba_mp(coords);
+	if (!curr_mp)
+		return;
+	if (bitmap)
+		is_set = bit_test(bitmap, curr_mp->index);
+	if (!bitmap || (is_set && !except) || (!is_set && except)) {
+		if (mark) {
+			if (ba_debug_flags & DEBUG_FLAG_BG_ALGO_DEEP)
+				info("can't use %s", curr_mp->coord_str);
+			curr_mp->used |= BA_MP_USED_TEMP;
+			bit_set(ba_main_mp_bitmap, curr_mp->index);
+		} else {
+			curr_mp->used &= (~BA_MP_USED_TEMP);
+			if (curr_mp->used == BA_MP_USED_FALSE)
+				bit_clear(ba_main_mp_bitmap, curr_mp->index);
+		}
+	}
+}
+
+static void _internal_reset_ba_system(int level, uint16_t *coords,
+				      bool track_down_mps)
+{
+	ba_mp_t *curr_mp;
+
+	if (level > cluster_dims)
+		return;
+
+	if (level < cluster_dims) {
+		for (coords[level] = 0;
+		     coords[level] < DIM_SIZE[level];
+		     coords[level]++) {
+			/* handle the outer dims here */
+			_internal_reset_ba_system(
+				level+1, coords, track_down_mps);
+		}
+		return;
+	}
+	curr_mp = coord2ba_mp(coords);
+	if (!curr_mp)
+		return;
+	ba_setup_mp(curr_mp, track_down_mps, false);
+	bit_clear(ba_main_mp_bitmap, curr_mp->index);
+}
+
+#if defined HAVE_BG_FILES
+static ba_mp_t *_internal_loc2ba_mp(int level, uint16_t *coords,
+				    const char *check)
+{
+	ba_mp_t *curr_mp = NULL;
+
+	if (!check || (level > cluster_dims))
+		return NULL;
+
+	if (level < cluster_dims) {
+		for (coords[level] = 0;
+		     coords[level] < DIM_SIZE[level];
+		     coords[level]++) {
+			/* handle the outer dims here */
+			if ((curr_mp = _internal_loc2ba_mp(
+				     level+1, coords, check)))
+				break;
+		}
+		return curr_mp;
+	}
+
+	curr_mp = coord2ba_mp(coords);
+	if (!curr_mp)
+		return NULL;
+	if (strcasecmp(check, curr_mp->loc))
+		curr_mp = NULL;
+
+	return curr_mp;
+}
+#endif
+
+/**
+ * Initialize internal structures by either reading previous block
+ * configurations from a file or by running the graph solver.
+ *
+ * IN: node_info_msg_t * can be null,
+ *     should be from slurm_load_node().
+ *
+ * return: void.
+ */
+extern void ba_init(node_info_msg_t *node_info_ptr, bool sanity_check)
+{
+	node_info_t *node_ptr = NULL;
+	int number, count;
+	char *numeric = NULL;
+	int i, j, k;
+	slurm_conf_node_t **ptr_array;
+	int coords[HIGHEST_DIMENSIONS];
+	char *p = '\0';
+	int real_dims[HIGHEST_DIMENSIONS];
+	char dim_str[HIGHEST_DIMENSIONS+1];
+
+	/* We only need to initialize once, so return if already done so. */
+	if (ba_initialized)
+		return;
+
+	cluster_dims = slurmdb_setup_cluster_dims();
+	cluster_flags = slurmdb_setup_cluster_flags();
+	set_ba_debug_flags(slurm_get_debug_flags());
+	if (bg_recover != NOT_FROM_CONTROLLER)
+		bridge_init("");
+
+	memset(coords, 0, sizeof(coords));
+	memset(DIM_SIZE, 0, sizeof(DIM_SIZE));
+	memset(real_dims, 0, sizeof(real_dims));
+	memset(dim_str, 0, sizeof(dim_str));
+	/* cluster_dims is already set up off of working_cluster_rec */
+	if (cluster_dims == 1) {
+		if (node_info_ptr) {
+			real_dims[0] = DIM_SIZE[0]
+				= node_info_ptr->record_count;
+			for (i=1; i<cluster_dims; i++)
+				real_dims[i] = DIM_SIZE[i] = 1;
+		}
+		goto setup_done;
+	} else if (working_cluster_rec && working_cluster_rec->dim_size) {
+		for(i=0; i<cluster_dims; i++) {
+			real_dims[i] = DIM_SIZE[i] =
+				working_cluster_rec->dim_size[i];
+		}
+		goto setup_done;
+	}
+
+
+	if (node_info_ptr) {
+		for (i = 0; i < (int)node_info_ptr->record_count; i++) {
+			node_ptr = &node_info_ptr->node_array[i];
+			number = 0;
+
+			if (!node_ptr->name) {
+				memset(DIM_SIZE, 0, sizeof(DIM_SIZE));
+				goto node_info_error;
+			}
+
+			numeric = node_ptr->name;
+			while (numeric) {
+				if (numeric[0] < '0' || numeric[0] > 'D'
+				    || (numeric[0] > '9'
+					&& numeric[0] < 'A')) {
+					numeric++;
+					continue;
+				}
+				number = xstrntol(numeric, &p, cluster_dims,
+						  cluster_base);
+				break;
+			}
+			hostlist_parse_int_to_array(
+				number, coords, cluster_dims, cluster_base);
+			memcpy(DIM_SIZE, coords, sizeof(DIM_SIZE));
+		}
+		for (j=0; j<cluster_dims; j++) {
+			DIM_SIZE[j]++;
+			/* this will probably be reset below */
+			real_dims[j] = DIM_SIZE[j];
+		}
+	}
+node_info_error:
+	for (j=0; j<cluster_dims; j++)
+		if (!DIM_SIZE[j])
+			break;
+
+	if (j < cluster_dims) {
+		debug("Setting dimensions from slurm.conf file");
+		count = slurm_conf_nodename_array(&ptr_array);
+		if (count == 0)
+			fatal("No NodeName information available!");
+
+		for (i = 0; i < count; i++) {
+			char *nodes = ptr_array[i]->nodenames;
+			j = 0;
+			while (nodes[j] != '\0') {
+				int mid = j   + cluster_dims + 1;
+				int fin = mid + cluster_dims + 1;
+
+				if (((nodes[j] == '[') || (nodes[j] == ','))
+				    && ((nodes[mid] == 'x')
+					|| (nodes[mid] == '-'))
+				    && ((nodes[fin] == ']')
+					|| (nodes[fin] == ',')))
+					j = mid + 1; /* goto the mid
+						      * and skip it */
+				else if ((nodes[j] >= '0' && nodes[j] <= '9')
+					 || (nodes[j] >= 'A'
+					     && nodes[j] <= 'Z')) {
+					/* supposed to be blank, just
+					   making sure this is the
+					   correct alpha num
+					*/
+				} else {
+					j++;
+					continue;
+				}
+
+				for (k = 0; k < cluster_dims; k++, j++)
+					DIM_SIZE[k] = MAX(DIM_SIZE[k],
+							  select_char2coord(
+								  nodes[j]));
+				if (nodes[j] != ',')
+					break;
+			}
+		}
+
+		for (j=0; j<cluster_dims; j++)
+			if (DIM_SIZE[j])
+				break;
+
+		if (j >= cluster_dims)
+			info("are you sure you only have 1 midplane? %s",
+			     ptr_array[0]->nodenames);
+
+		for (j=0; j<cluster_dims; j++) {
+			DIM_SIZE[j]++;
+			/* this will probably be reset below */
+			real_dims[j] = DIM_SIZE[j];
+		}
+	}
+
+	/* sanity check.  We can only request part of the system, but
+	   we don't want to allow more than we have. */
+	if (sanity_check && (bg_recover != NOT_FROM_CONTROLLER)) {
+		verbose("Attempting to contact MMCS");
+		if (bridge_get_size(real_dims) == SLURM_SUCCESS) {
+			char real_dim_str[cluster_dims+1];
+			memset(real_dim_str, 0, sizeof(real_dim_str));
+			for (i=0; i<cluster_dims; i++) {
+				dim_str[i] = alpha_num[DIM_SIZE[i]];
+				real_dim_str[i] = alpha_num[real_dims[i]];
+			}
+			verbose("BlueGene configured with %s midplanes",
+				real_dim_str);
+			for (i=0; i<cluster_dims; i++)
+				if (DIM_SIZE[i] > real_dims[i])
+					fatal("You requested a %s system, "
+					      "but we only have a "
+					      "system of %s.  "
+					      "Change your slurm.conf.",
+					      dim_str, real_dim_str);
+		}
+	}
+
+setup_done:
+	if (cluster_dims == 1) {
+		if (!DIM_SIZE[0]) {
+			debug("Setting default system dimensions");
+			real_dims[0] = DIM_SIZE[0] = 100;
+			for (i=1; i<cluster_dims; i++)
+				real_dims[i] = DIM_SIZE[i] = 1;
+		}
+	} else {
+		for (i=0; i<cluster_dims; i++)
+			dim_str[i] = alpha_num[DIM_SIZE[i]];
+		debug("We are using %s of the system.", dim_str);
+	}
+
+	ba_initialized = true;
+
+	if (bg_recover != NOT_FROM_CONTROLLER)
+		ba_setup_wires();
+}
+
+
+/**
+ * destroy all the internal (global) data structs.
+ */
+extern void ba_fini(void)
+{
+	if (!ba_initialized)
+		return;
+
+	if (bg_recover != NOT_FROM_CONTROLLER) {
+		bridge_fini();
+		ba_destroy_system();
+		_free_geo_bitmap_arrays();
+	}
+
+	if (ba_main_mp_bitmap)
+		FREE_NULL_BITMAP(ba_main_mp_bitmap);
+
+	ba_initialized = false;
+
+//	debug3("pa system destroyed");
+}
+
+extern void ba_setup_wires(void)
+{
+	int num_mps, i;
+	static bool wires_setup = 0;
+
+	if (!ba_initialized || wires_setup)
+		return;
+
+	wires_setup = 1;
+
+	num_mps = 1;
+	for (i=0; i<cluster_dims; i++)
+		num_mps *= DIM_SIZE[i];
+
+	ba_main_mp_bitmap = bit_alloc(num_mps);
+
+	ba_create_system();
+	bridge_setup_system();
+
+	for (i = 1; i <= LONGEST_BGQ_DIM_LEN; i++)
+		_build_geo_bitmap_arrays(i);
+}
+
+extern void destroy_ba_mp(void *ptr)
+{
+	ba_mp_t *ba_mp = (ba_mp_t *)ptr;
+	if (ba_mp) {
+		FREE_NULL_BITMAP(ba_mp->cnode_bitmap);
+		xfree(ba_mp->loc);
+		if (ba_mp->nodecard_loc) {
+			int i;
+			for (i=0; i<bg_conf->mp_nodecard_cnt; i++)
+				xfree(ba_mp->nodecard_loc[i]);
+			xfree(ba_mp->nodecard_loc);
+		}
+		xfree(ba_mp);
+	}
+}
+
+extern void pack_ba_mp(ba_mp_t *ba_mp, Buf buffer, uint16_t protocol_version)
+{
+	int dim;
+
+	xassert(ba_mp);
+
+	for (dim = 0; dim < SYSTEM_DIMENSIONS; dim++) {
+		_pack_ba_switch(&ba_mp->axis_switch[dim], buffer,
+				protocol_version);
+		pack16(ba_mp->coord[dim], buffer);
+		/* No need to pack the coord_str, we can figure that
+		   out from the coords packed.
+		*/
+	}
+	pack_bit_fmt(ba_mp->cnode_bitmap, buffer);
+	pack16(ba_mp->used, buffer);
+	/* These are only used on the original, not in the block ba_mp's.
+	   ba_mp->alter_switch, ba_mp->index, ba_mp->loc, ba_mp->next_mp,
+	   ba_mp->nodecard_loc, ba_mp->prev_mp, ba_mp->state
+	*/
+}
+
+extern int unpack_ba_mp(ba_mp_t **ba_mp_pptr,
+			Buf buffer, uint16_t protocol_version)
+{
+	int dim;
+	ba_mp_t *orig_mp = NULL;
+	ba_mp_t *ba_mp = xmalloc(sizeof(ba_mp_t));
+	char *bit_char;
+	uint32_t uint32_tmp;
+
+	*ba_mp_pptr = ba_mp;
+
+	for (dim = 0; dim < SYSTEM_DIMENSIONS; dim++) {
+		if (_unpack_ba_switch(&ba_mp->axis_switch[dim], buffer,
+				      protocol_version)
+		    != SLURM_SUCCESS)
+			goto unpack_error;
+		safe_unpack16(&ba_mp->coord[dim], buffer);
+		ba_mp->coord_str[dim] = alpha_num[ba_mp->coord[dim]];
+	}
+	ba_mp->coord_str[dim] = '\0';
+
+	safe_unpackstr_xmalloc(&bit_char, &uint32_tmp, buffer);
+	if (bit_char) {
+		ba_mp->cnode_bitmap = bit_alloc(bg_conf->mp_cnode_cnt);
+		bit_unfmt(ba_mp->cnode_bitmap, bit_char);
+		xfree(bit_char);
+	}
+	safe_unpack16(&ba_mp->used, buffer);
+
+	/* Since index could have changed here we will go figure it out again. */
+	orig_mp = coord2ba_mp(ba_mp->coord);
+	if (!orig_mp)
+		goto unpack_error;
+	ba_mp->index = orig_mp->index;
+
+	return SLURM_SUCCESS;
+
+unpack_error:
+	destroy_ba_mp(ba_mp);
+	*ba_mp_pptr = NULL;
+	return SLURM_ERROR;
+}
+
+
+extern ba_mp_t *str2ba_mp(const char *coords)
+{
+	uint16_t coord[cluster_dims];
+	int len, dim;
+
+	if (!coords)
+		return NULL;
+	len = strlen(coords) - cluster_dims;
+	if (len < 0)
+		return NULL;
+
+	for (dim = 0; dim < cluster_dims; dim++, len++) {
+		coord[dim] = select_char2coord(coords[len]);
+		if (coord[dim] > DIM_SIZE[dim])
+			break;
+	}
+
+	if (dim < cluster_dims) {
+		char tmp_char[cluster_dims+1];
+		memset(tmp_char, 0, sizeof(tmp_char));
+		for (dim=0; dim<cluster_dims; dim++)
+			tmp_char[dim] = alpha_num[DIM_SIZE[dim]];
+		error("This location %s is not possible in our system %s",
+		      coords, tmp_char);
+		return NULL;
+	}
+
+	return coord2ba_mp(coord);
+}
+
+/*
+ * find a base blocks bg location (rack/midplane)
+ */
+extern ba_mp_t *loc2ba_mp(const char* mp_id)
+{
+#if defined HAVE_BG_FILES
+	char *check = NULL;
+	ba_mp_t *ba_mp = NULL;
+	uint16_t coords[SYSTEM_DIMENSIONS];
+
+	if (bridge_setup_system() == -1)
+		return NULL;
+
+	check = xstrdup(mp_id);
+	/* with BGP they changed the names of the rack midplane action from
+	 * R000 to R00-M0 so we now support both formats for each of the
+	 * systems */
+#ifdef HAVE_BGL
+	if (check[3] == '-') {
+		if (check[5]) {
+			check[3] = check[5];
+			check[4] = '\0';
+		}
+	}
+
+	if ((check[1] < '0' || check[1] > '9')
+	    || (check[2] < '0' || check[2] > '9')
+	    || (check[3] < '0' || check[3] > '9')) {
+		error("%s is not a valid Rack-Midplane (i.e. R000)", mp_id);
+		goto cleanup;
+	}
+
+#else
+	if (check[3] != '-') {
+		xfree(check);
+		check = xstrdup_printf("R%c%c-M%c",
+				       mp_id[1], mp_id[2], mp_id[3]);
+	}
+
+	if ((select_char2coord(check[1]) == -1)
+	    || (select_char2coord(check[2]) == -1)
+	    || (select_char2coord(check[5]) == -1)) {
+		error("%s is not a valid Rack-Midplane (i.e. R00-M0)", mp_id);
+		goto cleanup;
+	}
+#endif
+
+	ba_mp = _internal_loc2ba_mp(0, coords, check);
+cleanup:
+	xfree(check);
+
+	return ba_mp;
+#else
+	return NULL;
+#endif
+}
+
+extern void ba_setup_mp(ba_mp_t *ba_mp, bool track_down_mps, bool wrap_it)
+{
+	int i;
+	uint16_t node_base_state = ba_mp->state & NODE_STATE_BASE;
+
+	if (!track_down_mps ||((node_base_state != NODE_STATE_DOWN)
+			       && !(ba_mp->state & NODE_STATE_DRAIN)))
+		ba_mp->used = BA_MP_USED_FALSE;
+
+	for (i=0; i<cluster_dims; i++){
+#ifdef HAVE_BG_L_P
+		int j;
+		for (j=0;j<NUM_PORTS_PER_NODE;j++) {
+			ba_mp->axis_switch[i].int_wire[j].used = 0;
+			if (i!=0) {
+				if (j==3 || j==4)
+					ba_mp->axis_switch[i].int_wire[j].
+						used = 1;
+			}
+			ba_mp->axis_switch[i].int_wire[j].port_tar = j;
+		}
+#endif
+		if (wrap_it)
+			ba_mp->axis_switch[i].usage = BG_SWITCH_WRAPPED;
+		else
+			ba_mp->axis_switch[i].usage = BG_SWITCH_NONE;
+		ba_mp->alter_switch[i].usage = BG_SWITCH_NONE;
+	}
+}
+
+/*
+ * copy info from a ba_mp, a direct memcpy of the ba_mp_t
+ *
+ * IN ba_mp: mp to be copied
+ * Returned ba_mp_t *: copied info must be freed with destroy_ba_mp
+ */
+extern ba_mp_t *ba_copy_mp(ba_mp_t *ba_mp)
+{
+	ba_mp_t *new_ba_mp = (ba_mp_t *)xmalloc(sizeof(ba_mp_t));
+
+	memcpy(new_ba_mp, ba_mp, sizeof(ba_mp_t));
+	/* we have to set this or we would be pointing to the original */
+	memset(new_ba_mp->next_mp, 0, sizeof(new_ba_mp->next_mp));
+	/* we have to set this or we would be pointing to the original */
+	memset(new_ba_mp->prev_mp, 0, sizeof(new_ba_mp->prev_mp));
+	/* These are only used on the original as well. */
+	new_ba_mp->nodecard_loc = NULL;
+	new_ba_mp->loc = NULL;
+	new_ba_mp->cnode_bitmap = NULL;
+
+	return new_ba_mp;
+}
+
+/*
+ * Print a linked list of geo_table_t entries.
+ * IN geo_ptr - first geo_table entry to print
+ * IN header - message header
+ * IN my_geo_system - system geometry specification
+ */
+extern int ba_geo_list_print(ba_geo_table_t *geo_ptr, char *header,
+			     ba_geo_system_t *my_geo_system)
+{
+	int i;
+	char dim_buf[64], full_buf[128];
+
+	full_buf[0] = '\0';
+	for (i = 0; i < my_geo_system->dim_count; i++) {
+		snprintf(dim_buf, sizeof(dim_buf), "%2u ",
+			 geo_ptr->geometry[i]);
+		strcat(full_buf, dim_buf);
+	}
+	snprintf(dim_buf, sizeof(dim_buf),
+		 ": size:%u : full_dim_cnt:%u passthru_cnt:%u",
+		 geo_ptr->size, geo_ptr->full_dim_cnt, geo_ptr->passthru_cnt);
+	strcat(full_buf, dim_buf);
+	info("%s%s", header, full_buf);
+
+	return 0;
+}
+
+/*
+ * Print the contents of all geo_table_t entries.
+ */
+extern void ba_print_geo_table(ba_geo_system_t *my_geo_system)
+{
+	int i;
+	ba_geo_table_t *geo_ptr;
+
+	xassert(my_geo_system->geo_table_ptr);
+	for (i = 1; i <= my_geo_system->total_size; i++) {
+		geo_ptr = my_geo_system->geo_table_ptr[i];
+		while (geo_ptr) {
+			ba_geo_list_print(geo_ptr, "", my_geo_system);
+			geo_ptr = geo_ptr->next_ptr;
+		}
+	}
+}
+
+extern void ba_create_geo_table(ba_geo_system_t *my_geo_system)
+{
+	ba_geo_table_t *geo_ptr;
+	int dim, inx[my_geo_system->dim_count], passthru, product;
+	struct ba_geo_table **last_pptr;
+
+	if (my_geo_system->geo_table_ptr)
+		return;
+
+	xassert(my_geo_system->dim_count);
+	my_geo_system->total_size = 1;
+	for (dim = 0; dim < my_geo_system->dim_count; dim++) {
+		if (my_geo_system->dim_size[dim] < 1)
+			fatal("dim_size[%d]= %d", dim,
+			      my_geo_system->dim_size[dim]);
+		my_geo_system->total_size *= my_geo_system->dim_size[dim];
+		inx[dim] = 1;
+	}
+
+	my_geo_system->geo_table_ptr = xmalloc(sizeof(ba_geo_table_t *) *
+					       (my_geo_system->total_size+1));
+
+	do {
+		/* Store new value */
+		geo_ptr = xmalloc(sizeof(ba_geo_table_t));
+		geo_ptr->geometry = xmalloc(sizeof(uint16_t) *
+					    my_geo_system->dim_count);
+		product = 1;
+		for (dim = 0; dim < my_geo_system->dim_count; dim++) {
+			geo_ptr->geometry[dim] = inx[dim];
+			product *= inx[dim];
+			passthru = inx[dim] - my_geo_system->dim_size[dim];
+			if (passthru == 0)
+				geo_ptr->full_dim_cnt++;
+			else if (passthru > 1)
+				geo_ptr->passthru_cnt += passthru;
+		}
+		geo_ptr->size = product;
+		xassert(product <= my_geo_system->total_size);
+		my_geo_system->geo_table_size++;
+		/* Insert record into linked list so that geometries
+		 * with full dimensions appear first */
+		last_pptr = &my_geo_system->geo_table_ptr[product];
+		while (*last_pptr) {
+			if (geo_ptr->full_dim_cnt > (*last_pptr)->full_dim_cnt)
+				break;
+			if ((geo_ptr->full_dim_cnt ==
+			     (*last_pptr)->full_dim_cnt) &&
+			    (geo_ptr->passthru_cnt <
+			     (*last_pptr)->passthru_cnt))
+				break;
+			last_pptr = &((*last_pptr)->next_ptr);
+		}
+		geo_ptr->next_ptr = *last_pptr;
+		*last_pptr = geo_ptr;
+	} while (_incr_geo(inx, my_geo_system));   /* Generate next geometry */
+}
+
+/*
+ * Free memory allocated by ba_create_geo_table().
+ * IN my_geo_system - System geometry specification.
+ */
+extern void ba_free_geo_table(ba_geo_system_t *my_geo_system)
+{
+	ba_geo_table_t *geo_ptr, *next_ptr;
+	int i;
+
+	for (i = 0; i <= my_geo_system->total_size; i++) {
+		geo_ptr = my_geo_system->geo_table_ptr[i];
+		my_geo_system->geo_table_ptr[i] = NULL;
+		while (geo_ptr) {
+			next_ptr = geo_ptr->next_ptr;
+			xfree(geo_ptr->geometry);
+			xfree(geo_ptr);
+			geo_ptr = next_ptr;
+		}
+	}
+	my_geo_system->geo_table_size = 0;
+	xfree(my_geo_system->geo_table_ptr);
+}
+
+/*
+ * Allocate a multi-dimensional node bitmap. Use ba_node_map_free() to free
+ * IN my_geo_system - system geometry specification
+ */
+extern bitstr_t *ba_node_map_alloc(ba_geo_system_t *my_geo_system)
+{
+	bitstr_t *node_map;
+
+	/* One bit per node in the system */
+	node_map = bit_alloc(my_geo_system->total_size);
+	if (!node_map)
+		fatal("bit_alloc: malloc failure");
+	return node_map;
+}
+
+/*
+ * Free a node map created by ba_node_map_alloc()
+ * IN node_bitmap - bitmap of currently allocated nodes
+ * IN my_geo_system - system geometry specification
+ */
+extern void ba_node_map_free(bitstr_t *node_bitmap,
+			     ba_geo_system_t *my_geo_system)
+{
+	/* Sanity check: the bitmap must have been sized for this system
+	 * (see ba_node_map_alloc()) */
+	xassert(bit_size(node_bitmap) == my_geo_system->total_size);
+	/* FREE_NULL_BITMAP() frees the bitmap (and, per its name,
+	 * presumably NULLs the local pointer variable; the caller's own
+	 * copy of the pointer is not affected) */
+	FREE_NULL_BITMAP(node_bitmap);
+}
+
+/*
+ * Set the contents of the specified position in the bitmap
+ * IN/OUT node_bitmap - bitmap of currently allocated nodes
+ * IN full_offset - N-dimension zero-origin offset to set
+ * IN my_geo_system - system geometry specification
+ */
+extern void ba_node_map_set(bitstr_t *node_bitmap, int *full_offset,
+			    ba_geo_system_t *my_geo_system)
+{
+	int inx_1d;
+
+	/* Map the N-dimension offset onto the flat bitmap index, then
+	 * set that bit */
+	_ba_node_xlate_to_1d(&inx_1d, full_offset, my_geo_system);
+	bit_set(node_bitmap, inx_1d);
+}
+
+/*
+ * Set the contents of the specified position in the bitmap
+ * IN/OUT node_bitmap - bitmap of currently allocated nodes
+ * IN start_offset - N-dimension zero-origin offset to start setting at
+ * IN end_offset - N-dimension zero-origin offset to end setting at
+ * IN my_geo_system - system geometry specification
+ */
+extern void ba_node_map_set_range(bitstr_t *node_bitmap,
+				  int *start_offset, int *end_offset,
+				  ba_geo_system_t *my_geo_system)
+{
+	/* Scratch coordinates for the recursive range walk.  Sized from
+	 * the actual dimension count rather than a fixed 5, matching the
+	 * VLA usage elsewhere in this file, so a system with more
+	 * dimensions cannot overrun the array. */
+	int coords[my_geo_system->dim_count];
+
+	_ba_node_map_set_range_internal(0, coords, start_offset, end_offset,
+					node_bitmap, my_geo_system);
+}
+
+/*
+ * Return the contents of the specified position in the bitmap
+ * IN node_bitmap - bitmap of currently allocated nodes
+ * IN full_offset - N-dimension zero-origin offset to test
+ * IN my_geo_system - system geometry specification
+ */
+extern int ba_node_map_test(bitstr_t *node_bitmap, int *full_offset,
+			    ba_geo_system_t *my_geo_system)
+{
+	int inx_1d;
+
+	/* Map the N-dimension offset onto the flat bitmap index and
+	 * report that bit's value */
+	_ba_node_xlate_to_1d(&inx_1d, full_offset, my_geo_system);
+	return bit_test(node_bitmap, inx_1d);
+}
+
+/*
+ * Add a new allocation's node bitmap to that of the currently
+ *	allocated bitmap
+ * IN/OUT node_bitmap - bitmap of currently allocated nodes
+ * IN alloc_bitmap - bitmap of nodes to be added to node_bitmap
+ * IN my_geo_system - system geometry specification
+ */
+extern void ba_node_map_add(bitstr_t *node_bitmap, bitstr_t *alloc_bitmap,
+			    ba_geo_system_t *my_geo_system)
+{
+	/* Both bitmaps must be sized for this system geometry */
+	xassert(bit_size(node_bitmap) == my_geo_system->total_size);
+	xassert(bit_size(alloc_bitmap) == my_geo_system->total_size);
+	/* node_bitmap |= alloc_bitmap */
+	bit_or(node_bitmap, alloc_bitmap);
+}
+
+/*
+ * Remove a terminating allocation's node bitmap from that of the currently
+ *	allocated bitmap
+ * IN/OUT node_bitmap - bitmap of currently allocated nodes
+ * IN alloc_bitmap - bitmap of nodes to be removed from node_bitmap
+ * IN my_geo_system - system geometry specification
+ */
+extern void ba_node_map_rm(bitstr_t *node_bitmap, bitstr_t *alloc_bitmap,
+			   ba_geo_system_t *my_geo_system)
+{
+	/* Both bitmaps must be sized for this system geometry */
+	xassert(bit_size(node_bitmap) == my_geo_system->total_size);
+	xassert(bit_size(alloc_bitmap) == my_geo_system->total_size);
+	/* node_bitmap &= ~alloc_bitmap, implemented by inverting
+	 * alloc_bitmap in place, ANDing, then inverting it back so the
+	 * caller's alloc_bitmap is unchanged on return */
+	bit_not(alloc_bitmap);
+	bit_and(node_bitmap, alloc_bitmap);
+	bit_not(alloc_bitmap);
+}
+
+/*
+ * Print the contents of a node map created by ba_node_map_alloc() or
+ *	ba_geo_test_all(). Output may be in one-dimension or more depending
+ *	upon configuration.
+ * IN node_bitmap - bitmap representing current system state, bits are set
+ *                  for currently allocated nodes
+ * IN my_geo_system - system geometry specification
+ */
+extern void ba_node_map_print(bitstr_t *node_bitmap,
+			      ba_geo_system_t *my_geo_system)
+{
+#if DISPLAY_1D
+{
+	/* Compact form: print the bitmap as a 1-D bit range string */
+	char out_buf[256];
+	bit_fmt(out_buf, sizeof(out_buf), node_bitmap);
+	info("%s", out_buf);
+}
+#endif
+#if DISPLAY_FULL_DIM
+{
+	/* Full form: one line per set bit, showing its zero-origin
+	 * coordinate in every dimension plus the 1-D index */
+	int i, j, offset[my_geo_system->dim_count];
+
+	xassert(node_bitmap);
+	xassert(bit_size(node_bitmap) == my_geo_system->total_size);
+
+	for (i = 0; i < my_geo_system->total_size; i++) {
+		if (bit_test(node_bitmap, i)) {
+			char dim_buf[16], full_buf[64];
+			full_buf[0] = '\0';
+			/* Translate 1-D bit index back into N-D coords */
+			_ba_node_xlate_from_1d(i, offset, my_geo_system);
+			for (j = 0; j < my_geo_system->dim_count; j++) {
+				snprintf(dim_buf, sizeof(dim_buf), "%2d ",
+					 offset[j]);
+				strcat(full_buf, dim_buf);
+			}
+			info("%s   inx:%d", full_buf, i);
+		}
+	}
+}
+#endif
+}
+
+/*
+ * give a hostlist version of the contents of a node map created by
+ *	ba_node_map_alloc() or
+ *	ba_geo_test_all(). Output may be in one-dimension or more depending
+ *	upon configuration.
+ * IN node_bitmap - bitmap representing current system state, bits are set
+ *                  for currently allocated nodes
+ * IN my_geo_system - system geometry specification
+ * OUT char * - needs to be xfreed from caller.
+ */
+extern char *ba_node_map_ranged_hostlist(bitstr_t *node_bitmap,
+					 ba_geo_system_t *my_geo_system)
+{
+	/* NOTE(review): if neither DISPLAY_1D nor DISPLAY_FULL_DIM is
+	 * defined this function falls off the end with no return value --
+	 * confirm the build always defines exactly one of them. */
+#if DISPLAY_1D
+{
+	/* 1-D form: just return the bit range string.  Note this returns
+	 * before the DISPLAY_FULL_DIM code below is reached. */
+	char out_buf[256];
+	bit_fmt(out_buf, sizeof(out_buf), node_bitmap);
+	return xstrdup(out_buf);
+}
+#endif
+#if DISPLAY_FULL_DIM
+{
+	/* Full form: convert each set bit into an alpha-numeric coordinate
+	 * string (one character per dimension), accumulate them in a
+	 * hostlist, then emit the hostlist as a single ranged string. */
+	int i, j, offset[my_geo_system->dim_count];
+	hostlist_t hl = NULL;
+	char *ret_str = NULL;
+
+	xassert(node_bitmap);
+	xassert(bit_size(node_bitmap) == my_geo_system->total_size);
+
+	for (i = 0; i < my_geo_system->total_size; i++) {
+		if (bit_test(node_bitmap, i)) {
+			/* One character per dimension plus NUL */
+			char dim_buf[my_geo_system->dim_count+1];
+
+			_ba_node_xlate_from_1d(i, offset, my_geo_system);
+			for (j = 0; j < my_geo_system->dim_count; j++) {
+				dim_buf[j] = alpha_num[offset[j]];
+			}
+			dim_buf[j] = '\0';
+			/* info("pushing %s", dim_buf); */
+			if (hl)
+				hostlist_push_host_dims(
+					hl, dim_buf, my_geo_system->dim_count);
+			else
+				hl = hostlist_create_dims(
+					dim_buf, my_geo_system->dim_count);
+		}
+	}
+	if (hl) {
+		/* Caller must xfree() the returned string */
+		ret_str = hostlist_ranged_string_xmalloc_dims(
+			hl, my_geo_system->dim_count, 0);
+		/* info("ret is %s", ret_str); */
+		hostlist_destroy(hl);
+		hl = NULL;
+	}
+	return ret_str;
+}
+#endif
+}
+
+/*
+ * Attempt to place a new allocation into an existing node state.
+ * Do not rotate or change the requested geometry, but do attempt to place
+ * it using all possible starting locations.
+ *
+ * IN node_bitmap - bitmap representing current system state, bits are set
+ *		for currently allocated nodes
+ * OUT alloc_node_bitmap - bitmap representing where to place the allocation
+ *		set only if RET == SLURM_SUCCESS
+ * IN geo_req - geometry required for the new allocation
+ * OUT attempt_cnt - number of job placements attempted
+ * IN my_geo_system - system geometry specification
+ * IN deny_pass - if set, then do not allow gaps in a specific dimension, any
+ *		gap applies to all elements at that position in that dimension,
+ *		one value per dimension, default value prevents gaps in any
+ *		dimension
+ * IN/OUT start_pos - input is pointer to array having same size as
+ *		dimension count or NULL. Set to starting coordinates of
+ *		the allocation in each dimension.
+ * IN/OUT scan_offset - Location in search table from which to continue
+ *		searching for resources. Initial value should be zero. If the
+ *		allocation selected by the algorithm is not acceptable, call
+ *		the function repeatedly with the previous output value of
+ *		scan_offset
+ * IN deny_wrap - If set then do not permit the allocation to wrap (i.e. in
+ *		a dimension with a count of 4, 3 does not connect to 0)
+ * RET - SLURM_SUCCESS if allocation can be made, otherwise SLURM_ERROR
+ */
+extern int ba_geo_test_all(bitstr_t *node_bitmap,
+			   bitstr_t **alloc_node_bitmap,
+			   ba_geo_table_t *geo_req, int *attempt_cnt,
+			   ba_geo_system_t *my_geo_system, uint16_t *deny_pass,
+			   uint16_t *start_pos, int *scan_offset,
+			   bool deny_wrap)
+{
+	xassert(node_bitmap);
+	xassert(alloc_node_bitmap);
+	xassert(geo_req);
+	xassert(attempt_cnt);
+
+	/* All of the real placement work happens in _geo_test_maps() */
+	*attempt_cnt = 0;
+	return _geo_test_maps(node_bitmap, alloc_node_bitmap, geo_req,
+			      attempt_cnt, my_geo_system, deny_pass,
+			      start_pos, scan_offset, deny_wrap);
+}
+
+/*
+ * Used to set all midplanes in a special used state except the ones
+ * we are able to use in a new allocation.
+ *
+ * IN: bitmap of midplanes we do or do not want
+ * RET: SLURM_SUCCESS on success, or SLURM_ERROR on error
+ *
+ * Note: Need to call ba_reset_all_removed_mps before starting another
+ * allocation attempt after
+ */
+extern int ba_set_removable_mps(bitstr_t* bitmap, bool except)
+{
+	/* Scratch coordinates for the recursive walk over all dimensions */
+	uint16_t coords[SYSTEM_DIMENSIONS];
+
+	if (!bitmap)
+		return SLURM_ERROR;
+
+	/* return on empty sets */
+	/* With "except" set, a bitmap with no clear bit selects nothing;
+	 * otherwise a bitmap with no set bit selects nothing */
+	if (except) {
+		if (bit_ffc(bitmap) == -1)
+			return SLURM_SUCCESS;
+	} else if (bit_ffs(bitmap) == -1)
+		return SLURM_SUCCESS;
+
+	_internal_removable_set_mps(0, bitmap, coords, 1, except);
+	return SLURM_SUCCESS;
+}
+
+/*
+ * Resets the virtual system to the previous state before calling
+ * ba_set_removable_mps.
+ */
+extern int ba_reset_all_removed_mps(void)
+{
+	/* Scratch coordinates for the recursive walk; the NULL bitmap and
+	 * zero flags presumably tell the helper to clear the temporary
+	 * "removed" state set by ba_set_removable_mps() -- compare the
+	 * arguments passed there */
+	uint16_t coords[SYSTEM_DIMENSIONS];
+	_internal_removable_set_mps(0, NULL, coords, 0, 0);
+	return SLURM_SUCCESS;
+}
+/*
+ * set the mp in the internal configuration as in, or not in use,
+ * along with the current state of the mp.
+ *
+ * IN ba_mp: ba_mp_t to update state
+ * IN state: new state of ba_mp_t
+ */
+extern void ba_update_mp_state(ba_mp_t *ba_mp, uint16_t state)
+{
+	/* Split the node state word into its base and flag components */
+	uint16_t mp_base_state = state & NODE_STATE_BASE;
+	uint16_t mp_flags = state & NODE_STATE_FLAGS;
+
+	/* Lazily initialize the block allocator if needed */
+	if (!ba_initialized){
+		error("Error, configuration not initialized, "
+		      "calling ba_init(NULL, 1)");
+		ba_init(NULL, 1);
+	}
+
+	debug2("ba_update_mp_state: new state of [%s] is %s",
+	       ba_mp->coord_str, node_state_string(state));
+
+	/* basically set the mp as used */
+	/* DOWN, DRAIN or FAIL midplanes are marked used so they will not
+	 * be handed out again; any other state clears the used bit */
+	if ((mp_base_state == NODE_STATE_DOWN)
+	    || (mp_flags & (NODE_STATE_DRAIN | NODE_STATE_FAIL)))
+		ba_mp->used |= BA_MP_USED_TRUE;
+	else
+		ba_mp->used &= (~BA_MP_USED_TRUE);
+
+	ba_mp->state = state;
+}
+
+/* */
+/* Return 1 if every coordinate fits within the current system, else 0 */
+extern int validate_coord(uint16_t *coord)
+{
+	int dim;
+
+	for (dim = 0; dim < cluster_dims; dim++) {
+		if (coord[dim] < DIM_SIZE[dim])
+			continue;
+		/* Out of range; optionally log the offending coordinates */
+		if (ba_debug_flags & DEBUG_FLAG_BG_ALGO_DEEP) {
+			char coord_str[cluster_dims+1];
+			char dim_str[cluster_dims+1];
+			int i;
+
+			for (i = 0; i < cluster_dims; i++) {
+				coord_str[i] = alpha_num[coord[i]];
+				dim_str[i] = alpha_num[DIM_SIZE[i]];
+			}
+			coord_str[i] = '\0';
+			dim_str[i] = '\0';
+
+			info("got coord %s greater than what "
+			     "we are using %s", coord_str, dim_str);
+		}
+		return 0;
+	}
+
+	return 1;
+}
+
+/* Translate a switch usage bit pattern into a display string */
+extern char *ba_switch_usage_str(uint16_t usage)
+{
+	if (usage == BG_SWITCH_NONE)
+		return "None";
+	if (usage == BG_SWITCH_WRAPPED_PASS)
+		return "WrappedPass";
+	if (usage == BG_SWITCH_TORUS)
+		return "FullTorus";
+	if (usage == BG_SWITCH_PASS)
+		return "Passthrough";
+	if (usage == BG_SWITCH_WRAPPED)
+		return "Wrapped";
+	if (usage == (BG_SWITCH_OUT | BG_SWITCH_OUT_PASS))
+		return "OutLeaving";
+	if (usage == BG_SWITCH_OUT)
+		return "Out";
+	if (usage == (BG_SWITCH_IN | BG_SWITCH_IN_PASS))
+		return "InComming";
+	if (usage == BG_SWITCH_IN)
+		return "In";
+
+	/* Unrecognized combination */
+	error("unknown switch usage %u", usage);
+	xassert(0);
+	return "unknown";
+}
+
+/* Replace the block allocator's debug flag bits
+ * (e.g. DEBUG_FLAG_BG_ALGO_DEEP) */
+extern void set_ba_debug_flags(uint32_t debug_flags)
+{
+	ba_debug_flags = debug_flags;
+}
+
+/*
+ * Resets the virtual system to a virgin state.  If track_down_mps is set
+ * then those midplanes are not set to idle, but kept in a down state.
+ */
+extern void reset_ba_system(bool track_down_mps)
+{
+	/* Scratch coordinates for the recursive walk over every midplane */
+	uint16_t coords[SYSTEM_DIMENSIONS];
+
+	_internal_reset_ba_system(0, coords, track_down_mps);
+}
+
+extern char *ba_passthroughs_string(uint16_t passthrough)
+{
+	/* Table of passthrough bits and their dimension letters */
+	static const struct {
+		uint16_t flag;
+		char *name;
+	} pass_dims[] = {
+		{ PASS_FOUND_A, "A" },
+		{ PASS_FOUND_X, "X" },
+		{ PASS_FOUND_Y, "Y" },
+		{ PASS_FOUND_Z, "Z" },
+	};
+	char *pass = NULL;
+	int i;
+
+	/* Build a comma separated list of the dimensions found */
+	for (i = 0; i < 4; i++) {
+		if (!(passthrough & pass_dims[i].flag))
+			continue;
+		if (pass)
+			xstrcat(pass, ",");
+		xstrcat(pass, pass_dims[i].name);
+	}
+
+	return pass;
+}
+
+/* This is defined here so we can get it on non-bluegene systems since
+ * it is needed in pack/unpack functions, and bluegene.c isn't
+ * compiled for non-bluegene machines, and it didn't make sense to
+ * compile the whole file just for this one function.
+ */
+extern char *give_geo(uint16_t *int_geo, int dims, bool with_sep)
+{
+	char *geo_str = NULL;
+	int dim;
+
+	/* One alpha-numeric character per dimension, optionally joined
+	 * with "x"; caller must xfree() the result */
+	for (dim = 0; dim < dims; dim++) {
+		if (with_sep && geo_str)
+			xstrcat(geo_str, "x");
+		xstrfmtcat(geo_str, "%c", alpha_num[int_geo[dim]]);
+	}
+	return geo_str;
+}
+
diff --git a/src/plugins/select/bluegene/ba_common.h b/src/plugins/select/bluegene/ba_common.h
new file mode 100644
index 000000000..8b9344f9b
--- /dev/null
+++ b/src/plugins/select/bluegene/ba_common.h
@@ -0,0 +1,563 @@
+/*****************************************************************************\
+ *  ba_common.h
+ *
+ *****************************************************************************
+ *  Copyright (C) 2011 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Danny Auble <da@llnl.gov>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifndef _BLOCK_ALLOCATOR_COMMON_H_
+#define _BLOCK_ALLOCATOR_COMMON_H_
+
+#include "src/common/node_select.h"
+#include "bridge_linker.h"
+
+#define BIG_MAX 9999
+#define BUFSIZE 4096
+
+#define SWAP(a,b,t)				\
+	_STMT_START {				\
+		(t) = (a);			\
+		(a) = (b);			\
+		(b) = (t);			\
+	} _STMT_END
+
+/* This is only used on L and P hence the 6 count */
+#define NUM_PORTS_PER_NODE 6
+
+extern int DIM_SIZE[HIGHEST_DIMENSIONS]; /* how many midplanes in
+					  * each dimension */
+
+#define PASS_DENY_A    0x0001
+#define PASS_DENY_X    0x0002
+#define PASS_DENY_Y    0x0004
+#define PASS_DENY_Z    0x0008
+#define PASS_DENY_ALL  0x00ff
+
+#define PASS_FOUND_A   0x0100
+#define PASS_FOUND_X   0x0200
+#define PASS_FOUND_Y   0x0400
+#define PASS_FOUND_Z   0x0800
+#define PASS_FOUND_ANY 0xff00
+
+#define BA_MP_USED_FALSE          0x0000
+#define BA_MP_USED_TRUE           0x0001
+#define BA_MP_USED_TEMP           0x0002
+#define BA_MP_USED_ALTERED        0x0100
+#define BA_MP_USED_PASS_BIT       0x1000
+#define BA_MP_USED_ALTERED_PASS   0x1100 // This should overlap
+					 // BA_MP_USED_ALTERED and
+					 // BA_MP_USED_PASS_BIT
+
+/* This data structure records all possible combinations of bits which can be
+ * set in a bitmap of a specified size. Each bit is equivalent to another and
+ * there is no consideration of wiring. Increase LONGEST_BGQ_DIM_LEN as needed
+ * to support larger systems. */
+#ifndef LONGEST_BGQ_DIM_LEN
+#define LONGEST_BGQ_DIM_LEN 8
+#endif
+
+typedef struct ba_geo_table {
+	uint16_t size;			/* Total object count */
+	uint16_t *geometry;		/* Size in each dimension */
+	uint16_t full_dim_cnt;		/* Fully occupied dimension count */
+	uint16_t passthru_cnt;		/* Count of nodes lost for passthru */
+	struct ba_geo_table *next_ptr;	/* Next geometry of this size */
+} ba_geo_table_t;
+
+typedef struct {
+	uint16_t dim_count;		/* Number of system dimensions */
+	int *dim_size;	        	/* System size in each dimension */
+	uint16_t total_size;		/* Total number of nodes in system */
+
+	ba_geo_table_t **geo_table_ptr;	/* Pointers to possible geometries.
+					 * Index is request size */
+	uint16_t geo_table_size;	/* Number of ba_geo_table_t records */
+} ba_geo_system_t;
+
+/*
+ * structure that holds the configuration settings for each connection
+ *
+ * - mp_tar - coords of where the next hop is externally
+ *              internally - nothing.
+ *              externally - location of next hop.
+ * - port_tar - which port the connection is going to
+ *              internally - always going to something within the switch.
+ *              externally - always going to the next hop outside the switch.
+ * - used     - whether or not the connection is used.
+ *
+ */
+typedef struct {
+	/* target label */
+	uint16_t mp_tar[HIGHEST_DIMENSIONS];
+	/* target port */
+	uint16_t port_tar;
+	uint16_t used;
+} ba_connection_t;
+
+/*
+ * structure that holds the configuration settings for each switch
+ * which pretty much means the wiring information
+ * - int_wire - keeps details of where the wires are attached
+ *   internally.
+ * - ext_wire - keeps details of where the wires are attached
+ *   externally.
+ *
+ */
+typedef struct {
+	ba_connection_t int_wire[NUM_PORTS_PER_NODE];
+	ba_connection_t ext_wire[NUM_PORTS_PER_NODE];
+	uint16_t usage;
+} ba_switch_t;
+
+/*
+ * ba_mp_t: mp within the allocation system.
+ */
+typedef struct block_allocator_mp {
+	/* altered wires in the switch */
+	ba_switch_t alter_switch[HIGHEST_DIMENSIONS];
+	/* a switch for each dimensions */
+	ba_switch_t axis_switch[HIGHEST_DIMENSIONS];
+	/* Bitmap of available cnodes */
+	bitstr_t *cnode_bitmap;
+	/* coordinates of midplane */
+	uint16_t coord[HIGHEST_DIMENSIONS];
+	/* coordinates of midplane in str format */
+	char coord_str[HIGHEST_DIMENSIONS+1];
+	/* midplane index used for easy look up of the midplane */
+	uint32_t index;
+	/* rack-midplane location. */
+	char *loc;
+	struct block_allocator_mp *next_mp[HIGHEST_DIMENSIONS];
+	char **nodecard_loc;
+	struct block_allocator_mp *prev_mp[HIGHEST_DIMENSIONS];
+	int state;
+	/* set if using this midplane in a block */
+	uint16_t used;
+} ba_mp_t;
+
+typedef struct {
+	int elem_count;			/* length of arrays set_count_array
+					 * and set_bits_array */
+	int *gap_count;			/* number of gaps in this array */
+	bool *has_wrap;			/* true if uses torus to wrap alloc,
+					 * implies gap_count <= 1 */
+	int *set_count_array;		/* number of set bits in this array */
+	bitstr_t **set_bits_array;	/* bitmap rows to use */
+	uint16_t *start_coord;		/* array of lowest coord in block */
+	uint16_t *block_size;		/* dimension size in block */
+} ba_geo_combos_t;
+
+extern ba_geo_combos_t geo_combos[LONGEST_BGQ_DIM_LEN];
+
+extern uint16_t ba_deny_pass;
+extern int cluster_dims;
+extern uint32_t cluster_flags;
+extern int cluster_base;
+extern bool ba_initialized;
+extern uint32_t ba_debug_flags;
+extern bitstr_t *ba_main_mp_bitmap;
+
+/*
+ * Initialize internal structures by either reading previous block
+ * configurations from a file or by running the graph solver.
+ *
+ * IN: node_info_msg_t * can be null,
+ *     should be from slurm_load_node().
+ * IN: load_bridge: whether or not to get bridge information
+ *
+ * return: void.
+ */
+extern void ba_init(node_info_msg_t *node_info_ptr, bool load_bridge);
+
+/*
+ * destroy all the internal (global) data structs.
+ */
+extern void ba_fini(void);
+
+/* setup the wires for the system */
+extern void ba_setup_wires(void);
+
+extern void destroy_ba_mp(void *ptr);
+extern void pack_ba_mp(ba_mp_t *ba_mp, Buf buffer, uint16_t protocol_version);
+extern int unpack_ba_mp(ba_mp_t **ba_mp_pptr, Buf buffer,
+			uint16_t protocol_version);
+
+/* translate a string of at least AXYZ into a ba_mp_t ptr */
+extern ba_mp_t *str2ba_mp(const char *coords);
+/*
+ * find a base blocks bg location (rack/midplane)
+ */
+extern ba_mp_t *loc2ba_mp(const char* mp_id);
+extern ba_mp_t *coord2ba_mp(const uint16_t *coord);
+
+/*
+ * setup the ports and what not for a midplane.
+ */
+extern void ba_setup_mp(ba_mp_t *ba_mp, bool track_down_mps, bool wrap_it);
+
+/*
+ * copy info from a ba_mp, a direct memcpy of the ba_mp_t
+ *
+ * IN ba_mp: mp to be copied
+ * Returned ba_mp_t *: copied info must be freed with destroy_ba_mp
+ */
+extern ba_mp_t *ba_copy_mp(ba_mp_t *ba_mp);
+
+/*
+ * Print a linked list of ba_geo_table_t entries.
+ * IN geo_ptr - first geo_table entry to print
+ * IN header - message header
+ * IN my_geo_system - system geometry specification
+ */
+extern int ba_geo_list_print(ba_geo_table_t *geo_ptr, char *header,
+			     ba_geo_system_t *my_geo_system);
+
+/*
+ * Print the contents of all ba_geo_table_t entries.
+ */
+extern void ba_print_geo_table(ba_geo_system_t *my_geo_system);
+
+/*
+ * Create a geo_table of possible unique geometries
+ * IN/OUT my_geo_system - system geometry specification.
+ *		Set dim_count and dim_size. Other fields should be NULL.
+ *		This function will set total_size, geo_table_ptr, and
+ *		geo_table_size.
+ * Release memory using ba_free_geo_table().
+ */
+extern void ba_create_geo_table(ba_geo_system_t *my_geo_system);
+
+/*
+ * Free memory allocated by ba_create_geo_table().
+ * IN my_geo_system - System geometry specification.
+ */
+extern void ba_free_geo_table(ba_geo_system_t *my_geo_system);
+
+/*
+ * Allocate a multi-dimensional node bitmap. Use ba_node_map_free() to free
+ * IN my_geo_system - system geometry specification
+ */
+extern bitstr_t *ba_node_map_alloc(ba_geo_system_t *my_geo_system);
+
+/*
+ * Free a node map created by ba_node_map_alloc()
+ * IN node_bitmap - bitmap of currently allocated nodes
+ * IN my_geo_system - system geometry specification
+ */
+extern void ba_node_map_free(bitstr_t *node_bitmap,
+			     ba_geo_system_t *my_geo_system);
+
+/*
+ * Set the contents of the specified position in the bitmap
+ * IN/OUT node_bitmap - bitmap of currently allocated nodes
+ * IN full_offset - N-dimension zero-origin offset to set
+ * IN my_geo_system - system geometry specification
+ */
+extern void ba_node_map_set(bitstr_t *node_bitmap, int *full_offset,
+			    ba_geo_system_t *my_geo_system);
+
+/*
+ * Set the contents of the specified position in the bitmap
+ * IN/OUT node_bitmap - bitmap of currently allocated nodes
+ * IN start_offset - N-dimension zero-origin offset to start setting at
+ * IN end_offset - N-dimension zero-origin offset to end setting at
+ * IN my_geo_system - system geometry specification
+ */
+extern void ba_node_map_set_range(bitstr_t *node_bitmap,
+				  int *start_offset, int *end_offset,
+				  ba_geo_system_t *my_geo_system);
+
+/*
+ * Return the contents of the specified position in the bitmap
+ * IN node_bitmap - bitmap of currently allocated nodes
+ * IN full_offset - N-dimension zero-origin offset to test
+ * IN my_geo_system - system geometry specification
+ */
+extern int ba_node_map_test(bitstr_t *node_bitmap, int *full_offset,
+			    ba_geo_system_t *my_geo_system);
+
+/*
+ * Add a new allocation's node bitmap to that of the currently
+ *	allocated bitmap
+ * IN/OUT node_bitmap - bitmap of currently allocated nodes
+ * IN alloc_bitmap - bitmap of nodes to be added to node_bitmap
+ * IN my_geo_system - system geometry specification
+ */
+extern void ba_node_map_add(bitstr_t *node_bitmap, bitstr_t *alloc_bitmap,
+			    ba_geo_system_t *my_geo_system);
+
+/*
+ * Remove a terminating allocation's node bitmap from that of the currently
+ *	allocated bitmap
+ * IN/OUT node_bitmap - bitmap of currently allocated nodes
+ * IN alloc_bitmap - bitmap of nodes to be removed from node_bitmap
+ * IN my_geo_system - system geometry specification
+ */
+extern void ba_node_map_rm(bitstr_t *node_bitmap, bitstr_t *alloc_bitmap,
+			   ba_geo_system_t *my_geo_system);
+
+/*
+ * Print the contents of a node map created by ba_node_map_alloc() or
+ *	ba_geo_test_all(). Output may be in one-dimension or more depending
+ *	upon configuration.
+ * IN node_bitmap - bitmap representing current system state, bits are set
+ *                  for currently allocated nodes
+ * IN my_geo_system - system geometry specification
+ */
+extern void ba_node_map_print(bitstr_t *node_bitmap,
+			      ba_geo_system_t *my_geo_system);
+
+/*
+ * give a hostlist version of the contents of a node map created by
+ *	ba_node_map_alloc() or
+ *	ba_geo_test_all(). Output may be in one-dimension or more depending
+ *	upon configuration.
+ * IN node_bitmap - bitmap representing current system state, bits are set
+ *                  for currently allocated nodes
+ * IN my_geo_system - system geometry specification
+ * OUT char * - needs to be xfreed from caller.
+ */
+extern char *ba_node_map_ranged_hostlist(bitstr_t *node_bitmap,
+					 ba_geo_system_t *my_geo_system);
+
+/*
+ * Attempt to place a new allocation into an existing node state.
+ * Do not rotate or change the requested geometry, but do attempt to place
+ * it using all possible starting locations.
+ *
+ * IN node_bitmap - bitmap representing current system state, bits are set
+ *		for currently allocated nodes
+ * OUT alloc_node_bitmap - bitmap representing where to place the allocation
+ *		set only if RET == SLURM_SUCCESS
+ * IN geo_req - geometry required for the new allocation
+ * OUT attempt_cnt - number of job placements attempted
+ * IN my_geo_system - system geometry specification
+ * IN deny_pass - if set, then do not allow gaps in a specific dimension, any
+ *		gap applies to all elements at that position in that dimension,
+ *		one value per dimension, default value prevents gaps in any
+ *		dimension
+ * IN/OUT start_pos - input is pointer to array having same size as
+ *		dimension count or NULL. Set to starting coordinates of
+ *		the allocation in each dimension.
+ * IN/OUT scan_offset - Location in search table from which to continue
+ *		searching for resources. Initial value should be zero. If the
+ *		allocation selected by the algorithm is not acceptable, call
+ *		the function repeatedly with the previous output value of
+ *		scan_offset
+ * IN deny_wrap - If set then do not permit the allocation to wrap (i.e. do
+ *		not treat as having a torus interconnect)
+ * RET - SLURM_SUCCESS if allocation can be made, otherwise SLURM_ERROR
+ */
+extern int ba_geo_test_all(bitstr_t *node_bitmap,
+			   bitstr_t **alloc_node_bitmap,
+			   ba_geo_table_t *geo_req, int *attempt_cnt,
+			   ba_geo_system_t *my_geo_system, uint16_t *deny_pass,
+			   uint16_t *start_pos, int *scan_offset,
+			   bool deny_wrap);
+
+/*
+ * Used to set all midplanes in a special used state except the ones
+ * we are able to use in a new allocation.
+ *
+ * IN: bitmap of midplanes we do or do not want
+ * IN: except - If true set all midplanes not set in the bitmap else
+ *              set all midplanes that are set in the bitmap.
+ * RET: SLURM_SUCCESS on success, or SLURM_ERROR on error
+ *
+ * Note: Need to call ba_reset_all_removed_mps before starting another
+ * allocation attempt after
+ */
+extern int ba_set_removable_mps(bitstr_t *bitmap, bool except);
+
+/*
+ * Resets the virtual system to the previous state before calling
+ * ba_set_removable_mps.
+ */
+extern int ba_reset_all_removed_mps(void);
+
+/*
+ * set the mp in the internal configuration as in, or not in use,
+ * along with the current state of the mp.
+ *
+ * IN ba_mp: ba_mp_t to update state
+ * IN state: new state of ba_mp_t
+ */
+extern void ba_update_mp_state(ba_mp_t *ba_mp, uint16_t state);
+
+/* make sure a node is in the system return 1 if it is 0 if not */
+extern int validate_coord(uint16_t *coord);
+
+extern char *ba_switch_usage_str(uint16_t usage);
+
+extern void set_ba_debug_flags(uint32_t debug_flags);
+/*
+ * Resets the virtual system to a virgin state.  If track_down_mps is set
+ * then those midplanes are not set to idle, but kept in a down state.
+ */
+extern void reset_ba_system(bool track_down_mps);
+
+/* in the respective block_allocator.c */
+extern void ba_create_system(void);
+extern void ba_destroy_system(void);
+
+/*
+ * create a block request.  Note that if the geometry is given,
+ * then size is ignored.  If elongate is true, the algorithm will try
+ * to fit that a block of cubic shape and then it will try other
+ * elongated geometries.  (ie, 2x2x2 -> 4x2x1 -> 8x1x1).
+ *
+ * IN/OUT - ba_request: structure to allocate and fill in.
+ *
+ * ALL below IN's need to be set within the ba_request before the call
+ * if you want them to be used.
+ * ALL below OUT's are set and returned within the ba_request.
+ * IN - avail_mp_bitmap: bitmap of usable midplanes.
+ * IN - blrtsimage: BlrtsImage for this block if not default
+ * IN - conn_type: connection type of request (TORUS or MESH or SMALL)
+ * IN - elongate: if true, will try to fit different geometries of
+ *      same size requests
+ * IN/OUT - geometry: requested/returned geometry of block
+ * IN - linuximage: LinuxImage for this block if not default
+ * IN - mloaderimage: MLoaderImage for this block if not default
+ * OUT - passthroughs: if there were passthroughs used in the
+ *       generation of the block.
+ * IN - procs: Number of real processors requested
+ * IN - RamDiskimage: RamDiskImage for this block if not default
+ * IN - rotate: if true, allows rotation of block during fit
+ * OUT - save_name: hostlist of midplanes used in block
+ * IN/OUT - size: requested/returned count of midplanes in block
+ * IN - start: geo location of where to start the allocation
+ * IN - start_req: if set use the start variable to start at
+ * return success of allocation/validation of params
+ */
+extern int new_ba_request(select_ba_request_t* ba_request);
+
+/*
+ * print a block request
+ */
+extern void print_ba_request(select_ba_request_t* ba_request);
+
+#ifdef HAVE_BG_L_P
+/*
+ * copy the path of the nodes given
+ *
+ * IN nodes List of ba_mp_t *'s: nodes to be copied
+ * OUT dest_nodes List of ba_mp_t *'s: filled in list of nodes
+ * wiring.
+ * Return on success SLURM_SUCCESS, on error SLURM_ERROR
+ */
+extern int copy_node_path(List nodes, List *dest_nodes);
+#endif
+
+/*
+ * Try to allocate a block.
+ *
+ * IN - ba_request: allocation request
+ * OUT - results: List of results of the allocation request.  Each
+ * list entry will be a coordinate.  allocate_block will create the
+ * list, but the caller must destroy it.
+ *
+ * return: success or error of request
+ */
+extern int allocate_block(select_ba_request_t* ba_request, List results);
+
+/*
+ * Admin wants to remove a previous allocation.
+ * will allow Admin to delete a previous allocation retrieval by letter code.
+ */
+extern int remove_block(List mps, bool is_small);
+
+/*
+ * Used to set a block into a virtual system.  The system can be
+ * cleared first and this function sets all the wires and midplanes
+ * used in the mplist given.  The mplist is a list of ba_mp_t's
+ * that are already set up.  This is very handly to test if there are
+ * any passthroughs used by one block when adding another block that
+ * also uses those wires, and neither use any overlapping
+ * midplanes. Doing a simple bitmap & will not reveal this.
+ *
+ * Returns SLURM_SUCCESS if mplist fits into system without
+ * conflict, and SLURM_ERROR if mplist conflicts with something
+ * already in the system.
+ */
+extern int check_and_set_mp_list(List mps);
+
+/*
+ * Used to find, and set up midplanes and the wires in the virtual
+ * system and return them in List results
+ *
+ * IN/OUT results - a list with a NULL destroyer filled in with
+ *        midplanes and wires set to create the block with the api. If
+ *        only interested in the hostlist, NULL can be accepted also.
+ * IN start - where to start the allocation.
+ * IN geometry - the requested geometry of the block.
+ * IN conn_type - mesh, torus, or small.
+ * RET char * - hostlist of midplanes results represent must be
+ *     xfreed.  NULL on failure
+ */
+extern char *set_bg_block(List results, uint16_t *start,
+			  uint16_t *geometry, uint16_t *conn_type);
+
+/*
+ * Set up the map for resolving
+ */
+extern int set_mp_locations(void);
+
+/*
+ * set the used wires in the virtual system for a block from the real system
+ */
+extern int load_block_wiring(char *bg_block_id);
+
+extern void ba_rotate_geo(uint16_t *req_geo, int rot_cnt);
+
+extern ba_mp_t *ba_pick_sub_block_cnodes(
+	bg_record_t *bg_record, uint32_t *node_count,
+	select_jobinfo_t *jobinfo);
+
+extern int ba_clear_sub_block_cnodes(
+	bg_record_t *bg_record, struct step_record *step_ptr);
+
+extern bitstr_t *ba_create_ba_mp_cnode_bitmap(bg_record_t *bg_record);
+
+/* set the ionode str based off the block allocator, either ionodes
+ * or cnode coords */
+extern char *ba_set_ionode_str(bitstr_t *bitmap);
+
+/* Convert PASS_FOUND_* into equivalent string
+ * Caller MUST xfree() the returned value */
+extern char *ba_passthroughs_string(uint16_t passthrough);
+
+extern char *give_geo(uint16_t *int_geo, int dims, bool with_sep);
+
+#endif
diff --git a/src/plugins/select/bluegene/bg_core.c b/src/plugins/select/bluegene/bg_core.c
new file mode 100644
index 000000000..2f82707be
--- /dev/null
+++ b/src/plugins/select/bluegene/bg_core.c
@@ -0,0 +1,604 @@
+/*****************************************************************************\
+ *  bg_core.c - blue gene node configuration processing module.
+ *
+ *  $Id$
+ *****************************************************************************
+ *  Copyright (C) 2004-2007 The Regents of the University of California.
+ *  Copyright (C) 2008-2011 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Danny Auble <auble1@llnl.gov> et. al.
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#include "bg_read_config.h"
+#include "bg_core.h"
+#include "bg_defined_block.h"
+#include "src/slurmctld/locks.h"
+#include <fcntl.h>
+#ifdef HAVE_BG_L_P
+#include "bl/bridge_status.h"
+#endif
+#define MAX_FREE_RETRIES           200 /* max number of
+					* FREE_SLEEP_INTERVALS to wait
+					* before putting a
+					* deallocating block into
+					* error state.
+					*/
+#define FREE_SLEEP_INTERVAL        3 /* When freeing a block wait this
+				      * long before looking at state
+				      * again.
+				      */
+#define HUGE_BUF_SIZE (1024*16)
+
+#define _DEBUG 0
+
+typedef struct {
+	List track_list;
+	uint32_t job_id;
+	bool destroy;
+} bg_free_block_list_t;
+
+static int _post_block_free(bg_record_t *bg_record, bool restore);
+static void *_track_freeing_blocks(void *args);
+
+/* block_state_mutex should be locked before calling this */
+static int _post_block_free(bg_record_t *bg_record, bool restore)
+{
+	int rc = SLURM_SUCCESS;
+
+	if (bg_record->magic != BLOCK_MAGIC) {
+		error("block already destroyed %p", bg_record);
+		xassert(0);
+		return SLURM_ERROR;
+	}
+
+	bg_record->free_cnt--;
+
+	if (bg_record->free_cnt == -1) {
+		info("we got a negative 1 here for %s",
+		     bg_record->bg_block_id);
+		xassert(0);
+		return SLURM_SUCCESS;
+	} else if (bg_record->modifying) {
+		info("%d others are modifing this block %s",
+		     bg_record->free_cnt, bg_record->bg_block_id);
+		return SLURM_SUCCESS;
+	} else if (bg_record->free_cnt) {
+		if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE)
+			info("%d others are trying to destroy this block %s",
+			     bg_record->free_cnt, bg_record->bg_block_id);
+		return SLURM_SUCCESS;
+	}
+
+	if (!(bg_record->state & BG_BLOCK_ERROR_FLAG)
+	    && (bg_record->state != BG_BLOCK_FREE)) {
+		/* Something isn't right, go mark this one in an error
+		   state. */
+		update_block_msg_t block_msg;
+		if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE)
+			info("_post_block_free: block %s is not in state "
+			     "free (%s), putting it in error state.",
+			     bg_record->bg_block_id,
+			     bg_block_state_string(bg_record->state));
+		slurm_init_update_block_msg(&block_msg);
+		block_msg.bg_block_id = bg_record->bg_block_id;
+		block_msg.state = BG_BLOCK_ERROR_FLAG;
+		block_msg.reason = "Block would not deallocate";
+		slurm_mutex_unlock(&block_state_mutex);
+		select_g_update_block(&block_msg);
+		slurm_mutex_lock(&block_state_mutex);
+		return SLURM_SUCCESS;
+	}
+
+	/* A bit of a sanity check to make sure blocks are being
+	   removed out of all the lists.
+	*/
+	remove_from_bg_list(bg_lists->booted, bg_record);
+	if (remove_from_bg_list(bg_lists->job_running, bg_record)
+	    == SLURM_SUCCESS)
+		num_unused_cpus += bg_record->cpu_cnt;
+
+	if (restore)
+		return SLURM_SUCCESS;
+
+	if (remove_from_bg_list(bg_lists->main, bg_record) != SLURM_SUCCESS) {
+		/* This should only happen if called from
+		 * bg_job_place.c where the block was never added to
+		 * the list. */
+		debug("_post_block_free: It appears this block %s isn't "
+		      "in the main list anymore.",
+		      bg_record->bg_block_id);
+	}
+
+	if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE)
+		info("_post_block_free: removing %s from database",
+		     bg_record->bg_block_id);
+
+	rc = bridge_block_remove(bg_record);
+	if (rc != SLURM_SUCCESS) {
+		if (rc == BG_ERROR_BLOCK_NOT_FOUND) {
+			debug("_post_block_free: block %s is not found",
+			      bg_record->bg_block_id);
+		} else {
+			error("_post_block_free: "
+			      "bridge_block_remove(%s): %s",
+			      bg_record->bg_block_id,
+			      bg_err_str(rc));
+		}
+	} else if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE)
+		info("_post_block_free: done %s(%p)",
+		     bg_record->bg_block_id, bg_record);
+
+	destroy_bg_record(bg_record);
+	if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE)
+		info("_post_block_free: destroyed");
+
+	return SLURM_SUCCESS;
+}
+
+static void *_track_freeing_blocks(void *args)
+{
+	bg_free_block_list_t *bg_free_list = (bg_free_block_list_t *)args;
+	List track_list = bg_free_list->track_list;
+	bool destroy = bg_free_list->destroy;
+	uint32_t job_id = bg_free_list->job_id;
+	int retry_cnt = 0;
+	int free_cnt = 0, track_cnt = list_count(track_list);
+	ListIterator itr = list_iterator_create(track_list);
+	bg_record_t *bg_record;
+	bool restore = true;
+
+	debug("_track_freeing_blocks: Going to free %d for job %u",
+	      track_cnt, job_id);
+	while (retry_cnt < MAX_FREE_RETRIES) {
+		free_cnt = 0;
+		slurm_mutex_lock(&block_state_mutex);
+#ifdef HAVE_BG_L_P
+		/* just to make sure state is updated */
+		bridge_status_update_block_list_state(track_list);
+#endif
+		list_iterator_reset(itr);
+		/* just in case this changes from the update function */
+		track_cnt = list_count(track_list);
+		while ((bg_record = list_next(itr))) {
+			if (bg_record->magic != BLOCK_MAGIC) {
+				/* update_block_list_state should
+				   remove this already from the list
+				   so we shouldn't ever have this.
+				*/
+				error("_track_freeing_blocks: block was "
+				      "already destroyed %p", bg_record);
+				xassert(0);
+				free_cnt++;
+				continue;
+			}
+#ifndef HAVE_BG_FILES
+			/* Fake a free since we are in a deallocating
+			   state before this.
+			*/
+			if (!(bg_record->state & BG_BLOCK_ERROR_FLAG)
+			    && (retry_cnt >= 3))
+				bg_record->state = BG_BLOCK_FREE;
+#endif
+			if ((bg_record->state == BG_BLOCK_FREE)
+			    || (bg_record->state & BG_BLOCK_ERROR_FLAG))
+				free_cnt++;
+			else if (bg_record->state != BG_BLOCK_TERM)
+				bg_free_block(bg_record, 0, 1);
+		}
+		slurm_mutex_unlock(&block_state_mutex);
+		if (free_cnt == track_cnt)
+			break;
+		debug("_track_freeing_blocks: freed %d of %d for job %u",
+		      free_cnt, track_cnt, job_id);
+		sleep(FREE_SLEEP_INTERVAL);
+		retry_cnt++;
+	}
+	debug("_track_freeing_blocks: Freed them all for job %u", job_id);
+
+	if ((bg_conf->layout_mode == LAYOUT_DYNAMIC) || destroy)
+		restore = false;
+
+	/* If there is a block in error state we need to keep all
+	 * these blocks around. */
+	slurm_mutex_lock(&block_state_mutex);
+	list_iterator_reset(itr);
+	while ((bg_record = list_next(itr))) {
+		/* block no longer exists */
+		if (bg_record->magic != BLOCK_MAGIC)
+			continue;
+		if (bg_record->state != BG_BLOCK_FREE) {
+			restore = true;
+			break;
+		}
+	}
+
+	list_iterator_reset(itr);
+	while ((bg_record = list_next(itr)))
+		_post_block_free(bg_record, restore);
+	slurm_mutex_unlock(&block_state_mutex);
+	last_bg_update = time(NULL);
+	list_iterator_destroy(itr);
+	list_destroy(track_list);
+	xfree(bg_free_list);
+	return NULL;
+}
+
+/*
+ * block_state_mutex should be locked before calling this function
+ */
+extern bool blocks_overlap(bg_record_t *rec_a, bg_record_t *rec_b)
+{
+	/* deal with large blocks here */
+	if ((rec_a->mp_count > 1) && (rec_b->mp_count > 1)) {
+		/* check for overlap. */
+		if (rec_a->mp_bitmap && rec_b->mp_bitmap
+		    && bit_overlap(rec_a->mp_bitmap, rec_b->mp_bitmap))
+			return true;
+		/* Test for conflicting passthroughs */
+		reset_ba_system(false);
+		check_and_set_mp_list(rec_a->ba_mp_list);
+		if (check_and_set_mp_list(rec_b->ba_mp_list) == SLURM_ERROR)
+			return true;
+		return false;
+	}
+
+	/* now deal with at least one of these being a small block */
+	if (rec_a->mp_bitmap && rec_b->mp_bitmap
+	    && !bit_overlap(rec_a->mp_bitmap, rec_b->mp_bitmap))
+		return false;
+
+	if ((rec_a->cnode_cnt >= bg_conf->mp_cnode_cnt)
+	    || (rec_b->cnode_cnt >= bg_conf->mp_cnode_cnt))
+		return true;
+
+	if (rec_a->ionode_bitmap && rec_b->ionode_bitmap
+	    && !bit_overlap(rec_a->ionode_bitmap, rec_b->ionode_bitmap))
+		return false;
+
+	return true;
+}
+
+/* block_state_mutex must be unlocked before calling this. */
+extern void bg_requeue_job(uint32_t job_id, bool wait_for_start)
+{
+	int rc;
+	slurmctld_lock_t job_write_lock = {
+		NO_LOCK, WRITE_LOCK, WRITE_LOCK, NO_LOCK };
+
+	/* Wait for the slurmd to begin the batch script; slurm_fail_job()
+	   is a no-op if issued prior to the script initiation, so do
+	   clean up just in case the fail job isn't run. */
+	if (wait_for_start)
+		sleep(2);
+
+	lock_slurmctld(job_write_lock);
+	if ((rc = job_requeue(0, job_id, -1, (uint16_t)NO_VAL, false))) {
+		error("Couldn't requeue job %u, failing it: %s",
+		      job_id, slurm_strerror(rc));
+		job_fail(job_id);
+	}
+	unlock_slurmctld(job_write_lock);
+}
+
+/* if SLURM_ERROR you will need to fail the job with
+   slurm_fail_job(bg_record->job_running);
+*/
+
+/**
+ * sort the partitions by increasing size
+ */
+extern void sort_bg_record_inc_size(List records){
+	if (records == NULL)
+		return;
+	list_sort(records, (ListCmpF) bg_record_cmpf_inc);
+	last_bg_update = time(NULL);
+}
+
+extern int bg_free_block(bg_record_t *bg_record, bool wait, bool locked)
+{
+	int rc = SLURM_SUCCESS;
+	int count = 0;
+
+	if (!bg_record) {
+		error("bg_free_block: there was no bg_record");
+		return SLURM_ERROR;
+	}
+
+	if (!locked)
+		slurm_mutex_lock(&block_state_mutex);
+
+	while (count < MAX_FREE_RETRIES) {
+		/* block was removed */
+		if (bg_record->magic != BLOCK_MAGIC) {
+			error("block was removed while freeing it here");
+			xassert(0);
+			if (!locked)
+				slurm_mutex_unlock(&block_state_mutex);
+			return SLURM_SUCCESS;
+		}
+		/* Reset these here so we don't try to reboot it
+		   when the state goes to free.
+		*/
+		bg_record->boot_state = 0;
+		bg_record->boot_count = 0;
+		/* Here we don't need to check if the block is still
+		 * in existence since this function can't be called on
+		 * the same block twice.  It may
+		 * have already been removed at this point also.
+		 */
+#ifdef HAVE_BG_FILES
+		if (bg_record->state != BG_BLOCK_FREE
+		    && bg_record->state != BG_BLOCK_TERM) {
+			if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE)
+				info("bridge_destroy %s",
+				     bg_record->bg_block_id);
+			rc = bridge_block_free(bg_record);
+			if (rc != SLURM_SUCCESS) {
+				if (rc == BG_ERROR_BLOCK_NOT_FOUND) {
+					debug("block %s is not found",
+					      bg_record->bg_block_id);
+					bg_record->state = BG_BLOCK_FREE;
+					break;
+				} else if (rc == BG_ERROR_INVALID_STATE) {
+#ifndef HAVE_BGL
+					/* If the state is error and
+					   we get an incompatible
+					   state back here, it means
+					   we set it ourselves so
+					   break out.
+					*/
+					if (bg_record->state
+					    & BG_BLOCK_ERROR_FLAG)
+						break;
+#endif
+					if (bg_conf->slurm_debug_flags
+					    & DEBUG_FLAG_SELECT_TYPE)
+						info("bridge_block_free"
+						     "(%s): %s State = %d",
+						     bg_record->bg_block_id,
+						     bg_err_str(rc),
+						     bg_record->state);
+#ifdef HAVE_BGQ
+					if (bg_record->state != BG_BLOCK_FREE
+					    && bg_record->state
+					    != BG_BLOCK_TERM)
+					bg_record->state = BG_BLOCK_TERM;
+#endif
+				} else {
+					error("bridge_block_free"
+					      "(%s): %s State = %d",
+					      bg_record->bg_block_id,
+					      bg_err_str(rc),
+					      bg_record->state);
+				}
+			}
+		}
+#else
+		/* Fake a free since we are in a deallocating
+		   state before this.
+		*/
+		if (bg_record->state & BG_BLOCK_ERROR_FLAG) {
+			/* This will set the state to ERROR(Free)
+			 * just in case the state was ERROR(SOMETHING ELSE) */
+			bg_record->state = BG_BLOCK_ERROR_FLAG;
+			break;
+		} else if (!wait || (count >= 3))
+			bg_record->state = BG_BLOCK_FREE;
+		else if (bg_record->state != BG_BLOCK_FREE)
+			bg_record->state = BG_BLOCK_TERM;
+#endif
+
+		if (!wait || (bg_record->state == BG_BLOCK_FREE)
+#ifndef HAVE_BGL
+		    ||  (bg_record->state & BG_BLOCK_ERROR_FLAG)
+#endif
+			) {
+			break;
+		}
+		/* If we were locked outside of this we need to unlock
+		   to not cause deadlock on this mutex until we are
+		   done.
+		*/
+		slurm_mutex_unlock(&block_state_mutex);
+		sleep(FREE_SLEEP_INTERVAL);
+		count++;
+		slurm_mutex_lock(&block_state_mutex);
+	}
+
+	rc = SLURM_SUCCESS;
+	if ((bg_record->state == BG_BLOCK_FREE)
+	    || (bg_record->state & BG_BLOCK_ERROR_FLAG))
+		remove_from_bg_list(bg_lists->booted, bg_record);
+	else if (count >= MAX_FREE_RETRIES) {
+		/* Something isn't right, go mark this one in an error
+		   state. */
+		update_block_msg_t block_msg;
+		if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE)
+			info("bg_free_block: block %s is not in state "
+			     "free (%s), putting it in error state.",
+			     bg_record->bg_block_id,
+			     bg_block_state_string(bg_record->state));
+		slurm_init_update_block_msg(&block_msg);
+		block_msg.bg_block_id = bg_record->bg_block_id;
+		block_msg.state = BG_BLOCK_ERROR_FLAG;
+		block_msg.reason = "Block would not deallocate";
+		slurm_mutex_unlock(&block_state_mutex);
+		select_g_update_block(&block_msg);
+		slurm_mutex_lock(&block_state_mutex);
+		rc = SLURM_ERROR;
+	}
+	if (!locked)
+		slurm_mutex_unlock(&block_state_mutex);
+
+	return rc;
+}
+
+/* block_state_mutex should be unlocked before calling this */
+extern int free_block_list(uint32_t job_id, List track_list,
+			   bool destroy, bool wait)
+{
+	bg_record_t *bg_record = NULL;
+	int retries;
+	ListIterator itr = NULL;
+	bg_free_block_list_t *bg_free_list;
+	pthread_attr_t attr_agent;
+	pthread_t thread_agent;
+
+	if (!track_list || !list_count(track_list))
+		return SLURM_SUCCESS;
+
+	bg_free_list = xmalloc(sizeof(bg_free_block_list_t));
+	bg_free_list->track_list = list_create(NULL);
+	bg_free_list->destroy = destroy;
+	bg_free_list->job_id = job_id;
+
+	slurm_mutex_lock(&block_state_mutex);
+	list_transfer(bg_free_list->track_list, track_list);
+	itr = list_iterator_create(bg_free_list->track_list);
+	while ((bg_record = list_next(itr))) {
+		if (bg_record->magic != BLOCK_MAGIC) {
+			error("block was already destroyed %p", bg_record);
+			continue;
+		}
+		bg_record->free_cnt++;
+
+		if (bg_record->job_ptr
+		    && !IS_JOB_FINISHED(bg_record->job_ptr)) {
+			info("We are freeing a block (%s) that has job %u(%u).",
+			     bg_record->bg_block_id,
+			     bg_record->job_ptr->job_id,
+			     bg_record->job_running);
+			/* This is not thread safe if called from
+			   bg_job_place.c anywhere from within
+			   submit_job() or at startup. */
+			slurm_mutex_unlock(&block_state_mutex);
+			bg_requeue_job(bg_record->job_ptr->job_id, 0);
+			slurm_mutex_lock(&block_state_mutex);
+		}
+		if (remove_from_bg_list(bg_lists->job_running, bg_record)
+		    == SLURM_SUCCESS)
+			num_unused_cpus += bg_record->cpu_cnt;
+	}
+	list_iterator_destroy(itr);
+	slurm_mutex_unlock(&block_state_mutex);
+
+	if (wait) {
+		/* _track_freeing_blocks waits until the list is done
+		   and frees the memory of bg_free_list.
+		*/
+		_track_freeing_blocks(bg_free_list);
+		return SLURM_SUCCESS;
+	}
+
+	/* _track_freeing_blocks handles cleanup */
+	slurm_attr_init(&attr_agent);
+	if (pthread_attr_setdetachstate(&attr_agent, PTHREAD_CREATE_DETACHED))
+		error("pthread_attr_setdetachstate error %m");
+	retries = 0;
+	while (pthread_create(&thread_agent, &attr_agent,
+			      _track_freeing_blocks,
+			      bg_free_list)) {
+		error("pthread_create error %m");
+		if (++retries > MAX_PTHREAD_RETRIES)
+			fatal("Can't create "
+			      "pthread");
+		/* sleep and retry */
+		usleep(1000);
+	}
+	slurm_attr_destroy(&attr_agent);
+	return SLURM_SUCCESS;
+}
+
+/* Determine if specific slurm node is already in DOWN or DRAIN state */
+extern int node_already_down(char *node_name)
+{
+	struct node_record *node_ptr = find_node_record(node_name);
+
+	if (node_ptr) {
+		if (IS_NODE_DRAIN(node_ptr))
+			return 2;
+		else if (IS_NODE_DOWN(node_ptr))
+			return 1;
+		else
+			return 0;
+	}
+
+	return 0;
+}
+
+/*
+ * Convert a BG API error code to a string
+ * IN inx - error code from any of the BG Bridge APIs
+ * RET - string describing the error condition
+ */
+extern const char *bg_err_str(int inx)
+{
+	static char tmp_char[10];
+
+	switch (inx) {
+	case SLURM_SUCCESS:
+		return "Slurm Success";
+	case SLURM_ERROR:
+		return "Slurm Error";
+	case BG_ERROR_BLOCK_NOT_FOUND:
+		return "Block not found";
+	case BG_ERROR_BOOT_ERROR:
+		return "Block boot error";
+	case BG_ERROR_JOB_NOT_FOUND:
+		return "Job not found";
+	case BG_ERROR_MP_NOT_FOUND:
+		return "Midplane not found";
+	case BG_ERROR_SWITCH_NOT_FOUND:
+		return "Switch not found";
+	case BG_ERROR_BLOCK_ALREADY_DEFINED:
+		return "Block already defined";
+	case BG_ERROR_JOB_ALREADY_DEFINED:
+		return "Job already defined";
+	case BG_ERROR_CONNECTION_ERROR:
+		return "Connection error";
+	case BG_ERROR_INTERNAL_ERROR:
+		return "Internal error";
+	case BG_ERROR_INVALID_INPUT:
+		return "Invalid input";
+	case BG_ERROR_INCONSISTENT_DATA:
+		return "Inconsistent data";
+	case BG_ERROR_NO_IOBLOCK_CONNECTED:
+		return "No IO Block Connected";
+	}
+	/* I know this isn't the best way to handle this, but it only
+	   happens very rarely and usually in debugging, so it
+	   hopefully isn't really all that bad.
+	*/
+	snprintf(tmp_char, sizeof(tmp_char), "%u ?", inx);
+	return tmp_char;
+}
+
diff --git a/src/plugins/select/bluegene/bg_core.h b/src/plugins/select/bluegene/bg_core.h
new file mode 100644
index 000000000..7277a7824
--- /dev/null
+++ b/src/plugins/select/bluegene/bg_core.h
@@ -0,0 +1,87 @@
+/*****************************************************************************\
+ *  bg_core.h - header for blue gene core functions processing module.
+ *
+ *  $Id$
+ *****************************************************************************
+ *  Copyright (C) 2004 The Regents of the University of California.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Dan Phung <phung4@llnl.gov> and Danny Auble <da@llnl.gov>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifndef _BG_CORE_H_
+#define _BG_CORE_H_
+
+#include "bg_enums.h"
+#include "bg_structs.h"
+#include "bg_record_functions.h"
+#include "bg_job_place.h"
+#include "bg_job_run.h"
+#include "bg_job_info.h"
+#include "bg_node_info.h"
+#include "ba_common.h"
+#include "bridge_linker.h"
+#include "bg_status.h"
+
+/* Change BLOCK_STATE_VERSION value when changing the state save
+ * format i.e. pack_block() */
+#define BLOCK_STATE_VERSION      "VER005"
+#define BLOCK_2_2_STATE_VERSION  "VER004" /*Slurm 2.2's version*/
+#define BLOCK_2_1_STATE_VERSION  "VER003" /*Slurm 2.1's version*/
+
+/* Global variables */
+/* extern bg_config_t *bg_conf; */
+/* extern bg_lists_t *bg_lists; */
+/* extern time_t last_bg_update; */
+/* extern bool agent_fini; */
+/* extern pthread_mutex_t block_state_mutex; */
+/* extern pthread_mutex_t request_list_mutex; */
+/* extern int blocks_are_created; */
+/* extern int num_unused_cpus; */
+
+extern bool blocks_overlap(bg_record_t *rec_a, bg_record_t *rec_b);
+extern void bg_requeue_job(uint32_t job_id, bool wait_for_start);
+
+/* sort a list of bg_records by size (node count) */
+extern void sort_bg_record_inc_size(List records);
+
+extern int bg_free_block(bg_record_t *bg_record, bool wait, bool locked);
+
+extern void *mult_free_block(void *args);
+extern void *mult_destroy_block(void *args);
+extern int free_block_list(uint32_t job_id, List track_list,
+			   bool destroy, bool wait);
+extern int read_bg_conf();
+extern int node_already_down(char *node_name);
+extern const char *bg_err_str(int inx);
+
+#endif /* _BG_CORE_H_ */
+
diff --git a/src/plugins/select/bluegene/plugin/defined_block.c b/src/plugins/select/bluegene/bg_defined_block.c
similarity index 63%
rename from src/plugins/select/bluegene/plugin/defined_block.c
rename to src/plugins/select/bluegene/bg_defined_block.c
index ce60e8c86..97807b0fe 100644
--- a/src/plugins/select/bluegene/plugin/defined_block.c
+++ b/src/plugins/select/bluegene/bg_defined_block.c
@@ -8,7 +8,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -37,7 +37,7 @@
  *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
 \*****************************************************************************/
 
-#include "defined_block.h"
+#include "bg_defined_block.h"
 
 /*
  * create_defined_blocks - create the static blocks that will be used
@@ -54,23 +54,16 @@ extern int create_defined_blocks(bg_layout_t overlapped,
 
 	ListIterator itr;
 	bg_record_t *bg_record = NULL;
-	ListIterator itr_found;
 	int i;
-	bg_record_t *found_record = NULL;
 	uint16_t geo[SYSTEM_DIMENSIONS];
 	char temp[256];
-	List results = NULL;
 	struct part_record *part_ptr = NULL;
-	char *non_usable_nodes = NULL;
-	bitstr_t *bitmap = bit_alloc(node_record_count);
+	bitstr_t *usable_mp_bitmap = bit_alloc(node_record_count);
 
-#ifdef HAVE_BG_FILES
-	init_wires();
-#endif
 	/* Locks are already in place to protect part_list here */
 	itr = list_iterator_create(part_list);
 	while ((part_ptr = list_next(itr))) {
-		/* we only want to use bps that are in
+		/* we only want to use mps that are in
 		 * partitions
 		 */
 		if (!part_ptr->node_bitmap) {
@@ -78,128 +71,97 @@ extern int create_defined_blocks(bg_layout_t overlapped,
 			       part_ptr->name);
 			continue;
 		}
-		bit_or(bitmap, part_ptr->node_bitmap);
+		bit_or(usable_mp_bitmap, part_ptr->node_bitmap);
 	}
 	list_iterator_destroy(itr);
 
-	bit_not(bitmap);
-	if (bit_ffs(bitmap) != -1) {
+	if (bit_ffs(usable_mp_bitmap) == -1) {
 		fatal("We don't have any nodes in any partitions.  "
 		      "Can't create blocks.  "
 		      "Please check your slurm.conf.");
 	}
-	non_usable_nodes = bitmap2node_name(bitmap);
-	removable_set_bps(non_usable_nodes);
-	FREE_NULL_BITMAP(bitmap);
 
 	slurm_mutex_lock(&block_state_mutex);
 	reset_ba_system(false);
+	ba_set_removable_mps(usable_mp_bitmap, 1);
 	if (bg_lists->main) {
 		itr = list_iterator_create(bg_lists->main);
 		while ((bg_record = list_next(itr))) {
-			if (bg_found_block_list) {
-				itr_found = list_iterator_create(
-					bg_found_block_list);
-				while ((found_record = (bg_record_t*)
-					list_next(itr_found)) != NULL) {
-/* 					info("%s[%s] ?= %s[%s]", */
-/* 					     bg_record->nodes, */
-/* 					     bg_record->ionodes, */
-/* 					     found_record->nodes, */
-/* 					     found_record->ionodes); */
-
-					if ((bit_equal(bg_record->bitmap,
-						       found_record->bitmap))
-					    && (bit_equal(bg_record->
-							  ionode_bitmap,
-							  found_record->
-							  ionode_bitmap))
-						) {
-						/* now make sure the
-						   conn_type is the same for
-						   regular sized blocks */
-						if ((bg_record->node_cnt
-						     >= bg_conf->bp_node_cnt)
-						    && bg_record->conn_type
-						    != found_record->conn_type)
-							continue;
-						/* don't remake this one */
-						break;
-					}
-				}
-				list_iterator_destroy(itr_found);
-			} else {
-				error("create_defined_blocks: "
-				      "no bg_found_block_list 1");
-			}
-			if (bg_record->bp_count > 0
+			/* If we are deleting old blocks they will
+			   have been added to the main list, so we
+			   want to skip over them.
+			*/
+			if (bg_record->free_cnt)
+				continue;
+			if (bg_record->mp_count > 0
 			    && !bg_record->full_block
-			    && bg_record->cpu_cnt >= bg_conf->cpus_per_bp) {
+			    && bg_record->cpu_cnt >= bg_conf->cpus_per_mp) {
 				char *name = NULL;
+				char start_char[SYSTEM_DIMENSIONS+1];
+				char geo_char[SYSTEM_DIMENSIONS+1];
 
 				if (overlapped == LAYOUT_OVERLAP) {
 					reset_ba_system(false);
-					removable_set_bps(non_usable_nodes);
+					ba_set_removable_mps(usable_mp_bitmap,
+							     1);
 				}
 
-				/* we want the bps that aren't
+				/* we want the mps that aren't
 				 * in this record to mark them as used
 				 */
-				if (set_all_bps_except(bg_record->nodes)
+				if (ba_set_removable_mps(
+					    bg_record->mp_bitmap, 1)
 				    != SLURM_SUCCESS)
-					fatal("something happened in "
-					      "the load of %s.  "
-					      "Did you use smap to "
-					      "make the "
-					      "bluegene.conf file?",
+					fatal("It doesn't seem we have a "
+					      "bitmap for %s",
 					      bg_record->bg_block_id);
 
-				for(i=0; i<SYSTEM_DIMENSIONS; i++)
+				for (i=0; i<SYSTEM_DIMENSIONS; i++) {
 					geo[i] = bg_record->geo[i];
-				debug2("adding %s %c%c%c %c%c%c",
-				       bg_record->nodes,
-				       alpha_num[bg_record->start[X]],
-				       alpha_num[bg_record->start[Y]],
-				       alpha_num[bg_record->start[Z]],
-				       alpha_num[geo[X]],
-				       alpha_num[geo[Y]],
-				       alpha_num[geo[Z]]);
-				if (bg_record->bg_block_list
-				    && list_count(bg_record->bg_block_list)) {
-					if (check_and_set_node_list(
-						    bg_record->bg_block_list)
-					    == SLURM_ERROR) {
-						debug2("something happened in "
-						       "the load of %s"
-						       "Did you use smap to "
-						       "make the "
-						       "bluegene.conf file?",
+					start_char[i] = alpha_num[
+						bg_record->start[i]];
+					geo_char[i] = alpha_num[geo[i]];
+				}
+				start_char[i] = '\0';
+				geo_char[i] = '\0';
+
+				debug2("adding %s %s %s",
+				       bg_record->mp_str,
+				       start_char, geo_char);
+				if (bg_record->ba_mp_list
+				    && list_count(bg_record->ba_mp_list)) {
+					if ((rc = check_and_set_mp_list(
+						     bg_record->ba_mp_list))
+					    != SLURM_SUCCESS) {
+						error("Something happened in "
+						      "the load of %s.  "
+						      "Did you use smap to "
+						      "make the "
+						      "bluegene.conf file?",
 						       bg_record->bg_block_id);
-						list_iterator_destroy(itr);
-						reset_all_removed_bps();
-						slurm_mutex_unlock(
-							&block_state_mutex);
-						xfree(non_usable_nodes);
-						return SLURM_ERROR;
+						break;
 					}
+					ba_reset_all_removed_mps();
 				} else {
-					results = list_create(NULL);
+#ifdef HAVE_BGQ
+					List results =
+						list_create(destroy_ba_mp);
+#else
+					List results = list_create(NULL);
+#endif
 					name = set_bg_block(
 						results,
 						bg_record->start,
 						geo,
 						bg_record->conn_type);
-					reset_all_removed_bps();
+					ba_reset_all_removed_mps();
 					if (!name) {
 						error("I was unable to "
 						      "make the "
 						      "requested block.");
 						list_destroy(results);
-						list_iterator_destroy(itr);
-						slurm_mutex_unlock(
-							&block_state_mutex);
-						xfree(non_usable_nodes);
-						return SLURM_ERROR;
+						rc = SLURM_ERROR;
+						break;
 					}
 
 					snprintf(temp, sizeof(temp), "%s%s",
@@ -207,26 +169,31 @@ extern int create_defined_blocks(bg_layout_t overlapped,
 						 name);
 
 					xfree(name);
-					if (strcmp(temp, bg_record->nodes)) {
+					if (strcmp(temp, bg_record->mp_str)) {
 						fatal("given list of %s "
 						      "but allocated %s, "
 						      "your order might be "
 						      "wrong in bluegene.conf",
-						      bg_record->nodes,
+						      bg_record->mp_str,
 						      temp);
 					}
-					if (bg_record->bg_block_list)
-						list_destroy(bg_record->
-							     bg_block_list);
-					bg_record->bg_block_list =
-						list_create(destroy_ba_node);
-					copy_node_path(
-						results,
-						&bg_record->bg_block_list);
+					if (bg_record->ba_mp_list)
+						list_destroy(
+							bg_record->ba_mp_list);
+#ifdef HAVE_BGQ
+					bg_record->ba_mp_list = results;
+					results = NULL;
+#else
+					bg_record->ba_mp_list =
+						list_create(destroy_ba_mp);
+					copy_node_path(results,
+						       &bg_record->ba_mp_list);
 					list_destroy(results);
+#endif
 				}
 			}
-			if (found_record == NULL) {
+			if (!block_exist_in_list(
+				    bg_found_block_list, bg_record)) {
 				if (bg_record->full_block) {
 					/* if this is defined we need
 					   to remove it since we are
@@ -243,30 +210,29 @@ extern int create_defined_blocks(bg_layout_t overlapped,
 					list_remove(itr);
 					continue;
 				}
-				if ((rc = configure_block(bg_record))
-				    == SLURM_ERROR) {
-					list_iterator_destroy(itr);
-					slurm_mutex_unlock(&block_state_mutex);
-					xfree(non_usable_nodes);
-					return rc;
-				}
+				if ((rc = bridge_block_create(bg_record))
+				    != SLURM_SUCCESS)
+					break;
 				print_bg_record(bg_record);
 			}
 		}
 		list_iterator_destroy(itr);
+		if (rc != SLURM_SUCCESS)
+			goto end_it;
 	} else {
 		error("create_defined_blocks: no bg_lists->main 2");
-		slurm_mutex_unlock(&block_state_mutex);
-		xfree(non_usable_nodes);
-		return SLURM_ERROR;
+		rc = SLURM_ERROR;
+		goto end_it;
 	}
-	xfree(non_usable_nodes);
-
 	slurm_mutex_unlock(&block_state_mutex);
 	create_full_system_block(bg_found_block_list);
 
 	slurm_mutex_lock(&block_state_mutex);
 	sort_bg_record_inc_size(bg_lists->main);
+
+end_it:
+	ba_reset_all_removed_mps();
+	FREE_NULL_BITMAP(usable_mp_bitmap);
 	slurm_mutex_unlock(&block_state_mutex);
 
 #ifdef _PRINT_BLOCKS_AND_EXIT
@@ -275,6 +241,12 @@ extern int create_defined_blocks(bg_layout_t overlapped,
 		debug("\n\n");
 		while ((found_record = (bg_record_t *) list_next(itr))
 		       != NULL) {
+			/* If we are deleting old blocks they will
+			   have been added to the main list, so we
+			   want to skip over them.
+			*/
+			if (found_record->free_cnt)
+				continue;
 			print_bg_record(found_record);
 		}
 		list_iterator_destroy(itr);
@@ -283,7 +255,6 @@ extern int create_defined_blocks(bg_layout_t overlapped,
 	}
  	exit(0);
 #endif	/* _PRINT_BLOCKS_AND_EXIT */
-	rc = SLURM_SUCCESS;
 	//exit(0);
 	return rc;
 }
@@ -297,15 +268,25 @@ extern int create_full_system_block(List bg_found_block_list)
 	List records = NULL;
 	uint16_t geo[SYSTEM_DIMENSIONS];
 	int i;
-	blockreq_t blockreq;
+	select_ba_request_t blockreq;
 	List results = NULL;
 	struct part_record *part_ptr = NULL;
 	bitstr_t *bitmap = bit_alloc(node_record_count);
+	static int *dims = NULL;
+	bool larger = 0;
+	char start_char[SYSTEM_DIMENSIONS+1];
+	char geo_char[SYSTEM_DIMENSIONS+1];
+
+	if (!dims) {
+		dims = select_g_ba_get_dims();
+		memset(start_char, 0, sizeof(start_char));
+		memset(geo_char, 0, sizeof(geo_char));
+	}
 
 	/* Locks are already in place to protect part_list here */
 	itr = list_iterator_create(part_list);
 	while ((part_ptr = list_next(itr))) {
-		/* we only want to use bps that are in
+		/* we only want to use mps that are in
 		 * partitions
 		 */
 		if (!part_ptr->node_bitmap) {
@@ -331,26 +312,26 @@ extern int create_full_system_block(List bg_found_block_list)
 	*/
 	slurm_mutex_lock(&block_state_mutex);
 
-	geo[X] = DIM_SIZE[X] - 1;
-	geo[Y] = DIM_SIZE[Y] - 1;
-	geo[Z] = DIM_SIZE[Z] - 1;
+	for (i=0; i<SYSTEM_DIMENSIONS; i++) {
+		geo[i] = dims[i] - 1;
+		if (geo[i] > 0)
+			larger = 1;
+		geo_char[i] = alpha_num[geo[i]];
+		start_char[i] = alpha_num[0];
+	}
 
-	i = (10+strlen(bg_conf->slurm_node_prefix));
-	name = xmalloc(i);
-	if ((geo[X] == 0) && (geo[Y] == 0) && (geo[Z] == 0))
-		snprintf(name, i, "%s000",
-			 bg_conf->slurm_node_prefix);
+	if (!larger)
+		name = xstrdup_printf("%s%s",
+				      bg_conf->slurm_node_prefix, start_char);
 	else
-		snprintf(name, i, "%s[000x%c%c%c]",
-			 bg_conf->slurm_node_prefix,
-			 alpha_num[geo[X]], alpha_num[geo[Y]],
-			 alpha_num[geo[Z]]);
-
+		name = xstrdup_printf("%s[%sx%s]",
+				      bg_conf->slurm_node_prefix,
+				      start_char, geo_char);
 
 	if (bg_found_block_list) {
 		itr = list_iterator_create(bg_found_block_list);
 		while ((bg_record = (bg_record_t *) list_next(itr)) != NULL) {
-			if (!strcmp(name, bg_record->nodes)) {
+			if (!strcmp(name, bg_record->mp_str)) {
 				xfree(name);
 				list_iterator_destroy(itr);
 				/* don't create total already there */
@@ -366,7 +347,14 @@ extern int create_full_system_block(List bg_found_block_list)
 		itr = list_iterator_create(bg_lists->main);
 		while ((bg_record = (bg_record_t *) list_next(itr))
 		       != NULL) {
-			if (!strcmp(name, bg_record->nodes)) {
+			/* If we are deleting old blocks they will
+			   have been added to the main list, so we
+			   want to skip over them.
+			*/
+			if (bg_record->free_cnt)
+				continue;
+
+			if (!strcmp(name, bg_record->mp_str)) {
 				xfree(name);
 				list_iterator_destroy(itr);
 				/* don't create total already there */
@@ -383,9 +371,10 @@ extern int create_full_system_block(List bg_found_block_list)
 
 	records = list_create(destroy_bg_record);
 
-	memset(&blockreq, 0, sizeof(blockreq_t));
-	blockreq.block = name;
-	blockreq.conn_type = SELECT_TORUS;
+	memset(&blockreq, 0, sizeof(select_ba_request_t));
+	blockreq.save_name = name;
+	for (i=0; i<SYSTEM_DIMENSIONS; i++)
+		blockreq.conn_type[i] = SELECT_TORUS;
 
 	add_bg_record(records, NULL, &blockreq, 0 , 0);
 	xfree(name);
@@ -397,20 +386,23 @@ extern int create_full_system_block(List bg_found_block_list)
 		goto no_total;
 	}
 	reset_ba_system(false);
-	for(i=0; i<SYSTEM_DIMENSIONS; i++)
-		geo[i] = bg_record->geo[i];
-	debug2("adding %s %c%c%c %c%c%c",
-	       bg_record->nodes,
-	       alpha_num[bg_record->start[X]],
-	       alpha_num[bg_record->start[Y]],
-	       alpha_num[bg_record->start[Z]],
-	       alpha_num[geo[X]],
-	       alpha_num[geo[Y]],
-	       alpha_num[geo[Z]]);
+	for(i=0; i<SYSTEM_DIMENSIONS; i++) {
+		geo_char[i] = alpha_num[bg_record->geo[i]];
+		start_char[i] = alpha_num[bg_record->start[i]];
+	}
+	debug2("adding %s %s %s",  bg_record->mp_str, start_char, geo_char);
+	if (bg_record->ba_mp_list)
+		list_flush(bg_record->ba_mp_list);
+	else
+		bg_record->ba_mp_list = list_create(destroy_ba_mp);
+#ifdef HAVE_BGQ
+	results = list_create(destroy_ba_mp);
+#else
 	results = list_create(NULL);
+#endif
 	name = set_bg_block(results,
 			    bg_record->start,
-			    geo,
+			    bg_record->geo,
 			    bg_record->conn_type);
 	if (!name) {
 		error("I was unable to make the full system block.");
@@ -420,13 +412,17 @@ extern int create_full_system_block(List bg_found_block_list)
 		return SLURM_ERROR;
 	}
 	xfree(name);
-	if (bg_record->bg_block_list)
-		list_destroy(bg_record->bg_block_list);
-	bg_record->bg_block_list = list_create(destroy_ba_node);
-	copy_node_path(results, &bg_record->bg_block_list);
+	if (bg_record->ba_mp_list)
+		list_destroy(bg_record->ba_mp_list);
+#ifdef HAVE_BGQ
+	bg_record->ba_mp_list = results;
+	results = NULL;
+#else
+	bg_record->ba_mp_list = list_create(destroy_ba_mp);
+	copy_node_path(results, &bg_record->ba_mp_list);
 	list_destroy(results);
-
-	if ((rc = configure_block(bg_record)) == SLURM_ERROR) {
+#endif
+	if ((rc = bridge_block_create(bg_record)) == SLURM_ERROR) {
 		error("create_full_system_block: "
 		      "unable to configure block in api");
 		destroy_bg_record(bg_record);
diff --git a/src/plugins/select/bluegene/plugin/defined_block.h b/src/plugins/select/bluegene/bg_defined_block.h
similarity index 96%
rename from src/plugins/select/bluegene/plugin/defined_block.h
rename to src/plugins/select/bluegene/bg_defined_block.h
index 315fa1a3f..eb0a8a658 100644
--- a/src/plugins/select/bluegene/plugin/defined_block.h
+++ b/src/plugins/select/bluegene/bg_defined_block.h
@@ -8,7 +8,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -40,7 +40,7 @@
 #ifndef _BLUEGENE_DEFINED_BLOCK_H_
 #define _BLUEGENE_DEFINED_BLOCK_H_
 
-#include "bluegene.h"
+#include "bg_core.h"
 
 extern int create_defined_blocks(bg_layout_t overlapped,
 				 List bg_found_block_list);
diff --git a/src/plugins/select/bluegene/plugin/dynamic_block.c b/src/plugins/select/bluegene/bg_dynamic_block.c
similarity index 66%
rename from src/plugins/select/bluegene/plugin/dynamic_block.c
rename to src/plugins/select/bluegene/bg_dynamic_block.c
index 49356700a..9daf48003 100644
--- a/src/plugins/select/bluegene/plugin/dynamic_block.c
+++ b/src/plugins/select/bluegene/bg_dynamic_block.c
@@ -8,7 +8,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -37,13 +37,13 @@
  *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
 \*****************************************************************************/
 
-#include "dynamic_block.h"
+#include "bg_dynamic_block.h"
 
 static int _split_block(List block_list, List new_blocks,
 			bg_record_t *bg_record, int cnodes);
 
 static int _breakup_blocks(List block_list, List new_blocks,
-			   ba_request_t *request, List my_block_list,
+			   select_ba_request_t *request, List my_block_list,
 			   bool only_free, bool only_small);
 
 /*
@@ -52,7 +52,8 @@ static int _breakup_blocks(List block_list, List new_blocks,
  * RET - a list of created block(s) or NULL on failure errno is set.
  */
 extern List create_dynamic_block(List block_list,
-				 ba_request_t *request, List my_block_list,
+				 select_ba_request_t *request,
+				 List my_block_list,
 				 bool track_down_nodes)
 {
 	int rc = SLURM_SUCCESS;
@@ -62,18 +63,19 @@ extern List create_dynamic_block(List block_list,
 	List results = NULL;
 	List new_blocks = NULL;
 	bitstr_t *my_bitmap = NULL;
-	blockreq_t blockreq;
+	select_ba_request_t blockreq;
 	int cnodes = request->procs / bg_conf->cpu_ratio;
-	char *unusable_nodes = NULL;
+	uint16_t start_geo[SYSTEM_DIMENSIONS];
 
 	if (cnodes < bg_conf->smallest_block) {
 		error("Can't create this size %d "
-		      "on this system numpsets is %d",
+		      "on this system ionodes_per_mp is %d",
 		      request->procs,
-		      bg_conf->numpsets);
+		      bg_conf->ionodes_per_mp);
 		goto finished;
 	}
-	memset(&blockreq, 0, sizeof(blockreq_t));
+	memset(&blockreq, 0, sizeof(select_ba_request_t));
+	memcpy(start_geo, request->geometry, sizeof(start_geo));
 
 	/* We need to lock this just incase a blocks_overlap is called
 	   which will in turn reset and set the system as it sees fit.
@@ -96,52 +98,62 @@ extern List create_dynamic_block(List block_list,
 			if (bg_record->free_cnt) {
 				if (bg_conf->slurm_debug_flags
 				    & DEBUG_FLAG_BG_PICK) {
-					char *start_geo = give_geo(
-						bg_record->start);
-					char *geo = give_geo(bg_record->geo);
-
+					int dim;
+					char start_geo[SYSTEM_DIMENSIONS+1];
+					char geo[SYSTEM_DIMENSIONS+1];
+					for (dim=0; dim<SYSTEM_DIMENSIONS;
+					     dim++) {
+						start_geo[dim] = alpha_num[
+							bg_record->start[dim]];
+						geo[dim] = alpha_num[
+							bg_record->geo[dim]];
+					}
+					start_geo[dim] = '\0';
+					geo[dim] = '\0';
 					info("not adding %s(%s) %s %s %s %u "
 					     "(free_cnt)",
 					     bg_record->bg_block_id,
-					     bg_record->nodes,
+					     bg_record->mp_str,
 					     bg_block_state_string(
 						     bg_record->state),
 					     start_geo,
 					     geo,
-					     bg_record->node_cnt);
-					xfree(start_geo);
-					xfree(geo);
+					     bg_record->cnode_cnt);
 				}
 				continue;
 			}
 
-			if (!my_bitmap) {
-				my_bitmap =
-					bit_alloc(bit_size(bg_record->bitmap));
-			}
+			if (!my_bitmap)
+				my_bitmap = bit_alloc(
+					bit_size(bg_record->mp_bitmap));
 
-			if (!bit_super_set(bg_record->bitmap, my_bitmap)) {
-				bit_or(my_bitmap, bg_record->bitmap);
+			if (!bit_super_set(bg_record->mp_bitmap, my_bitmap)) {
+				bit_or(my_bitmap, bg_record->mp_bitmap);
 
 				if (bg_conf->slurm_debug_flags
 				    & DEBUG_FLAG_BG_PICK) {
-					char *start_geo =
-						give_geo(bg_record->start);
-					char *geo =
-						give_geo(bg_record->geo);
-
+					int dim;
+					char start_geo[SYSTEM_DIMENSIONS+1];
+					char geo[SYSTEM_DIMENSIONS+1];
+					for (dim=0; dim<SYSTEM_DIMENSIONS;
+					     dim++) {
+						start_geo[dim] = alpha_num[
+							bg_record->start[dim]];
+						geo[dim] = alpha_num[
+							bg_record->geo[dim]];
+					}
+					start_geo[dim] = '\0';
+					geo[dim] = '\0';
 					info("adding %s(%s) %s %s %s %u",
 					     bg_record->bg_block_id,
-					     bg_record->nodes,
+					     bg_record->mp_str,
 					     bg_block_state_string(
 						     bg_record->state),
 					     start_geo, geo,
-					     bg_record->node_cnt);
-					xfree(start_geo);
-					xfree(geo);
+					     bg_record->cnode_cnt);
 				}
-				if (check_and_set_node_list(
-					    bg_record->bg_block_list)
+				if (check_and_set_mp_list(
+					    bg_record->ba_mp_list)
 				    == SLURM_ERROR) {
 					if (bg_conf->slurm_debug_flags
 					    & DEBUG_FLAG_BG_PICK)
@@ -153,21 +165,32 @@ extern List create_dynamic_block(List block_list,
 					rc = SLURM_ERROR;
 					goto finished;
 				}
-			} else if (bg_conf->slurm_debug_flags
-				   & DEBUG_FLAG_BG_PICK) {
-				char *start_geo = give_geo(bg_record->start);
-				char *geo = give_geo(bg_record->geo);
-
-				info("not adding %s(%s) %s %s %s %u ",
-				     bg_record->bg_block_id,
-				     bg_record->nodes,
-				     bg_block_state_string(
-					     bg_record->state),
-				     start_geo,
-				     geo,
-				     bg_record->node_cnt);
-				xfree(start_geo);
-				xfree(geo);
+			} else {
+				if (bg_conf->slurm_debug_flags
+				    & DEBUG_FLAG_BG_PICK) {
+					int dim;
+					char start_geo[SYSTEM_DIMENSIONS+1];
+					char geo[SYSTEM_DIMENSIONS+1];
+					for (dim=0; dim<SYSTEM_DIMENSIONS;
+					     dim++) {
+						start_geo[dim] = alpha_num[
+							bg_record->start[dim]];
+						geo[dim] = alpha_num[
+							bg_record->geo[dim]];
+					}
+					start_geo[dim] = '\0';
+					geo[dim] = '\0';
+					info("not adding %s(%s) %s %s %s %u ",
+					     bg_record->bg_block_id,
+					     bg_record->mp_str,
+					     bg_block_state_string(
+						     bg_record->state),
+					     start_geo,
+					     geo,
+					     bg_record->cnode_cnt);
+				}
+				/* just so we don't look at it later */
+				bg_record->free_cnt = -1;
 			}
 		}
 		list_iterator_destroy(itr);
@@ -178,23 +201,10 @@ extern List create_dynamic_block(List block_list,
 			info("No list was given");
 	}
 
-	if (request->avail_node_bitmap) {
-		bitstr_t *bitmap = bit_alloc(node_record_count);
-
-		/* we want the bps that aren't in this partition to
-		 * mark them as used
-		 */
-		bit_or(bitmap, request->avail_node_bitmap);
-		bit_not(bitmap);
-		unusable_nodes = bitmap2node_name(bitmap);
-
-		//info("not using %s", nodes);
-		removable_set_bps(unusable_nodes);
-
-		FREE_NULL_BITMAP(bitmap);
-	}
+	if (request->avail_mp_bitmap)
+		ba_set_removable_mps(request->avail_mp_bitmap, 1);
 
-	if (request->size==1 && cnodes < bg_conf->bp_node_cnt) {
+	if (request->size==1 && cnodes < bg_conf->mp_cnode_cnt) {
 		switch(cnodes) {
 #ifdef HAVE_BGL
 		case 32:
@@ -240,7 +250,7 @@ extern List create_dynamic_block(List block_list,
 		/* Sort the list so the small blocks are in the order
 		 * of ionodes. */
 		list_sort(block_list, (ListCmpF)bg_record_cmpf_inc);
-		request->conn_type = SELECT_SMALL;
+		request->conn_type[0] = SELECT_SMALL;
 		new_blocks = list_create(destroy_bg_record);
 		/* check only blocks that are free and small */
 		if (_breakup_blocks(block_list, new_blocks,
@@ -264,11 +274,16 @@ extern List create_dynamic_block(List block_list,
 			goto finished;
 
 		/* check all usable blocks */
-		if (_breakup_blocks(block_list, new_blocks,
-				    request, my_block_list,
-				    false, false)
-		    == SLURM_SUCCESS)
-			goto finished;
+		/* This check will result in unused, booted blocks to
+		   be freed before looking at free space, so we will
+		   just skip it.  If you want this kind of behavior
+		   enable it.
+		*/
+		/* if (_breakup_blocks(block_list, new_blocks, */
+		/* 		    request, my_block_list, */
+		/* 		    false, false) */
+		/*     == SLURM_SUCCESS) */
+		/* 	goto finished; */
 
 		/* Re-sort the list back to the original order. */
 		list_sort(block_list, (ListCmpF)bg_record_sort_aval_inc);
@@ -278,13 +293,14 @@ extern List create_dynamic_block(List block_list,
 			info("small block not able to be placed inside others");
 	}
 
-	if (request->conn_type == SELECT_NAV)
-		request->conn_type = SELECT_TORUS;
+	if (request->conn_type[0] == SELECT_NAV)
+		request->conn_type[0] = SELECT_TORUS;
 
 	//debug("going to create %d", request->size);
 	if (!new_ba_request(request)) {
-		if (request->geometry[X] != (uint16_t)NO_VAL) {
-			char *geo = give_geo(request->geometry);
+		if (request->geometry[0] != (uint16_t)NO_VAL) {
+			char *geo = give_geo(request->geometry,
+					     SYSTEM_DIMENSIONS, 1);
 			error("Problems with request for size %d geo %s",
 			      request->size, geo);
 			xfree(geo);
@@ -301,11 +317,22 @@ extern List create_dynamic_block(List block_list,
 	rc = SLURM_SUCCESS;
 	if (results)
 		list_flush(results);
-	else
+	else {
+#ifdef HAVE_BGQ
+		results = list_create(destroy_ba_mp);
+#else
 		results = list_create(NULL);
+#endif
+	}
+
+	rc = allocate_block(request, results);
+	/* This could be changed in allocate_block so set it back up */
+	memcpy(request->geometry, start_geo, sizeof(start_geo));
 
-	if (allocate_block(request, results))
+	if (rc) {
+		rc = SLURM_SUCCESS;
 		goto setup_records;
+	}
 
 	if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK)
 		info("allocate failure for size %d base "
@@ -313,14 +340,15 @@ extern List create_dynamic_block(List block_list,
 		     request->size);
 	rc = SLURM_ERROR;
 
-	if (!list_count(block_list) || !my_block_list)
+	if (!list_count(my_block_list) || !my_block_list)
 		goto finished;
 
 	/*Try to put block starting in the smallest of the exisiting blocks*/
-	itr = list_iterator_create(block_list);
-	itr2 = list_iterator_create(block_list);
+	itr = list_iterator_create(my_block_list);
+	itr2 = list_iterator_create(my_block_list);
 	while ((bg_record = (bg_record_t *) list_next(itr)) != NULL) {
-		/* never check a block being deleted or with a job running */
+		bool is_small = 0;
+		/* never check a block with a job running */
 		if (bg_record->free_cnt
 		    || bg_record->job_running != NO_JOB_RUNNING)
 			continue;
@@ -328,10 +356,10 @@ extern List create_dynamic_block(List block_list,
 		/* Here we are only looking for the first
 		   block on the midplane.  So either the count
 		   is greater or equal than
-		   bg_conf->bp_node_cnt or the first bit is
+		   bg_conf->mp_cnode_cnt or the first bit is
 		   set in the ionode_bitmap.
 		*/
-		if (bg_record->node_cnt < bg_conf->bp_node_cnt) {
+		if (bg_record->cnode_cnt < bg_conf->mp_cnode_cnt) {
 			bool found = 0;
 			if (bit_ffs(bg_record->ionode_bitmap) != 0)
 				continue;
@@ -339,10 +367,11 @@ extern List create_dynamic_block(List block_list,
 			   this midplane that have jobs running.
 			*/
 			while ((found_record = list_next(itr2))) {
-				if ((found_record->job_running
-				     != NO_JOB_RUNNING)
-				    && bit_overlap(bg_record->bitmap,
-						   found_record->bitmap)) {
+				if (!found_record->free_cnt
+				    && (found_record->job_running
+					!= NO_JOB_RUNNING)
+				    && bit_overlap(bg_record->mp_bitmap,
+						   found_record->mp_bitmap)) {
 					found = 1;
 					break;
 				}
@@ -350,23 +379,33 @@ extern List create_dynamic_block(List block_list,
 			list_iterator_reset(itr2);
 			if (found)
 				continue;
+			is_small = 1;
 		}
 
 		if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK)
-			info("removing %s for request %d",
-			     bg_record->nodes, request->size);
-		remove_block(bg_record->bg_block_list, (int)NO_VAL,
-			     (int)bg_record->conn_type);
-		/* need to set any unusable nodes that this last block
-		   used */
-		removable_set_bps(unusable_nodes);
+			info("removing %s(%s) for request %d",
+			     bg_record->bg_block_id,
+			     bg_record->mp_str, request->size);
+
+		remove_block(bg_record->ba_mp_list, is_small);
 		rc = SLURM_SUCCESS;
 		if (results)
 			list_flush(results);
-		else
+		else {
+#ifdef HAVE_BGQ
+			results = list_create(destroy_ba_mp);
+#else
 			results = list_create(NULL);
-		if (allocate_block(request, results))
+#endif
+		}
+
+		rc = allocate_block(request, results);
+		/* This could be changed in allocate_block so set it back up */
+		memcpy(request->geometry, start_geo, sizeof(start_geo));
+		if (rc) {
+			rc = SLURM_SUCCESS;
 			break;
+		}
 
 		if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK)
 			info("allocate failure for size %d base partitions",
@@ -381,33 +420,39 @@ setup_records:
 		/*set up bg_record(s) here */
 		new_blocks = list_create(destroy_bg_record);
 
-		blockreq.block = request->save_name;
+		blockreq.save_name = request->save_name;
 #ifdef HAVE_BGL
 		blockreq.blrtsimage = request->blrtsimage;
 #endif
 		blockreq.linuximage = request->linuximage;
 		blockreq.mloaderimage = request->mloaderimage;
 		blockreq.ramdiskimage = request->ramdiskimage;
-		blockreq.conn_type = request->conn_type;
+		memcpy(blockreq.conn_type, request->conn_type,
+		       sizeof(blockreq.conn_type));
 
-		add_bg_record(new_blocks, results, &blockreq, 0, 0);
+		add_bg_record(new_blocks, &results, &blockreq, 0, 0);
 	}
 
 finished:
-	reset_all_removed_bps();
+	if (request->avail_mp_bitmap
+	    && (bit_ffc(request->avail_mp_bitmap) == -1))
+		ba_reset_all_removed_mps();
 	slurm_mutex_unlock(&block_state_mutex);
 
-	xfree(unusable_nodes);
+	/* reset the ones we mucked with */
+	itr = list_iterator_create(my_block_list);
+	while ((bg_record = (bg_record_t *) list_next(itr))) {
+		if (bg_record->free_cnt == -1)
+			bg_record->free_cnt = 0;
+	}
+	list_iterator_destroy(itr);
+
+
 	xfree(request->save_name);
 
-	if (request->elongate_geos) {
-		list_destroy(request->elongate_geos);
-		request->elongate_geos = NULL;
-	}
 	if (results)
 		list_destroy(results);
 	errno = rc;
-
 	return new_blocks;
 }
 
@@ -415,9 +460,8 @@ extern bg_record_t *create_small_record(bg_record_t *bg_record,
 					bitstr_t *ionodes, int size)
 {
 	bg_record_t *found_record = NULL;
-	ba_node_t *new_ba_node = NULL;
-	ba_node_t *ba_node = NULL;
-	char bitstring[BITSIZE];
+	ba_mp_t *new_ba_mp = NULL;
+	ba_mp_t *ba_mp = NULL;
 
 	found_record = (bg_record_t*) xmalloc(sizeof(bg_record_t));
 	found_record->magic = BLOCK_MAGIC;
@@ -425,76 +469,77 @@ extern bg_record_t *create_small_record(bg_record_t *bg_record,
 	found_record->job_running = NO_JOB_RUNNING;
 	found_record->user_name = xstrdup(bg_record->user_name);
 	found_record->user_uid = bg_record->user_uid;
-	found_record->bg_block_list = list_create(destroy_ba_node);
-	if (bg_record->bg_block_list)
-		ba_node = list_peek(bg_record->bg_block_list);
-	if (!ba_node) {
-		if (bg_record->nodes) {
-			hostlist_t hl = hostlist_create(bg_record->nodes);
+	found_record->ba_mp_list = list_create(destroy_ba_mp);
+	if (bg_record->ba_mp_list)
+		ba_mp = list_peek(bg_record->ba_mp_list);
+	if (!ba_mp) {
+		if (bg_record->mp_str) {
+			hostlist_t hl = hostlist_create(bg_record->mp_str);
 			char *host = hostlist_shift(hl);
 			hostlist_destroy(hl);
-			found_record->nodes = xstrdup(host);
+			found_record->mp_str = xstrdup(host);
 			free(host);
-			error("you gave me a list with no ba_nodes using %s",
-			      found_record->nodes);
+			error("you gave me a list with no ba_mps using %s",
+			      found_record->mp_str);
 		} else {
-			found_record->nodes = xstrdup_printf(
-				"%s%c%c%c",
+			char tmp_char[SYSTEM_DIMENSIONS+1];
+			int dim;
+			for (dim=0; dim<SYSTEM_DIMENSIONS; dim++)
+				tmp_char[dim] =
+					alpha_num[found_record->start[dim]];
+			tmp_char[dim] = '\0';
+			found_record->mp_str = xstrdup_printf(
+				"%s%s",
 				bg_conf->slurm_node_prefix,
-				alpha_num[found_record->start[X]],
-				alpha_num[found_record->start[Y]],
-				alpha_num[found_record->start[Z]]);
-			error("you gave me a record with no ba_nodes "
+				tmp_char);
+			error("you gave me a record with no ba_mps "
 			      "and no nodes either using %s",
-			      found_record->nodes);
+			      found_record->mp_str);
 		}
 	} else {
-		int i=0,j=0;
-		new_ba_node = ba_copy_node(ba_node);
-		for (i=0; i<SYSTEM_DIMENSIONS; i++){
-			for(j=0;j<NUM_PORTS_PER_NODE;j++) {
-				ba_node->axis_switch[i].int_wire[j].used = 0;
-				if (i!=X) {
-					if (j==3 || j==4)
-						ba_node->axis_switch[i].
-							int_wire[j].
-							used = 1;
-				}
-				ba_node->axis_switch[i].int_wire[j].
-					port_tar = j;
-			}
-		}
-		list_append(found_record->bg_block_list, new_ba_node);
-		found_record->bp_count = 1;
-		found_record->nodes = xstrdup_printf(
-			"%s%c%c%c",
-			bg_conf->slurm_node_prefix,
-			alpha_num[ba_node->coord[X]],
-			alpha_num[ba_node->coord[Y]],
-			alpha_num[ba_node->coord[Z]]);
+		new_ba_mp = ba_copy_mp(ba_mp);
+		/* We need to have this node wrapped in Q to handle
+		   wires correctly when creating around the midplane.
+		*/
+		ba_setup_mp(new_ba_mp, false, true);
+
+		new_ba_mp->used = BA_MP_USED_TRUE;
+		list_append(found_record->ba_mp_list, new_ba_mp);
+		found_record->mp_count = 1;
+		found_record->mp_str = xstrdup_printf(
+			"%s%s",
+			bg_conf->slurm_node_prefix, new_ba_mp->coord_str);
 	}
+
 #ifdef HAVE_BGL
 	found_record->node_use = SELECT_COPROCESSOR_MODE;
 	found_record->blrtsimage = xstrdup(bg_record->blrtsimage);
 #endif
+#ifdef HAVE_BG_L_P
 	found_record->linuximage = xstrdup(bg_record->linuximage);
-	found_record->mloaderimage = xstrdup(bg_record->mloaderimage);
 	found_record->ramdiskimage = xstrdup(bg_record->ramdiskimage);
+#endif
+	found_record->mloaderimage = xstrdup(bg_record->mloaderimage);
 
 	process_nodes(found_record, false);
 
-	found_record->conn_type = SELECT_SMALL;
+	if (bg_record->conn_type[0] >= SELECT_SMALL)
+		found_record->conn_type[0] = bg_record->conn_type[0];
+	else
+		found_record->conn_type[0] = SELECT_SMALL;
 
 	xassert(bg_conf->cpu_ratio);
 	found_record->cpu_cnt = bg_conf->cpu_ratio * size;
-	found_record->node_cnt = size;
+	found_record->cnode_cnt = size;
 
 	found_record->ionode_bitmap = bit_copy(ionodes);
-	bit_fmt(bitstring, BITSIZE, found_record->ionode_bitmap);
-	found_record->ionodes = xstrdup(bitstring);
+	found_record->ionode_str =
+		ba_set_ionode_str(found_record->ionode_bitmap);
+	found_record->mp_used_bitmap = bit_alloc(node_record_count);
+
 	if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK)
 		info("made small block of %s[%s]",
-		     found_record->nodes, found_record->ionodes);
+		     found_record->mp_str, found_record->ionode_str);
 	return found_record;
 }
 
@@ -503,13 +548,13 @@ extern bg_record_t *create_small_record(bg_record_t *bg_record,
 static int _split_block(List block_list, List new_blocks,
 			bg_record_t *bg_record, int cnodes)
 {
-	bool full_bp = false;
+	bool full_mp = false;
 	bitoff_t start = 0;
-	blockreq_t blockreq;
+	select_ba_request_t blockreq;
 
-	memset(&blockreq, 0, sizeof(blockreq_t));
+	memset(&blockreq, 0, sizeof(select_ba_request_t));
 
-	switch(bg_record->node_cnt) {
+	switch(bg_record->cnode_cnt) {
 #ifdef HAVE_BGL
 	case 32:
 		error("We got a 32 we should never have this");
@@ -522,7 +567,7 @@ static int _split_block(List block_list, List new_blocks,
 			break;
 		default:
 			error("We don't make a %d from size %d",
-			      cnodes, bg_record->node_cnt);
+			      cnodes, bg_record->cnode_cnt);
 			goto finished;
 			break;
 		}
@@ -538,11 +583,11 @@ static int _split_block(List block_list, List new_blocks,
 			break;
 		default:
 			error("We don't make a %d from size %d",
-			      cnodes, bg_record->node_cnt);
+			      cnodes, bg_record->cnode_cnt);
 			goto finished;
 			break;
 		}
-		full_bp = true;
+		full_mp = true;
 		break;
 #else
 	case 16:
@@ -556,7 +601,7 @@ static int _split_block(List block_list, List new_blocks,
 			break;
 		default:
 			error("We don't make a %d from size %d",
-			      cnodes, bg_record->node_cnt);
+			      cnodes, bg_record->cnode_cnt);
 			goto finished;
 			break;
 		}
@@ -572,7 +617,7 @@ static int _split_block(List block_list, List new_blocks,
 			break;
 		default:
 			error("We don't make a %d from size %d",
-			      cnodes, bg_record->node_cnt);
+			      cnodes, bg_record->cnode_cnt);
 			goto finished;
 			break;
 		}
@@ -593,7 +638,7 @@ static int _split_block(List block_list, List new_blocks,
 			break;
 		default:
 			error("We don't make a %d from size %d",
-			      cnodes, bg_record->node_cnt);
+			      cnodes, bg_record->cnode_cnt);
 			goto finished;
 			break;
 		}
@@ -620,7 +665,7 @@ static int _split_block(List block_list, List new_blocks,
 			break;
 		default:
 			error("We don't make a %d from size %d",
-			      cnodes, bg_record->node_cnt);
+			      cnodes, bg_record->cnode_cnt);
 			goto finished;
 			break;
 		}
@@ -654,16 +699,16 @@ static int _split_block(List block_list, List new_blocks,
 			break;
 		default:
 			error("We don't make a %d from size %d",
-			      cnodes, bg_record->node_cnt);
+			      cnodes, bg_record->cnode_cnt);
 			goto finished;
 			break;
 		}
-		full_bp = true;
+		full_mp = true;
 		break;
 #endif
 	}
 
-	if (!full_bp && bg_record->ionode_bitmap) {
+	if (!full_mp && bg_record->ionode_bitmap) {
 		if ((start = bit_ffs(bg_record->ionode_bitmap)) == -1)
 			start = 0;
 	}
@@ -673,7 +718,7 @@ static int _split_block(List block_list, List new_blocks,
 		info("Asking for %u 32CNBlocks, and %u 128CNBlocks "
 		     "from a %u block, starting at ionode %d.",
 		     blockreq.small32, blockreq.small128,
-		     bg_record->node_cnt, start);
+		     bg_record->cnode_cnt, start);
 #else
 	if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK)
 		info("Asking for %u 16CNBlocks, %u 32CNBlocks, "
@@ -681,7 +726,7 @@ static int _split_block(List block_list, List new_blocks,
 		     "from a %u block, starting at ionode %d.",
 		     blockreq.small16, blockreq.small32,
 		     blockreq.small64, blockreq.small128,
-		     blockreq.small256, bg_record->node_cnt, start);
+		     blockreq.small256, bg_record->cnode_cnt, start);
 #endif
 	handle_small_record_request(new_blocks, &blockreq, bg_record, start);
 
@@ -690,17 +735,18 @@ finished:
 }
 
 static int _breakup_blocks(List block_list, List new_blocks,
-			   ba_request_t *request, List my_block_list,
+			   select_ba_request_t *request, List my_block_list,
 			   bool only_free, bool only_small)
 {
 	int rc = SLURM_ERROR;
 	bg_record_t *bg_record = NULL;
 	ListIterator itr = NULL, bit_itr = NULL;
 	int total_cnode_cnt=0;
-	char tmp_char[256];
-	bitstr_t *ionodes = bit_alloc(bg_conf->numpsets);
+	char start_char[SYSTEM_DIMENSIONS+1];
+	bitstr_t *ionodes = bit_alloc(bg_conf->ionodes_per_mp);
 	int cnodes = request->procs / bg_conf->cpu_ratio;
-	int curr_bp_bit = -1;
+	int curr_mp_bit = -1;
+	int dim;
 
 	if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK)
 		info("cpu_count=%d cnodes=%d o_free=%d o_small=%d",
@@ -733,9 +779,14 @@ static int _breakup_blocks(List block_list, List new_blocks,
 	 */
 	itr = list_iterator_create(block_list);
 	while ((bg_record = list_next(itr))) {
-		if (bg_record->free_cnt) {
+		/* If the free_cnt is -1 that just means we just
+		   didn't add it to the system, in this case it is
+		   probably a small block that we really should be
+		   looking at.
+		*/
+		if (bg_record->free_cnt > 0) {
 			if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK)
-				info("%s being free for other job(s), skipping",
+				info("%s being freed by other job(s), skipping",
 				     bg_record->bg_block_id);
 			continue;
 		}
@@ -746,16 +797,17 @@ static int _breakup_blocks(List block_list, List new_blocks,
 		 * that isn't used */
 
 		/* check for free blocks on the first and second time */
-		if (only_free && (bg_record->state != RM_PARTITION_FREE))
+		if (only_free && (bg_record->state != BG_BLOCK_FREE))
 			continue;
 
 		/* check small blocks first */
-		if (only_small && (bg_record->node_cnt > bg_conf->bp_node_cnt))
+		if (only_small
+		    && (bg_record->cnode_cnt >= bg_conf->mp_cnode_cnt))
 			continue;
 
-		if (request->avail_node_bitmap &&
-		    !bit_super_set(bg_record->bitmap,
-				   request->avail_node_bitmap)) {
+		if (request->avail_mp_bitmap &&
+		    !bit_super_set(bg_record->mp_bitmap,
+				   request->avail_mp_bitmap)) {
 			if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK)
 				info("bg block %s has nodes not usable "
 				     "by this job",
@@ -763,36 +815,39 @@ static int _breakup_blocks(List block_list, List new_blocks,
 			continue;
 		}
 
-		if (bg_record->node_cnt == cnodes) {
-			if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK)
-				info("found it here %s, %s",
-				     bg_record->bg_block_id,
-				     bg_record->nodes);
-			request->save_name = xstrdup_printf(
-				"%c%c%c",
-				alpha_num[bg_record->start[X]],
-				alpha_num[bg_record->start[Y]],
-				alpha_num[bg_record->start[Z]]);
+		if (bg_record->cnode_cnt == cnodes) {
+			ba_mp_t *ba_mp = NULL;
+			if (bg_record->ba_mp_list)
+				ba_mp = list_peek(bg_record->ba_mp_list);
+			if (!ba_mp) {
+				for (dim=0; dim<SYSTEM_DIMENSIONS; dim++)
+					start_char[dim] = alpha_num[
+						bg_record->start[dim]];
+				start_char[dim] = '\0';
+				request->save_name = xstrdup(start_char);
+			} else
+				request->save_name = xstrdup(ba_mp->coord_str);
 
 			rc = SLURM_SUCCESS;
 			goto finished;
 		}
 		/* lets see if we can combine some small ones */
-		if (bg_record->node_cnt < cnodes) {
+		if (bg_record->cnode_cnt < cnodes) {
 			char bitstring[BITSIZE];
 			bitstr_t *bitstr = NULL;
 			int num_over = 0;
-			int num_cnodes = bg_record->node_cnt;
-			int rec_bp_bit = bit_ffs(bg_record->bitmap);
+			int num_cnodes = bg_record->cnode_cnt;
+			int rec_mp_bit = bit_ffs(bg_record->mp_bitmap);
 
-			if (curr_bp_bit != rec_bp_bit) {
+			if (curr_mp_bit != rec_mp_bit) {
 				/* Got a different node than
 				 * previously, since the list should
 				 * be in order of nodes for small blocks
 				 * just clear here since the last node
 				 * doesn't have any more. */
-				curr_bp_bit = rec_bp_bit;
-				bit_nclear(ionodes, 0, (bg_conf->numpsets-1));
+				curr_mp_bit = rec_mp_bit;
+				bit_nclear(ionodes, 0,
+					   (bg_conf->ionodes_per_mp-1));
 				total_cnode_cnt = 0;
 			}
 
@@ -823,10 +878,11 @@ static int _breakup_blocks(List block_list, List new_blocks,
 				list_iterator_reset(bit_itr);
 			}
 			if (!bitstr) {
-				bit_nclear(ionodes, 0, (bg_conf->numpsets-1));
+				bit_nclear(ionodes, 0,
+					   (bg_conf->ionodes_per_mp-1));
 				bit_or(ionodes, bg_record->ionode_bitmap);
 				total_cnode_cnt = num_cnodes =
-					bg_record->node_cnt;
+					bg_record->cnode_cnt;
 			} else
 				total_cnode_cnt += num_cnodes;
 
@@ -834,15 +890,26 @@ static int _breakup_blocks(List block_list, List new_blocks,
 			if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK)
 				info("combine adding %s %s %d got %d set "
 				     "ionodes %s total is %s",
-				     bg_record->bg_block_id, bg_record->nodes,
+				     bg_record->bg_block_id, bg_record->mp_str,
 				     num_cnodes, total_cnode_cnt,
-				     bg_record->ionodes, bitstring);
+				     bg_record->ionode_str, bitstring);
 			if (total_cnode_cnt == cnodes) {
-				request->save_name = xstrdup_printf(
-					"%c%c%c",
-					alpha_num[bg_record->start[X]],
-					alpha_num[bg_record->start[Y]],
-					alpha_num[bg_record->start[Z]]);
+				ba_mp_t *ba_mp = NULL;
+				if (bg_record->ba_mp_list)
+					ba_mp = list_peek(
+						bg_record->ba_mp_list);
+				if (!ba_mp) {
+					for (dim=0; dim<SYSTEM_DIMENSIONS;
+					     dim++)
+						start_char[dim] = alpha_num[
+							bg_record->start[dim]];
+					start_char[dim] = '\0';
+					request->save_name =
+						xstrdup(start_char);
+				} else
+					request->save_name =
+						xstrdup(ba_mp->coord_str);
+
 				if (!my_block_list) {
 					rc = SLURM_SUCCESS;
 					goto finished;
@@ -863,6 +930,17 @@ static int _breakup_blocks(List block_list, List new_blocks,
 	}
 
 	if (bg_record) {
+		ba_mp_t *ba_mp = NULL;
+		if (bg_record->ba_mp_list)
+			ba_mp = list_peek(bg_record->ba_mp_list);
+		if (!ba_mp) {
+			for (dim=0; dim<SYSTEM_DIMENSIONS; dim++)
+				start_char[dim] = alpha_num[
+					bg_record->start[dim]];
+			start_char[dim] = '\0';
+			request->save_name = xstrdup(start_char);
+		} else
+			request->save_name = xstrdup(ba_mp->coord_str);
 		/* It appears we don't need this original record
 		 * anymore, just work off the copy if indeed it is a copy. */
 
@@ -895,17 +973,14 @@ static int _breakup_blocks(List block_list, List new_blocks,
 		/* } */
 
 		if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK) {
+			char tmp_char[256];
 			format_node_name(bg_record, tmp_char,
 					 sizeof(tmp_char));
 			info("going to split %s, %s",
 			     bg_record->bg_block_id,
 			     tmp_char);
 		}
-		request->save_name = xstrdup_printf(
-			"%c%c%c",
-			alpha_num[bg_record->start[X]],
-			alpha_num[bg_record->start[Y]],
-			alpha_num[bg_record->start[Z]]);
+
 		if (!my_block_list) {
 			rc = SLURM_SUCCESS;
 			goto finished;
diff --git a/src/plugins/select/bluegene/plugin/dynamic_block.h b/src/plugins/select/bluegene/bg_dynamic_block.h
similarity index 94%
rename from src/plugins/select/bluegene/plugin/dynamic_block.h
rename to src/plugins/select/bluegene/bg_dynamic_block.h
index e440e121b..82469889d 100644
--- a/src/plugins/select/bluegene/plugin/dynamic_block.h
+++ b/src/plugins/select/bluegene/bg_dynamic_block.h
@@ -8,7 +8,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -40,10 +40,11 @@
 #ifndef _BLUEGENE_DYNAMIC_BLOCK_H_
 #define _BLUEGENE_DYNAMIC_BLOCK_H_
 
-#include "bluegene.h"
+#include "bg_core.h"
 
 extern List create_dynamic_block(List block_list,
-				 ba_request_t *request, List my_block_list,
+				 select_ba_request_t *request,
+				 List my_block_list,
 				 bool track_down_nodes);
 
 extern bg_record_t *create_small_record(bg_record_t *bg_record,
diff --git a/src/plugins/select/bluegene/bg_enums.h b/src/plugins/select/bluegene/bg_enums.h
new file mode 100644
index 000000000..b47ee524c
--- /dev/null
+++ b/src/plugins/select/bluegene/bg_enums.h
@@ -0,0 +1,173 @@
+/*****************************************************************************\
+ *  bg_enums.h - hearder file containing enums for the Blue Gene/Q plugin.
+ *****************************************************************************
+ *  Copyright (C) 2011 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Danny Auble <da@llnl.gov>
+ *  CODE-OCEC-09-009. All rights reserved.
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifndef ATTACH_BGL_H	/* Test for attach_bgl.h on BGL */
+#ifndef ATTACH_BG_H	/* Test for attach_bg.h on BGP */
+#define ATTACH_BGL_H	/* Replacement for attach_bgl.h on BGL */
+#define ATTACH_BG_H	/* Replacement for attach_bg.h on BGP */
+
+#if HAVE_CONFIG_H
+#  include "config.h"
+#endif
+
+#ifdef HAVE_BG_FILES
+
+/* MPI Debug support */
+typedef struct {
+	const char * host_name;        /* Something we can pass to inet_addr */
+	const char * executable_name;  /* The name of the image */
+	int    pid;                    /* The pid of the process */
+} MPIR_PROCDESC;
+
+# ifdef HAVE_BG_L_P
+# include "rm_api.h"
+# endif
+
+#elif defined HAVE_BG_L_P
+typedef char *   pm_partition_id_t;
+typedef int      rm_connection_type_t;
+typedef int      rm_partition_mode_t;
+typedef int      rm_partition_state_t;
+typedef void *   rm_partition_t;
+typedef char *   rm_BGL_t;
+typedef char *   rm_BG_t;
+typedef char *   rm_component_id_t;
+typedef rm_component_id_t rm_bp_id_t;
+typedef int      rm_BP_state_t;
+typedef char *   rm_job_list_t;
+#endif
+
+#ifdef HAVE_BGL
+typedef rm_BGL_t my_bluegene_t;
+#define PARTITION_ALREADY_DEFINED -6
+#elif defined HAVE_BGP
+typedef rm_BG_t my_bluegene_t;
+#else
+typedef void * my_bluegene_t;
+#endif
+
+typedef enum bg_layout_type {
+	LAYOUT_STATIC,  /* no overlaps, except for full system block
+			   blocks never change */
+	LAYOUT_OVERLAP, /* overlaps permitted, must be defined in
+			   bluegene.conf file */
+	LAYOUT_DYNAMIC	/* slurm will make all blocks */
+} bg_layout_t;
+
+typedef enum {
+	BG_BLOCK_FREE = 0,  // Block is free
+	BG_BLOCK_ALLOCATED, // Block is allocated (reserved either
+			    // right before booting or right before free
+	BG_BLOCK_BUSY,      // Block is Busy
+	BG_BLOCK_BOOTING,   // Block is booting
+	BG_BLOCK_INITED,    // Block is initialized
+	BG_BLOCK_REBOOTING, // Block is rebooting
+	BG_BLOCK_TERM,      // Block is terminating
+	BG_BLOCK_NAV,       // Block state is undefined
+} bg_block_status_t;
+
+typedef enum {
+        BG_JOB_SETUP = 0,   //!< Job is setting up.
+        BG_JOB_LOADING,     //!< Job is loading.
+        BG_JOB_STARTING,    //!< Job is starting.
+        BG_JOB_RUNNING,     //!< Job is running.
+        BG_JOB_CLEANUP,     //!< Job is ending.
+        BG_JOB_TERMINATED,  //!< Job is terminated.
+        BG_JOB_ERROR        //!< Job is in error status.
+} bg_job_status_t;
+
+#define BG_BLOCK_ERROR_FLAG    0x1000  // Block is in error
+
+
+#define BG_SWITCH_NONE         0x0000
+#define BG_SWITCH_OUT          0x0001
+#define BG_SWITCH_IN           0x0002
+#define BG_SWITCH_OUT_PASS     0x0004
+#define BG_SWITCH_IN_PASS      0x0008
+#define BG_SWITCH_WRAPPED      0x0003 /* just wrap used */
+#define BG_SWITCH_PASS_FLAG    0x0010 /* flag for marking a midplane
+				       * with a passthough used */
+#define BG_SWITCH_PASS_USED    0x000C /* passthough ports used */
+#define BG_SWITCH_PASS         0x001C /* just passthough used */
+#define BG_SWITCH_WRAPPED_PASS 0x001F /* all ports are in use, but no torus */
+#define BG_SWITCH_TORUS        0x000F /* all ports are in use in a torus */
+#define BG_SWITCH_START        0x0200 /* modified from the start list */
+
+/*
+ * Total time to boot a bglblock should not exceed
+ * BG_FREE_PREVIOUS_BLOCK + BG_MIN_BLOCK_BOOT +
+ * (BG_INCR_BLOCK_BOOT * base partition count).
+ * For example, if BG_MIN_BLOCK_BOOT=300, BG_MIN_BLOCK_BOOT=200,
+ * BG_INCR_BLOCK_BOOT=20 and there are 4 blocks being booted,
+ * wait up to 580 seconds (300 + 200 (20 * 4)).
+ */
+
+#define BG_FREE_PREVIOUS_BLOCK 300 	/* time in seconds */
+#define BG_MIN_BLOCK_BOOT  300		/* time in seconds */
+#define BG_INCR_BLOCK_BOOT 20		/* time in seconds per BP */
+
+#define MAX_PTHREAD_RETRIES  1
+#define BLOCK_ERROR_STATE    -3
+#define ADMIN_ERROR_STATE    -4
+#define BUFSIZE 4096
+#define BITSIZE 128
+
+#define BLOCK_MAGIC 0x3afd
+
+#define REMOVE_USER_ERR  -1
+#define REMOVE_USER_NONE  0
+#define REMOVE_USER_FOUND 2
+
+typedef enum {
+	BG_ERROR_INVALID_STATE = 100,
+	BG_ERROR_BLOCK_NOT_FOUND,
+	BG_ERROR_BOOT_ERROR,
+	BG_ERROR_JOB_NOT_FOUND,
+	BG_ERROR_MP_NOT_FOUND,
+	BG_ERROR_SWITCH_NOT_FOUND,
+	BG_ERROR_BLOCK_ALREADY_DEFINED,
+	BG_ERROR_JOB_ALREADY_DEFINED,
+	BG_ERROR_CONNECTION_ERROR,
+	BG_ERROR_INTERNAL_ERROR,
+	BG_ERROR_INVALID_INPUT,
+	BG_ERROR_INCONSISTENT_DATA,
+	BG_ERROR_NO_IOBLOCK_CONNECTED,
+} bg_errno_t;
+
+#endif	/* #ifndef ATTACH_BG_H */
+#endif	/* #ifndef ATTACH_BGL_H */
diff --git a/src/plugins/select/bluegene/plugin/jobinfo.c b/src/plugins/select/bluegene/bg_job_info.c
similarity index 67%
rename from src/plugins/select/bluegene/plugin/jobinfo.c
rename to src/plugins/select/bluegene/bg_job_info.c
index 0b621bcf3..0a84ef436 100644
--- a/src/plugins/select/bluegene/plugin/jobinfo.c
+++ b/src/plugins/select/bluegene/bg_job_info.c
@@ -1,13 +1,13 @@
 /*****************************************************************************\
- *  jobinfo.c - functions used for the select_jobinfo_t structure
+ *  bg_job_info.c - functions used for the select_jobinfo_t structure
  *****************************************************************************
- *  Copyright (C) 2009 Lawrence Livermore National Security.
+ *  Copyright (C) 2009-2011 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov> et. al.
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -37,8 +37,7 @@
 \*****************************************************************************/
 
 #include "src/common/slurm_xlator.h"
-#include "bluegene.h"
-#include "jobinfo.h"
+#include "bg_core.h"
 #include "src/common/xmalloc.h"
 #include "src/common/xstring.h"
 
@@ -61,14 +60,20 @@ extern select_jobinfo_t *alloc_select_jobinfo()
 {
 	int i;
 	select_jobinfo_t *jobinfo = xmalloc(sizeof(struct select_jobinfo));
-	for (i=0; i<SYSTEM_DIMENSIONS; i++) {
+	jobinfo->dim_cnt = 0; /* This will be setup later */
+	for (i=0; i<HIGHEST_DIMENSIONS; i++) {
 		jobinfo->geometry[i] = (uint16_t) NO_VAL;
+		jobinfo->conn_type[i] = (uint16_t) NO_VAL;
 	}
-	jobinfo->conn_type = (uint16_t) NO_VAL;
 	jobinfo->reboot = (uint16_t) NO_VAL;
 	jobinfo->rotate = (uint16_t) NO_VAL;
 	jobinfo->magic = JOBINFO_MAGIC;
-	jobinfo->node_cnt = NO_VAL;
+	jobinfo->block_cnode_cnt = NO_VAL;
+	jobinfo->cnode_cnt = NO_VAL;
+
+	/* This bitstr is created when used. */
+	//jobinfo->units_used = bit_alloc(bg_conf->mp_cnode_cnt);
+
 	/* Remainder of structure is already NULL fulled */
 
 	return jobinfo;
@@ -87,13 +92,15 @@ extern int free_select_jobinfo(select_jobinfo_t *jobinfo)
 			return EINVAL;
 		}
 		jobinfo->magic = 0;
+		jobinfo->bg_record = NULL;
 		xfree(jobinfo->bg_block_id);
-		xfree(jobinfo->nodes);
-		xfree(jobinfo->ionodes);
+		xfree(jobinfo->mp_str);
+		xfree(jobinfo->ionode_str);
 		xfree(jobinfo->blrtsimage);
 		xfree(jobinfo->linuximage);
 		xfree(jobinfo->mloaderimage);
 		xfree(jobinfo->ramdiskimage);
+		FREE_NULL_BITMAP(jobinfo->units_used);
 		xfree(jobinfo);
 	}
 	return rc;
@@ -111,29 +118,38 @@ extern int set_select_jobinfo(select_jobinfo_t *jobinfo,
 	uint16_t *uint16 = (uint16_t *) data;
 	uint32_t *uint32 = (uint32_t *) data;
 	char *tmp_char = (char *) data;
+	bg_record_t *bg_record = (bg_record_t *) data;
 	uint32_t new_size;
+	uint16_t first_conn_type;
 
-	if (jobinfo == NULL) {
-		error("set_select_jobinfo: jobinfo not set");
-		return SLURM_ERROR;
-	}
+	xassert(jobinfo);
 
 	if (jobinfo->magic != JOBINFO_MAGIC) {
 		error("set_select_jobinfo: jobinfo magic bad");
 		return SLURM_ERROR;
 	}
 
+	if (!jobinfo->dim_cnt)
+		jobinfo->dim_cnt = SYSTEM_DIMENSIONS;
+
 	switch (data_type) {
+	case SELECT_JOBDATA_DIM_CNT:
+		jobinfo->dim_cnt = *uint16;
+		break;
 	case SELECT_JOBDATA_GEOMETRY:
 		new_size = 1;
-		for (i=0; i<SYSTEM_DIMENSIONS; i++) {
+		first_conn_type = jobinfo->conn_type[0];
+		for (i=0; i<jobinfo->dim_cnt; i++) {
 			jobinfo->geometry[i] = uint16[i];
 			new_size *= uint16[i];
+
+			/* Make sure the conn type is correct with the
+			 * new count */
+			if ((new_size > 1)
+			    && (first_conn_type >= SELECT_SMALL))
+				jobinfo->conn_type[i] = SELECT_TORUS;
 		}
 
-		/* Make sure the conn type is correct with the new count */
-		if ((new_size > 1) && (jobinfo->conn_type == SELECT_SMALL))
-			jobinfo->conn_type = SELECT_TORUS;
 		break;
 	case SELECT_JOBDATA_REBOOT:
 		jobinfo->reboot = *uint16;
@@ -142,32 +158,59 @@ extern int set_select_jobinfo(select_jobinfo_t *jobinfo,
 		jobinfo->rotate = *uint16;
 		break;
 	case SELECT_JOBDATA_CONN_TYPE:
-		jobinfo->conn_type = *uint16;
+		for (i=0; i<jobinfo->dim_cnt; i++)
+			jobinfo->conn_type[i] = uint16[i];
 		break;
 	case SELECT_JOBDATA_BLOCK_ID:
 		/* we xfree() any preset value to avoid a memory leak */
 		xfree(jobinfo->bg_block_id);
 		jobinfo->bg_block_id = xstrdup(tmp_char);
 		break;
+	case SELECT_JOBDATA_BLOCK_NODE_CNT:
+		jobinfo->block_cnode_cnt = *uint32;
+		break;
+	case SELECT_JOBDATA_BLOCK_PTR:
+		jobinfo->bg_record = bg_record;
+		xfree(jobinfo->bg_block_id);
+		if (bg_record) {
+			jobinfo->bg_block_id = xstrdup(bg_record->bg_block_id);
+			jobinfo->block_cnode_cnt = bg_record->cnode_cnt;
+		} else {
+			jobinfo->bg_block_id = xstrdup("unassigned");
+			jobinfo->block_cnode_cnt = 0;
+		}
+		break;
 	case SELECT_JOBDATA_NODES:
-		xfree(jobinfo->nodes);
-		jobinfo->nodes = xstrdup(tmp_char);
+		xfree(jobinfo->mp_str);
+		jobinfo->mp_str = xstrdup(tmp_char);
 		break;
 	case SELECT_JOBDATA_IONODES:
-		xfree(jobinfo->ionodes);
-		jobinfo->ionodes = xstrdup(tmp_char);
+		xfree(jobinfo->ionode_str);
+		if (tmp_char) {
+#ifdef HAVE_BGQ
+			jobinfo->dim_cnt = 5;
+			for (i=0; i<jobinfo->dim_cnt; i++) {
+				jobinfo->start_loc[i] =
+					select_char2coord(tmp_char[i]);
+			}
+#else
+			jobinfo->dim_cnt = SYSTEM_DIMENSIONS;
+#endif
+			jobinfo->ionode_str = xstrdup(tmp_char);
+		} else
+			jobinfo->dim_cnt = SYSTEM_DIMENSIONS;
+
 		break;
 	case SELECT_JOBDATA_NODE_CNT:
-		jobinfo->node_cnt = *uint32;
-#ifdef HAVE_BG_L_P
+		jobinfo->cnode_cnt = *uint32;
 		/* Make sure the conn type is correct with the new count */
-		if ((bg_conf->bp_node_cnt == bg_conf->nodecard_node_cnt)
-		    || (jobinfo->node_cnt < bg_conf->bp_node_cnt)) {
-			if (jobinfo->conn_type < SELECT_SMALL)
-				jobinfo->conn_type = SELECT_SMALL;
-		} else if (jobinfo->conn_type >= SELECT_SMALL)
-			jobinfo->conn_type = SELECT_TORUS;
-#endif
+		if ((bg_conf->mp_cnode_cnt == bg_conf->nodecard_cnode_cnt)
+		    || (jobinfo->cnode_cnt < bg_conf->mp_cnode_cnt)) {
+			if (jobinfo->conn_type[0] < SELECT_SMALL)
+				jobinfo->conn_type[0] = SELECT_SMALL;
+		} else if (jobinfo->conn_type[0] >= SELECT_SMALL)
+			for (i=0; i<SYSTEM_DIMENSIONS; i++)
+				jobinfo->conn_type[i] = SELECT_TORUS;
 		break;
 	case SELECT_JOBDATA_ALTERED:
 		jobinfo->altered = *uint16;
@@ -192,6 +235,10 @@ extern int set_select_jobinfo(select_jobinfo_t *jobinfo,
 		xfree(jobinfo->ramdiskimage);
 		jobinfo->ramdiskimage = xstrdup(tmp_char);
 		break;
+	case SELECT_JOBDATA_START_LOC:
+		for (i=0; i<jobinfo->dim_cnt; i++)
+			jobinfo->start_loc[i] = uint16[i];
+		break;
 	default:
 		debug("set_select_jobinfo: data_type %d invalid",
 		      data_type);
@@ -212,20 +259,25 @@ extern int get_select_jobinfo(select_jobinfo_t *jobinfo,
 	int i, rc = SLURM_SUCCESS;
 	uint16_t *uint16 = (uint16_t *) data;
 	uint32_t *uint32 = (uint32_t *) data;
+	bg_record_t **bg_record = (bg_record_t **) data;
 	char **tmp_char = (char **) data;
 
-	if (jobinfo == NULL) {
-		error("get_jobinfo: jobinfo not set");
-		return SLURM_ERROR;
-	}
+	xassert(jobinfo);
+
 	if (jobinfo->magic != JOBINFO_MAGIC) {
 		error("get_jobinfo: jobinfo magic bad");
 		return SLURM_ERROR;
 	}
 
+	if (!jobinfo->dim_cnt)
+		jobinfo->dim_cnt = SYSTEM_DIMENSIONS;
+
 	switch (data_type) {
+	case SELECT_JOBDATA_DIM_CNT:
+		*uint16 = jobinfo->dim_cnt;
+		break;
 	case SELECT_JOBDATA_GEOMETRY:
-		for (i=0; i<SYSTEM_DIMENSIONS; i++) {
+		for (i=0; i<jobinfo->dim_cnt; i++) {
 			uint16[i] = jobinfo->geometry[i];
 		}
 		break;
@@ -236,7 +288,8 @@ extern int get_select_jobinfo(select_jobinfo_t *jobinfo,
 		*uint16 = jobinfo->rotate;
 		break;
 	case SELECT_JOBDATA_CONN_TYPE:
-		*uint16 = jobinfo->conn_type;
+		for (i=0; i<jobinfo->dim_cnt; i++)
+			uint16[i] = jobinfo->conn_type[i];
 		break;
 	case SELECT_JOBDATA_BLOCK_ID:
 		if ((jobinfo->bg_block_id == NULL)
@@ -245,22 +298,28 @@ extern int get_select_jobinfo(select_jobinfo_t *jobinfo,
 		else
 			*tmp_char = xstrdup(jobinfo->bg_block_id);
 		break;
+	case SELECT_JOBDATA_BLOCK_NODE_CNT:
+		*uint32 = jobinfo->block_cnode_cnt;
+		break;
+	case SELECT_JOBDATA_BLOCK_PTR:
+		*bg_record = jobinfo->bg_record;
+		break;
 	case SELECT_JOBDATA_NODES:
-		if ((jobinfo->nodes == NULL)
-		    ||  (jobinfo->nodes[0] == '\0'))
+		if ((jobinfo->mp_str == NULL)
+		    ||  (jobinfo->mp_str[0] == '\0'))
 			*tmp_char = NULL;
 		else
-			*tmp_char = xstrdup(jobinfo->nodes);
+			*tmp_char = xstrdup(jobinfo->mp_str);
 		break;
 	case SELECT_JOBDATA_IONODES:
-		if ((jobinfo->ionodes == NULL)
-		    ||  (jobinfo->ionodes[0] == '\0'))
+		if ((jobinfo->ionode_str == NULL)
+		    ||  (jobinfo->ionode_str[0] == '\0'))
 			*tmp_char = NULL;
 		else
-			*tmp_char = xstrdup(jobinfo->ionodes);
+			*tmp_char = xstrdup(jobinfo->ionode_str);
 		break;
 	case SELECT_JOBDATA_NODE_CNT:
-		*uint32 = jobinfo->node_cnt;
+		*uint32 = jobinfo->cnode_cnt;
 		break;
 	case SELECT_JOBDATA_ALTERED:
 		*uint16 = jobinfo->altered;
@@ -293,6 +352,11 @@ extern int get_select_jobinfo(select_jobinfo_t *jobinfo,
 		else
 			*tmp_char = xstrdup(jobinfo->ramdiskimage);
 		break;
+	case SELECT_JOBDATA_START_LOC:
+		for (i=0; i<jobinfo->dim_cnt; i++) {
+			uint16[i] = jobinfo->start_loc[i];
+		}
+		break;
 	default:
 		debug2("get_jobinfo data_type %d invalid",
 		       data_type);
@@ -309,7 +373,6 @@ extern int get_select_jobinfo(select_jobinfo_t *jobinfo,
 extern select_jobinfo_t *copy_select_jobinfo(select_jobinfo_t *jobinfo)
 {
 	struct select_jobinfo *rc = NULL;
-	int i;
 
 	if (jobinfo == NULL)
 		;
@@ -317,22 +380,28 @@ extern select_jobinfo_t *copy_select_jobinfo(select_jobinfo_t *jobinfo)
 		error("copy_jobinfo: jobinfo magic bad");
 	else {
 		rc = xmalloc(sizeof(struct select_jobinfo));
-		for (i=0; i<SYSTEM_DIMENSIONS; i++) {
-			rc->geometry[i] = (uint16_t)jobinfo->geometry[i];
-		}
-		rc->conn_type = jobinfo->conn_type;
+		rc->dim_cnt = jobinfo->dim_cnt;
+		memcpy(rc->geometry, jobinfo->geometry, sizeof(rc->geometry));
+		memcpy(rc->conn_type, jobinfo->conn_type,
+		       sizeof(rc->conn_type));
+		memcpy(rc->start_loc, jobinfo->start_loc,
+		       sizeof(rc->start_loc));
 		rc->reboot = jobinfo->reboot;
 		rc->rotate = jobinfo->rotate;
+		rc->bg_record = jobinfo->bg_record;
 		rc->bg_block_id = xstrdup(jobinfo->bg_block_id);
 		rc->magic = JOBINFO_MAGIC;
-		rc->nodes = xstrdup(jobinfo->nodes);
-		rc->ionodes = xstrdup(jobinfo->ionodes);
-		rc->node_cnt = jobinfo->node_cnt;
+		rc->mp_str = xstrdup(jobinfo->mp_str);
+		rc->ionode_str = xstrdup(jobinfo->ionode_str);
+		rc->block_cnode_cnt = jobinfo->block_cnode_cnt;
+		rc->cnode_cnt = jobinfo->cnode_cnt;
 		rc->altered = jobinfo->altered;
 		rc->blrtsimage = xstrdup(jobinfo->blrtsimage);
 		rc->linuximage = xstrdup(jobinfo->linuximage);
 		rc->mloaderimage = xstrdup(jobinfo->mloaderimage);
 		rc->ramdiskimage = xstrdup(jobinfo->ramdiskimage);
+		if (jobinfo->units_used)
+			rc->units_used = bit_copy(jobinfo->units_used);
 	}
 
 	return rc;
@@ -351,23 +420,74 @@ extern int  pack_select_jobinfo(select_jobinfo_t *jobinfo, Buf buffer,
 	uint32_t cluster_flags = slurmdb_setup_cluster_flags();
 	int dims = slurmdb_setup_cluster_dims();
 
-	if (protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_2_3_PROTOCOL_VERSION) {
 		if (jobinfo) {
+			if (jobinfo->dim_cnt)
+				dims = jobinfo->dim_cnt;
+			else if (bg_recover != NOT_FROM_CONTROLLER)
+				xassert(0);
+
+			pack16(dims, buffer);
 			/* NOTE: If new elements are added here, make sure to
 			 * add equivalant pack of zeros below for NULL
 			 * pointer */
 			for (i=0; i<dims; i++) {
 				pack16(jobinfo->geometry[i], buffer);
+				pack16(jobinfo->conn_type[i], buffer);
+				pack16(jobinfo->start_loc[i], buffer);
 			}
-			pack16(jobinfo->conn_type, buffer);
 			pack16(jobinfo->reboot, buffer);
 			pack16(jobinfo->rotate, buffer);
 
-			pack32(jobinfo->node_cnt, buffer);
+			pack32(jobinfo->block_cnode_cnt, buffer);
+			pack32(jobinfo->cnode_cnt, buffer);
 
 			packstr(jobinfo->bg_block_id, buffer);
-			packstr(jobinfo->nodes, buffer);
-			packstr(jobinfo->ionodes, buffer);
+			packstr(jobinfo->mp_str, buffer);
+			packstr(jobinfo->ionode_str, buffer);
+
+			packstr(jobinfo->blrtsimage, buffer);
+			packstr(jobinfo->linuximage, buffer);
+			packstr(jobinfo->mloaderimage, buffer);
+			packstr(jobinfo->ramdiskimage, buffer);
+			pack_bit_fmt(jobinfo->units_used, buffer);
+		} else {
+			pack16(dims, buffer);
+			/* pack space for 3 positions for geo
+			 * conn_type and start_loc and then, reboot, and rotate
+			 */
+			for (i=0; i<((dims*3)+2); i++) {
+				pack16((uint16_t) 0, buffer);
+			}
+			pack32((uint32_t) 0, buffer); //block_cnode_cnt
+			pack32((uint32_t) 0, buffer); //cnode_cnt
+			packnull(buffer); //bg_block_id
+			packnull(buffer); //nodes
+			packnull(buffer); //ionodes
+
+			packnull(buffer); //blrts
+			packnull(buffer); //linux
+			packnull(buffer); //mloader
+			packnull(buffer); //ramdisk
+			packnull(buffer); //units_used
+		}
+	} else if (protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
+		if (jobinfo) {
+			/* NOTE: If new elements are added here, make sure to
+			 * add equivalant pack of zeros below for NULL
+			 * pointer */
+			for (i=0; i<dims; i++) {
+				pack16(jobinfo->geometry[i], buffer);
+			}
+			pack16(jobinfo->conn_type[0], buffer);
+			pack16(jobinfo->reboot, buffer);
+			pack16(jobinfo->rotate, buffer);
+
+			pack32(jobinfo->cnode_cnt, buffer);
+
+			packstr(jobinfo->bg_block_id, buffer);
+			packstr(jobinfo->mp_str, buffer);
+			packstr(jobinfo->ionode_str, buffer);
 
 			packstr(jobinfo->blrtsimage, buffer);
 			packstr(jobinfo->linuximage, buffer);
@@ -399,16 +519,16 @@ extern int  pack_select_jobinfo(select_jobinfo_t *jobinfo, Buf buffer,
 			for (i=0; i<SYSTEM_DIMENSIONS; i++) {
 				pack16(jobinfo->geometry[i], buffer);
 			}
-			pack16(jobinfo->conn_type, buffer);
+			pack16(jobinfo->conn_type[0], buffer);
 			pack16(jobinfo->reboot, buffer);
 			pack16(jobinfo->rotate, buffer);
 
-			pack32(jobinfo->node_cnt, buffer);
+			pack32(jobinfo->cnode_cnt, buffer);
 			pack32(0, buffer);
 
 			packstr(jobinfo->bg_block_id, buffer);
-			packstr(jobinfo->nodes, buffer);
-			packstr(jobinfo->ionodes, buffer);
+			packstr(jobinfo->mp_str, buffer);
+			packstr(jobinfo->ionode_str, buffer);
 
 			if (cluster_flags & CLUSTER_FLAG_BGL)
 				packstr(jobinfo->blrtsimage, buffer);
@@ -456,24 +576,65 @@ extern int unpack_select_jobinfo(select_jobinfo_t **jobinfo_pptr, Buf buffer,
 	uint32_t cluster_flags = slurmdb_setup_cluster_flags();
 	int dims = slurmdb_setup_cluster_dims();
 	select_jobinfo_t *jobinfo = xmalloc(sizeof(struct select_jobinfo));
+	char *bit_char = NULL;
 	*jobinfo_pptr = jobinfo;
 
 	jobinfo->magic = JOBINFO_MAGIC;
-	if (protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
+
+	if (protocol_version >= SLURM_2_3_PROTOCOL_VERSION) {
+		safe_unpack16(&jobinfo->dim_cnt, buffer);
+
+		xassert(jobinfo->dim_cnt);
+		dims = jobinfo->dim_cnt;
+
+		for (i=0; i<dims; i++) {
+			safe_unpack16(&(jobinfo->geometry[i]), buffer);
+			safe_unpack16(&(jobinfo->conn_type[i]), buffer);
+			safe_unpack16(&(jobinfo->start_loc[i]), buffer);
+		}
+
+		safe_unpack16(&(jobinfo->reboot), buffer);
+		safe_unpack16(&(jobinfo->rotate), buffer);
+
+		safe_unpack32(&(jobinfo->block_cnode_cnt), buffer);
+		safe_unpack32(&(jobinfo->cnode_cnt), buffer);
+
+		safe_unpackstr_xmalloc(&(jobinfo->bg_block_id), &uint32_tmp,
+				       buffer);
+		safe_unpackstr_xmalloc(&(jobinfo->mp_str), &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&(jobinfo->ionode_str), &uint32_tmp,
+				       buffer);
+
+		safe_unpackstr_xmalloc(&(jobinfo->blrtsimage),
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&(jobinfo->linuximage), &uint32_tmp,
+				       buffer);
+		safe_unpackstr_xmalloc(&(jobinfo->mloaderimage), &uint32_tmp,
+				       buffer);
+		safe_unpackstr_xmalloc(&(jobinfo->ramdiskimage), &uint32_tmp,
+				       buffer);
+		safe_unpackstr_xmalloc(&bit_char, &uint32_tmp, buffer);
+		if (bit_char) {
+			jobinfo->units_used = bit_alloc(bg_conf->mp_cnode_cnt);
+			bit_unfmt(jobinfo->units_used, bit_char);
+			xfree(bit_char);
+		}
+	} else if (protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
+		jobinfo->dim_cnt = dims;
 		for (i=0; i<dims; i++) {
 			safe_unpack16(&(jobinfo->geometry[i]), buffer);
 		}
 
-		safe_unpack16(&(jobinfo->conn_type), buffer);
+		safe_unpack16(&(jobinfo->conn_type[0]), buffer);
 		safe_unpack16(&(jobinfo->reboot), buffer);
 		safe_unpack16(&(jobinfo->rotate), buffer);
 
-		safe_unpack32(&(jobinfo->node_cnt), buffer);
+		safe_unpack32(&(jobinfo->cnode_cnt), buffer);
 
 		safe_unpackstr_xmalloc(&(jobinfo->bg_block_id), &uint32_tmp,
 				       buffer);
-		safe_unpackstr_xmalloc(&(jobinfo->nodes), &uint32_tmp, buffer);
-		safe_unpackstr_xmalloc(&(jobinfo->ionodes), &uint32_tmp,
+		safe_unpackstr_xmalloc(&(jobinfo->mp_str), &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&(jobinfo->ionode_str), &uint32_tmp,
 				       buffer);
 
 		safe_unpackstr_xmalloc(&(jobinfo->blrtsimage),
@@ -485,20 +646,21 @@ extern int unpack_select_jobinfo(select_jobinfo_t **jobinfo_pptr, Buf buffer,
 		safe_unpackstr_xmalloc(&(jobinfo->ramdiskimage), &uint32_tmp,
 				       buffer);
 	} else {
+		jobinfo->dim_cnt = dims;
 		for (i=0; i<dims; i++) {
 			safe_unpack16(&(jobinfo->geometry[i]), buffer);
 		}
-		safe_unpack16(&(jobinfo->conn_type), buffer);
+		safe_unpack16(&(jobinfo->conn_type[0]), buffer);
 		safe_unpack16(&(jobinfo->reboot), buffer);
 		safe_unpack16(&(jobinfo->rotate), buffer);
 
-		safe_unpack32(&(jobinfo->node_cnt), buffer);
+		safe_unpack32(&(jobinfo->cnode_cnt), buffer);
 		safe_unpack32(&uint32_tmp, buffer);
 
 		safe_unpackstr_xmalloc(&(jobinfo->bg_block_id), &uint32_tmp,
 				       buffer);
-		safe_unpackstr_xmalloc(&(jobinfo->nodes), &uint32_tmp, buffer);
-		safe_unpackstr_xmalloc(&(jobinfo->ionodes), &uint32_tmp,
+		safe_unpackstr_xmalloc(&(jobinfo->mp_str), &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&(jobinfo->ionode_str), &uint32_tmp,
 				       buffer);
 
 		if (cluster_flags & CLUSTER_FLAG_BGL)
@@ -533,6 +695,7 @@ extern char *sprint_select_jobinfo(select_jobinfo_t *jobinfo,
 	int i;
 	char *tmp_image = "default";
 	char *header = "CONNECT REBOOT ROTATE GEOMETRY BLOCK_ID";
+	bool print_x = 1;
 
 	if (buf == NULL) {
 		error("sprint_jobinfo: buf is null");
@@ -555,15 +718,18 @@ extern char *sprint_select_jobinfo(select_jobinfo_t *jobinfo,
 		return buf;
 	}
 
+	if (mode == SELECT_PRINT_GEOMETRY)
+		print_x = 0;
+
 	if (jobinfo->geometry[0] == (uint16_t) NO_VAL) {
-		for (i=0; i<SYSTEM_DIMENSIONS; i++) {
-			if (geo)
+		for (i=0; i<jobinfo->dim_cnt; i++) {
+			if (geo && print_x)
 				xstrcat(geo, "x0");
 			else
 				xstrcat(geo, "0");
 		}
-	} else
-		geo = give_geo(jobinfo->geometry);
+	} else if (mode != SELECT_PRINT_START_LOC)
+		geo = give_geo(jobinfo->geometry, jobinfo->dim_cnt, print_x);
 
 	switch (mode) {
 	case SELECT_PRINT_HEAD:
@@ -572,7 +738,7 @@ extern char *sprint_select_jobinfo(select_jobinfo_t *jobinfo,
 	case SELECT_PRINT_DATA:
 		snprintf(buf, size,
 			 "%7.7s %6.6s %6.6s    %s %-16s",
-			 conn_type_string(jobinfo->conn_type),
+			 conn_type_string(jobinfo->conn_type[0]),
 			 _yes_no_string(jobinfo->reboot),
 			 _yes_no_string(jobinfo->rotate),
 			 geo,
@@ -582,7 +748,7 @@ extern char *sprint_select_jobinfo(select_jobinfo_t *jobinfo,
 		snprintf(buf, size,
 			 "Connection=%s Reboot=%s Rotate=%s "
 			 "Geometry=%s",
-			 conn_type_string(jobinfo->conn_type),
+			 conn_type_string(jobinfo->conn_type[0]),
 			 _yes_no_string(jobinfo->reboot),
 			 _yes_no_string(jobinfo->rotate),
 			 geo);
@@ -591,7 +757,7 @@ extern char *sprint_select_jobinfo(select_jobinfo_t *jobinfo,
 		snprintf(buf, size,
 			 "Connection=%s Reboot=%s Rotate=%s "
 			 "Geometry=%s Block_ID=%s",
-			 conn_type_string(jobinfo->conn_type),
+			 conn_type_string(jobinfo->conn_type[0]),
 			 _yes_no_string(jobinfo->reboot),
 			 _yes_no_string(jobinfo->rotate),
 			 geo,
@@ -601,15 +767,15 @@ extern char *sprint_select_jobinfo(select_jobinfo_t *jobinfo,
 		snprintf(buf, size, "%s", jobinfo->bg_block_id);
 		break;
 	case SELECT_PRINT_NODES:
-		if (jobinfo->ionodes && jobinfo->ionodes[0])
+		if (jobinfo->ionode_str && jobinfo->ionode_str[0])
 			snprintf(buf, size, "%s[%s]",
-				 jobinfo->nodes, jobinfo->ionodes);
+				 jobinfo->mp_str, jobinfo->ionode_str);
 		else
-			snprintf(buf, size, "%s", jobinfo->nodes);
+			snprintf(buf, size, "%s", jobinfo->mp_str);
 		break;
 	case SELECT_PRINT_CONNECTION:
 		snprintf(buf, size, "%s",
-			 conn_type_string(jobinfo->conn_type));
+			 conn_type_string(jobinfo->conn_type[0]));
 		break;
 	case SELECT_PRINT_REBOOT:
 		snprintf(buf, size, "%s",
@@ -642,6 +808,11 @@ extern char *sprint_select_jobinfo(select_jobinfo_t *jobinfo,
 			tmp_image = jobinfo->ramdiskimage;
 		snprintf(buf, size, "%s", tmp_image);
 		break;
+	case SELECT_PRINT_START_LOC:
+		xfree(geo);
+		geo = give_geo(jobinfo->start_loc, jobinfo->dim_cnt, 0);
+		snprintf(buf, size, "%s", geo);
+		break;
 	default:
 		error("sprint_jobinfo: bad mode %d", mode);
 		if (size > 0)
@@ -663,6 +834,7 @@ extern char *xstrdup_select_jobinfo(select_jobinfo_t *jobinfo, int mode)
 	char *tmp_image = "default";
 	char *buf = NULL;
 	char *header = "CONNECT REBOOT ROTATE GEOMETRY BLOCK_ID";
+	bool print_x = 1;
 
 	if ((mode != SELECT_PRINT_DATA)
 	    && jobinfo && (jobinfo->magic != JOBINFO_MAGIC)) {
@@ -679,15 +851,18 @@ extern char *xstrdup_select_jobinfo(select_jobinfo_t *jobinfo, int mode)
 		return buf;
 	}
 
+	if (mode == SELECT_PRINT_GEOMETRY)
+		print_x = 0;
+
 	if (jobinfo->geometry[0] == (uint16_t) NO_VAL) {
 		for (i=0; i<SYSTEM_DIMENSIONS; i++) {
-			if (geo)
+			if (geo && print_x)
 				xstrcat(geo, "x0");
 			else
 				xstrcat(geo, "0");
 		}
-	} else
-		geo = give_geo(jobinfo->geometry);
+	} else if (mode != SELECT_PRINT_START_LOC)
+		geo = give_geo(jobinfo->geometry, jobinfo->dim_cnt, print_x);
 
 	switch (mode) {
 	case SELECT_PRINT_HEAD:
@@ -696,7 +871,7 @@ extern char *xstrdup_select_jobinfo(select_jobinfo_t *jobinfo, int mode)
 	case SELECT_PRINT_DATA:
 		xstrfmtcat(buf,
 			   "%7.7s %6.6s %6.6s    %s %-16s",
-			   conn_type_string(jobinfo->conn_type),
+			   conn_type_string(jobinfo->conn_type[0]),
 			   _yes_no_string(jobinfo->reboot),
 			   _yes_no_string(jobinfo->rotate),
 			   geo,
@@ -706,7 +881,7 @@ extern char *xstrdup_select_jobinfo(select_jobinfo_t *jobinfo, int mode)
 		xstrfmtcat(buf,
 			   "Connection=%s Reboot=%s Rotate=%s "
 			   "Geometry=%s Block_ID=%s",
-			   conn_type_string(jobinfo->conn_type),
+			   conn_type_string(jobinfo->conn_type[0]),
 			   _yes_no_string(jobinfo->reboot),
 			   _yes_no_string(jobinfo->rotate),
 			   geo,
@@ -716,15 +891,15 @@ extern char *xstrdup_select_jobinfo(select_jobinfo_t *jobinfo, int mode)
 		xstrfmtcat(buf, "%s", jobinfo->bg_block_id);
 		break;
 	case SELECT_PRINT_NODES:
-		if (jobinfo->ionodes && jobinfo->ionodes[0])
+		if (jobinfo->ionode_str && jobinfo->ionode_str[0])
 			xstrfmtcat(buf, "%s[%s]",
-				   jobinfo->nodes, jobinfo->ionodes);
+				   jobinfo->mp_str, jobinfo->ionode_str);
 		else
-			xstrfmtcat(buf, "%s", jobinfo->nodes);
+			xstrfmtcat(buf, "%s", jobinfo->mp_str);
 		break;
 	case SELECT_PRINT_CONNECTION:
 		xstrfmtcat(buf, "%s",
-			   conn_type_string(jobinfo->conn_type));
+			   conn_type_string(jobinfo->conn_type[0]));
 		break;
 	case SELECT_PRINT_REBOOT:
 		xstrfmtcat(buf, "%s",
@@ -757,6 +932,11 @@ extern char *xstrdup_select_jobinfo(select_jobinfo_t *jobinfo, int mode)
 			tmp_image = jobinfo->ramdiskimage;
 		xstrfmtcat(buf, "%s", tmp_image);
 		break;
+	case SELECT_PRINT_START_LOC:
+		xfree(geo);
+		geo = give_geo(jobinfo->start_loc, jobinfo->dim_cnt, 0);
+		xstrfmtcat(buf, "%s", geo);
+		break;
 	default:
 		error("xstrdup_jobinfo: bad mode %d", mode);
 	}
diff --git a/src/plugins/select/bluegene/plugin/jobinfo.h b/src/plugins/select/bluegene/bg_job_info.h
similarity index 81%
rename from src/plugins/select/bluegene/plugin/jobinfo.h
rename to src/plugins/select/bluegene/bg_job_info.h
index 7e0338972..418328772 100644
--- a/src/plugins/select/bluegene/plugin/jobinfo.h
+++ b/src/plugins/select/bluegene/bg_job_info.h
@@ -1,14 +1,14 @@
 /*****************************************************************************\
- *  jobinfo.h - definitions of functions used for the select_jobinfo_t
+ *  bg_job_info.h - definitions of functions used for the select_jobinfo_t
  *              structure
  *****************************************************************************
- *  Copyright (C) 2009 Lawrence Livermore National Security.
+ *  Copyright (C) 2009-2011 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov> et. al.
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -45,23 +45,39 @@
 #define JOBINFO_MAGIC 0x83ac
 
 struct select_jobinfo {
-	uint16_t geometry[SYSTEM_DIMENSIONS];	/* node count in various
-						 * dimensions, e.g. XYZ */
-	uint16_t conn_type;	/* see enum connection_type */
-	uint16_t reboot;	/* reboot block before starting job */
-	uint16_t rotate;	/* permit geometry rotation if set */
-	char *bg_block_id;	/* Blue Gene block ID */
-	uint16_t magic;		/* magic number */
-	char *nodes;            /* node list given for estimated start */
-	char *ionodes;          /* for bg to tell which ionodes of a small
-				 * block the job is running */
-	uint32_t node_cnt;      /* how many cnodes in block running job */
 	uint16_t altered;       /* see if we have altered this job
 				 * or not yet */
+	bg_record_t *bg_record; /* For internal use only DO NOT PACK */
+	char *bg_block_id;	/* Blue Gene block ID */
 	char *blrtsimage;       /* BlrtsImage for this block */
+	uint32_t block_cnode_cnt; /* how many cnodes in the block
+				   * This is used to say we are
+				   * running a sub-block job. */
+	uint32_t cnode_cnt;     /* how many cnodes in job running on block */
+	uint16_t conn_type[HIGHEST_DIMENSIONS];	/* see enum connection_type */
+	uint16_t dim_cnt;       /* how many dimensions this
+				 * represents in most cases this will
+				 * be SYSTEM_DIMENSIONS, but in the
+				 * case of a sub-block allocation
+				 * this will be the number of
+				 * dimensions a cnode represent.  In
+				 * Q that is 5 while a midplane is
+				 * only representing 4. */
+	uint16_t geometry[HIGHEST_DIMENSIONS];	/* node count in various
+						 * dimensions, e.g. AXYZ */
+	char *ionode_str;       /* for bg to tell which ionodes of a small
+				 * block the job is running */
 	char *linuximage;       /* LinuxImage for this block */
+	uint16_t magic;		/* magic number */
+	char *mp_str;           /* midplane list given for estimated start */
 	char *mloaderimage;     /* mloaderImage for this block */
 	char *ramdiskimage;     /* RamDiskImage for this block */
+	uint16_t reboot;	/* reboot block before starting job */
+	uint16_t rotate;	/* permit geometry rotation if set */
+	uint16_t start_loc[HIGHEST_DIMENSIONS];	/* where in block we
+						 * are starting from */
+	bitstr_t *units_used;   /* Used for a step the cnodes used.
+				 */
 };
 
 /* allocate storage for a select job credential
diff --git a/src/plugins/select/bluegene/plugin/bg_job_place.c b/src/plugins/select/bluegene/bg_job_place.c
similarity index 72%
rename from src/plugins/select/bluegene/plugin/bg_job_place.c
rename to src/plugins/select/bluegene/bg_job_place.c
index 88c8031d0..74d0b92b4 100644
--- a/src/plugins/select/bluegene/plugin/bg_job_place.c
+++ b/src/plugins/select/bluegene/bg_job_place.c
@@ -8,7 +8,7 @@
  *  Written by Dan Phung <phung4@llnl.gov> and Morris Jette <jette1@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -43,40 +43,31 @@
 #include "src/common/node_select.h"
 #include "src/common/uid.h"
 #include "src/slurmctld/trigger_mgr.h"
-#include "bluegene.h"
-#include "dynamic_block.h"
+#include "bg_core.h"
+#include "bg_dynamic_block.h"
+#include "bg_read_config.h"
 
 #define _DEBUG 0
 #define MAX_GROUPS 128
 
-#define SWAP(a,b,t)				\
-	_STMT_START {				\
-		(t) = (a);			\
-		(a) = (b);			\
-		(b) = (t);			\
-	} _STMT_END
-
 
 pthread_mutex_t create_dynamic_mutex = PTHREAD_MUTEX_INITIALIZER;
 
-static void _rotate_geo(uint16_t *req_geometry, int rot_cnt);
 static int _get_user_groups(uint32_t user_id, uint32_t group_id,
 			    gid_t *groups, int max_groups, int *ngroups);
 static int _test_image_perms(char *image_name, List image_list,
 			     struct job_record* job_ptr);
-#ifdef HAVE_BGL
-static int _check_images(struct job_record* job_ptr,
-			 char **blrtsimage, char **linuximage,
-			 char **mloaderimage, char **ramdiskimage);
-#else
+
+static bool _check_rotate_geo(uint16_t *match_geo,
+			      uint16_t *req_geo, bool rotate);
+
 static int _check_images(struct job_record* job_ptr,
-			 char **linuximage,
-			 char **mloaderimage, char **ramdiskimage);
-#endif
+			 select_ba_request_t *request);
+
 static bg_record_t *_find_matching_block(List block_list,
 					 struct job_record* job_ptr,
 					 bitstr_t* slurm_block_bitmap,
-					 ba_request_t *request,
+					 select_ba_request_t *request,
 					 uint32_t max_cpus,
 					 int *allow, int check_image,
 					 int overlap_check,
@@ -87,7 +78,7 @@ static int _check_for_booted_overlapping_blocks(
 	bg_record_t *bg_record, int overlap_check, List overlapped_list,
 	uint16_t query_mode);
 static int _dynamically_request(List block_list, int *blocks_added,
-				ba_request_t *request,
+				select_ba_request_t *request,
 				char *user_req_nodes,
 				uint16_t query_mode);
 static int _find_best_block_match(List block_list, int *blocks_added,
@@ -98,29 +89,6 @@ static int _find_best_block_match(List block_list, int *blocks_added,
 				  bg_record_t** found_bg_record,
 				  uint16_t query_mode, int avail_cpus);
 static int _sync_block_lists(List full_list, List incomp_list);
-static void _build_select_struct(struct job_record *job_ptr,
-				 bitstr_t *bitmap, uint32_t node_cnt);
-static List _get_preemptables(uint16_t query_mode, bg_record_t *bg_record,
-			      List preempt_jobs);
-
-/* Rotate a 3-D geometry array through its six permutations */
-static void _rotate_geo(uint16_t *req_geometry, int rot_cnt)
-{
-	uint16_t tmp;
-
-	switch (rot_cnt) {
-	case 0:		/* ABC -> ACB */
-	case 2:		/* CAB -> CBA */
-	case 4:		/* BCA -> BAC */
-		SWAP(req_geometry[Y], req_geometry[Z], tmp);
-		break;
-	case 1:		/* ACB -> CAB */
-	case 3:		/* CBA -> BCA */
-	case 5:		/* BAC -> ABC */
-		SWAP(req_geometry[X], req_geometry[Y], tmp);
-		break;
-	}
-}
 
 /*
  * Get a list of groups associated with a specific user_id
@@ -204,72 +172,102 @@ static int _test_image_perms(char *image_name, List image_list,
 	return allow;
 }
 
-#ifdef HAVE_BGL
-static int _check_images(struct job_record* job_ptr,
-			 char **blrtsimage, char **linuximage,
-			 char **mloaderimage, char **ramdiskimage)
+static bool _check_rotate_geo(uint16_t *match_geo,
+			      uint16_t *req_geo, bool rotate)
+{
+	bool match = false;
+	int rot_cnt = 0;	/* attempt 6 rotations  */
+	int dim = 0;
+#ifdef HAVE_BGQ
+	int max_rotate=24;
 #else
-	static int _check_images(struct job_record* job_ptr,
-				 char **linuximage,
-				 char **mloaderimage, char **ramdiskimage)
+	int max_rotate=6;
 #endif
+	for (rot_cnt=0; rot_cnt<max_rotate; rot_cnt++) {
+		for (dim = 0; dim < SYSTEM_DIMENSIONS; dim++) {
+			if (match_geo[dim] < req_geo[dim])
+				break;
+		}
+
+		if (dim >= SYSTEM_DIMENSIONS) {
+			match = true;
+			break;
+		}
+
+		if (!rotate)
+			break;
+		ba_rotate_geo(req_geo, rot_cnt);
+	}
+
+	return match;
+}
+
+static int _check_images(struct job_record* job_ptr,
+			 select_ba_request_t *request)
 {
 	int allow = 0;
 
 #ifdef HAVE_BGL
 	get_select_jobinfo(job_ptr->select_jobinfo->data,
-			   SELECT_JOBDATA_BLRTS_IMAGE, blrtsimage);
+			   SELECT_JOBDATA_BLRTS_IMAGE, &request->blrtsimage);
 
-	if (*blrtsimage) {
-		allow = _test_image_perms(*blrtsimage, bg_conf->blrts_list,
+	if (request->blrtsimage) {
+		allow = _test_image_perms(request->blrtsimage,
+					  bg_conf->blrts_list,
 					  job_ptr);
 		if (!allow) {
 			error("User %u:%u is not allowed to use BlrtsImage %s",
 			      job_ptr->user_id, job_ptr->group_id,
-			      *blrtsimage);
+			      request->blrtsimage);
 			return SLURM_ERROR;
 
 		}
 	}
 #endif
+
+#ifdef HAVE_BG_L_P
 	get_select_jobinfo(job_ptr->select_jobinfo->data,
-			   SELECT_JOBDATA_LINUX_IMAGE, linuximage);
-	if (*linuximage) {
-		allow = _test_image_perms(*linuximage, bg_conf->linux_list,
+			   SELECT_JOBDATA_LINUX_IMAGE, &request->linuximage);
+	if (request->linuximage) {
+		allow = _test_image_perms(request->linuximage,
+					  bg_conf->linux_list,
 					  job_ptr);
 		if (!allow) {
 			error("User %u:%u is not allowed to use LinuxImage %s",
-			      job_ptr->user_id, job_ptr->group_id, *linuximage);
+			      job_ptr->user_id, job_ptr->group_id,
+			      request->linuximage);
 			return SLURM_ERROR;
 		}
 	}
 
 	get_select_jobinfo(job_ptr->select_jobinfo->data,
-			   SELECT_JOBDATA_MLOADER_IMAGE, mloaderimage);
-	if (*mloaderimage) {
-		allow = _test_image_perms(*mloaderimage,
-					  bg_conf->mloader_list,
+			   SELECT_JOBDATA_RAMDISK_IMAGE,
+			   &request->ramdiskimage);
+	if (request->ramdiskimage) {
+		allow = _test_image_perms(request->ramdiskimage,
+					  bg_conf->ramdisk_list,
 					  job_ptr);
 		if (!allow) {
 			error("User %u:%u is not allowed "
-			      "to use MloaderImage %s",
+			      "to use RamDiskImage %s",
 			      job_ptr->user_id, job_ptr->group_id,
-			      *mloaderimage);
+			      request->ramdiskimage);
 			return SLURM_ERROR;
 		}
 	}
-
+#endif
 	get_select_jobinfo(job_ptr->select_jobinfo->data,
-			   SELECT_JOBDATA_RAMDISK_IMAGE, ramdiskimage);
-	if (*ramdiskimage) {
-		allow = _test_image_perms(*ramdiskimage,
-					  bg_conf->ramdisk_list,
+			   SELECT_JOBDATA_MLOADER_IMAGE,
+			   &request->mloaderimage);
+	if (request->mloaderimage) {
+		allow = _test_image_perms(request->mloaderimage,
+					  bg_conf->mloader_list,
 					  job_ptr);
 		if (!allow) {
 			error("User %u:%u is not allowed "
-			      "to use RamDiskImage %s",
+			      "to use MloaderImage %s",
 			      job_ptr->user_id, job_ptr->group_id,
-			      *ramdiskimage);
+			      request->mloaderimage);
 			return SLURM_ERROR;
 		}
 	}
@@ -280,7 +278,7 @@ static int _check_images(struct job_record* job_ptr,
 static bg_record_t *_find_matching_block(List block_list,
 					 struct job_record* job_ptr,
 					 bitstr_t* slurm_block_bitmap,
-					 ba_request_t *request,
+					 select_ba_request_t *request,
 					 uint32_t max_cpus,
 					 int *allow, int check_image,
 					 int overlap_check,
@@ -309,19 +307,10 @@ static bg_record_t *_find_matching_block(List block_list,
 		if (bg_record->job_ptr)
 			bg_record->job_running = bg_record->job_ptr->job_id;
 
-		/* block is messed up some how (BLOCK_ERROR_STATE)
-		 * ignore it or if state == RM_PARTITION_ERROR */
-		if ((bg_record->job_running == BLOCK_ERROR_STATE)
-		    || (bg_record->state == RM_PARTITION_ERROR)) {
-			if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK)
-				info("block %s is in an error "
-				     "state (can't use)",
-				     bg_record->bg_block_id);
-			continue;
-		} else if ((bg_conf->layout_mode == LAYOUT_DYNAMIC)
-			   || ((!SELECT_IS_CHECK_FULL_SET(query_mode)
-				|| SELECT_IS_MODE_RUN_NOW(query_mode))
-			       && (bg_conf->layout_mode != LAYOUT_DYNAMIC))) {
+		if ((bg_conf->layout_mode == LAYOUT_DYNAMIC)
+		    || ((!SELECT_IS_CHECK_FULL_SET(query_mode)
+			 || SELECT_IS_MODE_RUN_NOW(query_mode))
+			&& (bg_conf->layout_mode != LAYOUT_DYNAMIC))) {
 			if (bg_record->free_cnt) {
 				/* No reason to look at a block that
 				   is being freed unless we are
@@ -334,6 +323,18 @@ static bg_record_t *_find_matching_block(List block_list,
 					     "job(s), skipping",
 					     bg_record->bg_block_id);
 				continue;
+			} else if ((bg_record->job_running == BLOCK_ERROR_STATE)
+				   || (bg_record->state
+				       & BG_BLOCK_ERROR_FLAG)) {
+				/* block is messed up some how
+				 * (BLOCK_ERROR_STATE_FLAG)
+				 * ignore it or if state == BG_BLOCK_ERROR */
+				if (bg_conf->slurm_debug_flags
+				    & DEBUG_FLAG_BG_PICK)
+					info("block %s is in an error "
+					     "state (can't use)",
+					     bg_record->bg_block_id);
+				continue;
 			} else if ((bg_record->job_running != NO_JOB_RUNNING)
 				   && (bg_record->job_running
 				       != job_ptr->job_id)) {
@@ -360,12 +361,14 @@ static bg_record_t *_find_matching_block(List block_list,
 			/* We use the proccessor count per block here
 			   mostly to see if we can run on a smaller block.
 			*/
-			convert_num_unit((float)bg_record->cpu_cnt, tmp_char,
-					 sizeof(tmp_char), UNIT_NONE);
-			if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK)
+			if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK) {
+				convert_num_unit((float)bg_record->cpu_cnt,
+						 tmp_char,
+						 sizeof(tmp_char), UNIT_NONE);
 				info("block %s CPU count (%s) not suitable",
 				     bg_record->bg_block_id,
 				     tmp_char);
+			}
 			continue;
 		}
 
@@ -376,11 +379,18 @@ static bg_record_t *_find_matching_block(List block_list,
 		 * drained, allocated to some other job, or in some
 		 * SLURM block not available to this job.
 		 */
-		if (!bit_super_set(bg_record->bitmap, slurm_block_bitmap)) {
-			if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK)
+		if (!bit_super_set(bg_record->mp_bitmap, slurm_block_bitmap)) {
+			if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK) {
+				char *temp = bitmap2node_name(
+					bg_record->mp_bitmap);
+				char *temp2 = bitmap2node_name(
+					slurm_block_bitmap);
 				info("bg block %s has nodes not "
-				     "usable by this job",
-				     bg_record->bg_block_id);
+				     "usable by this job %s %s",
+				     bg_record->bg_block_id, temp, temp2);
+				xfree(temp);
+				xfree(temp2);
+			}
 			continue;
 		}
 
@@ -389,14 +399,13 @@ static bg_record_t *_find_matching_block(List block_list,
 		 */
 		if (job_ptr->details->req_node_bitmap
 		    && (!bit_super_set(job_ptr->details->req_node_bitmap,
-				       bg_record->bitmap))) {
+				       bg_record->mp_bitmap))) {
 			if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK)
 				info("bg block %s lacks required nodes",
 				     bg_record->bg_block_id);
 			continue;
 		}
 
-
 		if (_check_for_booted_overlapping_blocks(
 			    block_list, itr, bg_record,
 			    overlap_check, overlapped_list, query_mode))
@@ -411,6 +420,7 @@ static bg_record_t *_find_matching_block(List block_list,
 				continue;
 			}
 #endif
+#ifdef HAVE_BG_L_P
 			if (request->linuximage &&
 			    strcasecmp(request->linuximage,
 				       bg_record->linuximage)) {
@@ -418,40 +428,40 @@ static bg_record_t *_find_matching_block(List block_list,
 				continue;
 			}
 
-			if (request->mloaderimage &&
-			    strcasecmp(request->mloaderimage,
-				       bg_record->mloaderimage)) {
-				*allow = 1;
-				continue;
-			}
-
 			if (request->ramdiskimage &&
 			    strcasecmp(request->ramdiskimage,
 				       bg_record->ramdiskimage)) {
 				*allow = 1;
 				continue;
 			}
+#endif
+			if (request->mloaderimage &&
+			    strcasecmp(request->mloaderimage,
+				       bg_record->mloaderimage)) {
+				*allow = 1;
+				continue;
+			}
 		}
 
 		/***********************************************/
 		/* check the connection type specified matches */
 		/***********************************************/
-		if ((request->conn_type != bg_record->conn_type)
-		    && (request->conn_type != SELECT_NAV)) {
-#ifndef HAVE_BGL
-			if (request->conn_type >= SELECT_SMALL) {
+		if ((request->conn_type[0] != bg_record->conn_type[0])
+		    && (request->conn_type[0] != SELECT_NAV)) {
+#ifdef HAVE_BGP
+			if (request->conn_type[0] >= SELECT_SMALL) {
 				/* we only want to reboot blocks if
 				   they have to be so skip booted
 				   blocks if in small state
 				*/
 				if (check_image
 				    && (bg_record->state
-					== RM_PARTITION_READY)) {
+					== BG_BLOCK_INITED)) {
 					*allow = 1;
 					continue;
 				}
 				goto good_conn_type;
-			} else if (bg_record->conn_type >= SELECT_SMALL) {
+			} else if (bg_record->conn_type[0] >= SELECT_SMALL) {
 				/* since we already checked to see if
 				   the cpus were good this means we are
 				   looking for a block in a range that
@@ -465,40 +475,21 @@ static bg_record_t *_find_matching_block(List block_list,
 				info("bg block %s conn-type not usable "
 				     "asking for %s bg_record is %s",
 				     bg_record->bg_block_id,
-				     conn_type_string(request->conn_type),
-				     conn_type_string(bg_record->conn_type));
+				     conn_type_string(request->conn_type[0]),
+				     conn_type_string(bg_record->conn_type[0]));
 			continue;
 		}
-#ifndef HAVE_BGL
+#ifdef HAVE_BGP
 	good_conn_type:
 #endif
 		/*****************************************/
 		/* match up geometry as "best" possible  */
 		/*****************************************/
-		if (request->geometry[X] == (uint16_t)NO_VAL)
-			;	/* Geometry not specified */
-		else {	/* match requested geometry */
-			bool match = false;
-			int rot_cnt = 0;	/* attempt six rotations  */
-
-			for (rot_cnt=0; rot_cnt<6; rot_cnt++) {
-				if ((bg_record->geo[X] >= request->geometry[X])
-				    && (bg_record->geo[Y]
-					>= request->geometry[Y])
-				    && (bg_record->geo[Z]
-					>= request->geometry[Z])) {
-					match = true;
-					break;
-				}
-				if (!request->rotate)
-					break;
-
-				_rotate_geo(request->geometry, rot_cnt);
-			}
+		if ((request->geometry[0] != (uint16_t)NO_VAL)
+		    && (!_check_rotate_geo(bg_record->geo, request->geometry,
+					   request->rotate)))
+			continue;
 
-			if (!match)
-				continue;	/* Not usable */
-		}
 		if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK)
 			info("we found one! %s", bg_record->bg_block_id);
 		break;
@@ -594,9 +585,9 @@ static int _check_for_booted_overlapping_blocks(
 			 */
 			if (bg_conf->layout_mode == LAYOUT_OVERLAP
 			    && ((overlap_check == 0 && bg_record->state
-				 != RM_PARTITION_READY)
+				 != BG_BLOCK_INITED)
 				|| (overlap_check == 1 && found_record->state
-				    != RM_PARTITION_FREE))) {
+				    != BG_BLOCK_FREE))) {
 
 				if (!is_test) {
 					rc = 1;
@@ -610,11 +601,11 @@ static int _check_for_booted_overlapping_blocks(
 				 && (bg_conf->layout_mode != LAYOUT_DYNAMIC)))
 			    && ((found_record->job_running != NO_JOB_RUNNING)
 				|| (found_record->state
-				    == RM_PARTITION_ERROR))) {
+				    & BG_BLOCK_ERROR_FLAG))) {
 				if ((found_record->job_running
 				     == BLOCK_ERROR_STATE)
 				    || (found_record->state
-					== RM_PARTITION_ERROR))
+					& BG_BLOCK_ERROR_FLAG))
 					error("can't use %s, "
 					      "overlapping block %s "
 					      "is in an error state.",
@@ -707,7 +698,8 @@ static int _check_for_booted_overlapping_blocks(
 								job_ptr->job_id,
 								-1,
 								(uint16_t)
-								NO_VAL)) {
+								NO_VAL,
+								false)) {
 							error("Couldn't "
 							      "requeue job %u, "
 							      "failing it: %s",
@@ -720,6 +712,7 @@ static int _check_for_booted_overlapping_blocks(
 								 job_id);
 						}
 					}
+
 					free_block_list(NO_VAL, tmp_list, 0, 0);
 					list_destroy(tmp_list);
 				}
@@ -741,7 +734,7 @@ static int _check_for_booted_overlapping_blocks(
  */
 
 static int _dynamically_request(List block_list, int *blocks_added,
-				ba_request_t *request,
+				select_ba_request_t *request,
 				char *user_req_nodes,
 				uint16_t query_mode)
 {
@@ -752,10 +745,7 @@ static int _dynamically_request(List block_list, int *blocks_added,
 	ListIterator itr = NULL;
 	int rc = SLURM_ERROR;
 	int create_try = 0;
-	uint16_t start_geo[SYSTEM_DIMENSIONS];
 
-	memcpy(start_geo, request->geometry,
-	       sizeof(uint16_t)*SYSTEM_DIMENSIONS);
 	if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK)
 		info("going to create %d", request->size);
 	list_of_lists = list_create(NULL);
@@ -795,17 +785,15 @@ static int _dynamically_request(List block_list, int *blocks_added,
 
 		/* 1- try empty space
 		   2- we see if we can create one in the
-		   unused bps
+		   unused mps
 		   3- see if we can create one in the non
-		   job running bps
+		   job running mps
 		*/
 		if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK)
 			info("trying with %d", create_try);
-		if ((new_blocks = create_dynamic_block(block_list,
-						       request, temp_list,
-						       true))) {
+		if ((new_blocks = create_dynamic_block(
+			     block_list, request, temp_list, true))) {
 			bg_record_t *bg_record = NULL;
-
 			while ((bg_record = list_pop(new_blocks))) {
 				if (block_exist_in_list(block_list, bg_record))
 					destroy_bg_record(bg_record);
@@ -827,7 +815,7 @@ static int _dynamically_request(List block_list, int *blocks_added,
 					list_append(block_list, bg_record);
 					(*blocks_added) = 1;
 				} else {
-					if (configure_block(bg_record)
+					if (bridge_block_create(bg_record)
 					    == SLURM_ERROR) {
 						destroy_bg_record(bg_record);
 						error("_dynamically_request: "
@@ -841,11 +829,8 @@ static int _dynamically_request(List block_list, int *blocks_added,
 					(*blocks_added) = 1;
 				}
 			}
-
 			list_destroy(new_blocks);
 			if (!*blocks_added) {
-				memcpy(request->geometry, start_geo,
-				       sizeof(uint16_t)*SYSTEM_DIMENSIONS);
 				rc = SLURM_ERROR;
 				continue;
 			}
@@ -858,10 +843,6 @@ static int _dynamically_request(List block_list, int *blocks_added,
 			rc = SLURM_ERROR;
 			break;
 		}
-
-		memcpy(request->geometry, start_geo,
-		       sizeof(uint16_t)*SYSTEM_DIMENSIONS);
-
 	}
 	list_iterator_destroy(itr);
 
@@ -893,33 +874,31 @@ static int _find_best_block_match(List block_list,
 {
 	bg_record_t *bg_record = NULL;
 	uint16_t req_geometry[SYSTEM_DIMENSIONS];
-	uint16_t conn_type, rotate, target_size = 0;
+	uint16_t target_size = 0;
 	uint32_t req_procs = job_ptr->details->min_cpus;
-	ba_request_t request;
-	int i;
+	select_ba_request_t request;
+	int i, dim;
 	int overlap_check = 0;
 	int allow = 0;
 	int check_image = 1;
 	uint32_t max_cpus = job_ptr->details->max_cpus;
 	char tmp_char[256];
 	static int total_cpus = 0;
-#ifdef HAVE_BGL
-	char *blrtsimage = NULL;        /* BlrtsImage for this request */
-#endif
-	char *linuximage = NULL;        /* LinuxImage for this request */
-	char *mloaderimage = NULL;      /* mloaderImage for this request */
-	char *ramdiskimage = NULL;      /* RamDiskImage for this request */
 	int rc = SLURM_SUCCESS;
 	int create_try = 0;
 	List overlapped_list = NULL;
 	bool is_test = SELECT_IS_TEST(query_mode);
 
-	if (!total_cpus)
-		total_cpus = DIM_SIZE[X] * DIM_SIZE[Y] * DIM_SIZE[Z]
-			* bg_conf->cpus_per_bp;
+	if (!total_cpus) {
+		int *cluster_dims = select_g_ba_get_dims();
+		total_cpus = 1;
+		for (dim=0; dim<SYSTEM_DIMENSIONS; dim++)
+			total_cpus *= cluster_dims[dim];
+		total_cpus *= bg_conf->cpus_per_mp;
+	}
 
 	if (req_nodes > max_nodes) {
-		error("can't run this job max bps is %u asking for %u",
+		error("can't run this job max mps is %u asking for %u",
 		      max_nodes, req_nodes);
 		return SLURM_ERROR;
 	}
@@ -936,76 +915,68 @@ static int _find_best_block_match(List block_list,
 		return SLURM_ERROR;
 	}
 
+	memset(&request, 0, sizeof(select_ba_request_t));
+
 	get_select_jobinfo(job_ptr->select_jobinfo->data,
-			   SELECT_JOBDATA_CONN_TYPE, &conn_type);
+			   SELECT_JOBDATA_CONN_TYPE, &request.conn_type);
 	get_select_jobinfo(job_ptr->select_jobinfo->data,
 			   SELECT_JOBDATA_GEOMETRY, &req_geometry);
 	get_select_jobinfo(job_ptr->select_jobinfo->data,
-			   SELECT_JOBDATA_ROTATE, &rotate);
+			   SELECT_JOBDATA_ROTATE, &request.rotate);
 
-#ifdef HAVE_BGL
-	if ((rc = _check_images(job_ptr, &blrtsimage, &linuximage,
-				&mloaderimage, &ramdiskimage)) == SLURM_ERROR)
-		goto end_it;
-#else
-	if ((rc = _check_images(job_ptr, &linuximage,
-				&mloaderimage, &ramdiskimage)) == SLURM_ERROR)
+	if ((rc = _check_images(job_ptr, &request)) == SLURM_ERROR)
 		goto end_it;
-#endif
 
-	if (req_geometry[X] != 0 && req_geometry[X] != (uint16_t)NO_VAL) {
+	if (req_geometry[0] != 0 && req_geometry[0] != (uint16_t)NO_VAL) {
+		char tmp_geo[SYSTEM_DIMENSIONS+1];
+
 		target_size = 1;
-		for (i=0; i<SYSTEM_DIMENSIONS; i++)
+		for (i=0; i<SYSTEM_DIMENSIONS; i++) {
 			target_size *= req_geometry[i];
+			tmp_geo[i] = alpha_num[req_geometry[i]];
+		}
+		tmp_geo[i] = '\0';
+
 		if (target_size != min_nodes) {
 			debug2("min_nodes not set correctly %u "
-			       "should be %u from %u%u%u",
+			       "should be %u from %s",
 			       min_nodes, target_size,
-			       req_geometry[X],
-			       req_geometry[Y],
-			       req_geometry[Z]);
+			       tmp_geo);
 			min_nodes = target_size;
 		}
 		if (!req_nodes)
 			req_nodes = min_nodes;
 	} else {
-		req_geometry[X] = (uint16_t)NO_VAL;
+		req_geometry[0] = (uint16_t)NO_VAL;
 		target_size = min_nodes;
 	}
 
 	*found_bg_record = NULL;
 	allow = 0;
 
-	memset(&request, 0, sizeof(ba_request_t));
-
-	for(i=0; i<SYSTEM_DIMENSIONS; i++)
-		request.geometry[i] = req_geometry[i];
+	memcpy(request.geometry, req_geometry, sizeof(req_geometry));
 
 	request.deny_pass = (uint16_t)NO_VAL;
 	request.save_name = NULL;
-	request.elongate_geos = NULL;
 	request.size = target_size;
 	request.procs = req_procs;
-	request.conn_type = conn_type;
-	request.rotate = rotate;
-	request.elongate = rotate;
+	request.elongate = request.rotate;
+	/* request.start[0] = 1; */
+	/* request.start[1] = 2; */
+	/* request.start[2] = 0; */
+	/* request.start[3] = 2; */
+	/* request.start_req = 1; */
 
-#ifdef HAVE_BGL
-	request.blrtsimage = blrtsimage;
-#endif
-	request.linuximage = linuximage;
-	request.mloaderimage = mloaderimage;
-	request.ramdiskimage = ramdiskimage;
 	if (job_ptr->details->req_node_bitmap)
-		request.avail_node_bitmap = job_ptr->details->req_node_bitmap;
+		request.avail_mp_bitmap = job_ptr->details->req_node_bitmap;
 	else
-		request.avail_node_bitmap = slurm_block_bitmap;
+		request.avail_mp_bitmap = slurm_block_bitmap;
 
 	/* since we only look at procs after this and not nodes we
 	 *  need to set a max_cpus if given
 	 */
 	if (max_cpus == (uint32_t)NO_VAL)
-		max_cpus = max_nodes * bg_conf->cpus_per_bp;
+		max_cpus = max_nodes * bg_conf->cpus_per_mp;
 
 	while (1) {
 		/* Here we are creating a list of all the blocks that
@@ -1026,6 +997,10 @@ static int _find_best_block_match(List block_list,
 						 overlap_check,
 						 overlapped_list,
 						 query_mode);
+		/* this could get altered in _find_matching_block so we
+		   need to reset it */
+		memcpy(request.geometry, req_geometry, sizeof(req_geometry));
+
 		if (!bg_record && overlapped_list
 		    && list_count(overlapped_list)) {
 			ListIterator itr =
@@ -1045,11 +1020,12 @@ static int _find_best_block_match(List block_list,
 
 		/* set the bitmap and do other allocation activities */
 		if (bg_record) {
+#ifdef HAVE_BG_L_P
 			if (!is_test) {
-				if (check_block_bp_states(
+				if (bridge_block_check_mp_states(
 					    bg_record->bg_block_id, 1)
 				    != SLURM_SUCCESS) {
-					/* check_block_bp_states will
+					/* check_block_mp_states will
 					   set this block in the main
 					   list to an error state, but
 					   we aren't looking
@@ -1060,7 +1036,7 @@ static int _find_best_block_match(List block_list,
 					*/
 					bg_record->job_running =
 						BLOCK_ERROR_STATE;
-					bg_record->state = RM_PARTITION_ERROR;
+					bg_record->state |= BG_BLOCK_ERROR_FLAG;
 					error("_find_best_block_match: Picked "
 					      "block (%s) had some issues with "
 					      "hardware, trying a different "
@@ -1069,19 +1045,15 @@ static int _find_best_block_match(List block_list,
 					continue;
 				}
 			}
+#endif
 			format_node_name(bg_record, tmp_char, sizeof(tmp_char));
 
 			debug("_find_best_block_match %s <%s>",
 			      bg_record->bg_block_id, tmp_char);
-			bit_and(slurm_block_bitmap, bg_record->bitmap);
+			bit_and(slurm_block_bitmap, bg_record->mp_bitmap);
 			rc = SLURM_SUCCESS;
 			*found_bg_record = bg_record;
 			goto end_it;
-		} else {
-			/* this gets altered in _find_matching_block so we
-			   reset it */
-			for(i=0; i<SYSTEM_DIMENSIONS; i++)
-				request.geometry[i] = req_geometry[i];
 		}
 
 		/* see if we can just reset the image and reboot the block */
@@ -1150,15 +1122,11 @@ static int _find_best_block_match(List block_list,
 			list_iterator_destroy(itr);
 
 			/* Block list is already in the correct order,
-			   earliest avaliable first,
+			   earliest available first,
 			   so the job list will also be. No need to
 			   sort. */
 			while (1) {
 				bool track_down_nodes = true;
-				/* this gets altered in
-				 * create_dynamic_block so we reset it */
-				for(i=0; i<SYSTEM_DIMENSIONS; i++)
-					request.geometry[i] = req_geometry[i];
 
 				if ((bg_record = list_pop(job_list))) {
 					if (bg_record->job_ptr) {
@@ -1208,6 +1176,7 @@ static int _find_best_block_match(List block_list,
 					   node on the system.
 					*/
 					track_down_nodes = false;
+
 				if (!(new_blocks = create_dynamic_block(
 					      block_list, &request, job_list,
 					      track_down_nodes))) {
@@ -1248,17 +1217,18 @@ static int _find_best_block_match(List block_list,
 						info("Appears we are trying "
 						     "to place this job on "
 						     "the block we just "
-						     "removed.");
+						     "removed %s.",
+						     bg_record->bg_block_id);
 					/* This means we placed the job on
 					   the block we just popped off.
 					*/
 					bit_and(slurm_block_bitmap,
-						bg_record->bitmap);
+						bg_record->mp_bitmap);
 					*found_bg_record = bg_record;
 					break;
 				}
 				bit_and(slurm_block_bitmap,
-					(*found_bg_record)->bitmap);
+					(*found_bg_record)->mp_bitmap);
 
 				if (bg_record) {
 					(*found_bg_record)->job_running =
@@ -1283,12 +1253,11 @@ no_match:
 	rc = SLURM_ERROR;
 
 end_it:
-#ifdef HAVE_BGL
-	xfree(blrtsimage);
-#endif
-	xfree(linuximage);
-	xfree(mloaderimage);
-	xfree(ramdiskimage);
+
+	xfree(request.blrtsimage);
+	xfree(request.linuximage);
+	xfree(request.mloaderimage);
+	xfree(request.ramdiskimage);
 
 	return rc;
 }
@@ -1297,129 +1266,33 @@ end_it:
 static int _sync_block_lists(List full_list, List incomp_list)
 {
 	ListIterator itr;
-	ListIterator itr2;
-	bg_record_t *bg_record = NULL;
 	bg_record_t *new_record = NULL;
 	int count = 0;
 
 	itr = list_iterator_create(full_list);
-	itr2 = list_iterator_create(incomp_list);
 	while ((new_record = list_next(itr))) {
 		/* Make sure we aren't adding any block that doesn't
-		   have a block_id.
+		   have a block_id.  If the record has an original
+		   then we don't need to add either, (since it is
+		   already in the list).
 		*/
-		if (!new_record->bg_block_id)
+		if (!new_record->bg_block_id || new_record->original)
 			continue;
-		while ((bg_record = list_next(itr2))) {
-			/* There is a possiblity the job here is
-			   preempting jobs that are configuring that
-			   just started on a block overlapping the
-			   block we want to use, so we needed to
-			   recreate the deallocating block.  Checking
-			   the free_cnt will make sure we add the
-			   correct block to the mix.
-			*/
-			if (bg_record->free_cnt == new_record->free_cnt
-			    && bit_equal(bg_record->bitmap, new_record->bitmap)
-			    && bit_equal(bg_record->ionode_bitmap,
-					 new_record->ionode_bitmap)) {
-				/* now make sure the conn_type is the same for
-				   regular sized blocks */
-				if ((bg_record->node_cnt
-				     >= bg_conf->bp_node_cnt)
-				    && bg_record->conn_type
-				    != new_record->conn_type)
-					continue;
-
-				break;
-			}
-		}
-
-		if (!bg_record) {
-			list_remove(itr);
-			if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK)
-				info("sync: adding %s %p",
-				     new_record->bg_block_id,
-				     new_record);
-			list_append(incomp_list, new_record);
-			last_bg_update = time(NULL);
-			count++;
-		}
-		list_iterator_reset(itr2);
+		list_remove(itr);
+		if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK)
+			info("sync: adding %s %p",
+			     new_record->bg_block_id, new_record);
+		list_append(incomp_list, new_record);
+		last_bg_update = time(NULL);
+		count++;
 	}
 	list_iterator_destroy(itr);
-	list_iterator_destroy(itr2);
-	sort_bg_record_inc_size(incomp_list);
+	if (count)
+		sort_bg_record_inc_size(incomp_list);
 
 	return count;
 }
 
-/* static void _build_select_struct(struct job_record *job_ptr, */
-/*                                  bitstr_t *bitmap, uint32_t node_cnt2) */
-/* { */
-/* 	int i, j, k; */
-/* 	int first_bit, last_bit; */
-/* 	uint32_t node_cpus, total_cpus = 0, node_cnt; */
-/* 	job_resources_t *job_resrcs_ptr; */
-
-/* 	if (job_ptr->select_job) { */
-/* 		error("select_p_job_test: already have select_job"); */
-/* 		free_job_resources(&job_ptr->select_job); */
-/* 	} */
-
-/* 	node_cnt = bit_set_count(bitmap); */
-/* 	job_ptr->select_job = job_resrcs_ptr = create_job_resources(); */
-/* 	job_resrcs_ptr->cpu_array_reps = xmalloc(sizeof(uint32_t)*node_cnt);*/
-/* 	job_resrcs_ptr->cpu_array_value = xmalloc(sizeof(uint16_t)*node_cnt);*/
-/* 	job_resrcs_ptr->cpus = xmalloc(sizeof(uint16_t) * node_cnt); */
-/* 	job_resrcs_ptr->cpus_used = xmalloc(sizeof(uint16_t) * node_cnt); */
-/* 	job_resrcs_ptr->nhosts = node_cnt; */
-/* 	job_resrcs_ptr->node_bitmap = bit_copy(bitmap); */
-/* 	if (job_resrcs_ptr->node_bitmap == NULL) */
-/* 		fatal("bit_copy malloc failure"); */
-/* 	job_resrcs_ptr->ncpus = job_ptr->num_cpus; */
-/* 	if (build_job_resources(job_resrcs_ptr, (void
- * 	*)node_record_table_ptr, 1)) */
-/* 		error("select_p_job_test: build_job_resources: %m"); */
-
-/* 	if (job_ptr->num_cpus <= bg_conf->cpus_per_bp) */
-/* 		node_cpus = job_ptr->num_cpus; */
-/* 	else */
-/* 		node_cpus = bg_conf->cpus_per_bp; */
-
-/* 	first_bit = bit_ffs(bitmap); */
-/* 	last_bit  = bit_fls(bitmap); */
-/* 	for (i=first_bit, j=0, k=-1; i<=last_bit; i++) { */
-/* 		if (!bit_test(bitmap, i)) */
-/* 			continue; */
-
-/* 		job_resrcs_ptr->cpus[j] = node_cpus; */
-/* 		if ((k == -1) || */
-/* 		    (job_resrcs_ptr->cpu_array_value[k] != node_cpus)) { */
-/* 			job_resrcs_ptr->cpu_array_cnt++; */
-/* 			job_resrcs_ptr->cpu_array_reps[++k] = 1; */
-/* 			job_resrcs_ptr->cpu_array_value[k] = node_cpus; */
-/* 		} else */
-/* 			job_resrcs_ptr->cpu_array_reps[k]++; */
-/* 		total_cpus += node_cpus; */
-/* #if 0 */
-/* 		/\* This function could be used to control allocation of */
-/* 		 * specific c-nodes for multiple job steps per job allocation.*/
-/* 		 * Such functionality is not currently support on BlueGene */
-/* 		 * systems. */
-/* 		 * Also see #ifdef HAVE_BG logic in common/job_resources.c *\/*/
-/* 		if (set_job_resources_node(job_resrcs_ptr, j)) */
-/* 			error("select_p_job_test:
- * 			set_job_resources_node: %m"); */
-/* #endif */
-/* 		j++; */
-/* 	} */
-/* 	if (job_resrcs_ptr->ncpus != total_cpus) { */
-/* 		error("select_p_job_test: ncpus mismatch %u != %u", */
-/* 		      job_resrcs_ptr->ncpus, total_cpus); */
-/* 	} */
-/* } */
-
 static void _build_select_struct(struct job_record *job_ptr,
 				 bitstr_t *bitmap, uint32_t node_cnt)
 {
@@ -1448,10 +1321,7 @@ static void _build_select_struct(struct job_record *job_ptr,
 		fatal("bit_copy malloc failure");
 
 	job_resrcs_ptr->cpu_array_cnt = 1;
-	if (job_ptr->details->min_cpus < bg_conf->cpus_per_bp)
-		job_resrcs_ptr->cpu_array_value[0] = job_ptr->details->min_cpus;
-	else
-		job_resrcs_ptr->cpu_array_value[0] = bg_conf->cpus_per_bp;
+	job_resrcs_ptr->cpu_array_value[0] = bg_conf->cpu_ratio;
 	job_resrcs_ptr->cpu_array_reps[0] = node_cnt;
 	total_cpus = bg_conf->cpu_ratio * node_cnt;
 
@@ -1540,13 +1410,16 @@ extern int submit_job(struct job_record *job_ptr, bitstr_t *slurm_block_bitmap,
 	int rc = SLURM_SUCCESS;
 	bg_record_t* bg_record = NULL;
 	char buf[256];
-	uint16_t conn_type = (uint16_t)NO_VAL;
+	uint16_t conn_type[SYSTEM_DIMENSIONS];
 	List block_list = NULL;
 	int blocks_added = 0;
 	time_t starttime = time(NULL);
 	uint16_t local_mode = mode;
 	int avail_cpus = num_unused_cpus;
+	int dim = 0;
 
+	for (dim=0; dim<SYSTEM_DIMENSIONS; dim++)
+		conn_type[dim] = (uint16_t)NO_VAL;
 	if (preemptee_candidates && preemptee_job_list
 	    && list_count(preemptee_candidates))
 		local_mode |= SELECT_MODE_PREEMPT_FLAG;
@@ -1562,14 +1435,18 @@ extern int submit_job(struct job_record *job_ptr, bitstr_t *slurm_block_bitmap,
 
 	get_select_jobinfo(job_ptr->select_jobinfo->data,
 			   SELECT_JOBDATA_CONN_TYPE, &conn_type);
-	if (conn_type == SELECT_NAV) {
-		if (bg_conf->bp_node_cnt == bg_conf->nodecard_node_cnt)
-			conn_type = SELECT_SMALL;
-		else if (min_nodes > 1)
-			conn_type = SELECT_TORUS;
-		else if (job_ptr->details->min_cpus < bg_conf->cpus_per_bp)
-			conn_type = SELECT_SMALL;
-
+	if (conn_type[0] == SELECT_NAV) {
+		if (bg_conf->mp_cnode_cnt == bg_conf->nodecard_cnode_cnt)
+			conn_type[0] = SELECT_SMALL;
+		else if (min_nodes > 1) {
+			for (dim=0; dim<SYSTEM_DIMENSIONS; dim++)
+				conn_type[dim] = SELECT_TORUS;
+		} else if (job_ptr->details->min_cpus < bg_conf->cpus_per_mp)
+			conn_type[0] = SELECT_SMALL;
+		else {
+			for (dim=1; dim<SYSTEM_DIMENSIONS; dim++)
+				conn_type[dim] = SELECT_NAV;
+		}
 		set_select_jobinfo(job_ptr->select_jobinfo->data,
 				   SELECT_JOBDATA_CONN_TYPE,
 				   &conn_type);
@@ -1587,36 +1464,39 @@ extern int submit_job(struct job_record *job_ptr, bitstr_t *slurm_block_bitmap,
 	sprint_select_jobinfo(job_ptr->select_jobinfo->data,
 			      buf, sizeof(buf),
 			      SELECT_PRINT_MIXED);
+
 	debug("bluegene:submit_job: %u mode=%d %s nodes=%u-%u-%u",
 	      job_ptr->job_id, local_mode, buf,
 	      min_nodes, req_nodes, max_nodes);
+
+#ifdef HAVE_BG_L_P
+# ifdef HAVE_BGL
 	sprint_select_jobinfo(job_ptr->select_jobinfo->data,
 			      buf, sizeof(buf),
 			      SELECT_PRINT_BLRTS_IMAGE);
-#ifdef HAVE_BGL
 	debug3("BlrtsImage=%s", buf);
+# endif
 	sprint_select_jobinfo(job_ptr->select_jobinfo->data,
 			      buf, sizeof(buf),
 			      SELECT_PRINT_LINUX_IMAGE);
-#endif
-#ifdef HAVE_BGL
+# ifdef HAVE_BGL
 	debug3("LinuxImage=%s", buf);
-#else
+# else
 	debug3("ComputNodeImage=%s", buf);
-#endif
-
-	sprint_select_jobinfo(job_ptr->select_jobinfo->data,
-			      buf, sizeof(buf),
-			      SELECT_PRINT_MLOADER_IMAGE);
-	debug3("MloaderImage=%s", buf);
+# endif
 	sprint_select_jobinfo(job_ptr->select_jobinfo->data,
 			      buf, sizeof(buf),
 			      SELECT_PRINT_RAMDISK_IMAGE);
-#ifdef HAVE_BGL
+# ifdef HAVE_BGL
 	debug3("RamDiskImage=%s", buf);
-#else
+# else
 	debug3("RamDiskIoLoadImage=%s", buf);
+# endif
 #endif
+	sprint_select_jobinfo(job_ptr->select_jobinfo->data,
+			      buf, sizeof(buf),
+			      SELECT_PRINT_MLOADER_IMAGE);
+	debug3("MloaderImage=%s", buf);
 
 	/* First look at the empty space, and then remove the
 	   preemptable jobs and try again. */
@@ -1627,7 +1507,7 @@ extern int submit_job(struct job_record *job_ptr, bitstr_t *slurm_block_bitmap,
 				    max_nodes, req_nodes,
 				    &bg_record, local_mode, avail_cpus);
 
-	if ((rc != SLURM_SUCCESS) && SELECT_IS_PREEMPT_SET(local_mode)) {
+	if (rc == SLURM_SUCCESS && SELECT_IS_PREEMPT_SET(local_mode)) {
 		ListIterator itr;
 		ListIterator job_itr;
 		bg_record_t *found_record;
@@ -1678,128 +1558,129 @@ extern int submit_job(struct job_record *job_ptr, bitstr_t *slurm_block_bitmap,
 	}
 
 	if (rc == SLURM_SUCCESS) {
-		if (bg_record) {
-			/* Here we see if there is a job running since
-			 * some jobs take awhile to finish we need to
-			 * make sure the time of the end is in the
-			 * future.  If it isn't (meaning it is in the
-			 * past or current time) we add 5 seconds to
-			 * it so we don't use the block immediately.
-			 */
-			if (bg_record->job_ptr
-			    && bg_record->job_ptr->end_time) {
-				if (bg_record->job_ptr->end_time <= starttime)
-					starttime += 5;
-				else
-					starttime =
-						bg_record->job_ptr->end_time;
-			} else if (bg_record->job_running == BLOCK_ERROR_STATE)
-				starttime = INFINITE;
-
-			/* make sure the job is eligible to run */
-			if (job_ptr->details->begin_time > starttime)
-				starttime = job_ptr->details->begin_time;
-
-			job_ptr->start_time = starttime;
+		if (!bg_record)
+			fatal("we got a success, but no block back");
+		/* Here we see if there is a job running since
+		 * some jobs take awhile to finish we need to
+		 * make sure the time of the end is in the
+		 * future.  If it isn't (meaning it is in the
+		 * past or current time) we add 5 seconds to
+		 * it so we don't use the block immediately.
+		 */
+		if (bg_record->job_ptr
+		    && bg_record->job_ptr->end_time) {
+			if (bg_record->job_ptr->end_time <= starttime)
+				starttime += 5;
+			else
+				starttime = bg_record->job_ptr->end_time;
+		} else if (bg_record->job_running == BLOCK_ERROR_STATE)
+			starttime = INFINITE;
+
+		/* make sure the job is eligible to run */
+		if (job_ptr->details->begin_time > starttime)
+			starttime = job_ptr->details->begin_time;
+
+		job_ptr->start_time = starttime;
+
+		set_select_jobinfo(job_ptr->select_jobinfo->data,
+				   SELECT_JOBDATA_NODES,
+				   bg_record->mp_str);
+		set_select_jobinfo(job_ptr->select_jobinfo->data,
+				   SELECT_JOBDATA_IONODES,
+				   bg_record->ionode_str);
+		if (!bg_record->bg_block_id) {
+			debug("%d can start unassigned job %u "
+			      "at %ld on %s",
+			      local_mode, job_ptr->job_id,
+			      starttime, bg_record->mp_str);
 
 			set_select_jobinfo(job_ptr->select_jobinfo->data,
-					   SELECT_JOBDATA_NODES,
-					   bg_record->nodes);
+					   SELECT_JOBDATA_BLOCK_PTR,
+					   NULL);
 			set_select_jobinfo(job_ptr->select_jobinfo->data,
-					   SELECT_JOBDATA_IONODES,
-					   bg_record->ionodes);
-			if (!bg_record->bg_block_id) {
-				debug("%d can start unassigned job %u "
-				      "at %ld on %s",
-				      local_mode, job_ptr->job_id,
-				      starttime, bg_record->nodes);
-
-				set_select_jobinfo(
-					job_ptr->select_jobinfo->data,
-					SELECT_JOBDATA_BLOCK_ID,
-					"unassigned");
+					   SELECT_JOBDATA_NODE_CNT,
+					   &bg_record->cnode_cnt);
+		} else {
+			if ((bg_record->ionode_str)
+			    && (job_ptr->part_ptr->max_share <= 1))
+				error("Small block used in "
+				      "non-shared partition");
+
+			debug("%d(%d) can start job %u "
+			      "at %ld on %s(%s) %d",
+			      local_mode, mode, job_ptr->job_id,
+			      starttime, bg_record->bg_block_id,
+			      bg_record->mp_str,
+			      SELECT_IS_MODE_RUN_NOW(local_mode));
+
+			if (SELECT_IS_MODE_RUN_NOW(local_mode)) {
+				/* Set this up to be the
+				   correct pointer since we
+				   probably are working off a
+				   copy.
+				*/
+				if (bg_record->original)
+					bg_record = bg_record->original;
 				set_select_jobinfo(
 					job_ptr->select_jobinfo->data,
-					SELECT_JOBDATA_NODE_CNT,
-					&bg_record->node_cnt);
-			} else {
-				if ((bg_record->ionodes)
-				    && (job_ptr->part_ptr->max_share <= 1))
-					error("Small block used in "
-					      "non-shared partition");
-
-				debug("%d(%d) can start job %u "
-				      "at %ld on %s(%s) %d",
-				      local_mode, mode, job_ptr->job_id,
-				      starttime, bg_record->bg_block_id,
-				      bg_record->nodes,
-				      SELECT_IS_MODE_RUN_NOW(local_mode));
-
-				if (SELECT_IS_MODE_RUN_NOW(local_mode)) {
-					set_select_jobinfo(
-						job_ptr->select_jobinfo->data,
-						SELECT_JOBDATA_BLOCK_ID,
-						bg_record->bg_block_id);
-					if (job_ptr) {
-						bg_record->job_running =
-							job_ptr->job_id;
-						bg_record->job_ptr = job_ptr;
+					SELECT_JOBDATA_BLOCK_PTR,
+					bg_record);
+				if (job_ptr) {
+					bg_record->job_running =
+						job_ptr->job_id;
+					bg_record->job_ptr = job_ptr;
 
-						job_ptr->job_state |=
-							JOB_CONFIGURING;
-						last_bg_update = time(NULL);
-					}
-				} else {
-					set_select_jobinfo(
-						job_ptr->select_jobinfo->data,
-						SELECT_JOBDATA_BLOCK_ID,
-						"unassigned");
-					/* Just to make sure we don't
-					   end up using this on
-					   another job, or we have to
-					   wait until preemption is
-					   done.
-					*/
-					bg_record->job_ptr = NULL;
-					bg_record->job_running = NO_JOB_RUNNING;
+					job_ptr->job_state |= JOB_CONFIGURING;
+					last_bg_update = time(NULL);
 				}
+			} else {
 				set_select_jobinfo(
 					job_ptr->select_jobinfo->data,
-					SELECT_JOBDATA_NODE_CNT,
-					&bg_record->node_cnt);
-			}
-			if (SELECT_IS_MODE_RUN_NOW(local_mode))
-				_build_select_struct(job_ptr,
-						     slurm_block_bitmap,
-						     bg_record->node_cnt);
-			/* set up the preempted job list */
-			if (SELECT_IS_PREEMPT_SET(local_mode)) {
-				if (*preemptee_job_list)
-					list_destroy(*preemptee_job_list);
-				*preemptee_job_list = _get_preemptables(
-					local_mode, bg_record,
-					preemptee_candidates);
+					SELECT_JOBDATA_BLOCK_PTR,
+					NULL);
+				/* Just to make sure we don't
+				   end up using this on
+				   another job, or we have to
+				   wait until preemption is
+				   done.
+				*/
+				bg_record->job_ptr = NULL;
+				bg_record->job_running = NO_JOB_RUNNING;
 			}
-			if (!bg_record->bg_block_id) {
-				/* This is a fake record so we need to
-				 * destroy it after we get the info from
-				 * it.  If it was just testing then
-				 * we added this record to the
-				 * block_list.  If this is the case
-				 * it will be handled if se sync the
-				 * lists.  But we don't want to do
-				 * that so we will set blocks_added to
-				 * 0 so it doesn't happen. */
-				if (!blocks_added) {
-					destroy_bg_record(bg_record);
-					bg_record = NULL;
-				}
-				blocks_added = 0;
+
+			set_select_jobinfo(job_ptr->select_jobinfo->data,
+					   SELECT_JOBDATA_NODE_CNT,
+					   &bg_record->cnode_cnt);
+		}
+		if (SELECT_IS_MODE_RUN_NOW(local_mode))
+			_build_select_struct(job_ptr,
+					     slurm_block_bitmap,
+					     bg_record->cnode_cnt);
+		/* set up the preempted job list */
+		if (SELECT_IS_PREEMPT_SET(local_mode)) {
+			if (*preemptee_job_list)
+				list_destroy(*preemptee_job_list);
+			*preemptee_job_list = _get_preemptables(
+				local_mode, bg_record,
+				preemptee_candidates);
+		}
+		if (!bg_record->bg_block_id) {
+			/* This is a fake record so we need to
+			 * destroy it after we get the info from
+			 * it.  If it was just testing then
+			 * we added this record to the
+			 * block_list.  If this is the case
+			 * it will be handled if se sync the
+			 * lists.  But we don't want to do
+			 * that so we will set blocks_added to
+			 * 0 so it doesn't happen. */
+			if (!blocks_added) {
+				destroy_bg_record(bg_record);
+				bg_record = NULL;
 			}
-			last_job_update = time(NULL);
-		} else {
-			error("we got a success, but no block back");
+			blocks_added = 0;
 		}
+		last_job_update = time(NULL);
 	}
 
 	if (bg_conf->layout_mode == LAYOUT_DYNAMIC) {
@@ -1809,6 +1690,7 @@ extern int submit_job(struct job_record *job_ptr, bitstr_t *slurm_block_bitmap,
 		slurm_mutex_unlock(&block_state_mutex);
 		slurm_mutex_unlock(&create_dynamic_mutex);
 	}
+
 	list_destroy(block_list);
 	return rc;
 }
diff --git a/src/plugins/select/bluegene/plugin/bg_job_place.h b/src/plugins/select/bluegene/bg_job_place.h
similarity index 98%
rename from src/plugins/select/bluegene/plugin/bg_job_place.h
rename to src/plugins/select/bluegene/bg_job_place.h
index 1697a9dd7..8fb87df20 100644
--- a/src/plugins/select/bluegene/plugin/bg_job_place.h
+++ b/src/plugins/select/bluegene/bg_job_place.h
@@ -7,7 +7,7 @@
  *  Written by Dan Phung <phung4@llnl.gov> et. al.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/select/bluegene/plugin/bg_job_run.c b/src/plugins/select/bluegene/bg_job_run.c
similarity index 52%
rename from src/plugins/select/bluegene/plugin/bg_job_run.c
rename to src/plugins/select/bluegene/bg_job_run.c
index ad46f42f3..5253d2456 100644
--- a/src/plugins/select/bluegene/plugin/bg_job_run.c
+++ b/src/plugins/select/bluegene/bg_job_run.c
@@ -5,11 +5,12 @@
  *  $Id$
  *****************************************************************************
  *  Copyright (C) 2004-2006 The Regents of the University of California.
+ *  Copyright (C) 2011 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
- *  Written by Morris Jette <jette1@llnl.gov>
+ *  Written by Morris Jette <jette1@llnl.gov>, Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -54,7 +55,7 @@
 #include <signal.h>
 #include <unistd.h>
 
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm_errno.h"
 
 #include "src/common/list.h"
 #include "src/common/macros.h"
@@ -62,227 +63,36 @@
 #include "src/common/uid.h"
 #include "src/common/xstring.h"
 #include "src/slurmctld/proc_req.h"
-#include "bluegene.h"
-
-#define MAX_POLL_RETRIES    220
-#define POLL_INTERVAL        3
+#include "bg_core.h"
 
 bool deleting_old_blocks_flag = 0;
 
 enum update_op {START_OP, TERM_OP, SYNC_OP};
 
 typedef struct {
-	enum update_op op;	/* start | terminate | sync */
+	char *bg_block_id;
+	char *blrtsimage;       /* BlrtsImage for this block */
+	uint16_t conn_type[HIGHEST_DIMENSIONS]; /* needed to boot small
+						   blocks into HTC
+						   mode or not */
 	struct job_record *job_ptr;	/* pointer to job running on
 					 * block or NULL if no job */
-	uint16_t reboot;	/* reboot block before starting job */
-#ifndef HAVE_BGL
-	uint16_t conn_type;     /* needed to boot small blocks into
-				   HTC mode or not */
-#endif
-	pm_partition_id_t bg_block_id;
-	char *blrtsimage;       /* BlrtsImage for this block */
 	char *linuximage;       /* LinuxImage for this block */
 	char *mloaderimage;     /* mloaderImage for this block */
+	enum update_op op;	/* start | terminate | sync */
 	char *ramdiskimage;     /* RamDiskImage for this block */
+	uint16_t reboot;	/* reboot block before starting job */
 } bg_action_t;
 
-#ifdef HAVE_BG_FILES
-static int	_remove_job(db_job_id_t job_id, char *block_id);
-#endif
-
 static void	_destroy_bg_action(void *x);
 static int	_excise_block(List block_list,
-			      pm_partition_id_t bg_block_id,
+			      char *bg_block_id,
 			      char *nodes);
 static List	_get_all_allocated_blocks(void);
 static void *	_block_agent(void *args);
 static void	_block_op(bg_action_t *bg_action_ptr);
 static void	_start_agent(bg_action_t *bg_action_ptr);
 static void	_sync_agent(bg_action_t *bg_action_ptr);
-static void	_term_agent(bg_action_t *bg_action_ptr);
-
-
-#ifdef HAVE_BG_FILES
-/* Kill a job and remove its record from MMCS */
-static int _remove_job(db_job_id_t job_id, char *block_id)
-{
-	int rc;
-	int count = 0;
-	rm_job_t *job_rec = NULL;
-	rm_job_state_t job_state;
-	bool is_history = false;
-
-	debug("removing job %d from MMCS on block %s", job_id, block_id);
-	while (1) {
-		if (count)
-			sleep(POLL_INTERVAL);
-		count++;
-
-		/* Find the job */
-		if ((rc = bridge_get_job(job_id, &job_rec)) != STATUS_OK) {
-
-			if (rc == JOB_NOT_FOUND) {
-				debug("job %d removed from MMCS", job_id);
-				return STATUS_OK;
-			}
-
-			error("bridge_get_job(%d): %s", job_id,
-			      bg_err_str(rc));
-			continue;
-		}
-
-		if ((rc = bridge_get_data(job_rec, RM_JobState, &job_state))
-		    != STATUS_OK) {
-			(void) bridge_free_job(job_rec);
-			if (rc == JOB_NOT_FOUND) {
-				debug("job %d not found in MMCS", job_id);
-				return STATUS_OK;
-			}
-
-			error("bridge_get_data(RM_JobState) for jobid=%d "
-			      "%s", job_id, bg_err_str(rc));
-			continue;
-		}
-
-		/* If this job is in the history table we
-		   should just exit here since it is marked
-		   incorrectly */
-		if ((rc = bridge_get_data(job_rec, RM_JobInHist,
-					  &is_history))
-		    != STATUS_OK) {
-			(void) bridge_free_job(job_rec);
-			if (rc == JOB_NOT_FOUND) {
-				debug("job %d removed from MMCS", job_id);
-				return STATUS_OK;
-			}
-
-			error("bridge_get_data(RM_JobInHist) for jobid=%d "
-			      "%s", job_id, bg_err_str(rc));
-			continue;
-		}
-
-		if ((rc = bridge_free_job(job_rec)) != STATUS_OK)
-			error("bridge_free_job: %s", bg_err_str(rc));
-
-		debug2("job %d on block %s is in state %d history %d",
-		       job_id, block_id, job_state, is_history);
-
-		/* check the state and process accordingly */
-		if (is_history) {
-			debug2("Job %d on block %s isn't in the "
-			       "active job table anymore, final state was %d",
-			       job_id, block_id, job_state);
-			return STATUS_OK;
-		} else if (job_state == RM_JOB_TERMINATED)
-			return STATUS_OK;
-		else if (job_state == RM_JOB_DYING) {
-			if (count > MAX_POLL_RETRIES)
-				error("Job %d on block %s isn't dying, "
-				      "trying for %d seconds", job_id,
-				      block_id, count*POLL_INTERVAL);
-			continue;
-		} else if (job_state == RM_JOB_ERROR) {
-			error("job %d on block %s is in a error state.",
-			      job_id, block_id);
-
-			//free_bg_block();
-			return STATUS_OK;
-		}
-
-		/* we have been told the next 2 lines do the same
-		 * thing, but I don't believe it to be true.  In most
-		 * cases when you do a signal of SIGTERM the mpirun
-		 * process gets killed with a SIGTERM.  In the case of
-		 * bridge_cancel_job it always gets killed with a
-		 * SIGKILL.  From IBM's point of view that is a bad
-		 * deally, so we are going to use signal ;).  Sending
-		 * a SIGKILL will kill the mpirun front end process,
-		 * and if you kill that jobs will never get cleaned up and
-		 * you end up with ciod unreacahble on the next job.
-		 */
-
-//		 rc = bridge_cancel_job(job_id);
-		rc = bridge_signal_job(job_id, SIGTERM);
-
-		if (rc != STATUS_OK) {
-			if (rc == JOB_NOT_FOUND) {
-				debug("job %d on block %s removed from MMCS",
-				      job_id, block_id);
-				return STATUS_OK;
-			}
-			if (rc == INCOMPATIBLE_STATE)
-				debug("job %d on block %s is in an "
-				      "INCOMPATIBLE_STATE",
-				      job_id, block_id);
-			else
-				error("bridge_signal_job(%d): %s", job_id,
-				      bg_err_str(rc));
-		} else if (count > MAX_POLL_RETRIES)
-			error("Job %d on block %s is in state %d and "
-			      "isn't dying, and doesn't appear to be "
-			      "responding to SIGTERM, trying for %d seconds",
-			      job_id, block_id, job_state, count*POLL_INTERVAL);
-
-	}
-
-	error("Failed to remove job %d from MMCS", job_id);
-	return INTERNAL_ERROR;
-}
-
-#endif
-
-/* block_state_mutex should be locked before calling this function */
-static int _reset_block(bg_record_t *bg_record)
-{
-	int rc = SLURM_SUCCESS;
-	if (bg_record) {
-		if (bg_record->job_running > NO_JOB_RUNNING) {
-			bg_record->job_running = NO_JOB_RUNNING;
-			bg_record->job_ptr = NULL;
-		}
-		/* remove user from list */
-
-		if (bg_record->target_name) {
-			if (strcmp(bg_record->target_name,
-				  bg_conf->slurm_user_name)) {
-				xfree(bg_record->target_name);
-				bg_record->target_name =
-					xstrdup(bg_conf->slurm_user_name);
-			}
-			update_block_user(bg_record, 1);
-		} else {
-			bg_record->target_name =
-				xstrdup(bg_conf->slurm_user_name);
-		}
-
-
-		/* Don't reset these (boot_(state/count)), they will be
-		   reset when state changes, and needs to outlast a job
-		   allocation.
-		*/
-		/* bg_record->boot_state = 0; */
-		/* bg_record->boot_count = 0; */
-
-		last_bg_update = time(NULL);
-		/* Only remove from the job_running list if
-		   job_running == NO_JOB_RUNNING, since blocks in
-		   error state could also be in this list and we don't
-		   want to remove them.
-		*/
-		if (bg_record->job_running == NO_JOB_RUNNING)
-			if (remove_from_bg_list(bg_lists->job_running,
-					       bg_record)
-			   == SLURM_SUCCESS) {
-				num_unused_cpus += bg_record->cpu_cnt;
-			}
-	} else {
-		error("No block given to reset");
-		rc = SLURM_ERROR;
-	}
-
-	return rc;
-}
 
 /* block_state_mutex should be locked before
  * calling this function.  This should only be called in _start_agent.
@@ -323,182 +133,6 @@ static void _destroy_bg_action(void *x)
 	}
 }
 
-static void _remove_jobs_on_block_and_reset(rm_job_list_t *job_list,
-					    int job_cnt, char *block_id)
-{
-	bg_record_t *bg_record = NULL;
-	int job_remove_failed = 0;
-
-#ifdef HAVE_BG_FILES
-	rm_element_t *job_elem = NULL;
-	pm_partition_id_t job_block;
-	db_job_id_t job_id;
-	int i, rc;
-#endif
-
-	if (!job_list)
-		job_cnt = 0;
-
-	if (!block_id) {
-		error("_remove_jobs_on_block_and_reset: no block name given");
-		return;
-	}
-
-#ifdef HAVE_BG_FILES
-	for (i=0; i<job_cnt; i++) {
-		if (i) {
-			if ((rc = bridge_get_data(job_list, RM_JobListNextJob,
-						  &job_elem)) != STATUS_OK) {
-				error("bridge_get_data"
-				      "(RM_JobListNextJob): %s",
-				      bg_err_str(rc));
-				continue;
-			}
-		} else {
-			if ((rc = bridge_get_data(job_list, RM_JobListFirstJob,
-						  &job_elem)) != STATUS_OK) {
-				error("bridge_get_data"
-				      "(RM_JobListFirstJob): %s",
-				      bg_err_str(rc));
-				continue;
-			}
-		}
-
-		if (!job_elem) {
-			error("No Job Elem breaking out job count = %d", i);
-			break;
-		}
-		if ((rc = bridge_get_data(job_elem, RM_JobPartitionID,
-					  &job_block))
-		    != STATUS_OK) {
-			error("bridge_get_data(RM_JobPartitionID) %s: %s",
-			      job_block, bg_err_str(rc));
-			continue;
-		}
-
-		if (!job_block) {
-			error("No blockID returned from Database");
-			continue;
-		}
-
-		debug2("looking at block %s looking for %s",
-		       job_block, block_id);
-
-		if (strcmp(job_block, block_id)) {
-			free(job_block);
-			continue;
-		}
-
-		free(job_block);
-
-		if ((rc = bridge_get_data(job_elem, RM_JobDBJobID, &job_id))
-		    != STATUS_OK) {
-			error("bridge_get_data(RM_JobDBJobID): %s",
-			      bg_err_str(rc));
-			continue;
-		}
-		debug2("got job_id %d",job_id);
-		if ((rc = _remove_job(job_id, block_id)) == INTERNAL_ERROR) {
-			job_remove_failed = 1;
-			break;
-		}
-	}
-#else
-	/* Simpulate better job completion since on a real system it
-	 * could take up minutes to kill a job. */
-	if (job_cnt)
-		sleep(2);
-#endif
-	/* remove the block's users */
-	slurm_mutex_lock(&block_state_mutex);
-	bg_record = find_bg_record_in_list(bg_lists->main, block_id);
-	if (bg_record) {
-		debug("got the record %s user is %s",
-		      bg_record->bg_block_id,
-		      bg_record->user_name);
-
-		if (job_remove_failed) {
-			if (bg_record->nodes)
-				slurm_drain_nodes(
-					bg_record->nodes,
-					"_term_agent: Couldn't remove job",
-					slurm_get_slurm_user_id());
-			else
-				error("Block %s doesn't have a node list.",
-				      block_id);
-		}
-
-		_reset_block(bg_record);
-	} else if (bg_conf->layout_mode == LAYOUT_DYNAMIC) {
-		debug2("Hopefully we are destroying this block %s "
-		       "since it isn't in the bg_lists->main",
-		       block_id);
-	} else if (job_cnt) {
-		error("Could not find block %s previously assigned to job.  "
-		      "If this is happening at startup and you just changed "
-		      "your bluegene.conf this is expected.  Else you should "
-		      "probably restart your slurmctld since this shouldn't "
-		      "happen outside of that.",
-		      block_id);
-	}
-	slurm_mutex_unlock(&block_state_mutex);
-
-}
-
-static void _reset_block_list(List block_list)
-{
-	ListIterator itr = NULL;
-	bg_record_t *bg_record = NULL;
-	rm_job_list_t *job_list = NULL;
-	int jobs = 0;
-
-#ifdef HAVE_BG_FILES
-	int live_states, rc;
-#endif
-
-	if (!block_list)
-		return;
-
-#ifdef HAVE_BG_FILES
-	debug2("getting the job info");
-	live_states = JOB_ALL_FLAG
-		& (~JOB_TERMINATED_FLAG)
-		& (~JOB_KILLED_FLAG)
-		& (~JOB_ERROR_FLAG);
-
-	if ((rc = bridge_get_jobs(live_states, &job_list)) != STATUS_OK) {
-		error("bridge_get_jobs(): %s", bg_err_str(rc));
-
-		return;
-	}
-
-	if ((rc = bridge_get_data(job_list, RM_JobListSize, &jobs))
-	    != STATUS_OK) {
-		error("bridge_get_data(RM_JobListSize): %s", bg_err_str(rc));
-		jobs = 0;
-	}
-	debug2("job count %d",jobs);
-#endif
-	itr = list_iterator_create(block_list);
-	while ((bg_record = list_next(itr))) {
-		info("Queue clearing of users of BG block %s",
-		     bg_record->bg_block_id);
-#ifndef HAVE_BG_FILES
-		/* simulate jobs running and need to be cleared from MMCS */
-		if (bg_record->job_ptr)
-			jobs = 1;
-#endif
-		_remove_jobs_on_block_and_reset(job_list, jobs,
-						bg_record->bg_block_id);
-	}
-	list_iterator_destroy(itr);
-
-#ifdef HAVE_BG_FILES
-	if ((rc = bridge_free_job_list(job_list)) != STATUS_OK)
-		error("bridge_free_job_list(): %s", bg_err_str(rc));
-#endif
-}
-
 /* Update block user and reboot as needed */
 static void _sync_agent(bg_action_t *bg_action_ptr)
 {
@@ -519,6 +153,9 @@ static void _sync_agent(bg_action_t *bg_action_ptr)
 		bg_action_ptr->job_ptr->details->min_cpus = bg_record->cpu_cnt;
 	bg_record->job_running = bg_action_ptr->job_ptr->job_id;
 	bg_record->job_ptr = bg_action_ptr->job_ptr;
+	set_select_jobinfo(bg_record->job_ptr->select_jobinfo->data,
+			   SELECT_JOBDATA_BLOCK_PTR,
+			   bg_record);
 
 	if (!block_ptr_exist_in_list(bg_lists->job_running, bg_record)) {
 		list_push(bg_lists->job_running, bg_record);
@@ -527,7 +164,7 @@ static void _sync_agent(bg_action_t *bg_action_ptr)
 	if (!block_ptr_exist_in_list(bg_lists->booted, bg_record))
 		list_push(bg_lists->booted, bg_record);
 
-	if (bg_record->state == RM_PARTITION_READY) {
+	if (bg_record->state == BG_BLOCK_INITED) {
 		if (bg_record->job_ptr) {
 			bg_record->job_ptr->job_state &= (~JOB_CONFIGURING);
 			last_job_update = time(NULL);
@@ -551,7 +188,7 @@ static void _sync_agent(bg_action_t *bg_action_ptr)
 			slurm_mutex_unlock(&block_state_mutex);
 
 	} else {
-		if (bg_record->state != RM_PARTITION_CONFIGURING) {
+		if (bg_record->state != BG_BLOCK_BOOTING) {
 			error("Block %s isn't ready and isn't "
 			      "being configured! Starting job again.",
 			      bg_action_ptr->bg_block_id);
@@ -587,26 +224,20 @@ static void _start_agent(bg_action_t *bg_action_ptr)
 	}
 
 	if (bg_record->job_running <= NO_JOB_RUNNING) {
-		// _reset_block(bg_record); should already happened
+		// bg_reset_block(bg_record); should already happened
 		slurm_mutex_unlock(&block_state_mutex);
 		debug("job %u finished during the queueing job "
 		      "(everything is ok)",
 		      bg_action_ptr->job_ptr->job_id);
 		return;
 	}
-	if (bg_record->state == RM_PARTITION_DEALLOCATING) {
+	if (bg_record->state == BG_BLOCK_TERM) {
 		debug("Block is in Deallocating state, waiting for free.");
-		bg_free_block(bg_record, 1, 1);
-		/* no reason to reboot here since we are already
-		   deallocating */
-		bg_action_ptr->reboot = 0;
-		/* Since bg_free_block will unlock block_state_mutex
-		   we need to make sure the block we want is still
-		   around.  Failure will unlock this so no need to
-		   unlock before return.
-		*/
-		if (!_make_sure_block_still_exists(bg_action_ptr, bg_record))
-			return;
+		/* It doesn't appear state of a small block
+		   (conn_type) is held on a BGP system so
+		   if we to reset it so, just set the reboot flag and
+		   handle it later in that code. */
+		bg_action_ptr->reboot = 1;
 	}
 
 	delete_list = list_create(NULL);
@@ -646,7 +277,7 @@ static void _start_agent(bg_action_t *bg_action_ptr)
 	if (requeue_job) {
 		list_destroy(delete_list);
 
-		_reset_block(bg_record);
+		bg_reset_block(bg_record);
 
 		slurm_mutex_unlock(&block_state_mutex);
 		bg_requeue_job(bg_action_ptr->job_ptr->job_id, 0);
@@ -668,12 +299,14 @@ static void _start_agent(bg_action_t *bg_action_ptr)
 
 	slurm_mutex_lock(&block_state_mutex);
 	/* Failure will unlock block_state_mutex so no need to unlock before
-	   return. */
+	   return. Failure will unlock block_state_mutex so no need to unlock
+	   before return.
+	*/
 	if (!_make_sure_block_still_exists(bg_action_ptr, bg_record))
 		return;
 
 	if (bg_record->job_running <= NO_JOB_RUNNING) {
-		// _reset_block(bg_record); should already happened
+		// bg_reset_block(bg_record); should already happened
 		slurm_mutex_unlock(&block_state_mutex);
 		debug("job %u already finished before boot",
 		      bg_action_ptr->job_ptr->job_id);
@@ -690,111 +323,112 @@ static void _start_agent(bg_action_t *bg_action_ptr)
 		bg_record->blrtsimage = xstrdup(bg_action_ptr->blrtsimage);
 		rc = 1;
 	}
-#else
-	if ((bg_action_ptr->conn_type >= SELECT_SMALL)
-	   && (bg_action_ptr->conn_type != bg_record->conn_type)) {
+#elif defined HAVE_BGP
+	if ((bg_action_ptr->conn_type[0] >= SELECT_SMALL)
+	   && (bg_action_ptr->conn_type[0] != bg_record->conn_type[0])) {
 		debug3("changing small block mode from %s to %s",
-		       conn_type_string(bg_record->conn_type),
-		       conn_type_string(bg_action_ptr->conn_type));
+		       conn_type_string(bg_record->conn_type[0]),
+		       conn_type_string(bg_action_ptr->conn_type[0]));
 		rc = 1;
-#ifndef HAVE_BG_FILES
+# ifndef HAVE_BG_FILES
 		/* since we don't check state on an emulated system we
 		 * have to change it here
 		 */
-		bg_record->conn_type = bg_action_ptr->conn_type;
-#endif
+		bg_record->conn_type[0] = bg_action_ptr->conn_type[0];
+# endif
 	}
 #endif
+
+#ifdef HAVE_BG_L_P
 	if (bg_action_ptr->linuximage
 	   && strcasecmp(bg_action_ptr->linuximage, bg_record->linuximage)) {
-#ifdef HAVE_BGL
+# ifdef HAVE_BGL
 		debug3("changing LinuxImage from %s to %s",
 		       bg_record->linuximage, bg_action_ptr->linuximage);
-#else
+# else
 		debug3("changing CnloadImage from %s to %s",
 		       bg_record->linuximage, bg_action_ptr->linuximage);
-#endif
+# endif
 		xfree(bg_record->linuximage);
 		bg_record->linuximage = xstrdup(bg_action_ptr->linuximage);
 		rc = 1;
 	}
-	if (bg_action_ptr->mloaderimage
-	   && strcasecmp(bg_action_ptr->mloaderimage,
-			 bg_record->mloaderimage)) {
-		debug3("changing MloaderImage from %s to %s",
-		       bg_record->mloaderimage, bg_action_ptr->mloaderimage);
-		xfree(bg_record->mloaderimage);
-		bg_record->mloaderimage = xstrdup(bg_action_ptr->mloaderimage);
-		rc = 1;
-	}
 	if (bg_action_ptr->ramdiskimage
 	   && strcasecmp(bg_action_ptr->ramdiskimage,
 			 bg_record->ramdiskimage)) {
-#ifdef HAVE_BGL
+# ifdef HAVE_BGL
 		debug3("changing RamDiskImage from %s to %s",
 		       bg_record->ramdiskimage, bg_action_ptr->ramdiskimage);
-#else
+# else
 		debug3("changing IoloadImage from %s to %s",
 		       bg_record->ramdiskimage, bg_action_ptr->ramdiskimage);
-#endif
+# endif
 		xfree(bg_record->ramdiskimage);
 		bg_record->ramdiskimage = xstrdup(bg_action_ptr->ramdiskimage);
 		rc = 1;
 	}
+#endif
+	if (bg_action_ptr->mloaderimage
+	   && strcasecmp(bg_action_ptr->mloaderimage,
+			 bg_record->mloaderimage)) {
+		debug3("changing MloaderImage from %s to %s",
+		       bg_record->mloaderimage, bg_action_ptr->mloaderimage);
+		xfree(bg_record->mloaderimage);
+		bg_record->mloaderimage = xstrdup(bg_action_ptr->mloaderimage);
+		rc = 1;
+	}
 
-	if (rc) {
+	if (rc || bg_action_ptr->reboot) {
 		bg_record->modifying = 1;
 
+		/* Increment free_cnt to make sure we don't loose this
+		 * block since bg_free_block will unlock block_state_mutex.
+		 */
+		bg_record->free_cnt++;
 		bg_free_block(bg_record, 1, 1);
+		bg_record->free_cnt--;
 
-		/* Since bg_free_block will unlock block_state_mutex
-		   we need to make sure the block we want is still
-		   around.  Failure will unlock block_state_mutex so
-		   no need to unlock before return.
-		*/
-		if (!_make_sure_block_still_exists(bg_action_ptr, bg_record))
-			return;
-#ifdef HAVE_BG_FILES
+#if defined HAVE_BG_FILES && defined HAVE_BG_L_P
 #ifdef HAVE_BGL
-		if ((rc = bridge_modify_block(bg_record->bg_block_id,
+		if ((rc = bridge_block_modify(bg_record->bg_block_id,
 					      RM_MODIFY_BlrtsImg,
 					      bg_record->blrtsimage))
-		    != STATUS_OK)
-			error("bridge_modify_block(RM_MODIFY_BlrtsImg): %s",
+		    != SLURM_SUCCESS)
+			error("bridge_block_modify(RM_MODIFY_BlrtsImg): %s",
 			      bg_err_str(rc));
 
-		if ((rc = bridge_modify_block(bg_record->bg_block_id,
+		if ((rc = bridge_block_modify(bg_record->bg_block_id,
 					      RM_MODIFY_LinuxImg,
 					      bg_record->linuximage))
-		    != STATUS_OK)
-			error("bridge_modify_block(RM_MODIFY_LinuxImg): %s",
+		    != SLURM_SUCCESS)
+			error("bridge_block_modify(RM_MODIFY_LinuxImg): %s",
 			      bg_err_str(rc));
 
-		if ((rc = bridge_modify_block(bg_record->bg_block_id,
+		if ((rc = bridge_block_modify(bg_record->bg_block_id,
 					      RM_MODIFY_RamdiskImg,
 					      bg_record->ramdiskimage))
-		    != STATUS_OK)
-			error("bridge_modify_block(RM_MODIFY_RamdiskImg): %s",
+		    != SLURM_SUCCESS)
+			error("bridge_block_modify(RM_MODIFY_RamdiskImg): %s",
 			      bg_err_str(rc));
 
-#else
-		if ((rc = bridge_modify_block(bg_record->bg_block_id,
+#elif defined HAVE_BGP
+		if ((rc = bridge_block_modify(bg_record->bg_block_id,
 					      RM_MODIFY_CnloadImg,
 					      bg_record->linuximage))
-		    != STATUS_OK)
-			error("bridge_modify_block(RM_MODIFY_CnloadImg): %s",
+		    != SLURM_SUCCESS)
+			error("bridge_block_modify(RM_MODIFY_CnloadImg): %s",
 			      bg_err_str(rc));
 
-		if ((rc = bridge_modify_block(bg_record->bg_block_id,
+		if ((rc = bridge_block_modify(bg_record->bg_block_id,
 					      RM_MODIFY_IoloadImg,
 					      bg_record->ramdiskimage))
-		    != STATUS_OK)
-			error("bridge_modify_block(RM_MODIFY_IoloadImg): %s",
+		    != SLURM_SUCCESS)
+			error("bridge_block_modify(RM_MODIFY_IoloadImg): %s",
 			      bg_err_str(rc));
 
-		if (bg_action_ptr->conn_type > SELECT_SMALL) {
+		if (bg_action_ptr->conn_type[0] > SELECT_SMALL) {
 			char *conn_type = NULL;
-			switch(bg_action_ptr->conn_type) {
+			switch(bg_action_ptr->conn_type[0]) {
 			case SELECT_HTC_S:
 				conn_type = "s";
 				break;
@@ -812,57 +446,59 @@ static void _start_agent(bg_action_t *bg_action_ptr)
 			}
 			/* the option has to be set before the pool can be
 			   set */
-			if ((rc = bridge_modify_block(
+			if ((rc = bridge_block_modify(
 				     bg_record->bg_block_id,
 				     RM_MODIFY_Options,
-				     conn_type)) != STATUS_OK)
+				     conn_type)) != SLURM_SUCCESS)
 				error("bridge_set_data(RM_MODIFY_Options): %s",
 				      bg_err_str(rc));
 		}
 #endif
-		if ((rc = bridge_modify_block(bg_record->bg_block_id,
+		if ((rc = bridge_block_modify(bg_record->bg_block_id,
 					      RM_MODIFY_MloaderImg,
 					      bg_record->mloaderimage))
-		    != STATUS_OK)
-			error("bridge_modify_block(RM_MODIFY_MloaderImg): %s",
+		    != SLURM_SUCCESS)
+			error("bridge_block_modify(RM_MODIFY_MloaderImg): %s",
 			      bg_err_str(rc));
 
 #endif
 		bg_record->modifying = 0;
-	} else if (bg_action_ptr->reboot) {
-		bg_record->modifying = 1;
-
-		bg_free_block(bg_record, 1, 1);
+	}
 
-		/* Since bg_free_block will unlock block_state_mutex
-		   we need to make sure the block we want is still
-		   around.  Failure will unlock block_state_mutex so
-		   no need to unlock before return.
-		*/
-		if (!_make_sure_block_still_exists(bg_action_ptr, bg_record))
-			return;
+	if (bg_record->state == BG_BLOCK_FREE) {
+		if ((rc = bridge_block_boot(bg_record)) != SLURM_SUCCESS) {
+			char reason[200];
 
-		bg_record->modifying = 0;
-	}
+			bg_record->boot_state = 0;
+			bg_record->boot_count = 0;
 
-	if (bg_record->state == RM_PARTITION_FREE) {
-		if ((rc = boot_block(bg_record)) != SLURM_SUCCESS) {
-			/* Since boot_block could unlock block_state_mutex
-			   on error we need to make sure the block we
-			   want is still around.  Failure will unlock
-			   block_state_mutex so no need to unlock
-			   before return.
-			*/
-			if (!_make_sure_block_still_exists(bg_action_ptr,
-							   bg_record))
-				return;
-			_reset_block(bg_record);
+			if (rc == BG_ERROR_INVALID_STATE)
+				snprintf(reason, sizeof(reason),
+					 "Block %s is in an incompatible "
+					 "state.  This usually means "
+					 "hardware is allocated "
+					 "by another block (maybe outside "
+					 "of SLURM).",
+					 bg_record->bg_block_id);
+			else
+				snprintf(reason, sizeof(reason),
+					 "Couldn't boot block %s: %s",
+					 bg_record->bg_block_id,
+					 bg_err_str(rc));
 			slurm_mutex_unlock(&block_state_mutex);
-			bg_requeue_job(bg_action_ptr->job_ptr->job_id, 1);
+			requeue_and_error(bg_record, reason);
 			return;
 		}
-	} else if (bg_record->state == RM_PARTITION_CONFIGURING)
+	} else if (bg_record->state == BG_BLOCK_BOOTING) {
+#ifdef HAVE_BG_FILES
 		bg_record->boot_state = 1;
+#else
+		if (!block_ptr_exist_in_list(bg_lists->booted, bg_record))
+			list_push(bg_lists->booted, bg_record);
+		bg_record->state = BG_BLOCK_INITED;
+		last_bg_update = time(NULL);
+#endif
+	}
 
 
 	if (bg_record->job_running <= NO_JOB_RUNNING) {
@@ -878,12 +514,11 @@ static void _start_agent(bg_action_t *bg_action_ptr)
 	*/
 	/* bg_record->boot_count = 0; */
 	xfree(bg_record->target_name);
-	bg_record->target_name =
-		uid_to_string(bg_action_ptr->job_ptr->user_id);
+	bg_record->target_name = uid_to_string(bg_action_ptr->job_ptr->user_id);
 	debug("setting the target_name for Block %s to %s",
 	      bg_record->bg_block_id, bg_record->target_name);
 
-	if (bg_record->state == RM_PARTITION_READY) {
+	if (bg_record->state == BG_BLOCK_INITED) {
 		debug("block %s is ready.", bg_record->bg_block_id);
 		set_user_rc = set_block_user(bg_record);
 		if (bg_action_ptr->job_ptr) {
@@ -910,44 +545,6 @@ static void _start_agent(bg_action_t *bg_action_ptr)
 	}
 }
 
-/* Perform job termination work */
-static void _term_agent(bg_action_t *bg_action_ptr)
-{
-	int jobs = 0;
-	rm_job_list_t *job_list = NULL;
-
-#ifdef HAVE_BG_FILES
-	int live_states, rc;
-
-	debug2("getting the job info");
-	live_states = JOB_ALL_FLAG
-		& (~JOB_TERMINATED_FLAG)
-		& (~JOB_KILLED_FLAG)
-		& (~JOB_ERROR_FLAG);
-
-	if ((rc = bridge_get_jobs(live_states, &job_list)) != STATUS_OK) {
-		error("bridge_get_jobs(): %s", bg_err_str(rc));
-
-		return;
-	}
-
-	if ((rc = bridge_get_data(job_list, RM_JobListSize, &jobs))
-	    != STATUS_OK) {
-		error("bridge_get_data(RM_JobListSize): %s", bg_err_str(rc));
-		jobs = 0;
-	}
-	debug2("job count %d",jobs);
-#endif
-	_remove_jobs_on_block_and_reset(job_list, jobs,
-					bg_action_ptr->bg_block_id);
-
-#ifdef HAVE_BG_FILES
-	if ((rc = bridge_free_job_list(job_list)) != STATUS_OK)
-		error("bridge_free_job_list(): %s", bg_err_str(rc));
-#endif
-
-}
-
 static void *_block_agent(void *args)
 {
 	bg_action_t *bg_action_ptr = (bg_action_t *)args;
@@ -955,7 +552,7 @@ static void *_block_agent(void *args)
 	if (bg_action_ptr->op == START_OP)
 		_start_agent(bg_action_ptr);
 	else if (bg_action_ptr->op == TERM_OP)
-		_term_agent(bg_action_ptr);
+		bridge_block_post_job(bg_action_ptr->bg_block_id);
 	else if (bg_action_ptr->op == SYNC_OP)
 		_sync_agent(bg_action_ptr);
 	_destroy_bg_action(bg_action_ptr);
@@ -988,60 +585,59 @@ static void _block_op(bg_action_t *bg_action_ptr)
 }
 
 
-/* get a list of all BG blocks with users */
+/* get a list of all BG blocks with users block_state_mutex must be
+ * unlocked before entering here. */
 static List _get_all_allocated_blocks(void)
 {
 	List ret_list = list_create(destroy_bg_record);
 	ListIterator itr;
-	bg_record_t *block_ptr = NULL;
-	bg_record_t *str_ptr = NULL;
+	bg_record_t *bg_record = NULL;
+	bg_record_t *rm_record = NULL;
 
 	if (!ret_list)
 		fatal("malloc error");
 
-	if (bg_lists->main) {
-		itr = list_iterator_create(bg_lists->main);
-		while ((block_ptr = (bg_record_t *) list_next(itr))) {
-			if ((block_ptr->user_name == NULL)
-			    ||  (block_ptr->user_name[0] == '\0')
-			    ||  (block_ptr->bg_block_id == NULL)
-			    ||  (block_ptr->bg_block_id[0] == '0'))
-				continue;
-			str_ptr = xmalloc(sizeof(bg_record_t));
-			str_ptr->magic = BLOCK_MAGIC;
-			str_ptr->bg_block_id = xstrdup(block_ptr->bg_block_id);
-			str_ptr->nodes = xstrdup(block_ptr->nodes);
-
-			list_append(ret_list, str_ptr);
-		}
-		list_iterator_destroy(itr);
-	} else {
-		error("_get_all_allocated_blocks: no bg_lists->main");
+	xassert(bg_lists->main);
+	slurm_mutex_lock(&block_state_mutex);
+	itr = list_iterator_create(bg_lists->main);
+	while ((bg_record = list_next(itr))) {
+		if ((bg_record->magic != BLOCK_MAGIC) || bg_record->free_cnt
+		    || !bg_record->user_name || !bg_record->bg_block_id)
+			continue;
+		rm_record = xmalloc(sizeof(bg_record_t));
+		rm_record->magic = BLOCK_MAGIC;
+		rm_record->bg_block_id =
+			xstrdup(bg_record->bg_block_id);
+		rm_record->mp_str = xstrdup(bg_record->mp_str);
+		list_append(ret_list, rm_record);
 	}
+	list_iterator_destroy(itr);
+
+	slurm_mutex_unlock(&block_state_mutex);
 
 	return ret_list;
 }
 
 /* remove a BG block from the given list */
-static int _excise_block(List block_list, pm_partition_id_t bg_block_id,
+static int _excise_block(List block_list, char *bg_block_id,
 			 char *nodes)
 {
 	int rc = SLURM_SUCCESS;
 	ListIterator iter;
-	bg_record_t *block = NULL;
+	bg_record_t *bg_record = NULL;
 
 	if (block_list) {
 		iter = list_iterator_create(block_list);
 		xassert(iter);
-		while ((block = list_next(iter))) {
+		while ((bg_record = list_next(iter))) {
 			rc = SLURM_ERROR;
-			if (strcmp(block->bg_block_id, bg_block_id))
+			if (strcmp(bg_record->bg_block_id, bg_block_id))
 				continue;
-			if (strcmp(block->nodes, nodes)) {
+			if (strcmp(bg_record->mp_str, nodes)) {
 				/* changed bgblock */
 				error("bg_block_id:%s old_nodes:%s "
 				      "new_nodes:%s",
-				      bg_block_id, nodes, block->nodes);
+				      bg_block_id, nodes, bg_record->mp_str);
 				break;
 			}
 
@@ -1068,7 +664,7 @@ static int _excise_block(List block_list, pm_partition_id_t bg_block_id,
  * many seconds. Do not call from slurmctld  or any other entity that
  * can not wait.
  */
-int term_jobs_on_block(pm_partition_id_t bg_block_id)
+int term_jobs_on_block(char *bg_block_id)
 {
 	int rc = SLURM_SUCCESS;
 	bg_action_t *bg_action_ptr;
@@ -1107,7 +703,14 @@ extern int start_job(struct job_record *job_ptr)
 	get_select_jobinfo(job_ptr->select_jobinfo->data,
 			   SELECT_JOBDATA_REBOOT,
 			   &(bg_action_ptr->reboot));
-#ifdef HAVE_BGL
+	get_select_jobinfo(job_ptr->select_jobinfo->data,
+			   SELECT_JOBDATA_CONN_TYPE,
+			   &(bg_action_ptr->conn_type));
+	get_select_jobinfo(job_ptr->select_jobinfo->data,
+			   SELECT_JOBDATA_MLOADER_IMAGE,
+			   &(bg_action_ptr->mloaderimage));
+#ifdef HAVE_BG_L_P
+# ifdef HAVE_BGL
 	get_select_jobinfo(job_ptr->select_jobinfo->data,
 			   SELECT_JOBDATA_BLRTS_IMAGE,
 			   &(bg_action_ptr->blrtsimage));
@@ -1115,15 +718,14 @@ extern int start_job(struct job_record *job_ptr)
 		bg_action_ptr->blrtsimage =
 			xstrdup(bg_conf->default_blrtsimage);
 		set_select_jobinfo(job_ptr->select_jobinfo->data,
-					    SELECT_JOBDATA_BLRTS_IMAGE,
-					    bg_action_ptr->blrtsimage);
+				   SELECT_JOBDATA_BLRTS_IMAGE,
+				   bg_action_ptr->blrtsimage);
 	}
-#else
+# elif defined HAVE_BGP
 	get_select_jobinfo(job_ptr->select_jobinfo->data,
 			   SELECT_JOBDATA_CONN_TYPE,
 			   &(bg_action_ptr->conn_type));
-#endif
-
+# endif
 	get_select_jobinfo(job_ptr->select_jobinfo->data,
 			   SELECT_JOBDATA_LINUX_IMAGE,
 			   &(bg_action_ptr->linuximage));
@@ -1131,19 +733,10 @@ extern int start_job(struct job_record *job_ptr)
 		bg_action_ptr->linuximage =
 			xstrdup(bg_conf->default_linuximage);
 		set_select_jobinfo(job_ptr->select_jobinfo->data,
-					    SELECT_JOBDATA_LINUX_IMAGE,
-					    bg_action_ptr->linuximage);
-	}
-	get_select_jobinfo(job_ptr->select_jobinfo->data,
-			   SELECT_JOBDATA_MLOADER_IMAGE,
-			   &(bg_action_ptr->mloaderimage));
-	if (!bg_action_ptr->mloaderimage) {
-		bg_action_ptr->mloaderimage =
-			xstrdup(bg_conf->default_mloaderimage);
-		set_select_jobinfo(job_ptr->select_jobinfo->data,
-					    SELECT_JOBDATA_MLOADER_IMAGE,
-					    bg_action_ptr->mloaderimage);
+				   SELECT_JOBDATA_LINUX_IMAGE,
+				   bg_action_ptr->linuximage);
 	}
+
 	get_select_jobinfo(job_ptr->select_jobinfo->data,
 			   SELECT_JOBDATA_RAMDISK_IMAGE,
 			   &(bg_action_ptr->ramdiskimage));
@@ -1151,8 +744,17 @@ extern int start_job(struct job_record *job_ptr)
 		bg_action_ptr->ramdiskimage =
 			xstrdup(bg_conf->default_ramdiskimage);
 		set_select_jobinfo(job_ptr->select_jobinfo->data,
-					    SELECT_JOBDATA_RAMDISK_IMAGE,
-					    bg_action_ptr->ramdiskimage);
+				   SELECT_JOBDATA_RAMDISK_IMAGE,
+				   bg_action_ptr->ramdiskimage);
+	}
+
+#endif
+	if (!bg_action_ptr->mloaderimage) {
+		bg_action_ptr->mloaderimage =
+			xstrdup(bg_conf->default_mloaderimage);
+		set_select_jobinfo(job_ptr->select_jobinfo->data,
+				   SELECT_JOBDATA_MLOADER_IMAGE,
+				   bg_action_ptr->mloaderimage);
 	}
 
 	slurm_mutex_lock(&block_state_mutex);
@@ -1232,85 +834,85 @@ extern int sync_jobs(List job_list)
 		return SLURM_SUCCESS;
 	run_already = true;
 
+	if (!job_list) {
+		error("sync_jobs: no job_list");
+		return SLURM_ERROR;
+	}
 	/* Insure that all running jobs own the specified block */
 	block_list = _get_all_allocated_blocks();
-	if (job_list) {
-		job_iterator = list_iterator_create(job_list);
-		while ((job_ptr = (struct job_record *)
-			list_next(job_iterator))) {
-			bool good_block = true;
-			if (!IS_JOB_RUNNING(job_ptr))
-				continue;
-
-			bg_action_ptr = xmalloc(sizeof(bg_action_t));
-			bg_action_ptr->op = SYNC_OP;
-			bg_action_ptr->job_ptr = job_ptr;
+	job_iterator = list_iterator_create(job_list);
+	while ((job_ptr = list_next(job_iterator))) {
+		bool good_block = true;
+		if (!IS_JOB_RUNNING(job_ptr))
+			continue;
 
-			get_select_jobinfo(job_ptr->select_jobinfo->data,
-					   SELECT_JOBDATA_BLOCK_ID,
-					   &(bg_action_ptr->bg_block_id));
-#ifdef HAVE_BGL
-			get_select_jobinfo(job_ptr->select_jobinfo->data,
-					   SELECT_JOBDATA_BLRTS_IMAGE,
-					   &(bg_action_ptr->blrtsimage));
-#else
-			get_select_jobinfo(job_ptr->select_jobinfo->data,
-					   SELECT_JOBDATA_CONN_TYPE,
-					   &(bg_action_ptr->conn_type));
+		bg_action_ptr = xmalloc(sizeof(bg_action_t));
+		bg_action_ptr->op = SYNC_OP;
+		bg_action_ptr->job_ptr = job_ptr;
+
+		get_select_jobinfo(job_ptr->select_jobinfo->data,
+				   SELECT_JOBDATA_BLOCK_ID,
+				   &(bg_action_ptr->bg_block_id));
+#ifdef HAVE_BG_L_P
+# ifdef HAVE_BGL
+		get_select_jobinfo(job_ptr->select_jobinfo->data,
+				   SELECT_JOBDATA_BLRTS_IMAGE,
+				   &(bg_action_ptr->blrtsimage));
+# else
+		get_select_jobinfo(job_ptr->select_jobinfo->data,
+				   SELECT_JOBDATA_CONN_TYPE,
+				   &(bg_action_ptr->conn_type));
+# endif
+		get_select_jobinfo(job_ptr->select_jobinfo->data,
+				   SELECT_JOBDATA_LINUX_IMAGE,
+				   &(bg_action_ptr->linuximage));
+		get_select_jobinfo(job_ptr->select_jobinfo->data,
+				   SELECT_JOBDATA_RAMDISK_IMAGE,
+				   &(bg_action_ptr->ramdiskimage));
 #endif
-			get_select_jobinfo(job_ptr->select_jobinfo->data,
-					   SELECT_JOBDATA_LINUX_IMAGE,
-					   &(bg_action_ptr->linuximage));
-			get_select_jobinfo(job_ptr->select_jobinfo->data,
-					   SELECT_JOBDATA_MLOADER_IMAGE,
-					   &(bg_action_ptr->mloaderimage));
-			get_select_jobinfo(job_ptr->select_jobinfo->data,
-					   SELECT_JOBDATA_RAMDISK_IMAGE,
-					   &(bg_action_ptr->ramdiskimage));
-
-			if (bg_action_ptr->bg_block_id == NULL) {
-				error("Running job %u has bgblock==NULL",
-				      job_ptr->job_id);
-				good_block = false;
-			} else if (job_ptr->nodes == NULL) {
-				error("Running job %u has nodes==NULL",
-				      job_ptr->job_id);
-				good_block = false;
-			} else if (_excise_block(block_list,
-						 bg_action_ptr->bg_block_id,
-						 job_ptr->nodes)
-				   != SLURM_SUCCESS) {
-				error("Kill job %u belongs to defunct "
-				      "bgblock %s",
-				      job_ptr->job_id,
-				      bg_action_ptr->bg_block_id);
-				good_block = false;
-			}
-			if (!good_block) {
-				job_ptr->job_state = JOB_FAILED
-					| JOB_COMPLETING;
-				job_ptr->end_time = time(NULL);
-				last_job_update = time(NULL);
-				_destroy_bg_action(bg_action_ptr);
-				continue;
-			}
-
-			debug3("Queue sync of job %u in BG block %s "
-			       "ending at %ld",
-			       job_ptr->job_id,
-			       bg_action_ptr->bg_block_id,
-			       job_ptr->end_time);
-			_block_op(bg_action_ptr);
+		get_select_jobinfo(job_ptr->select_jobinfo->data,
+				   SELECT_JOBDATA_MLOADER_IMAGE,
+				   &(bg_action_ptr->mloaderimage));
+
+		if (bg_action_ptr->bg_block_id == NULL) {
+			error("Running job %u has bgblock==NULL",
+			      job_ptr->job_id);
+			good_block = false;
+		} else if (job_ptr->nodes == NULL) {
+			error("Running job %u has nodes==NULL",
+			      job_ptr->job_id);
+			good_block = false;
+		} else if (_excise_block(block_list,
+					 bg_action_ptr->bg_block_id,
+					 job_ptr->nodes)
+			   != SLURM_SUCCESS) {
+			error("Kill job %u belongs to defunct "
+			      "bgblock %s",
+			      job_ptr->job_id,
+			      bg_action_ptr->bg_block_id);
+			good_block = false;
 		}
-		list_iterator_destroy(job_iterator);
-	} else {
-		error("sync_jobs: no job_list");
-		list_destroy(block_list);
-		return SLURM_ERROR;
+		if (!good_block) {
+			job_ptr->job_state = JOB_FAILED
+				| JOB_COMPLETING;
+			job_ptr->end_time = time(NULL);
+			last_job_update = time(NULL);
+			_destroy_bg_action(bg_action_ptr);
+			continue;
+		}
+
+		debug3("Queue sync of job %u in BG block %s "
+		       "ending at %ld",
+		       job_ptr->job_id,
+		       bg_action_ptr->bg_block_id,
+		       job_ptr->end_time);
+		_block_op(bg_action_ptr);
 	}
+	list_iterator_destroy(job_iterator);
+
 	/* Insure that all other blocks are free of users */
 	if (block_list) {
-		_reset_block_list(block_list);
+		bridge_reset_block_list(block_list);
 		list_destroy(block_list);
 	} else {
 		/* this should never happen,
@@ -1320,68 +922,3 @@ extern int sync_jobs(List job_list)
 	}
 	return SLURM_SUCCESS;
 }
-
-/*
- * Boot a block. Block state expected to be FREE upon entry.
- * NOTE: This function does not wait for the boot to complete.
- * the slurm prolog script needs to perform the waiting.
- * NOTE: block_state_mutex needs to be locked before entering.
- */
-extern int boot_block(bg_record_t *bg_record)
-{
-#ifdef HAVE_BG_FILES
-	int rc;
-	if (bg_record->magic != BLOCK_MAGIC) {
-		error("boot_block: magic was bad");
-		return SLURM_ERROR;
-	}
-
-	if ((rc = bridge_set_block_owner(bg_record->bg_block_id,
-					 bg_conf->slurm_user_name))
-	    != STATUS_OK) {
-		error("bridge_set_block_owner(%s,%s): %s",
-		      bg_record->bg_block_id,
-		      bg_conf->slurm_user_name,
-		      bg_err_str(rc));
-		return SLURM_ERROR;
-	}
-
-	info("Booting block %s", bg_record->bg_block_id);
-	if ((rc = bridge_create_block(bg_record->bg_block_id))
-	    != STATUS_OK) {
-		error("bridge_create_block(%s): %s",
-		      bg_record->bg_block_id, bg_err_str(rc));
-		if (rc == INCOMPATIBLE_STATE) {
-			char reason[200];
-			snprintf(reason, sizeof(reason),
-				 "boot_block: "
-				 "Block %s is in an incompatible state.  "
-				 "This usually means hardware is allocated "
-				 "by another block (maybe outside of SLURM).",
-				 bg_record->bg_block_id);
-			bg_record->boot_state = 0;
-			bg_record->boot_count = 0;
-			slurm_mutex_unlock(&block_state_mutex);
-			requeue_and_error(bg_record, reason);
-			slurm_mutex_lock(&block_state_mutex);
-		}
-		return SLURM_ERROR;
-	}
-
-	if (!block_ptr_exist_in_list(bg_lists->booted, bg_record))
-		list_push(bg_lists->booted, bg_record);
-	/* Set this here just to make sure we know we are suppose to
-	   be booting.  Just incase the block goes free before we
-	   notice we are configuring.
-	*/
-	bg_record->boot_state = 1;
-#else
-	if (!block_ptr_exist_in_list(bg_lists->booted, bg_record))
-		list_push(bg_lists->booted, bg_record);
-	bg_record->state = RM_PARTITION_READY;
-	last_bg_update = time(NULL);
-#endif
-
-
-	return SLURM_SUCCESS;
-}
diff --git a/src/plugins/select/bluegene/plugin/bg_job_run.h b/src/plugins/select/bluegene/bg_job_run.h
similarity index 90%
rename from src/plugins/select/bluegene/plugin/bg_job_run.h
rename to src/plugins/select/bluegene/bg_job_run.h
index 4cb2f41ea..ff9411248 100644
--- a/src/plugins/select/bluegene/plugin/bg_job_run.h
+++ b/src/plugins/select/bluegene/bg_job_run.h
@@ -7,7 +7,7 @@
  *  Written by Morris Jette <jette1@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -40,13 +40,7 @@
 #define _BG_JOB_RUN_H_
 
 #include "src/slurmctld/slurmctld.h"
-
-/*
- * Boot a block. Partition state expected to be FREE upon entry.
- * NOTE: This function does not wait for the boot to complete.
- * the slurm prolog script needs to perform the waiting.
- */
-extern int boot_block(bg_record_t *bg_record);
+#include "bg_record_functions.h"
 
 /*
  * Perform any setup required to initiate a job
@@ -85,6 +79,6 @@ extern int term_job(struct job_record *job_ptr);
  * NOTE: This happens when new partitions are created and we
  * need to clean up jobs on them.
  */
-extern int term_jobs_on_block(pm_partition_id_t bg_block_id);
+extern int term_jobs_on_block(char *bg_block_id);
 
 #endif /* _BG_JOB_RUN_H_ */
diff --git a/src/plugins/select/bluegene/bg_list_functions.c b/src/plugins/select/bluegene/bg_list_functions.c
new file mode 100644
index 000000000..b9b9c58fe
--- /dev/null
+++ b/src/plugins/select/bluegene/bg_list_functions.c
@@ -0,0 +1,224 @@
+/*****************************************************************************\
+ *  bg_list_functions.c - header for dealing with the lists that
+ *                        contain bg_records.
+ *
+ *  $Id: bg_list_functions.c 12954 2008-01-04 20:37:49Z da $
+ *****************************************************************************
+ *  Copyright (C) 2011 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Danny Auble <da@llnl.gov>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#include "bg_core.h"
+
+/* see if a record already of like bitmaps exists in a list */
+extern bg_record_t *block_exist_in_list(List my_list, bg_record_t *bg_record)
+{
+	ListIterator itr;
+	bg_record_t *found_record = NULL;
+
+	if (!my_list || !bg_record)
+		return NULL;
+
+	itr = list_iterator_create(my_list);
+	while ((found_record = list_next(itr))) {
+		if (found_record->magic != BLOCK_MAGIC)
+			continue;
+		/* check for full node bitmap compare */
+		if (bit_equal(bg_record->mp_bitmap, found_record->mp_bitmap)
+		    && bit_equal(bg_record->ionode_bitmap,
+				 found_record->ionode_bitmap)) {
+			/* now make sure the conn_type is the same for
+			   regular sized blocks */
+			if (bg_record->cnode_cnt >= bg_conf->mp_cnode_cnt) {
+				int dim;
+				for (dim=0; dim<SYSTEM_DIMENSIONS; dim++) {
+					if (bg_record->conn_type[dim]
+					    != found_record->conn_type[dim])
+						break;
+				}
+				if (dim != SYSTEM_DIMENSIONS)
+					continue;
+			}
+
+			if (bg_record->ionode_str)
+				debug("This block %s[%s] "
+				      "is already in the list %s",
+				      bg_record->mp_str,
+				      bg_record->ionode_str,
+				      found_record->bg_block_id);
+			else
+				debug("This block %s "
+				      "is already in the list %s",
+				      bg_record->mp_str,
+				      found_record->bg_block_id);
+			break;
+		}
+	}
+	list_iterator_destroy(itr);
+	return found_record;
+}
+
+/* see if the exact record already exists in a list */
+extern int block_ptr_exist_in_list(List my_list, bg_record_t *bg_record)
+{
+	ListIterator itr = NULL;
+	bg_record_t *found_record = NULL;
+	int rc = 0;
+
+	if (!my_list || !bg_record)
+		return rc;
+
+	itr = list_iterator_create(my_list);
+	while ((found_record = list_next(itr))) {
+		if (bg_record == found_record) {
+			rc = 1;
+			break;
+		}
+	}
+	list_iterator_destroy(itr);
+	return rc;
+}
+
+/* if looking at the main list this should have some nice
+ * block_state_mutex locks around it.
+ */
+extern bg_record_t *find_bg_record_in_list(List my_list,
+					   const char *bg_block_id)
+{
+	ListIterator itr;
+	bg_record_t *bg_record = NULL;
+
+	xassert(my_list);
+
+	if (!bg_block_id)
+		return NULL;
+
+	itr = list_iterator_create(my_list);
+	while ((bg_record = list_next(itr))) {
+		if (bg_record->bg_block_id && (bg_record->magic == BLOCK_MAGIC))
+			if (!strcasecmp(bg_record->bg_block_id, bg_block_id))
+				break;
+	}
+	list_iterator_destroy(itr);
+
+	if (bg_record)
+		return bg_record;
+	else
+		return NULL;
+}
+
+/* must set the protecting mutex if any before this function is called */
+
+extern int remove_from_bg_list(List my_list, bg_record_t *bg_record)
+{
+	bg_record_t *found_record = NULL;
+	ListIterator itr;
+	int rc = SLURM_ERROR;
+
+	if (!bg_record)
+		return rc;
+
+	//slurm_mutex_lock(&block_state_mutex);
+	itr = list_iterator_create(my_list);
+	while ((found_record = list_next(itr))) {
+		if (found_record->magic == BLOCK_MAGIC)
+			if (bg_record == found_record) {
+				list_remove(itr);
+				rc = SLURM_SUCCESS;
+				break;
+			}
+	}
+	list_iterator_destroy(itr);
+	//slurm_mutex_unlock(&block_state_mutex);
+
+	return rc;
+}
+
+/* This is here to remove from the orignal list when dealing with
+ * copies like above all locks need to be set.  This function does not
+ * free anything you must free it when you are done */
+extern bg_record_t *find_and_remove_org_from_bg_list(List my_list,
+						     bg_record_t *bg_record)
+{
+	ListIterator itr = list_iterator_create(my_list);
+	bg_record_t *found_record = NULL;
+
+	while ((found_record = list_next(itr))) {
+		if (found_record->magic != BLOCK_MAGIC)
+			continue;
+
+		/* check for full node bitmap compare */
+		if (bit_equal(bg_record->mp_bitmap, found_record->mp_bitmap)
+		    && bit_equal(bg_record->ionode_bitmap,
+				 found_record->ionode_bitmap)) {
+			if (!strcmp(bg_record->bg_block_id,
+				    found_record->bg_block_id)) {
+				list_remove(itr);
+				if (bg_conf->slurm_debug_flags
+				    & DEBUG_FLAG_SELECT_TYPE)
+					info("got the block");
+				break;
+			}
+		}
+	}
+	list_iterator_destroy(itr);
+	return found_record;
+}
+
+/* This is here to remove from the orignal list when dealing with
+ * copies like above all locks need to be set */
+extern bg_record_t *find_org_in_bg_list(List my_list, bg_record_t *bg_record)
+{
+	ListIterator itr = list_iterator_create(my_list);
+	bg_record_t *found_record = NULL;
+
+	while ((found_record = list_next(itr))) {
+		if (found_record->magic != BLOCK_MAGIC)
+			continue;
+		/* check for full node bitmap compare */
+		if (bit_equal(bg_record->mp_bitmap, found_record->mp_bitmap)
+		    && bit_equal(bg_record->ionode_bitmap,
+				 found_record->ionode_bitmap)) {
+
+			if (!strcmp(bg_record->bg_block_id,
+				    found_record->bg_block_id)) {
+				if (bg_conf->slurm_debug_flags
+				    & DEBUG_FLAG_SELECT_TYPE)
+					info("got the block");
+				break;
+			}
+		}
+	}
+	list_iterator_destroy(itr);
+	return found_record;
+}
diff --git a/src/plugins/select/bluegene/bg_list_functions.h b/src/plugins/select/bluegene/bg_list_functions.h
new file mode 100644
index 000000000..ff4903d1e
--- /dev/null
+++ b/src/plugins/select/bluegene/bg_list_functions.h
@@ -0,0 +1,64 @@
+/*****************************************************************************\
+ *  bg_list_functions.c - header for dealing with the lists that
+ *                        contain bg_records.
+ *
+ *  $Id: bg_list_functions.c 12954 2008-01-04 20:37:49Z da $
+ *****************************************************************************
+ *  Copyright (C) 2011 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Danny Auble <da@llnl.gov>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifndef _BRIDGE_BG_LIST_FUNCTIONS_H_
+#define _BRIDGE_BG_LIST_FUNCTIONS_H_
+
+#include "src/common/read_config.h"
+#include "src/common/parse_spec.h"
+#include "src/slurmctld/proc_req.h"
+#include "src/common/list.h"
+#include "src/common/hostlist.h"
+#include "src/common/bitstring.h"
+#include "src/common/xstring.h"
+#include "src/common/xmalloc.h"
+#include "bg_structs.h"
+
+/* see if the exact record already exists in a list */
+extern bg_record_t *block_exist_in_list(List my_list, bg_record_t *bg_record);
+extern int block_ptr_exist_in_list(List my_list, bg_record_t *bg_record);
+extern bg_record_t *find_bg_record_in_list(List my_list,
+					   const char *bg_block_id);
+extern int remove_from_bg_list(List my_list, bg_record_t *bg_record);
+extern bg_record_t *find_and_remove_org_from_bg_list(List my_list,
+						     bg_record_t *bg_record);
+extern bg_record_t *find_org_in_bg_list(List my_list, bg_record_t *bg_record);
+
+#endif
diff --git a/src/plugins/select/bluegene/plugin/nodeinfo.c b/src/plugins/select/bluegene/bg_node_info.c
similarity index 83%
rename from src/plugins/select/bluegene/plugin/nodeinfo.c
rename to src/plugins/select/bluegene/bg_node_info.c
index e5c243380..590e81f64 100644
--- a/src/plugins/select/bluegene/plugin/nodeinfo.c
+++ b/src/plugins/select/bluegene/bg_node_info.c
@@ -1,13 +1,13 @@
 /*****************************************************************************\
- *  nodeinfo.c - functions used for the select_nodeinfo_t structure
+ *  bg_node_info.c - functions used for the select_nodeinfo_t structure
  *****************************************************************************
- *  Copyright (C) 2009 Lawrence Livermore National Security.
+ *  Copyright (C) 2009-2011 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov> et. al.
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -37,10 +37,11 @@
 \*****************************************************************************/
 
 #include "src/common/slurm_xlator.h"
-#include "nodeinfo.h"
 #include "src/common/xmalloc.h"
 #include "src/common/xstring.h"
-#include "bluegene.h"
+#include "bg_core.h"
+
+static uint32_t g_bitmap_size = 0;
 
 static void _free_node_subgrp(void *object)
 {
@@ -53,7 +54,6 @@ static void _free_node_subgrp(void *object)
 	}
 }
 
-#ifdef HAVE_BG_L_P
 static node_subgrp_t *_find_subgrp(List subgrp_list, enum node_states state,
 				   uint16_t size)
 {
@@ -75,14 +75,13 @@ static node_subgrp_t *_find_subgrp(List subgrp_list, enum node_states state,
 
 	return subgrp;
 }
-#endif
 
 static int _pack_node_subgrp(node_subgrp_t *subgrp, Buf buffer,
 			     uint16_t protocol_version)
 {
 	if (protocol_version >= SLURM_2_1_PROTOCOL_VERSION) {
 		pack_bit_fmt(subgrp->bitmap, buffer);
-		pack16(subgrp->node_cnt, buffer);
+		pack16(subgrp->cnode_cnt, buffer);
 		pack16(subgrp->state, buffer);
 	}
 
@@ -115,7 +114,7 @@ static int _unpack_node_subgrp(node_subgrp_t **subgrp_pptr, Buf buffer,
 			j+=2;
 		}
 
-		safe_unpack16(&subgrp->node_cnt, buffer);
+		safe_unpack16(&subgrp->cnode_cnt, buffer);
 		safe_unpack16(&uint16_tmp, buffer);
 		subgrp->state = uint16_tmp;
 	}
@@ -127,24 +126,6 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
-/* This is defined here so we can get it on non-bluegene systems since
- * it is needed in pack/unpack functions, and bluegene.c isn't
- * compiled for non-bluegene machines, and it didn't make since to
- * compile the whole file just for this one function.
- */
-extern char *give_geo(uint16_t int_geo[SYSTEM_DIMENSIONS])
-{
-	char *geo = NULL;
-	int i;
-
-	for (i=0; i<SYSTEM_DIMENSIONS; i++) {
-		if (geo)
-			xstrcat(geo, "x");
-		xstrfmtcat(geo, "%c", alpha_num[int_geo[i]]);
-	}
-	return geo;
-}
-
 extern int select_nodeinfo_pack(select_nodeinfo_t *nodeinfo, Buf buffer,
 				uint16_t protocol_version)
 {
@@ -210,14 +191,20 @@ unpack_error:
 extern select_nodeinfo_t *select_nodeinfo_alloc(uint32_t size)
 {
 	select_nodeinfo_t *nodeinfo = xmalloc(sizeof(struct select_nodeinfo));
+	//uint32_t cluster_flags = slurmdb_setup_cluster_flags();
+
+	if (bg_conf) {
+		if (!g_bitmap_size) {
+		/* if (cluster_flags & CLUSTER_FLAG_BGQ) */
+		/* 	g_bitmap_size = bg_conf->mp_cnode_cnt; */
+		/* else */
+			g_bitmap_size = bg_conf->ionodes_per_mp;
+		}
+
+		if (!size || size == NO_VAL)
+			size = g_bitmap_size;
+	}
 
-#ifdef HAVE_BG_L_P
-	if (bg_conf && (!size || size == NO_VAL))
-		size = bg_conf->numpsets;
-#else
-	if (!size || size == NO_VAL)
-		fatal("we shouldn't be here in select_nodeinfo_alloc %u", size);
-#endif
 	nodeinfo->bitmap_size = size;
 	nodeinfo->magic = NODEINFO_MAGIC;
 	nodeinfo->subgrp_list = list_create(_free_node_subgrp);
@@ -241,16 +228,23 @@ extern int select_nodeinfo_free(select_nodeinfo_t *nodeinfo)
 
 extern int select_nodeinfo_set_all(time_t last_query_time)
 {
-#ifdef HAVE_BG_L_P
 	ListIterator itr = NULL;
 	struct node_record *node_ptr = NULL;
 	int i=0;
 	bg_record_t *bg_record = NULL;
 	static time_t last_set_all = 0;
+	//uint32_t cluster_flags = slurmdb_setup_cluster_flags();
 
 	if (!blocks_are_created)
 		return SLURM_NO_CHANGE_IN_DATA;
 
+	if (!g_bitmap_size) {
+		/* if (cluster_flags & CLUSTER_FLAG_BGQ) */
+		/* 	g_bitmap_size = bg_conf->mp_cnode_cnt; */
+		/* else */
+			g_bitmap_size = bg_conf->ionodes_per_mp;
+	}
+
 	/* only set this once when the last_bg_update is newer than
 	   the last time we set things up. */
 	if (last_set_all && (last_bg_update-1 < last_set_all)) {
@@ -273,25 +267,26 @@ extern int select_nodeinfo_set_all(time_t last_query_time)
 		xassert(nodeinfo);
 		xassert(nodeinfo->subgrp_list);
 		list_flush(nodeinfo->subgrp_list);
-		if (nodeinfo->bitmap_size != bg_conf->numpsets)
-			nodeinfo->bitmap_size = bg_conf->numpsets;
+		if (nodeinfo->bitmap_size != g_bitmap_size)
+			nodeinfo->bitmap_size = g_bitmap_size;
 	}
 	itr = list_iterator_create(bg_lists->main);
 	while ((bg_record = list_next(itr))) {
 		enum node_states state = NODE_STATE_UNKNOWN;
 		node_subgrp_t *subgrp = NULL;
 		select_nodeinfo_t *nodeinfo;
+		bitstr_t *bitmap;
 
 		/* Only mark unidle blocks */
 		if (bg_record->job_running == NO_JOB_RUNNING)
 			continue;
 
-		if (bg_record->state == RM_PARTITION_ERROR)
+		if (bg_record->state & BG_BLOCK_ERROR_FLAG)
 			state = NODE_STATE_ERROR;
 		else if (bg_record->job_running > NO_JOB_RUNNING) {
 			/* we don't need to set the allocated here
 			 * since the whole midplane is allocated */
-			if (bg_record->conn_type < SELECT_SMALL)
+			if (bg_record->conn_type[0] < SELECT_SMALL)
 				continue;
 			state = NODE_STATE_ALLOCATED;
 		} else {
@@ -300,9 +295,14 @@ extern int select_nodeinfo_set_all(time_t last_query_time)
 			      bg_block_state_string(bg_record->state));
 			continue;
 		}
-
-		for(i=0; i<node_record_count; i++) {
-			if (!bit_test(bg_record->bitmap, i))
+		/* if ((cluster_flags & CLUSTER_FLAG_BGQ) */
+		/*     && (state != NODE_STATE_ERROR)) */
+		/* 	bitmap = bg_record->cnodes_used_bitmap; */
+		/* else */
+			bitmap = bg_record->ionode_bitmap;
+
+		for (i=0; i<node_record_count; i++) {
+			if (!bit_test(bg_record->mp_bitmap, i))
 				continue;
 			node_ptr = &(node_record_table_ptr[i]);
 
@@ -311,20 +311,25 @@ extern int select_nodeinfo_set_all(time_t last_query_time)
 			xassert(nodeinfo);
 			xassert(nodeinfo->subgrp_list);
 
-			subgrp = _find_subgrp(
-				nodeinfo->subgrp_list,
-				state, bg_conf->numpsets);
-
-			if (subgrp->node_cnt < bg_conf->bp_node_cnt) {
-				if (bg_record->node_cnt
-				    < bg_conf->bp_node_cnt) {
-					bit_or(subgrp->bitmap,
-					       bg_record->ionode_bitmap);
-					subgrp->node_cnt += bg_record->node_cnt;
+			subgrp = _find_subgrp(nodeinfo->subgrp_list,
+					      state, g_bitmap_size);
+
+			if (subgrp->cnode_cnt < bg_conf->mp_cnode_cnt) {
+				/* if (cluster_flags & CLUSTER_FLAG_BGQ) { */
+				/* 	bit_or(subgrp->bitmap, bitmap); */
+				/* 	subgrp->cnode_cnt += */
+				/* 		bit_set_count(bitmap); */
+				/* } else */ if (bg_record->cnode_cnt
+					   < bg_conf->mp_cnode_cnt) {
+					bit_or(subgrp->bitmap, bitmap);
+					subgrp->cnode_cnt +=
+						bg_record->cnode_cnt;
 				} else {
 					bit_nset(subgrp->bitmap,
-						 0, (bg_conf->numpsets-1));
-					subgrp->node_cnt = bg_conf->bp_node_cnt;
+						 0,
+						 (g_bitmap_size-1));
+					subgrp->cnode_cnt =
+						bg_conf->mp_cnode_cnt;
 				}
 			}
 		}
@@ -333,9 +338,6 @@ extern int select_nodeinfo_set_all(time_t last_query_time)
 	slurm_mutex_unlock(&block_state_mutex);
 
 	return SLURM_SUCCESS;
-#else
-	return SLURM_ERROR;
-#endif
 }
 
 extern int select_nodeinfo_get(select_nodeinfo_t *nodeinfo,
@@ -377,7 +379,7 @@ extern int select_nodeinfo_get(select_nodeinfo_t *nodeinfo,
 		itr = list_iterator_create(nodeinfo->subgrp_list);
 		while ((subgrp = list_next(itr))) {
 			if (subgrp->state == state) {
-				*uint16 = subgrp->node_cnt;
+				*uint16 = subgrp->cnode_cnt;
 				break;
 			}
 		}
diff --git a/src/plugins/select/bluegene/plugin/nodeinfo.h b/src/plugins/select/bluegene/bg_node_info.h
similarity index 91%
rename from src/plugins/select/bluegene/plugin/nodeinfo.h
rename to src/plugins/select/bluegene/bg_node_info.h
index bcee05de1..2cf1912d9 100644
--- a/src/plugins/select/bluegene/plugin/nodeinfo.h
+++ b/src/plugins/select/bluegene/bg_node_info.h
@@ -1,14 +1,14 @@
 /*****************************************************************************\
- *  nodeinfo.h - definitions of functions used for the select_nodeinfo_t
+ *  bg_node_info.h - definitions of functions used for the select_nodeinfo_t
  *              structure
  *****************************************************************************
- *  Copyright (C) 2009 Lawrence Livermore National Security.
+ *  Copyright (C) 2009-2011 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov> et. al.
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -41,24 +41,24 @@
 #define _HAVE_SELECT_NODEINFO_H
 
 #include "src/common/node_select.h"
+#include "ba_common.h"
 #define NODEINFO_MAGIC 0x85ac
 
 typedef struct {
 	bitstr_t *bitmap;
+	uint16_t cnode_cnt;
 	int *inx;
-	uint16_t node_cnt;
 	enum node_states state;
 	char *str;
 } node_subgrp_t;
 
 struct select_nodeinfo {
+	ba_mp_t *ba_mp;
 	uint16_t bitmap_size;
 	uint16_t magic;		/* magic number */
 	List subgrp_list;
 };
 
-extern char *give_geo(uint16_t int_geo[SYSTEM_DIMENSIONS]);
-
 extern int select_nodeinfo_pack(select_nodeinfo_t *nodeinfo, Buf buffer,
 				uint16_t protocol_version);
 
diff --git a/src/plugins/select/bluegene/bg_read_config.c b/src/plugins/select/bluegene/bg_read_config.c
new file mode 100644
index 000000000..833f2542b
--- /dev/null
+++ b/src/plugins/select/bluegene/bg_read_config.c
@@ -0,0 +1,851 @@
+/*****************************************************************************\
+ *  bg_read_config.c
+ *
+ *****************************************************************************
+ *  Copyright (C) 2011 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Danny Auble <da@llnl.gov>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#include "src/common/slurm_xlator.h"	/* Must be first */
+#include "bg_core.h"
+#include "bg_read_config.h"
+#include "src/common/node_select.h"
+#include "src/common/xstring.h"
+#include "src/common/uid.h"
+#include "src/common/proc_args.h"
+
+#include <stdlib.h>
+
+static s_p_options_t bg_conf_file_options[] = {
+#ifdef HAVE_BGL
+	{"BlrtsImage", S_P_STRING},
+	{"LinuxImage", S_P_STRING},
+	{"RamDiskImage", S_P_STRING},
+	{"AltBlrtsImage", S_P_ARRAY, parse_image, NULL},
+	{"AltLinuxImage", S_P_ARRAY, parse_image, NULL},
+	{"AltRamDiskImage", S_P_ARRAY, parse_image, NULL},
+#elif defined HAVE_BGP
+	{"CnloadImage", S_P_STRING},
+	{"IoloadImage", S_P_STRING},
+	{"AltCnloadImage", S_P_ARRAY, parse_image, NULL},
+	{"AltIoloadImage", S_P_ARRAY, parse_image, NULL},
+#endif
+	{"DenyPassthrough", S_P_STRING},
+	{"LayoutMode", S_P_STRING},
+	{"MloaderImage", S_P_STRING},
+	{"BridgeAPILogFile", S_P_STRING},
+	{"BridgeAPIVerbose", S_P_UINT16},
+	{"BasePartitionNodeCnt", S_P_UINT16},
+	{"NodeCardNodeCnt", S_P_UINT16},
+	{"Numpsets", S_P_UINT16},
+	{"IONodesPerMP", S_P_UINT16},
+	{"BPs", S_P_ARRAY, parse_blockreq, destroy_select_ba_request},
+	{"MPs", S_P_ARRAY, parse_blockreq, destroy_select_ba_request},
+	/* these are just going to be put into a list that will be
+	   freed later don't free them after reading them */
+	{"AltMloaderImage", S_P_ARRAY, parse_image, NULL},
+	{NULL}
+};
+
+static int _reopen_bridge_log(void)
+{
+	int rc = SLURM_SUCCESS;
+
+	if (bg_conf->bridge_api_file == NULL)
+		return rc;
+
+#if defined HAVE_BG_FILES
+	rc = bridge_set_log_params(bg_conf->bridge_api_file,
+				   bg_conf->bridge_api_verb);
+#endif
+	if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE)
+		info("Bridge api file set to %s, verbose level %d",
+		     bg_conf->bridge_api_file, bg_conf->bridge_api_verb);
+	return rc;
+}
+
+static char *_get_bg_conf(void)
+{
+	char *val = getenv("SLURM_CONF");
+	char *rc = NULL;
+	int i;
+
+	if (!val)
+		return xstrdup(BLUEGENE_CONFIG_FILE);
+
+	/* Replace file name on end of path */
+	i = strlen(val) - strlen("slurm.conf") + strlen("bluegene.conf") + 1;
+	rc = xmalloc(i);
+	strcpy(rc, val);
+	val = strrchr(rc, (int)'/');
+	if (val)	/* absolute path */
+		val++;
+	else		/* not absolute path */
+		val = rc;
+	strcpy(val, "bluegene.conf");
+	return rc;
+}
+
+static void _destroy_bitmap(void *object)
+{
+	bitstr_t *bitstr = (bitstr_t *)object;
+
+	if (bitstr) {
+		FREE_NULL_BITMAP(bitstr);
+	}
+}
+
+extern void destroy_image_group_list(void *ptr)
+{
+	image_group_t *image_group = (image_group_t *)ptr;
+	if (image_group) {
+		xfree(image_group->name);
+		xfree(image_group);
+	}
+}
+
+extern void destroy_image(void *ptr)
+{
+	image_t *n = (image_t *)ptr;
+	if (n) {
+		xfree(n->name);
+		if (n->groups) {
+			list_destroy(n->groups);
+			n->groups = NULL;
+		}
+		xfree(n);
+	}
+}
+
+extern int parse_blockreq(void **dest, slurm_parser_enum_t type,
+			  const char *key, const char *value,
+			  const char *line, char **leftover)
+{
+	s_p_options_t block_options[] = {
+		{"Type", S_P_STRING},
+		{"32CNBlocks", S_P_UINT16},
+		{"128CNBlocks", S_P_UINT16},
+#ifdef HAVE_BGL
+		{"Nodecards", S_P_UINT16},
+		{"Quarters", S_P_UINT16},
+		{"BlrtsImage", S_P_STRING},
+		{"LinuxImage", S_P_STRING},
+		{"RamDiskImage", S_P_STRING},
+#else
+		{"16CNBlocks", S_P_UINT16},
+		{"64CNBlocks", S_P_UINT16},
+		{"256CNBlocks", S_P_UINT16},
+		{"CnloadImage", S_P_STRING},
+		{"IoloadImage", S_P_STRING},
+#endif
+		{"MloaderImage", S_P_STRING},
+		{NULL}
+	};
+	s_p_hashtbl_t *tbl;
+	char *tmp = NULL;
+	select_ba_request_t *n = NULL;
+	hostlist_t hl = NULL;
+
+	tbl = s_p_hashtbl_create(block_options);
+	s_p_parse_line(tbl, *leftover, leftover);
+	if (!value) {
+		return 0;
+	}
+	n = xmalloc(sizeof(select_ba_request_t));
+	hl = hostlist_create(value);
+	n->save_name = hostlist_ranged_string_xmalloc(hl);
+	hostlist_destroy(hl);
+#ifdef HAVE_BGL
+	s_p_get_string(&n->blrtsimage, "BlrtsImage", tbl);
+	s_p_get_string(&n->linuximage, "LinuxImage", tbl);
+	s_p_get_string(&n->ramdiskimage, "RamDiskImage", tbl);
+#else
+	s_p_get_string(&n->linuximage, "CnloadImage", tbl);
+	s_p_get_string(&n->ramdiskimage, "IoloadImage", tbl);
+#endif
+	s_p_get_string(&n->mloaderimage, "MloaderImage", tbl);
+
+	s_p_get_string(&tmp, "Type", tbl);
+	if (tmp) {
+		verify_conn_type(tmp, n->conn_type);
+		xfree(tmp);
+	}
+
+	if (!s_p_get_uint16(&n->small32, "32CNBlocks", tbl)) {
+#ifdef HAVE_BGL
+		s_p_get_uint16(&n->small32, "Nodecards", tbl);
+#else
+		;
+#endif
+	}
+	if (!s_p_get_uint16(&n->small128, "128CNBlocks", tbl)) {
+#ifdef HAVE_BGL
+		s_p_get_uint16(&n->small128, "Quarters", tbl);
+#else
+		;
+#endif
+	}
+
+#ifndef HAVE_BGL
+	s_p_get_uint16(&n->small16, "16CNBlocks", tbl);
+	s_p_get_uint16(&n->small64, "64CNBlocks", tbl);
+	s_p_get_uint16(&n->small256, "256CNBlocks", tbl);
+#endif
+	if (n->small16 || n->small32 || n->small64
+	    || n->small128 || n->small256) {
+		if (n->conn_type[0] < SELECT_SMALL) {
+			error("Block def on midplane(s) %s is "
+			      "asking for small blocks but given "
+			      "TYPE=%s, setting it to Small",
+			      n->save_name, conn_type_string(n->conn_type[0]));
+			n->conn_type[0] = SELECT_SMALL;
+		}
+	} else {
+		if (n->conn_type[0] == (uint16_t)NO_VAL) {
+			n->conn_type[0] = SELECT_TORUS;
+		} else if (n->conn_type[0] >= SELECT_SMALL) {
+			error("Block def on midplane(s) %s is given "
+			      "TYPE=%s but isn't asking for any small "
+			      "blocks.  Giving it Torus.",
+			      n->save_name, conn_type_string(n->conn_type[0]));
+			n->conn_type[0] = SELECT_TORUS;
+		}
+#ifndef HAVE_BG_L_P
+		int i;
+		int first_conn_type = n->conn_type[0];
+
+		for (i=1; i<SYSTEM_DIMENSIONS; i++) {
+			if (n->conn_type[i] == (uint16_t)NO_VAL)
+				n->conn_type[i] = first_conn_type;
+			else if (n->conn_type[i] >= SELECT_SMALL) {
+				error("Block def on midplane(s) %s dim %d "
+				      "is given TYPE=%s but isn't asking "
+				      "for any small blocks.  Giving it %s.",
+				      n->save_name, i,
+				      conn_type_string(n->conn_type[i]),
+				      conn_type_string(first_conn_type));
+				n->conn_type[1] = first_conn_type;
+			}
+		}
+#endif
+	}
+	s_p_hashtbl_destroy(tbl);
+
+	*dest = (void *)n;
+	return 1;
+}
+
+extern int parse_image(void **dest, slurm_parser_enum_t type,
+		       const char *key, const char *value,
+		       const char *line, char **leftover)
+{
+	s_p_options_t image_options[] = {
+		{(char *)"GROUPS", S_P_STRING},
+		{NULL}
+	};
+	s_p_hashtbl_t *tbl = NULL;
+	char *tmp = NULL;
+	image_t *n = NULL;
+	image_group_t *image_group = NULL;
+	int i = 0, j = 0;
+
+	tbl = s_p_hashtbl_create(image_options);
+	s_p_parse_line(tbl, *leftover, leftover);
+
+	n = (image_t *)xmalloc(sizeof(image_t));
+	n->name = xstrdup(value);
+	n->def = false;
+	n->groups = list_create(destroy_image_group_list);
+	s_p_get_string(&tmp, "Groups", tbl);
+	if (tmp) {
+		for(i=0; i<(int)strlen(tmp); i++) {
+			if ((tmp[i] == ':') || (tmp[i] == ',')) {
+				image_group = (image_group_t *)
+					xmalloc(sizeof(image_group_t));
+				image_group->name = (char *)xmalloc(i-j+2);
+				snprintf(image_group->name,
+					 (i-j)+1, "%s", tmp+j);
+				gid_from_string (image_group->name,
+						 &image_group->gid);
+				list_append(n->groups, image_group);
+				j=i;
+				j++;
+			}
+		}
+		if (j != i) {
+			image_group = (image_group_t *)
+				xmalloc(sizeof(image_group_t));
+			image_group->name = (char *)xmalloc(i-j+2);
+			snprintf(image_group->name, (i-j)+1, "%s", tmp+j);
+			if (gid_from_string (image_group->name,
+			                     &image_group->gid) < 0)
+				fatal("Invalid bluegene.conf parameter "
+				      "Groups=%s",
+				      image_group->name);
+			list_append(n->groups, image_group);
+		}
+		xfree(tmp);
+	}
+	s_p_hashtbl_destroy(tbl);
+
+	*dest = (void *)n;
+	return 1;
+}
+
+/*
+ * Read and process the bluegene.conf configuration file to interpret what
+ * blocks are static/dynamic, torus/mesh, etc.
+ */
+extern int read_bg_conf(void)
+{
+	int i;
+	int count = 0;
+	s_p_hashtbl_t *tbl = NULL;
+	char *layout = NULL;
+	select_ba_request_t **blockreq_array = NULL;
+	image_t **image_array = NULL;
+	image_t *image = NULL;
+	static time_t last_config_update = (time_t) 0;
+	struct stat config_stat;
+	ListIterator itr = NULL;
+	char* bg_conf_file = NULL;
+	static int *dims = NULL;
+
+	if (!dims)
+		dims = select_g_ba_get_dims();
+
+	if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE)
+		info("Reading the bluegene.conf file");
+
+	/* check if config file has changed */
+	bg_conf_file = _get_bg_conf();
+
+	if (stat(bg_conf_file, &config_stat) < 0)
+		fatal("can't stat bluegene.conf file %s: %m", bg_conf_file);
+	if (last_config_update) {
+		_reopen_bridge_log();
+		if (last_config_update == config_stat.st_mtime) {
+			if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE)
+				info("%s unchanged", bg_conf_file);
+		} else {
+			info("Restart slurmctld for %s changes "
+			     "to take effect",
+			     bg_conf_file);
+		}
+		last_config_update = config_stat.st_mtime;
+		xfree(bg_conf_file);
+		return SLURM_SUCCESS;
+	}
+	last_config_update = config_stat.st_mtime;
+
+	/* initialization */
+	/* bg_conf defined in bg_node_alloc.h */
+	if (!(tbl = config_make_tbl(bg_conf_file)))
+		fatal("something wrong with opening/reading bluegene "
+		      "conf file");
+	xfree(bg_conf_file);
+
+#ifdef HAVE_BGL
+	if (s_p_get_array((void ***)&image_array,
+			  &count, "AltBlrtsImage", tbl)) {
+		for (i = 0; i < count; i++) {
+			list_append(bg_conf->blrts_list, image_array[i]);
+			image_array[i] = NULL;
+		}
+	}
+	if (!s_p_get_string(&bg_conf->default_blrtsimage, "BlrtsImage", tbl)) {
+		if (!list_count(bg_conf->blrts_list))
+			fatal("BlrtsImage not configured "
+			      "in bluegene.conf");
+		itr = list_iterator_create(bg_conf->blrts_list);
+		image = list_next(itr);
+		image->def = true;
+		list_iterator_destroy(itr);
+		bg_conf->default_blrtsimage = xstrdup(image->name);
+		info("Warning: using %s as the default BlrtsImage.  "
+		     "If this isn't correct please set BlrtsImage",
+		     bg_conf->default_blrtsimage);
+	} else {
+		if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE)
+			info("default BlrtsImage %s",
+			     bg_conf->default_blrtsimage);
+		image = xmalloc(sizeof(image_t));
+		image->name = xstrdup(bg_conf->default_blrtsimage);
+		image->def = true;
+		image->groups = NULL;
+		/* we want it to be first */
+		list_push(bg_conf->blrts_list, image);
+	}
+
+	if (s_p_get_array((void ***)&image_array,
+			  &count, "AltLinuxImage", tbl)) {
+		for (i = 0; i < count; i++) {
+			list_append(bg_conf->linux_list, image_array[i]);
+			image_array[i] = NULL;
+		}
+	}
+	if (!s_p_get_string(&bg_conf->default_linuximage, "LinuxImage", tbl)) {
+		if (!list_count(bg_conf->linux_list))
+			fatal("LinuxImage not configured "
+			      "in bluegene.conf");
+		itr = list_iterator_create(bg_conf->linux_list);
+		image = list_next(itr);
+		image->def = true;
+		list_iterator_destroy(itr);
+		bg_conf->default_linuximage = xstrdup(image->name);
+		info("Warning: using %s as the default LinuxImage.  "
+		     "If this isn't correct please set LinuxImage",
+		     bg_conf->default_linuximage);
+	} else {
+		if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE)
+			info("default LinuxImage %s",
+			     bg_conf->default_linuximage);
+		image = xmalloc(sizeof(image_t));
+		image->name = xstrdup(bg_conf->default_linuximage);
+		image->def = true;
+		image->groups = NULL;
+		/* we want it to be first */
+		list_push(bg_conf->linux_list, image);
+	}
+
+	if (s_p_get_array((void ***)&image_array,
+			  &count, "AltRamDiskImage", tbl)) {
+		for (i = 0; i < count; i++) {
+			list_append(bg_conf->ramdisk_list, image_array[i]);
+			image_array[i] = NULL;
+		}
+	}
+	if (!s_p_get_string(&bg_conf->default_ramdiskimage,
+			    "RamDiskImage", tbl)) {
+		if (!list_count(bg_conf->ramdisk_list))
+			fatal("RamDiskImage not configured "
+			      "in bluegene.conf");
+		itr = list_iterator_create(bg_conf->ramdisk_list);
+		image = list_next(itr);
+		image->def = true;
+		list_iterator_destroy(itr);
+		bg_conf->default_ramdiskimage = xstrdup(image->name);
+		info("Warning: using %s as the default RamDiskImage.  "
+		     "If this isn't correct please set RamDiskImage",
+		     bg_conf->default_ramdiskimage);
+	} else {
+		if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE)
+			info("default RamDiskImage %s",
+			     bg_conf->default_ramdiskimage);
+		image = xmalloc(sizeof(image_t));
+		image->name = xstrdup(bg_conf->default_ramdiskimage);
+		image->def = true;
+		image->groups = NULL;
+		/* we want it to be first */
+		list_push(bg_conf->ramdisk_list, image);
+	}
+#elif defined HAVE_BGP
+
+	if (s_p_get_array((void ***)&image_array,
+			  &count, "AltCnloadImage", tbl)) {
+		for (i = 0; i < count; i++) {
+			list_append(bg_conf->linux_list, image_array[i]);
+			image_array[i] = NULL;
+		}
+	}
+	if (!s_p_get_string(&bg_conf->default_linuximage, "CnloadImage", tbl)) {
+		if (!list_count(bg_conf->linux_list))
+			fatal("CnloadImage not configured "
+			      "in bluegene.conf");
+		itr = list_iterator_create(bg_conf->linux_list);
+		image = list_next(itr);
+		image->def = true;
+		list_iterator_destroy(itr);
+		bg_conf->default_linuximage = xstrdup(image->name);
+		info("Warning: using %s as the default CnloadImage.  "
+		     "If this isn't correct please set CnloadImage",
+		     bg_conf->default_linuximage);
+	} else {
+		if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE)
+			info("default CnloadImage %s",
+			     bg_conf->default_linuximage);
+		image = xmalloc(sizeof(image_t));
+		image->name = xstrdup(bg_conf->default_linuximage);
+		image->def = true;
+		image->groups = NULL;
+		/* we want it to be first */
+		list_push(bg_conf->linux_list, image);
+	}
+
+	if (s_p_get_array((void ***)&image_array,
+			  &count, "AltIoloadImage", tbl)) {
+		for (i = 0; i < count; i++) {
+			list_append(bg_conf->ramdisk_list, image_array[i]);
+			image_array[i] = NULL;
+		}
+	}
+	if (!s_p_get_string(&bg_conf->default_ramdiskimage,
+			    "IoloadImage", tbl)) {
+		if (!list_count(bg_conf->ramdisk_list))
+			fatal("IoloadImage not configured "
+			      "in bluegene.conf");
+		itr = list_iterator_create(bg_conf->ramdisk_list);
+		image = list_next(itr);
+		image->def = true;
+		list_iterator_destroy(itr);
+		bg_conf->default_ramdiskimage = xstrdup(image->name);
+		info("Warning: using %s as the default IoloadImage.  "
+		     "If this isn't correct please set IoloadImage",
+		     bg_conf->default_ramdiskimage);
+	} else {
+		if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE)
+			info("default IoloadImage %s",
+			     bg_conf->default_ramdiskimage);
+		image = xmalloc(sizeof(image_t));
+		image->name = xstrdup(bg_conf->default_ramdiskimage);
+		image->def = true;
+		image->groups = NULL;
+		/* we want it to be first */
+		list_push(bg_conf->ramdisk_list, image);
+	}
+
+#endif
+	if (s_p_get_array((void ***)&image_array,
+			  &count, "AltMloaderImage", tbl)) {
+		for (i = 0; i < count; i++) {
+			list_append(bg_conf->mloader_list, image_array[i]);
+			image_array[i] = NULL;
+		}
+	}
+	if (!s_p_get_string(&bg_conf->default_mloaderimage,
+			    "MloaderImage", tbl)) {
+		if (!list_count(bg_conf->mloader_list))
+			fatal("MloaderImage not configured "
+			      "in bluegene.conf");
+		itr = list_iterator_create(bg_conf->mloader_list);
+		image = list_next(itr);
+		image->def = true;
+		list_iterator_destroy(itr);
+		bg_conf->default_mloaderimage = xstrdup(image->name);
+		info("Warning: using %s as the default MloaderImage.  "
+		     "If this isn't correct please set MloaderImage",
+		     bg_conf->default_mloaderimage);
+	} else {
+		if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE)
+			info("default MloaderImage %s",
+			     bg_conf->default_mloaderimage);
+		image = xmalloc(sizeof(image_t));
+		image->name = xstrdup(bg_conf->default_mloaderimage);
+		image->def = true;
+		image->groups = NULL;
+		/* we want it to be first */
+		list_push(bg_conf->mloader_list, image);
+	}
+
+	if (!s_p_get_uint16(
+		    &bg_conf->mp_cnode_cnt, "BasePartitionNodeCnt", tbl)) {
+		error("BasePartitionNodeCnt not configured in bluegene.conf "
+		      "defaulting to 512 as BasePartitionNodeCnt");
+		bg_conf->mp_cnode_cnt = 512;
+		bg_conf->quarter_cnode_cnt = 128;
+	} else {
+		if (bg_conf->mp_cnode_cnt <= 0)
+			fatal("You should have more than 0 nodes "
+			      "per base partition");
+
+		bg_conf->quarter_cnode_cnt = bg_conf->mp_cnode_cnt/4;
+	}
+	/* bg_conf->cpus_per_mp should have already been set from the
+	 * node_init */
+	if (bg_conf->cpus_per_mp < bg_conf->mp_cnode_cnt) {
+		fatal("For some reason we have only %u cpus per mp, but "
+		      "have %u cnodes per mp.  You need at least the same "
+		      "number of cpus as you have cnodes per mp.  "
+		      "Check the NodeName Procs= "
+		      "definition in the slurm.conf.",
+		      bg_conf->cpus_per_mp, bg_conf->mp_cnode_cnt);
+	}
+
+	bg_conf->cpu_ratio = bg_conf->cpus_per_mp/bg_conf->mp_cnode_cnt;
+	if (!bg_conf->cpu_ratio)
+		fatal("We appear to have less than 1 cpu on a cnode.  "
+		      "You specified %u for BasePartitionNodeCnt "
+		      "in the blugene.conf and %u cpus "
+		      "for each node in the slurm.conf",
+		      bg_conf->mp_cnode_cnt, bg_conf->cpus_per_mp);
+
+	num_unused_cpus = 1;
+	for (i = 0; i<SYSTEM_DIMENSIONS; i++)
+		num_unused_cpus *= dims[i];
+	num_unused_cpus *= bg_conf->cpus_per_mp;
+
+	if (!s_p_get_uint16(
+		    &bg_conf->nodecard_cnode_cnt, "NodeCardNodeCnt", tbl)) {
+		error("NodeCardNodeCnt not configured in bluegene.conf "
+		      "defaulting to 32 as NodeCardNodeCnt");
+		bg_conf->nodecard_cnode_cnt = 32;
+	}
+
+	if (bg_conf->nodecard_cnode_cnt<=0)
+		fatal("You should have more than 0 nodes per nodecard");
+
+	bg_conf->mp_nodecard_cnt =
+		bg_conf->mp_cnode_cnt / bg_conf->nodecard_cnode_cnt;
+
+	if (!s_p_get_uint16(&bg_conf->ionodes_per_mp, "Numpsets", tbl))
+		fatal("Warning: Numpsets not configured in bluegene.conf");
+	if (!bg_conf->ionodes_per_mp) {
+		if (!s_p_get_uint16(&bg_conf->ionodes_per_mp,
+				    "IONodesPerMP", tbl))
+			fatal("Warning: IONodesPerMP not configured "
+			      "in bluegene.conf");
+	}
+
+#ifdef HAVE_BGQ
+	/* You can only have 16 ionodes per midplane */
+	if (bg_conf->ionodes_per_mp > bg_conf->mp_nodecard_cnt)
+		bg_conf->ionodes_per_mp = bg_conf->mp_nodecard_cnt;
+#endif
+
+	if (bg_conf->ionodes_per_mp) {
+		bitstr_t *tmp_bitmap = NULL;
+		int small_size = 1;
+
+		/* THIS IS A HACK TO MAKE A 1 NODECARD SYSTEM WORK */
+		if (bg_conf->mp_cnode_cnt == bg_conf->nodecard_cnode_cnt) {
+#ifdef HAVE_BGQ
+			bg_conf->quarter_ionode_cnt = 1;
+			bg_conf->nodecard_ionode_cnt = 1;
+#else
+			bg_conf->quarter_ionode_cnt = 2;
+			bg_conf->nodecard_ionode_cnt = 2;
+#endif
+		} else {
+			bg_conf->quarter_ionode_cnt = bg_conf->ionodes_per_mp/4;
+			bg_conf->nodecard_ionode_cnt =
+				bg_conf->quarter_ionode_cnt/4;
+		}
+
+		/* How many nodecards per ionode */
+		bg_conf->nc_ratio =
+			((double)bg_conf->mp_cnode_cnt
+			 / (double)bg_conf->nodecard_cnode_cnt)
+			/ (double)bg_conf->ionodes_per_mp;
+		/* How many ionodes per nodecard */
+		bg_conf->io_ratio =
+			(double)bg_conf->ionodes_per_mp /
+			((double)bg_conf->mp_cnode_cnt
+			 / (double)bg_conf->nodecard_cnode_cnt);
+
+		/* How many cnodes per ionode */
+		bg_conf->ionode_cnode_cnt =
+			bg_conf->nodecard_cnode_cnt * bg_conf->nc_ratio;
+
+		//info("got %f %f", bg_conf->nc_ratio, bg_conf->io_ratio);
+		/* figure out the smallest block we can have on the
+		   system */
+#ifdef HAVE_BGL
+		if (bg_conf->io_ratio >= 1)
+			bg_conf->smallest_block=32;
+		else
+			bg_conf->smallest_block=128;
+#else
+
+		if (bg_conf->io_ratio >= 2)
+			bg_conf->smallest_block=16;
+		else if (bg_conf->io_ratio == 1)
+			bg_conf->smallest_block=32;
+		else if (bg_conf->io_ratio == .5)
+			bg_conf->smallest_block=64;
+		else if (bg_conf->io_ratio == .25)
+			bg_conf->smallest_block=128;
+		else if (bg_conf->io_ratio == .125)
+			bg_conf->smallest_block=256;
+		else {
+			error("unknown ioratio %f.  Can't figure out "
+			      "smallest block size, setting it to midplane",
+			      bg_conf->io_ratio);
+			bg_conf->smallest_block = 512;
+		}
+#endif
+		if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE)
+			info("Smallest block possible on this system is %u",
+			     bg_conf->smallest_block);
+		/* below we are creating all the possible bitmaps for
+		 * each size of small block
+		 */
+		if ((int)bg_conf->nodecard_ionode_cnt < 1) {
+			bg_conf->nodecard_ionode_cnt = 0;
+		} else {
+			bg_lists->valid_small32 = list_create(_destroy_bitmap);
+			/* This is supposed to be = and not ==, we only
+			   want to decrement when small_size equals
+			   something.
+			*/
+			if ((small_size = bg_conf->nodecard_ionode_cnt))
+				small_size--;
+			i = 0;
+			while (i<bg_conf->ionodes_per_mp) {
+				tmp_bitmap = bit_alloc(bg_conf->ionodes_per_mp);
+				bit_nset(tmp_bitmap, i, i+small_size);
+				i += small_size+1;
+				list_append(bg_lists->valid_small32,
+					    tmp_bitmap);
+			}
+		}
+		/* If we only have 1 nodecard just jump to the end
+		   since this will never need to happen below.
+		   Pretty much a hack to avoid seg fault;). */
+		if (bg_conf->mp_cnode_cnt == bg_conf->nodecard_cnode_cnt)
+			goto no_calc;
+
+		bg_lists->valid_small128 = list_create(_destroy_bitmap);
+		if ((small_size = bg_conf->quarter_ionode_cnt))
+			small_size--;
+		i = 0;
+		while (i<bg_conf->ionodes_per_mp) {
+			tmp_bitmap = bit_alloc(bg_conf->ionodes_per_mp);
+			bit_nset(tmp_bitmap, i, i+small_size);
+			i += small_size+1;
+			list_append(bg_lists->valid_small128, tmp_bitmap);
+		}
+
+#ifndef HAVE_BGL
+		bg_lists->valid_small64 = list_create(_destroy_bitmap);
+		if ((small_size = bg_conf->nodecard_ionode_cnt * 2))
+			small_size--;
+		i = 0;
+		while (i<bg_conf->ionodes_per_mp) {
+			tmp_bitmap = bit_alloc(bg_conf->ionodes_per_mp);
+			bit_nset(tmp_bitmap, i, i+small_size);
+			i += small_size+1;
+			list_append(bg_lists->valid_small64, tmp_bitmap);
+		}
+
+		bg_lists->valid_small256 = list_create(_destroy_bitmap);
+		if ((small_size = bg_conf->quarter_ionode_cnt * 2))
+			small_size--;
+		i = 0;
+		while (i<bg_conf->ionodes_per_mp) {
+			tmp_bitmap = bit_alloc(bg_conf->ionodes_per_mp);
+			bit_nset(tmp_bitmap, i, i+small_size);
+			i += small_size+1;
+			list_append(bg_lists->valid_small256, tmp_bitmap);
+		}
+#endif
+	} else {
+		fatal("your ionodes_per_mp is 0");
+	}
+
+no_calc:
+
+	if (!s_p_get_uint16(&bg_conf->bridge_api_verb, "BridgeAPIVerbose", tbl))
+		info("Warning: BridgeAPIVerbose not configured "
+		     "in bluegene.conf");
+	if (!s_p_get_string(&bg_conf->bridge_api_file,
+			    "BridgeAPILogFile", tbl))
+		info("BridgeAPILogFile not configured in bluegene.conf");
+	else
+		_reopen_bridge_log();
+
+	if (s_p_get_string(&layout, "DenyPassthrough", tbl)) {
+		if (strstr(layout, "A"))
+			ba_deny_pass |= PASS_DENY_A;
+		if (strstr(layout, "X"))
+			ba_deny_pass |= PASS_DENY_X;
+		if (strstr(layout, "Y"))
+			ba_deny_pass |= PASS_DENY_Y;
+		if (strstr(layout, "Z"))
+			ba_deny_pass |= PASS_DENY_Z;
+		if (!strcasecmp(layout, "ALL"))
+			ba_deny_pass |= PASS_DENY_ALL;
+		bg_conf->deny_pass = ba_deny_pass;
+		xfree(layout);
+	}
+
+	if (!s_p_get_string(&layout, "LayoutMode", tbl)) {
+		info("Warning: LayoutMode was not specified in bluegene.conf "
+		     "defaulting to STATIC partitioning");
+		bg_conf->layout_mode = LAYOUT_STATIC;
+	} else {
+		if (!strcasecmp(layout,"STATIC"))
+			bg_conf->layout_mode = LAYOUT_STATIC;
+		else if (!strcasecmp(layout,"OVERLAP"))
+			bg_conf->layout_mode = LAYOUT_OVERLAP;
+		else if (!strcasecmp(layout,"DYNAMIC"))
+			bg_conf->layout_mode = LAYOUT_DYNAMIC;
+		else {
+			fatal("I don't understand this LayoutMode = %s",
+			      layout);
+		}
+		xfree(layout);
+	}
+
+	/* add blocks defined in file */
+	if (bg_conf->layout_mode != LAYOUT_DYNAMIC) {
+		if (!s_p_get_array((void ***)&blockreq_array,
+				   &count, "BPs", tbl)) {
+			info("WARNING: no blocks defined in bluegene.conf, "
+			     "only making full system block");
+			if (bg_conf->mp_cnode_cnt
+			    == bg_conf->nodecard_cnode_cnt)
+				fatal("On a sub-midplane system you need to "
+				      "define the blocks you want on your "
+				      "system.");
+			/* create_full_system_block(NULL); */
+		}
+
+		for (i = 0; i < count; i++) {
+			add_bg_record(bg_lists->main, NULL,
+				      blockreq_array[i], 0, 0);
+		}
+	} else if (bg_conf->mp_cnode_cnt == bg_conf->nodecard_cnode_cnt)
+		/* we can't do dynamic here on a sub-midplane system */
+		fatal("On a sub-midplane system we can only do OVERLAP or "
+		      "STATIC LayoutMode.  Please update your bluegene.conf.");
+
+	s_p_hashtbl_destroy(tbl);
+
+	return SLURM_SUCCESS;
+}
+
+extern s_p_hashtbl_t *config_make_tbl(char *filename)
+{
+	s_p_hashtbl_t *tbl = NULL;
+
+	xassert(filename);
+
+	tbl = s_p_hashtbl_create(bg_conf_file_options);
+
+	if (s_p_parse_file(tbl, NULL, filename, false) == SLURM_ERROR) {
+		s_p_hashtbl_destroy(tbl);
+		tbl = NULL;
+	}
+
+	return tbl;
+}
diff --git a/src/plugins/select/bluegene/bg_read_config.h b/src/plugins/select/bluegene/bg_read_config.h
new file mode 100644
index 000000000..041d93d11
--- /dev/null
+++ b/src/plugins/select/bluegene/bg_read_config.h
@@ -0,0 +1,79 @@
+/*****************************************************************************\
+ *  bg_read_config.h
+ *
+ *****************************************************************************
+ *  Copyright (C) 2011 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Danny Auble <da@llnl.gov>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifndef _BG_READ_CONFIG_H_
+#define _BG_READ_CONFIG_H_
+
+#include "src/common/list.h"
+#include "src/common/xmalloc.h"
+#include "src/common/read_config.h"
+#include "src/common/parse_spec.h"
+
+/* structure filled in from reading bluegene.conf file for specifying
+ * images */
+typedef struct {
+	bool def;                      /* Whether image is the default
+					  image or not */
+	List groups;                   /* list of groups able to use
+					* the image contains
+					* image_group_t's */
+	char *name;                    /* Name of image */
+} image_t;
+
+typedef struct {
+	char *name;
+	gid_t gid;
+} image_group_t;
+
+extern void destroy_image_group_list(void *ptr);
+extern void destroy_image(void *ptr);
+
+/* Parse a block request from the bluegene.conf file */
+extern int parse_blockreq(void **dest, slurm_parser_enum_t type,
+			  const char *key, const char *value,
+			  const char *line, char **leftover);
+
+/* Parse image information from bluegene.conf file */
+extern int parse_image(void **dest, slurm_parser_enum_t type,
+		       const char *key, const char *value,
+		       const char *line, char **leftover);
+
+extern int read_bg_conf(void);
+extern s_p_hashtbl_t *config_make_tbl(char *filename);
+
+#endif
diff --git a/src/plugins/select/bluegene/plugin/bg_record_functions.c b/src/plugins/select/bluegene/bg_record_functions.c
similarity index 63%
rename from src/plugins/select/bluegene/plugin/bg_record_functions.c
rename to src/plugins/select/bluegene/bg_record_functions.c
index ee355f788..64b104507 100644
--- a/src/plugins/select/bluegene/plugin/bg_record_functions.c
+++ b/src/plugins/select/bluegene/bg_record_functions.c
@@ -8,7 +8,7 @@
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -37,8 +37,8 @@
  *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
 \*****************************************************************************/
 
-#include "bluegene.h"
-#include "dynamic_block.h"
+#include "bg_core.h"
+#include "bg_dynamic_block.h"
 
 #include "src/common/uid.h"
 #include "src/common/slurm_accounting_storage.h"
@@ -47,8 +47,9 @@
 
 /* some local functions */
 static int _set_block_nodes_accounting(bg_record_t *bg_record, char *reason);
-static int _addto_node_list(bg_record_t *bg_record, int *start, int *end);
-static int _ba_node_cmpf_inc(ba_node_t *node_a, ba_node_t *node_b);
+static void _addto_mp_list(bg_record_t *bg_record,
+			   uint16_t *start, uint16_t *end);
+static int _ba_mp_cmpf_inc(ba_mp_t *node_a, ba_mp_t *node_b);
 
 extern void print_bg_record(bg_record_t* bg_record)
 {
@@ -60,20 +61,20 @@ extern void print_bg_record(bg_record_t* bg_record)
 	info(" bg_record: ");
 	if (bg_record->bg_block_id)
 		info("\tbg_block_id: %s", bg_record->bg_block_id);
-	info("\tnodes: %s", bg_record->nodes);
-	info("\tsize: %d BPs %u Nodes %d cpus",
-	     bg_record->bp_count,
-	     bg_record->node_cnt,
+	info("\tnodes: %s", bg_record->mp_str);
+	info("\tsize: %d MPs %u Nodes %d cpus",
+	     bg_record->mp_count,
+	     bg_record->cnode_cnt,
 	     bg_record->cpu_cnt);
 	info("\tgeo: %ux%ux%u", bg_record->geo[X], bg_record->geo[Y],
 	     bg_record->geo[Z]);
-	info("\tconn_type: %s", conn_type_string(bg_record->conn_type));
+	info("\tconn_type: %s", conn_type_string(bg_record->conn_type[0]));
 #ifdef HAVE_BGL
 	info("\tnode_use: %s", node_use_string(bg_record->node_use));
 #endif
-	if (bg_record->bitmap) {
+	if (bg_record->mp_bitmap) {
 		char bitstring[BITSIZE];
-		bit_fmt(bitstring, BITSIZE, bg_record->bitmap);
+		bit_fmt(bitstring, BITSIZE, bg_record->mp_bitmap);
 		info("\tbitmap: %s", bitstring);
 	}
 #else
@@ -82,7 +83,7 @@ extern void print_bg_record(bg_record_t* bg_record)
 		format_node_name(bg_record, tmp_char, sizeof(tmp_char));
 		info("Record: BlockID:%s Nodes:%s Conn:%s",
 		     bg_record->bg_block_id, tmp_char,
-		     conn_type_string(bg_record->conn_type));
+		     conn_type_string(bg_record->conn_type[0]));
 	}
 #endif
 }
@@ -93,286 +94,196 @@ extern void destroy_bg_record(void *object)
 
 	if (bg_record) {
 		bg_record->magic = 0;
-		xfree(bg_record->bg_block_id);
-		xfree(bg_record->nodes);
-		xfree(bg_record->ionodes);
-		xfree(bg_record->user_name);
-		xfree(bg_record->target_name);
-		if (bg_record->bg_block_list) {
-			list_destroy(bg_record->bg_block_list);
-			bg_record->bg_block_list = NULL;
+		if (bg_record->ba_mp_list) {
+			list_destroy(bg_record->ba_mp_list);
+			bg_record->ba_mp_list = NULL;
 		}
-		FREE_NULL_BITMAP(bg_record->bitmap);
+		xfree(bg_record->bg_block_id);
+		xfree(bg_record->blrtsimage);
+		xfree(bg_record->ionode_str);
 		FREE_NULL_BITMAP(bg_record->ionode_bitmap);
 
-#ifdef HAVE_BGL
-		xfree(bg_record->blrtsimage);
-#endif
+		if (bg_record->job_list) {
+			list_destroy(bg_record->job_list);
+			bg_record->job_list = NULL;
+		}
+
 		xfree(bg_record->linuximage);
 		xfree(bg_record->mloaderimage);
+		FREE_NULL_BITMAP(bg_record->mp_bitmap);
+		xfree(bg_record->mp_str);
+		FREE_NULL_BITMAP(bg_record->mp_used_bitmap);
 		xfree(bg_record->ramdiskimage);
 		xfree(bg_record->reason);
+		xfree(bg_record->target_name);
+		xfree(bg_record->user_name);
 
 		xfree(bg_record);
 	}
 }
 
-/* see if a record already of like bitmaps exists in a list */
-extern int block_exist_in_list(List my_list, bg_record_t *bg_record)
-{
-	ListIterator itr = list_iterator_create(my_list);
-	bg_record_t *found_record = NULL;
-	int rc = 0;
-
-	while ((found_record = list_next(itr))) {
-		/* If there is a free_cnt that means the block is
-		   going away, so we should skip it.
-		*/
-		if (found_record->free_cnt)
-			continue;
-		/* check for full node bitmap compare */
-		if (bit_equal(bg_record->bitmap, found_record->bitmap)
-		    && bit_equal(bg_record->ionode_bitmap,
-				 found_record->ionode_bitmap)) {
-			/* now make sure the conn_type is the same for
-			   regular sized blocks */
-			if ((bg_record->node_cnt >= bg_conf->bp_node_cnt)
-			    && bg_record->conn_type != found_record->conn_type)
-				continue;
-
-			if (bg_record->ionodes)
-				debug("This block %s[%s] "
-				      "is already in the list %s",
-				      bg_record->nodes,
-				      bg_record->ionodes,
-				      found_record->bg_block_id);
-			else
-				debug("This block %s "
-				      "is already in the list %s",
-				      bg_record->nodes,
-				      found_record->bg_block_id);
-
-			rc = 1;
-			break;
-		}
-	}
-	list_iterator_destroy(itr);
-	return rc;
-}
-
-/* see if the exact record already exists in a list */
-extern int block_ptr_exist_in_list(List my_list, bg_record_t *bg_record)
-{
-	ListIterator itr = NULL;
-	bg_record_t *found_record = NULL;
-	int rc = 0;
-
-	if (!my_list || !bg_record)
-		return rc;
-
-	itr = list_iterator_create(my_list);
-	while ((found_record = list_next(itr))) {
-		if (bg_record == found_record) {
-			rc = 1;
-			break;
-		}
-	}
-	list_iterator_destroy(itr);
-	return rc;
-}
-
 extern void process_nodes(bg_record_t *bg_record, bool startup)
 {
-	int j=0, number;
+	int j=0;
 	int diff=0;
 	int largest_diff=-1;
-	int best_start[SYSTEM_DIMENSIONS];
-	int start[SYSTEM_DIMENSIONS];
-	int end[SYSTEM_DIMENSIONS];
+	uint16_t best_start[SYSTEM_DIMENSIONS];
+	uint16_t start[SYSTEM_DIMENSIONS];
+	uint16_t end[SYSTEM_DIMENSIONS];
 	bool start_set=0;
 	ListIterator itr;
-	ba_node_t* ba_node = NULL;
-	char *p = '\0';
-
-	if (!bg_record->bg_block_list
-	    || !list_count(bg_record->bg_block_list)) {
-		if (!bg_record->bg_block_list) {
-			bg_record->bg_block_list =
-				list_create(destroy_ba_node);
-		}
+	ba_mp_t* ba_mp = NULL;
+	int dim;
+	static char tmp_char[SYSTEM_DIMENSIONS+1],
+		tmp_char2[SYSTEM_DIMENSIONS+1];
+	static int *cluster_dims = NULL;
+
+	if (!cluster_dims) {
+		/* do some initing that only needs to happen once. */
+		cluster_dims = select_g_ba_get_dims();
+		memset(tmp_char, 0, sizeof(tmp_char));
+		memset(tmp_char2, 0, sizeof(tmp_char2));
+	}
+
+	if (!bg_record->ba_mp_list || !list_count(bg_record->ba_mp_list)) {
+		char *nodes = bg_record->mp_str;
+
+		if (!bg_record->ba_mp_list)
+			bg_record->ba_mp_list = list_create(destroy_ba_mp);
+
 		memset(&best_start, 0, sizeof(best_start));
-		bg_record->bp_count = 0;
-		if ((bg_record->conn_type >= SELECT_SMALL) && (!startup))
+		//bg_record->mp_count = 0;
+		if ((bg_record->conn_type[0] >= SELECT_SMALL) && (!startup))
 			error("process_nodes: "
 			      "We shouldn't be here there could be some "
 			      "badness if we use this logic %s",
-			      bg_record->nodes);
-		while (bg_record->nodes[j] != '\0') {
-			if ((bg_record->nodes[j] == '['
-			     || bg_record->nodes[j] == ',')
-			    && (bg_record->nodes[j+8] == ']'
-				|| bg_record->nodes[j+8] == ',')
-			    && (bg_record->nodes[j+4] == 'x'
-				|| bg_record->nodes[j+4] == '-')) {
-				j++;
-				number = xstrntol(bg_record->nodes + j, &p,
-						  SYSTEM_DIMENSIONS,
-						  HOSTLIST_BASE);
-				hostlist_parse_int_to_array(
-					number, start, SYSTEM_DIMENSIONS,
-					HOSTLIST_BASE);
-				j += 4;
-				number = xstrntol(bg_record->nodes + j, &p,
-						  SYSTEM_DIMENSIONS,
-						  HOSTLIST_BASE);
-				hostlist_parse_int_to_array(
-					number, end, SYSTEM_DIMENSIONS,
-					HOSTLIST_BASE);
-				j += 3;
-				diff = end[X]-start[X];
-				if (diff > largest_diff) {
-					best_start[X] = start[X];
-					best_start[Y] = start[Y];
-					best_start[Z] = start[Z];
-					debug3("process_nodes: "
-					       "start is now %dx%dx%d",
-					       best_start[X],
-					       best_start[Y],
-					       best_start[Z]);
-					largest_diff = diff;
-				}
-				bg_record->bp_count += _addto_node_list(
-					bg_record,
-					start,
-					end);
-				if (bg_record->nodes[j] != ',')
-					break;
-				j--;
-			} else if ((bg_record->nodes[j] >= '0'
-				    && bg_record->nodes[j] <= '9')
-				   || (bg_record->nodes[j] >= 'A'
-				       && bg_record->nodes[j] <= 'Z')) {
-
-				number = xstrntol(bg_record->nodes + j, &p,
-						  SYSTEM_DIMENSIONS,
-						  HOSTLIST_BASE);
-				hostlist_parse_int_to_array(
-					number, start, SYSTEM_DIMENSIONS,
-					HOSTLIST_BASE);
-				j+=3;
+			      bg_record->mp_str);
+		while (nodes[j] != '\0') {
+			int mid = j   + SYSTEM_DIMENSIONS + 1;
+			int fin = mid + SYSTEM_DIMENSIONS + 1;
+			if (((nodes[j] == '[')   || (nodes[j] == ','))   &&
+			    ((nodes[mid] == 'x') || (nodes[mid] == '-')) &&
+			    ((nodes[fin] == ']') || (nodes[fin] == ','))) {
+				j++;	/* Skip leading '[' or ',' */
+				for (dim = 0; dim < SYSTEM_DIMENSIONS;
+				     dim++, j++)
+					start[dim] = select_char2coord(
+						nodes[j]);
+				j++;	/* Skip middle 'x' or '-' */
+				for (dim = 0; dim < SYSTEM_DIMENSIONS;
+				     dim++, j++)
+					end[dim] = select_char2coord(nodes[j]);
+				diff = end[0]-start[0];
+				_addto_mp_list(bg_record, start, end);
+			} else if ((nodes[j] >= '0'&& nodes[j] <= '9')
+				   || (nodes[j] >= 'A' && nodes[j] <= 'Z')) {
+				for (dim = 0; dim < SYSTEM_DIMENSIONS;
+				     dim++, j++)
+					start[dim] = select_char2coord(
+						nodes[j]);
 				diff = 0;
-				if (diff > largest_diff) {
-					best_start[X] = start[X];
-					best_start[Y] = start[Y];
-					best_start[Z] = start[Z];
-					debug3("process_nodes: "
-					       "start is now %dx%dx%d",
-					       best_start[X],
-					       best_start[Y],
-					       best_start[Z]);
-					largest_diff = diff;
+				_addto_mp_list(bg_record, start, start);
+			} else {
+				j++;
+				continue;
+			}
+
+			if (diff > largest_diff) {
+				largest_diff = diff;
+				memcpy(best_start, start, sizeof(best_start));
+
+				if (bg_conf->slurm_debug_level
+				    >= LOG_LEVEL_DEBUG3) {
+					for (dim = 0;
+					     dim < SYSTEM_DIMENSIONS;
+					     dim++)
+						tmp_char[dim] =	alpha_num[
+							best_start[dim]];
+					debug3("process_nodes: start is now %s",
+					       tmp_char);
 				}
-				bg_record->bp_count += _addto_node_list(
-					bg_record,
-					start,
-					start);
-				if (bg_record->nodes[j] != ',')
-					break;
-				j--;
 			}
-			j++;
+			if (bg_record->mp_str[j] != ',')
+				break;
+
 		}
 		if (largest_diff == -1)
 			fatal("No hostnames given here");
 
-		bg_record->start[X] = best_start[X];
-		bg_record->start[Y] = best_start[Y];
-		bg_record->start[Z] = best_start[Z];
+		memcpy(bg_record->start, best_start, sizeof(bg_record->start));
 		start_set = 1;
-		debug2("process_nodes: "
-		       "start is %dx%dx%d",
-		       bg_record->start[X],
-		       bg_record->start[Y],
-		       bg_record->start[Z]);
+		if (bg_conf->slurm_debug_level >= LOG_LEVEL_DEBUG3) {
+			for (dim = 0; dim < SYSTEM_DIMENSIONS; dim++) {
+				tmp_char[dim] = alpha_num[best_start[dim]];
+				tmp_char2[dim] =
+					alpha_num[bg_record->start[dim]];
+			}
+			debug3("process_nodes: start is %s %s",
+			       tmp_char, tmp_char2);
+		}
 	}
 
-	bg_record->geo[X] = 0;
-	bg_record->geo[Y] = 0;
-	bg_record->geo[Z] = 0;
-	end[X] = -1;
-	end[Y] = -1;
-	end[Z] = -1;
-	if (!start_set) {
-		bg_record->start[X] = HOSTLIST_BASE;
-		bg_record->start[Y] = HOSTLIST_BASE;
-		bg_record->start[Z] = HOSTLIST_BASE;
+	memset(bg_record->geo, 0, sizeof(bg_record->geo));
+	for (dim = 0; dim < SYSTEM_DIMENSIONS; dim++) {
+		end[dim] = (int16_t)-1;
+		if (!start_set)
+			bg_record->start[dim] = HOSTLIST_BASE;
 	}
 
-	list_sort(bg_record->bg_block_list, (ListCmpF) _ba_node_cmpf_inc);
+	list_sort(bg_record->ba_mp_list, (ListCmpF) _ba_mp_cmpf_inc);
 
-	itr = list_iterator_create(bg_record->bg_block_list);
-	while ((ba_node = list_next(itr)) != NULL) {
-		if (!ba_node->used)
+	bg_record->mp_count = 0;
+	itr = list_iterator_create(bg_record->ba_mp_list);
+	while ((ba_mp = list_next(itr))) {
+		if (!ba_mp->used)
 			continue;
-		debug4("process_nodes: "
-		       "%c%c%c is included in this block",
-		       alpha_num[ba_node->coord[X]],
-		       alpha_num[ba_node->coord[Y]],
-		       alpha_num[ba_node->coord[Z]]);
-
-		if (ba_node->coord[X]>end[X]) {
-			bg_record->geo[X]++;
-			end[X] = ba_node->coord[X];
-		}
-		if (ba_node->coord[Y]>end[Y]) {
-			bg_record->geo[Y]++;
-			end[Y] = ba_node->coord[Y];
-		}
-		if (ba_node->coord[Z]>end[Z]) {
-			bg_record->geo[Z]++;
-			end[Z] = ba_node->coord[Z];
-		}
-		if (!start_set) {
-			if (ba_node->coord[X]<bg_record->start[X]) {
-				bg_record->start[X] = ba_node->coord[X];
-			}
-			if (ba_node->coord[Y]<bg_record->start[Y]) {
-				bg_record->start[Y] = ba_node->coord[Y];
-			}
-			if (ba_node->coord[Z]<bg_record->start[Z]) {
-				bg_record->start[Z] = ba_node->coord[Z];
+		bg_record->mp_count++;
+		debug3("process_nodes: %s is included in this block",
+		       ba_mp->coord_str);
+
+		for (dim = 0; dim < SYSTEM_DIMENSIONS; dim++) {
+			if (ba_mp->coord[dim] > (int16_t)end[dim]) {
+				bg_record->geo[dim]++;
+				end[dim] = ba_mp->coord[dim];
 			}
+			if (!start_set && (ba_mp->coord[dim] <
+					   (int16_t)bg_record->start[dim]))
+				bg_record->start[dim] =	ba_mp->coord[dim];
 		}
 	}
 	list_iterator_destroy(itr);
-	debug3("process_nodes: "
-	       "geo = %c%c%c bp count is %d start is %c%c%c",
-	       alpha_num[bg_record->geo[X]],
-	       alpha_num[bg_record->geo[Y]],
-	       alpha_num[bg_record->geo[Z]],
-	       bg_record->bp_count,
-	       alpha_num[bg_record->start[X]],
-	       alpha_num[bg_record->start[Y]],
-	       alpha_num[bg_record->start[Z]]);
+	if (bg_conf->slurm_debug_level >= LOG_LEVEL_DEBUG3) {
+		for (dim = 0; dim < SYSTEM_DIMENSIONS; dim++) {
+			tmp_char[dim] = alpha_num[bg_record->geo[dim]];
+			tmp_char2[dim] = alpha_num[bg_record->start[dim]];
+		}
+		debug3("process_nodes: geo = %s mp count is %d start is %s",
+		       tmp_char, bg_record->mp_count, tmp_char2);
+	}
 	/* This check is for sub midplane systems to figure out what
 	   the largest block can be.
 	*/
-	if ((DIM_SIZE[X] > 1) || (DIM_SIZE[Y] > 1) || (DIM_SIZE[Z] > 1)) {
+	for (dim = 0; dim < SYSTEM_DIMENSIONS; dim++) {
+		if (cluster_dims[dim] > 1)
+			break;
+	}
+	if (dim < SYSTEM_DIMENSIONS) {
 		/* means we have more than 1 base partition */
-		if ((bg_record->geo[X] == DIM_SIZE[X])
-		    && (bg_record->geo[Y] == DIM_SIZE[Y])
-		    && (bg_record->geo[Z] == DIM_SIZE[Z])) {
-			bg_record->full_block = 1;
+		for (dim = 0; dim < SYSTEM_DIMENSIONS; dim++) {
+			if (bg_record->geo[dim] != cluster_dims[dim])
+				break;
 		}
-	} else if (bg_record->node_cnt == bg_conf->bp_node_cnt)
+		if (dim == SYSTEM_DIMENSIONS)
+			bg_record->full_block = 1;
+	} else if (bg_record->cnode_cnt == bg_conf->mp_cnode_cnt)
 		bg_record->full_block = 1;
 
-	if (node_name2bitmap(bg_record->nodes,
+	FREE_NULL_BITMAP(bg_record->mp_bitmap);
+	if (node_name2bitmap(bg_record->mp_str,
 			     false,
-			     &bg_record->bitmap)) {
-		fatal("process_nodes: "
-		      "1 Unable to convert nodes %s to bitmap",
-		      bg_record->nodes);
+			     &bg_record->mp_bitmap)) {
+		fatal("process_nodes: Unable to convert nodes %s to bitmap",
+		      bg_record->mp_str);
 	}
 	return;
 }
@@ -393,6 +304,10 @@ extern List copy_bg_list(List in_list)
 			error("trying to copy a bad record");
 			continue;
 		}
+		/* we don't care about blocks being freed */
+		if (bg_record->free_cnt)
+			continue;
+
 		new_record = xmalloc(sizeof(bg_record_t));
 		new_record->original = bg_record;
 		copy_bg_record(bg_record, new_record);
@@ -408,7 +323,7 @@ extern void copy_bg_record(bg_record_t *fir_record, bg_record_t *sec_record)
 {
 	int i;
 	ListIterator itr = NULL;
-	ba_node_t *ba_node = NULL, *new_ba_node = NULL;
+	ba_mp_t *ba_mp = NULL, *new_ba_mp = NULL;
 
 	if (!fir_record || !sec_record) {
 		error("copy_bg_record: "
@@ -419,72 +334,84 @@ extern void copy_bg_record(bg_record_t *fir_record, bg_record_t *sec_record)
 	xfree(sec_record->bg_block_id);
 	sec_record->bg_block_id = xstrdup(fir_record->bg_block_id);
 
-	if (sec_record->bg_block_list)
-		list_destroy(sec_record->bg_block_list);
-	sec_record->bg_block_list = list_create(destroy_ba_node);
-	if (fir_record->bg_block_list) {
-		itr = list_iterator_create(fir_record->bg_block_list);
-		while ((ba_node = list_next(itr))) {
-			new_ba_node = ba_copy_node(ba_node);
-			list_append(sec_record->bg_block_list, new_ba_node);
+	if (sec_record->ba_mp_list)
+		list_destroy(sec_record->ba_mp_list);
+	sec_record->ba_mp_list = list_create(destroy_ba_mp);
+	if (fir_record->ba_mp_list) {
+		itr = list_iterator_create(fir_record->ba_mp_list);
+		while ((ba_mp = list_next(itr))) {
+			new_ba_mp = ba_copy_mp(ba_mp);
+			list_append(sec_record->ba_mp_list, new_ba_mp);
 		}
 		list_iterator_destroy(itr);
 	}
 
-	FREE_NULL_BITMAP(sec_record->bitmap);
-	if (fir_record->bitmap
-	    && (sec_record->bitmap = bit_copy(fir_record->bitmap)) == NULL) {
-		error("Unable to copy bitmap for %s", fir_record->nodes);
-		sec_record->bitmap = NULL;
+	FREE_NULL_BITMAP(sec_record->mp_bitmap);
+	if (fir_record->mp_bitmap
+	    && (sec_record->mp_bitmap = bit_copy(fir_record->mp_bitmap)) == NULL) {
+		error("Unable to copy bitmap for %s", fir_record->mp_str);
+		sec_record->mp_bitmap = NULL;
 	}
 
-#ifdef HAVE_BGL
-	xfree(sec_record->blrtsimage);
-	sec_record->blrtsimage = xstrdup(fir_record->blrtsimage);
-#endif
-
 	sec_record->boot_state = fir_record->boot_state;
 	sec_record->boot_count = fir_record->boot_count;
-	sec_record->bp_count = fir_record->bp_count;
-	sec_record->conn_type = fir_record->conn_type;
+
+	FREE_NULL_BITMAP(sec_record->mp_used_bitmap);
+	if (fir_record->mp_used_bitmap
+	    && (sec_record->mp_used_bitmap
+		= bit_copy(fir_record->mp_used_bitmap)) == NULL) {
+		error("Unable to copy mp_used_bitmap for %s",
+		      fir_record->mp_str);
+		sec_record->mp_used_bitmap = NULL;
+	}
+	sec_record->cnode_cnt = fir_record->cnode_cnt;
+
+	memcpy(sec_record->conn_type, fir_record->conn_type,
+	       sizeof(sec_record->conn_type));
 	sec_record->cpu_cnt = fir_record->cpu_cnt;
 	sec_record->free_cnt = fir_record->free_cnt;
 	sec_record->full_block = fir_record->full_block;
 
-	for(i=0;i<HIGHEST_DIMENSIONS;i++) {
+	for(i=0;i<SYSTEM_DIMENSIONS;i++) {
 		sec_record->geo[i] = fir_record->geo[i];
 		sec_record->start[i] = fir_record->start[i];
 	}
 
-	xfree(sec_record->ionodes);
-	sec_record->ionodes = xstrdup(fir_record->ionodes);
+	xfree(sec_record->ionode_str);
+	sec_record->ionode_str = xstrdup(fir_record->ionode_str);
 
 	FREE_NULL_BITMAP(sec_record->ionode_bitmap);
 	if (fir_record->ionode_bitmap
 	    && (sec_record->ionode_bitmap
 		= bit_copy(fir_record->ionode_bitmap)) == NULL) {
 		error("Unable to copy ionode_bitmap for %s",
-		      fir_record->nodes);
+		      fir_record->mp_str);
 		sec_record->ionode_bitmap = NULL;
 	}
 
 	sec_record->job_ptr = fir_record->job_ptr;
 	sec_record->job_running = fir_record->job_running;
 
+	sec_record->magic = fir_record->magic;
+
+	xfree(sec_record->blrtsimage);
+	sec_record->blrtsimage = xstrdup(fir_record->blrtsimage);
+
 	xfree(sec_record->linuximage);
 	sec_record->linuximage = xstrdup(fir_record->linuximage);
 
-	sec_record->magic = fir_record->magic;
-
 	xfree(sec_record->mloaderimage);
 	sec_record->mloaderimage = xstrdup(fir_record->mloaderimage);
 
+	xfree(sec_record->ramdiskimage);
+	sec_record->ramdiskimage = xstrdup(fir_record->ramdiskimage);
+
 	sec_record->modifying = fir_record->modifying;
 
-	xfree(sec_record->nodes);
-	sec_record->nodes = xstrdup(fir_record->nodes);
+	sec_record->mp_count = fir_record->mp_count;
 
-	sec_record->node_cnt = fir_record->node_cnt;
+	xfree(sec_record->mp_str);
+	sec_record->mp_str = xstrdup(fir_record->mp_str);
 
 #ifdef HAVE_BGL
 	sec_record->node_use = fir_record->node_use;
@@ -493,13 +420,10 @@ extern void copy_bg_record(bg_record_t *fir_record, bg_record_t *sec_record)
 	 * for a reason. */
 	/* sec_record->original = fir_record; */
 
-	xfree(sec_record->ramdiskimage);
-	sec_record->ramdiskimage = xstrdup(fir_record->ramdiskimage);
 	xfree(sec_record->reason);
 	sec_record->reason = xstrdup(fir_record->reason);
 
 	sec_record->state = fir_record->state;
-	sec_record->switch_count = fir_record->switch_count;
 
 	xfree(sec_record->target_name);
 	sec_record->target_name = xstrdup(fir_record->target_name);
@@ -517,21 +441,21 @@ extern void copy_bg_record(bg_record_t *fir_record, bg_record_t *sec_record)
  */
 extern int bg_record_cmpf_inc(bg_record_t* rec_a, bg_record_t* rec_b)
 {
-	int size_a = rec_a->node_cnt;
-	int size_b = rec_b->node_cnt;
+	int size_a = rec_a->cnode_cnt;
+	int size_b = rec_b->cnode_cnt;
 
 	/* We only look at this if we are ordering blocks larger than
 	 * a midplane, order of ionodes is how we order otherwise. */
-	if ((size_a >= bg_conf->bp_node_cnt)
-	    || (size_b >= bg_conf->bp_node_cnt)) {
+	if ((size_a >= bg_conf->mp_cnode_cnt)
+	    || (size_b >= bg_conf->mp_cnode_cnt)) {
 		if (size_a < size_b)
 			return -1;
 		else if (size_a > size_b)
 			return 1;
 	}
 
-	if (rec_a->nodes && rec_b->nodes) {
-		size_a = strcmp(rec_a->nodes, rec_b->nodes);
+	if (rec_a->mp_str && rec_b->mp_str) {
+		size_a = strcmp(rec_a->mp_str, rec_b->mp_str);
 		if (size_a < 0)
 			return -1;
 		else if (size_a > 0)
@@ -550,7 +474,7 @@ extern int bg_record_cmpf_inc(bg_record_t* rec_a, bg_record_t* rec_b)
 }
 
 /*
- * Comparator used for sorting blocks from earliest avaliable to lastest
+ * Comparator used for sorting blocks from earliest available to lastest
  *
  * returns: -1: rec_a < rec_b   0: rec_a == rec_b   1: rec_a > rec_b
  *
@@ -577,33 +501,6 @@ extern int bg_record_sort_aval_inc(bg_record_t* rec_a, bg_record_t* rec_b)
 	return bg_record_cmpf_inc(rec_a, rec_b);
 }
 
-/* if looking at the main list this should have some nice
- * block_state_mutex locks around it.
- */
-extern bg_record_t *find_bg_record_in_list(List my_list, char *bg_block_id)
-{
-	ListIterator itr;
-	bg_record_t *bg_record = NULL;
-
-	xassert(my_list);
-
-	if (!bg_block_id)
-		return NULL;
-
-	itr = list_iterator_create(my_list);
-	while ((bg_record = list_next(itr))) {
-		if (bg_record->bg_block_id)
-			if (!strcasecmp(bg_record->bg_block_id, bg_block_id))
-				break;
-	}
-	list_iterator_destroy(itr);
-
-	if (bg_record)
-		return bg_record;
-	else
-		return NULL;
-}
-
 /* All changes to the bg_list target_name must
    be done before this function is called.
    also slurm_conf_lock() must be called before calling this
@@ -611,6 +508,8 @@ extern bg_record_t *find_bg_record_in_list(List my_list, char *bg_block_id)
 */
 extern int update_block_user(bg_record_t *bg_record, int set)
 {
+	int rc=0;
+
 	if (!bg_record->target_name) {
 		error("Must set target_name to run update_block_user.");
 		return -1;
@@ -619,11 +518,10 @@ extern int update_block_user(bg_record_t *bg_record, int set)
 		error("No user_name");
 		bg_record->user_name = xstrdup(bg_conf->slurm_user_name);
 	}
-#ifdef HAVE_BG_FILES
-	int rc=0;
+
 	if (set) {
-		if ((rc = remove_all_users(bg_record->bg_block_id,
-					   bg_record->target_name))
+		if ((rc = bridge_block_remove_all_users(
+			     bg_record, bg_record->target_name))
 		    == REMOVE_USER_ERR) {
 			error("1 Something happened removing "
 			      "users from block %s",
@@ -636,10 +534,10 @@ extern int update_block_user(bg_record_t *bg_record, int set)
 				     bg_record->target_name,
 				     bg_record->bg_block_id);
 
-				if ((rc = bridge_add_block_user(
-					     bg_record->bg_block_id,
+				if ((rc = bridge_block_add_user(
+					     bg_record,
 					     bg_record->target_name))
-				    != STATUS_OK) {
+				    != SLURM_SUCCESS) {
 					error("bridge_add_block_user"
 					      "(%s,%s): %s",
 					      bg_record->bg_block_id,
@@ -650,7 +548,6 @@ extern int update_block_user(bg_record_t *bg_record, int set)
 			}
 		}
 	}
-#endif
 
 	if (strcmp(bg_record->target_name, bg_record->user_name)) {
 		uid_t pw_uid;
@@ -668,6 +565,31 @@ extern int update_block_user(bg_record_t *bg_record, int set)
 	return 0;
 }
 
+extern int set_block_user(bg_record_t *bg_record)
+{
+	int rc = 0;
+	if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE)
+		info("resetting the boot state flag and "
+		     "counter for block %s.",
+		     bg_record->bg_block_id);
+	bg_record->boot_state = BG_BLOCK_FREE;
+	bg_record->boot_count = 0;
+
+	if ((rc = update_block_user(bg_record, 1)) == 1) {
+		last_bg_update = time(NULL);
+		rc = SLURM_SUCCESS;
+	} else if (rc == -1) {
+		error("Unable to add user name to block %s. "
+		      "Cancelling job.",
+		      bg_record->bg_block_id);
+		rc = SLURM_ERROR;
+	}
+	xfree(bg_record->target_name);
+	bg_record->target_name = xstrdup(bg_conf->slurm_user_name);
+
+	return rc;
+}
+
 /* Try to requeue job running on block and put block in an error state.
  * block_state_mutex must be unlocked before calling this.
  */
@@ -688,7 +610,7 @@ extern void requeue_and_error(bg_record_t *bg_record, char *reason)
 	slurm_mutex_unlock(&block_state_mutex);
 
 	if (rc)
-		put_block_in_error_state(bg_record, BLOCK_ERROR_STATE, reason);
+		put_block_in_error_state(bg_record, reason);
 	else
 		error("requeue_and_error: block disappeared");
 
@@ -696,15 +618,15 @@ extern void requeue_and_error(bg_record_t *bg_record, char *reason)
 }
 
 /* block_state_mutex must be locked before calling this. */
-extern int add_bg_record(List records, List used_nodes, blockreq_t *blockreq,
+extern int add_bg_record(List records, List *used_nodes,
+			 select_ba_request_t *blockreq,
 			 bool no_check, bitoff_t io_start)
 {
 	bg_record_t *bg_record = NULL;
-	ba_node_t *ba_node = NULL;
+	ba_mp_t *ba_mp = NULL;
 	ListIterator itr;
 	uid_t pw_uid;
 	int i, len;
-	int small_count = 0;
 
 	xassert(bg_conf->slurm_user_name);
 
@@ -723,61 +645,72 @@ extern int add_bg_record(List records, List used_nodes, blockreq_t *blockreq,
 	else
 		bg_record->user_uid = pw_uid;
 
-	bg_record->bg_block_list = list_create(destroy_ba_node);
-	if (used_nodes) {
-		if (copy_node_path(used_nodes, &bg_record->bg_block_list)
+	if (used_nodes && *used_nodes) {
+#ifdef HAVE_BGQ
+		bg_record->ba_mp_list = *used_nodes;
+		*used_nodes = NULL;
+#else
+		bg_record->ba_mp_list = list_create(destroy_ba_mp);
+		if (copy_node_path(*used_nodes, &bg_record->ba_mp_list)
 		    == SLURM_ERROR)
 			error("add_bg_record: "
 			      "couldn't copy the path for the allocation");
-		bg_record->bp_count = list_count(used_nodes);
-	}
+#endif
+	} else
+		bg_record->ba_mp_list = list_create(destroy_ba_mp);
+
 	/* bg_record->boot_state = 0; 	Implicit */
-	/* bg_record->state = 0;	Implicit */
+	bg_record->state = BG_BLOCK_FREE;
+
 #ifdef HAVE_BGL
 	if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK)
 		info("add_bg_record: asking for %s %d %d %s",
-		     blockreq->block, blockreq->small32, blockreq->small128,
-		     conn_type_string(blockreq->conn_type));
+		     blockreq->save_name, blockreq->small32, blockreq->small128,
+		     conn_type_string(blockreq->conn_type[0]));
 #else
 	if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK)
 		info("add_bg_record: asking for %s %d %d %d %d %d %s",
-		     blockreq->block, blockreq->small256,
+		     blockreq->save_name, blockreq->small256,
 		     blockreq->small128, blockreq->small64,
 		     blockreq->small32, blockreq->small16,
-		     conn_type_string(blockreq->conn_type));
+		     conn_type_string(blockreq->conn_type[0]));
 #endif
 	/* Set the bitmap blank here if it is a full node we don't
-	   want anything set we also don't want the bg_record->ionodes set.
+	   want anything set we also don't want the bg_record->ionode_str set.
 	*/
-	bg_record->ionode_bitmap = bit_alloc(bg_conf->numpsets);
+	bg_record->ionode_bitmap = bit_alloc(bg_conf->ionodes_per_mp);
+	bg_record->mp_used_bitmap = bit_alloc(node_record_count);
 
-	len = strlen(blockreq->block);
+	len = strlen(blockreq->save_name);
 	i=0;
 	while (i<len
-	       && blockreq->block[i] != '['
-	       && (blockreq->block[i] < '0' || blockreq->block[i] > 'Z'
-		   || (blockreq->block[i] > '9' && blockreq->block[i] < 'A')))
+	       && blockreq->save_name[i] != '['
+	       && (blockreq->save_name[i] < '0' || blockreq->save_name[i] > 'Z'
+		   || (blockreq->save_name[i] > '9'
+		       && blockreq->save_name[i] < 'A')))
 		i++;
 
 	if (i<len) {
 		len -= i;
 
 		len += strlen(bg_conf->slurm_node_prefix)+1;
-		bg_record->nodes = xmalloc(len);
-		snprintf(bg_record->nodes, len, "%s%s",
-			 bg_conf->slurm_node_prefix, blockreq->block+i);
+		bg_record->mp_str = xmalloc(len);
+		snprintf(bg_record->mp_str, len, "%s%s",
+			 bg_conf->slurm_node_prefix, blockreq->save_name+i);
 	} else
-		fatal("add_bg_record: BPs=%s is in a weird format",
-		      blockreq->block);
+		fatal("add_bg_record: MPs=%s is in a weird format",
+		      blockreq->save_name);
 
 	process_nodes(bg_record, false);
 
 #ifdef HAVE_BGL
 	bg_record->node_use = SELECT_COPROCESSOR_MODE;
 #endif
-	bg_record->conn_type = blockreq->conn_type;
-	bg_record->cpu_cnt = bg_conf->cpus_per_bp * bg_record->bp_count;
-	bg_record->node_cnt = bg_conf->bp_node_cnt * bg_record->bp_count;
+	memcpy(bg_record->conn_type, blockreq->conn_type,
+	       sizeof(bg_record->conn_type));
+
+	bg_record->cpu_cnt = bg_conf->cpus_per_mp * bg_record->mp_count;
+	bg_record->cnode_cnt = bg_conf->mp_cnode_cnt * bg_record->mp_count;
 	bg_record->job_running = NO_JOB_RUNNING;
 
 #ifdef HAVE_BGL
@@ -786,38 +719,39 @@ extern int add_bg_record(List records, List used_nodes, blockreq_t *blockreq,
 	else
 		bg_record->blrtsimage = xstrdup(bg_conf->default_blrtsimage);
 #endif
+
+#ifdef HAVE_BG_L_P
 	if (blockreq->linuximage)
 		bg_record->linuximage = xstrdup(blockreq->linuximage);
 	else
 		bg_record->linuximage = xstrdup(bg_conf->default_linuximage);
-
-	if (blockreq->mloaderimage)
-		bg_record->mloaderimage = xstrdup(blockreq->mloaderimage);
-	else
-		bg_record->mloaderimage =
-			xstrdup(bg_conf->default_mloaderimage);
-
 	if (blockreq->ramdiskimage)
 		bg_record->ramdiskimage = xstrdup(blockreq->ramdiskimage);
 	else
 		bg_record->ramdiskimage =
 			xstrdup(bg_conf->default_ramdiskimage);
+#endif
+	if (blockreq->mloaderimage)
+		bg_record->mloaderimage = xstrdup(blockreq->mloaderimage);
+	else
+		bg_record->mloaderimage =
+			xstrdup(bg_conf->default_mloaderimage);
 
-	if (bg_record->conn_type != SELECT_SMALL) {
+	if (bg_record->conn_type[0] < SELECT_SMALL) {
 		/* this needs to be an append so we keep things in the
 		   order we got them, they will be sorted later */
 		list_append(records, bg_record);
 		/* this isn't a correct list so we need to set it later for
-		   now we just used it to be the bp number */
+		   now we just used it to be the mp number */
 		if (!used_nodes) {
 			debug4("add_bg_record: "
 			       "we didn't get a request list so we are "
-			       "destroying this bp list");
-			list_destroy(bg_record->bg_block_list);
-			bg_record->bg_block_list = NULL;
+			       "destroying this mp list");
+			list_destroy(bg_record->ba_mp_list);
+			bg_record->ba_mp_list = NULL;
 		}
 	} else {
-		List bg_block_list = NULL;
+		List ba_mp_list = NULL;
 
 		if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK)
 			info("add_bg_record: adding a small block");
@@ -832,26 +766,29 @@ extern int add_bg_record(List records, List used_nodes, blockreq_t *blockreq,
 				      "There is an error in your "
 				      "bluegene.conf file.\n"
 				      "Can't create a 32 node block with "
-				      "Numpsets=%u. (Try setting it "
+				      "IonodesPerMP=%u. (Try setting it "
 				      "to at least 16)",
-				      bg_conf->numpsets);
-#ifndef HAVE_BGL
+				      bg_conf->ionodes_per_mp);
+#ifdef HAVE_BGP
 			if (blockreq->small16)
 				fatal("add_bg_record: "
 				      "There is an error in your "
 				      "bluegene.conf file.\n"
 				      "Can't create a 16 node block with "
-				      "Numpsets=%u. (Try setting it to "
+				      "IonodesPerMP=%u. (Try setting it to "
 				      "at least 32)",
-				      bg_conf->numpsets);
+				      bg_conf->ionodes_per_mp);
+#endif
+
+#ifndef HAVE_BGL
 			if ((bg_conf->io_ratio < 0.5) && blockreq->small64)
 				fatal("add_bg_record: "
 				      "There is an error in your "
 				      "bluegene.conf file.\n"
 				      "Can't create a 64 node block with "
-				      "Numpsets=%u. (Try setting it "
+				      "IonodesPerMP=%u. (Try setting it "
 				      "to at least 8)",
-				      bg_conf->numpsets);
+				      bg_conf->ionodes_per_mp);
 #endif
 		}
 
@@ -863,17 +800,16 @@ extern int add_bg_record(List records, List used_nodes, blockreq_t *blockreq,
 			blockreq->small128=4;
 		}
 
-		i = (blockreq->small32*bg_conf->nodecard_node_cnt) +
-			(blockreq->small128*bg_conf->quarter_node_cnt);
-		if (i != bg_conf->bp_node_cnt)
+		i = (blockreq->small32*bg_conf->nodecard_cnode_cnt) +
+			(blockreq->small128*bg_conf->quarter_cnode_cnt);
+		if (i != bg_conf->mp_cnode_cnt)
 			fatal("add_bg_record: "
 			      "There is an error in your bluegene.conf file.\n"
 			      "I am unable to request %d nodes consisting of "
 			      "%u 32CnBlocks and\n%u 128CnBlocks in one "
 			      "base partition with %u nodes.",
 			      i, blockreq->small32, blockreq->small128,
-			      bg_conf->bp_node_cnt);
-		small_count = blockreq->small32+blockreq->small128;
+			      bg_conf->mp_cnode_cnt);
 #else
 		if (!blockreq->small16 && !blockreq->small32
 		    && !blockreq->small64 && !blockreq->small128
@@ -889,7 +825,7 @@ extern int add_bg_record(List records, List used_nodes, blockreq_t *blockreq,
 			+ (blockreq->small64*64)
 			+ (blockreq->small128*128)
 			+ (blockreq->small256*256);
-		if (i != bg_conf->bp_node_cnt)
+		if (i != bg_conf->mp_cnode_cnt)
 			fatal("add_bg_record: "
 			      "There is an error in your bluegene.conf file.\n"
 			      "I am unable to request %d nodes consisting of "
@@ -899,12 +835,7 @@ extern int add_bg_record(List records, List used_nodes, blockreq_t *blockreq,
 			      "in one base partition with %u nodes.",
 			      i, blockreq->small16, blockreq->small32,
 			      blockreq->small64, blockreq->small128,
-			      blockreq->small256, bg_conf->bp_node_cnt);
-		small_count = blockreq->small16
-			+ blockreq->small32
-			+ blockreq->small64
-			+ blockreq->small128
-			+ blockreq->small256;
+			      blockreq->small256, bg_conf->mp_cnode_cnt);
 #endif
 	no_check:
 		/* Automatically create 2-way split if
@@ -912,34 +843,33 @@ extern int add_bg_record(List records, List used_nodes, blockreq_t *blockreq,
 		 * Here we go through each node listed and do the same thing
 		 * for each node.
 		 */
-		bg_block_list = bg_record->bg_block_list;
-		bg_record->bg_block_list = list_create(NULL);
-		itr = list_iterator_create(bg_block_list);
-		while ((ba_node = list_next(itr)) != NULL) {
-			xfree(bg_record->nodes);
-			bg_record->nodes = xstrdup_printf(
-				"%s%c%c%c",
+		ba_mp_list = bg_record->ba_mp_list;
+		bg_record->ba_mp_list = list_create(NULL);
+		itr = list_iterator_create(ba_mp_list);
+		while ((ba_mp = list_next(itr)) != NULL) {
+			xfree(bg_record->mp_str);
+			bg_record->mp_str = xstrdup_printf(
+				"%s%s",
 				bg_conf->slurm_node_prefix,
-				alpha_num[ba_node->coord[X]],
-				alpha_num[ba_node->coord[Y]],
-				alpha_num[ba_node->coord[Z]]);
-			list_append(bg_record->bg_block_list, ba_node);
+				ba_mp->coord_str);
+			list_append(bg_record->ba_mp_list, ba_mp);
 			handle_small_record_request(records, blockreq,
 						    bg_record, io_start);
-			list_flush(bg_record->bg_block_list);
+			list_flush(bg_record->ba_mp_list);
 		}
 		list_iterator_destroy(itr);
 		destroy_bg_record(bg_record);
-		list_destroy(bg_block_list);
+		list_destroy(ba_mp_list);
 	}
 
 	return SLURM_SUCCESS;
 }
 
-extern int handle_small_record_request(List records, blockreq_t *blockreq,
+extern int handle_small_record_request(List records,
+				       select_ba_request_t *blockreq,
 				       bg_record_t *bg_record, bitoff_t start)
 {
-	bitstr_t *ionodes = bit_alloc(bg_conf->numpsets);
+	bitstr_t *ionodes = bit_alloc(bg_conf->ionodes_per_mp);
 	int i=0, ionode_cnt = 0;
 	bg_record_t *found_record = NULL;
 
@@ -948,7 +878,7 @@ extern int handle_small_record_request(List records, blockreq_t *blockreq,
 	xassert(bg_record);
 
 	xassert(start >= 0);
-	xassert(start < bg_conf->numpsets);
+	xassert(start < bg_conf->ionodes_per_mp);
 
 #ifndef HAVE_BGL
 	for(i=0; i<blockreq->small16; i++) {
@@ -1017,7 +947,6 @@ extern int handle_small_record_request(List records, blockreq_t *blockreq,
 	}
 #endif
 
-
 	FREE_NULL_BITMAP(ionodes);
 
 	return SLURM_SUCCESS;
@@ -1025,12 +954,12 @@ extern int handle_small_record_request(List records, blockreq_t *blockreq,
 
 extern int format_node_name(bg_record_t *bg_record, char *buf, int buf_size)
 {
-	if (bg_record->ionodes) {
+	if (bg_record->ionode_str) {
 		snprintf(buf, buf_size, "%s[%s]",
-			 bg_record->nodes,
-			 bg_record->ionodes);
+			 bg_record->mp_str,
+			 bg_record->ionode_str);
 	} else {
-		snprintf(buf, buf_size, "%s", bg_record->nodes);
+		snprintf(buf, buf_size, "%s", bg_record->mp_str);
 	}
 	return SLURM_SUCCESS;
 }
@@ -1040,7 +969,7 @@ extern int format_node_name(bg_record_t *bg_record, char *buf, int buf_size)
  * slurm_drain_node, or slurm_fail_job so if slurmctld_locked is called we
  * will call the functions without locking the locks again.
  */
-extern int down_nodecard(char *bp_name, bitoff_t io_start,
+extern int down_nodecard(char *mp_name, bitoff_t io_start,
 			 bool slurmctld_locked)
 {
 	List requests = NULL;
@@ -1049,14 +978,14 @@ extern int down_nodecard(char *bp_name, bitoff_t io_start,
 	bg_record_t *bg_record = NULL, *found_record = NULL, tmp_record;
 	bg_record_t *smallest_bg_record = NULL;
 	struct node_record *node_ptr = NULL;
-	int bp_bit = 0;
+	int mp_bit = 0;
 	static int io_cnt = NO_VAL;
 	static int create_size = NO_VAL;
-	static blockreq_t blockreq;
+	static select_ba_request_t blockreq;
 	int rc = SLURM_SUCCESS;
 	char *reason = "select_bluegene: nodecard down";
 
-	xassert(bp_name);
+	xassert(mp_name);
 
 	if (io_cnt == NO_VAL) {
 		io_cnt = 1;
@@ -1066,52 +995,52 @@ extern int down_nodecard(char *bp_name, bitoff_t io_start,
 
 		/* make sure we create something that is able to be
 		   created */
-		if (bg_conf->smallest_block < bg_conf->nodecard_node_cnt)
-			create_size = bg_conf->nodecard_node_cnt;
+		if (bg_conf->smallest_block < bg_conf->nodecard_cnode_cnt)
+			create_size = bg_conf->nodecard_cnode_cnt;
 		else
 			create_size = bg_conf->smallest_block;
 	}
 
-	node_ptr = find_node_record(bp_name);
+	node_ptr = find_node_record(mp_name);
 	if (!node_ptr) {
 		error ("down_sub_node_blocks: invalid node specified '%s'",
-		       bp_name);
+		       mp_name);
 		return EINVAL;
 	}
 
 	/* this is here for sanity check to make sure we don't core on
 	   these bits when we set them below. */
-	if (io_start >= bg_conf->numpsets
-	    || (io_start+io_cnt) >= bg_conf->numpsets) {
+	if (io_start >= bg_conf->ionodes_per_mp
+	    || (io_start+io_cnt) >= bg_conf->ionodes_per_mp) {
 		debug("io %d-%d not configured on this "
 		      "system, only %d ionodes per midplane",
-		      io_start, io_start+io_cnt, bg_conf->numpsets);
+		      io_start, io_start+io_cnt, bg_conf->ionodes_per_mp);
 		return EINVAL;
 	}
-	bp_bit = (node_ptr - node_record_table_ptr);
+	mp_bit = (node_ptr - node_record_table_ptr);
 
-	memset(&blockreq, 0, sizeof(blockreq_t));
+	memset(&blockreq, 0, sizeof(select_ba_request_t));
 
-	blockreq.conn_type = SELECT_SMALL;
-	blockreq.block = bp_name;
+	blockreq.conn_type[0] = SELECT_SMALL;
+	blockreq.save_name = mp_name;
 
 	debug3("here setting node %d of %d and ionodes %d-%d of %d",
-	       bp_bit, node_record_count, io_start,
-	       io_start+io_cnt, bg_conf->numpsets);
+	       mp_bit, node_record_count, io_start,
+	       io_start+io_cnt, bg_conf->ionodes_per_mp);
 
 	memset(&tmp_record, 0, sizeof(bg_record_t));
-	tmp_record.bp_count = 1;
-	tmp_record.node_cnt = bg_conf->nodecard_node_cnt;
-	tmp_record.bitmap = bit_alloc(node_record_count);
-	bit_set(tmp_record.bitmap, bp_bit);
+	tmp_record.mp_count = 1;
+	tmp_record.cnode_cnt = bg_conf->nodecard_cnode_cnt;
+	tmp_record.mp_bitmap = bit_alloc(node_record_count);
+	bit_set(tmp_record.mp_bitmap, mp_bit);
 
-	tmp_record.ionode_bitmap = bit_alloc(bg_conf->numpsets);
+	tmp_record.ionode_bitmap = bit_alloc(bg_conf->ionodes_per_mp);
 	bit_nset(tmp_record.ionode_bitmap, io_start, io_start+io_cnt);
 
 	slurm_mutex_lock(&block_state_mutex);
 	itr = list_iterator_create(bg_lists->main);
 	while ((bg_record = list_next(itr))) {
-		if (!bit_test(bg_record->bitmap, bp_bit))
+		if (!bit_test(bg_record->mp_bitmap, mp_bit))
 			continue;
 
 		if (!blocks_overlap(bg_record, &tmp_record))
@@ -1124,11 +1053,11 @@ extern int down_nodecard(char *bp_name, bitoff_t io_start,
 				slurm_fail_job(bg_record->job_running);
 
 		}
-		/* If Running Dynamic mode and the the block is
+		/* If Running Dynamic mode and the block is
 		   smaller than the create size just continue on.
 		*/
 		if ((bg_conf->layout_mode == LAYOUT_DYNAMIC)
-		    && (bg_record->node_cnt < create_size)) {
+		    && (bg_record->cnode_cnt < create_size)) {
 			if (!delete_list)
 				delete_list = list_create(NULL);
 			list_append(delete_list, bg_record);
@@ -1138,7 +1067,7 @@ extern int down_nodecard(char *bp_name, bitoff_t io_start,
 		/* keep track of the smallest size that is at least
 		   the size of create_size. */
 		if (!smallest_bg_record ||
-		    (smallest_bg_record->node_cnt > bg_record->node_cnt))
+		    (smallest_bg_record->cnode_cnt > bg_record->cnode_cnt))
 			smallest_bg_record = bg_record;
 	}
 	list_iterator_destroy(itr);
@@ -1155,25 +1084,25 @@ extern int down_nodecard(char *bp_name, bitoff_t io_start,
 		   opposed to draining the node.
 		*/
 		if (smallest_bg_record
-		    && (smallest_bg_record->node_cnt < bg_conf->bp_node_cnt)){
-			if (smallest_bg_record->state == RM_PARTITION_ERROR) {
+		    && (smallest_bg_record->cnode_cnt < bg_conf->mp_cnode_cnt)){
+			if (smallest_bg_record->state & BG_BLOCK_ERROR_FLAG) {
 				rc = SLURM_NO_CHANGE_IN_DATA;
 				goto cleanup;
 			}
 
 			rc = put_block_in_error_state(
-				smallest_bg_record, BLOCK_ERROR_STATE, reason);
+				smallest_bg_record, reason);
 			goto cleanup;
 		}
 
 		debug("No block under 1 midplane available for this nodecard.  "
 		      "Draining the whole node.");
-		if (!node_already_down(bp_name)) {
+		if (!node_already_down(mp_name)) {
 			if (slurmctld_locked)
-				drain_nodes(bp_name, reason,
+				drain_nodes(mp_name, reason,
 					    slurm_get_slurm_user_id());
 			else
-				slurm_drain_nodes(bp_name, reason,
+				slurm_drain_nodes(mp_name, reason,
 						  slurm_get_slurm_user_id());
 		}
 		rc = SLURM_SUCCESS;
@@ -1184,7 +1113,7 @@ extern int down_nodecard(char *bp_name, bitoff_t io_start,
 
 	if (delete_list) {
 		int cnt_set = 0;
-		bitstr_t *iobitmap = bit_alloc(bg_conf->numpsets);
+		bitstr_t *iobitmap = bit_alloc(bg_conf->ionodes_per_mp);
 		/* don't lock here since it is handled inside
 		   the put_block_in_error_state
 		*/
@@ -1211,11 +1140,11 @@ extern int down_nodecard(char *bp_name, bitoff_t io_start,
 		   block) set io_start = 0. */
 		if ((io_start = bit_ffs(iobitmap)) == -1) {
 			io_start = 0;
-			if (create_size > bg_conf->nodecard_node_cnt)
+			if (create_size > bg_conf->nodecard_cnode_cnt)
 				blockreq.small128 = 4;
 			else
 				blockreq.small32 = 16;
-		} else if (create_size <= bg_conf->nodecard_node_cnt)
+		} else if (create_size <= bg_conf->nodecard_cnode_cnt)
 			blockreq.small32 = 1;
 		else
 			/* this should never happen */
@@ -1225,7 +1154,7 @@ extern int down_nodecard(char *bp_name, bitoff_t io_start,
 	} else if (smallest_bg_record) {
 		debug2("smallest dynamic block is %s",
 		       smallest_bg_record->bg_block_id);
-		if (smallest_bg_record->state == RM_PARTITION_ERROR) {
+		if (smallest_bg_record->state & BG_BLOCK_ERROR_FLAG) {
 			rc = SLURM_NO_CHANGE_IN_DATA;
 			goto cleanup;
 		}
@@ -1233,23 +1162,23 @@ extern int down_nodecard(char *bp_name, bitoff_t io_start,
 		while (smallest_bg_record->job_running > NO_JOB_RUNNING)
 			sleep(1);
 
-		if (smallest_bg_record->node_cnt == create_size) {
+		if (smallest_bg_record->cnode_cnt == create_size) {
 			rc = put_block_in_error_state(
-				smallest_bg_record, BLOCK_ERROR_STATE, reason);
+				smallest_bg_record, reason);
 			goto cleanup;
 		}
 
-		if (create_size > smallest_bg_record->node_cnt) {
+		if (create_size > smallest_bg_record->cnode_cnt) {
 			/* we should never get here.  This means we
 			 * have a create_size that is bigger than a
 			 * block that is already made.
 			 */
 			rc = put_block_in_error_state(
-				smallest_bg_record, BLOCK_ERROR_STATE, reason);
+				smallest_bg_record, reason);
 			goto cleanup;
 		}
-		debug3("node count is %d", smallest_bg_record->node_cnt);
-		switch(smallest_bg_record->node_cnt) {
+		debug3("node count is %d", smallest_bg_record->cnode_cnt);
+		switch(smallest_bg_record->cnode_cnt) {
 #ifndef HAVE_BGL
 		case 64:
 			blockreq.small32 = 2;
@@ -1267,7 +1196,7 @@ extern int down_nodecard(char *bp_name, bitoff_t io_start,
 			break;
 		}
 
-		if (create_size != bg_conf->nodecard_node_cnt) {
+		if (create_size != bg_conf->nodecard_cnode_cnt) {
 			blockreq.small128 = blockreq.small32 / 4;
 			blockreq.small32 = 0;
 			io_start = 0;
@@ -1293,14 +1222,14 @@ extern int down_nodecard(char *bp_name, bitoff_t io_start,
 			blockreq.small128 = 4;
 			break;
 		case 512:
-			if (!node_already_down(bp_name)) {
+			if (!node_already_down(mp_name)) {
 				char *reason = "select_bluegene: nodecard down";
 				if (slurmctld_locked)
-					drain_nodes(bp_name, reason,
+					drain_nodes(mp_name, reason,
 						    slurm_get_slurm_user_id());
 				else
 					slurm_drain_nodes(
-						bp_name, reason,
+						mp_name, reason,
 						slurm_get_slurm_user_id());
 			}
 			rc = SLURM_SUCCESS;
@@ -1338,7 +1267,7 @@ extern int down_nodecard(char *bp_name, bitoff_t io_start,
 		list_iterator_destroy(itr);
 
 		/* we need to add this record since it doesn't exist */
-		if (configure_block(bg_record) == SLURM_ERROR) {
+		if (bridge_block_create(bg_record) == SLURM_ERROR) {
 			destroy_bg_record(bg_record);
 			error("down_sub_node_blocks: "
 			      "unable to configure block in api");
@@ -1355,8 +1284,7 @@ extern int down_nodecard(char *bp_name, bitoff_t io_start,
 			/* here we know the error block doesn't exist
 			   so just set the state here */
 			slurm_mutex_unlock(&block_state_mutex);
-			rc = put_block_in_error_state(
-				bg_record, BLOCK_ERROR_STATE, reason);
+			rc = put_block_in_error_state(bg_record, reason);
 			slurm_mutex_lock(&block_state_mutex);
 		}
 	}
@@ -1373,38 +1301,38 @@ extern int down_nodecard(char *bp_name, bitoff_t io_start,
 	last_bg_update = time(NULL);
 
 cleanup:
-	FREE_NULL_BITMAP(tmp_record.bitmap);
+	FREE_NULL_BITMAP(tmp_record.mp_bitmap);
 	FREE_NULL_BITMAP(tmp_record.ionode_bitmap);
 
 	return rc;
 
 }
 
-extern int up_nodecard(char *bp_name, bitstr_t *ionode_bitmap)
+extern int up_nodecard(char *mp_name, bitstr_t *ionode_bitmap)
 {
 	ListIterator itr = NULL;
 	bg_record_t *bg_record = NULL;
 	struct node_record *node_ptr = NULL;
-	int bp_bit = 0;
+	int mp_bit = 0;
 	int ret = 0;
 
-	xassert(bp_name);
+	xassert(mp_name);
 	xassert(ionode_bitmap);
 
-	node_ptr = find_node_record(bp_name);
+	node_ptr = find_node_record(mp_name);
 	if (!node_ptr) {
 		error ("down_sub_node_blocks: invalid node specified %s",
-		       bp_name);
+		       mp_name);
 		return EINVAL;
 	}
-	bp_bit = (node_ptr - node_record_table_ptr);
+	mp_bit = (node_ptr - node_record_table_ptr);
 
 	slurm_mutex_lock(&block_state_mutex);
 	itr = list_iterator_create(bg_lists->main);
 	while ((bg_record = list_next(itr))) {
 		if (bg_record->job_running != BLOCK_ERROR_STATE)
 			continue;
-		if (!bit_test(bg_record->bitmap, bp_bit))
+		if (!bit_test(bg_record->mp_bitmap, mp_bit))
 			continue;
 
 		if (!bit_overlap(bg_record->ionode_bitmap, ionode_bitmap)) {
@@ -1418,12 +1346,12 @@ extern int up_nodecard(char *bp_name, bitstr_t *ionode_bitmap)
 	/* FIX ME: This needs to call the opposite of
 	   slurm_drain_nodes which does not yet exist.
 	*/
-	if ((ret = node_already_down(bp_name))) {
+	if ((ret = node_already_down(mp_name))) {
 		/* means it was drained */
 		if (ret == 2) {
 			/* debug("node %s put back into service after " */
 /* 			      "being in an error state", */
-/* 			      bp_name); */
+/* 			      mp_name); */
 		}
 	}
 
@@ -1431,8 +1359,7 @@ extern int up_nodecard(char *bp_name, bitstr_t *ionode_bitmap)
 }
 
 /* block_state_mutex must be unlocked before calling this. */
-extern int put_block_in_error_state(bg_record_t *bg_record,
-				    int state, char *reason)
+extern int put_block_in_error_state(bg_record_t *bg_record, char *reason)
 {
 	uid_t pw_uid;
 
@@ -1467,8 +1394,6 @@ extern int put_block_in_error_state(bg_record_t *bg_record,
 		return SLURM_ERROR;
 	}
 
-	info("Setting Block %s to ERROR state. (reason: '%s')",
-	     bg_record->bg_block_id, reason);
 	/* we add the block to these lists so we don't try to schedule
 	   on them. */
 	if (!block_ptr_exist_in_list(bg_lists->job_running, bg_record)) {
@@ -1478,14 +1403,13 @@ extern int put_block_in_error_state(bg_record_t *bg_record,
 	if (!block_ptr_exist_in_list(bg_lists->booted, bg_record))
 		list_push(bg_lists->booted, bg_record);
 
-	bg_record->job_running = state;
-	bg_record->state = RM_PARTITION_ERROR;
+	bg_record->job_running = BLOCK_ERROR_STATE;
+	bg_record->state |= BG_BLOCK_ERROR_FLAG;
 
 	xfree(bg_record->user_name);
 	xfree(bg_record->target_name);
 	bg_record->user_name = xstrdup(bg_conf->slurm_user_name);
 	bg_record->target_name = xstrdup(bg_conf->slurm_user_name);
-	bg_record->reason = xstrdup(reason);
 
 	if (uid_from_string (bg_record->user_name, &pw_uid) < 0)
 		error("No such user: %s", bg_record->user_name);
@@ -1494,8 +1418,15 @@ extern int put_block_in_error_state(bg_record_t *bg_record,
 
 	/* Only send if reason is set.  If it isn't set then
 	   accounting should already know about this error state */
-	if (reason)
+	if (reason) {
+		info("Setting Block %s to ERROR state. (reason: '%s')",
+		     bg_record->bg_block_id, reason);
+		xfree(bg_record->reason);
+		bg_record->reason = xstrdup(reason);
 		_set_block_nodes_accounting(bg_record, reason);
+	}
+
+	last_bg_update = time(NULL);
 	slurm_mutex_unlock(&block_state_mutex);
 
 	trigger_block_error();
@@ -1510,28 +1441,21 @@ extern int resume_block(bg_record_t *bg_record)
 	if (bg_record->job_running > NO_JOB_RUNNING)
 		return SLURM_SUCCESS;
 
-	if (bg_record->state == RM_PARTITION_ERROR) {
-		/* Since we are resuming we don't want to set it to
-		   something that could get us in trouble later, so
-		   just set it to NAV and the poller will update it
-		   next time around.
-		*/
-		bg_record->state = RM_PARTITION_NAV;
+	if (bg_record->state & BG_BLOCK_ERROR_FLAG) {
+		bg_record->state &= (~BG_BLOCK_ERROR_FLAG);
 		info("Block %s put back into service after "
 		     "being in an error state.",
 		     bg_record->bg_block_id);
 	}
 
-#ifndef HAVE_BG_FILES
-	/* On a simulated system the block is always returned free. */
-	bg_record->state = RM_PARTITION_FREE;
-#endif
-
 	if (remove_from_bg_list(bg_lists->job_running, bg_record)
 	    == SLURM_SUCCESS)
 		num_unused_cpus += bg_record->cpu_cnt;
-	if (bg_record->state != RM_PARTITION_READY)
+
+	if (bg_record->state != BG_BLOCK_INITED)
 		remove_from_bg_list(bg_lists->booted, bg_record);
+	else if (!block_ptr_exist_in_list(bg_lists->booted, bg_record))
+		list_push(bg_lists->booted, bg_record);
 
 	bg_record->job_running = NO_JOB_RUNNING;
 	xfree(bg_record->reason);
@@ -1542,6 +1466,58 @@ extern int resume_block(bg_record_t *bg_record)
 	return SLURM_SUCCESS;
 }
 
+/* block_state_mutex should be locked before calling this function */
+extern int bg_reset_block(bg_record_t *bg_record)
+{
+	int rc = SLURM_SUCCESS;
+	if (bg_record) {
+		if (bg_record->job_running > NO_JOB_RUNNING) {
+			bg_record->job_running = NO_JOB_RUNNING;
+			bg_record->job_ptr = NULL;
+		}
+		/* remove user from list */
+
+		if (bg_record->target_name) {
+			if (strcmp(bg_record->target_name,
+				  bg_conf->slurm_user_name)) {
+				xfree(bg_record->target_name);
+				bg_record->target_name =
+					xstrdup(bg_conf->slurm_user_name);
+			}
+			update_block_user(bg_record, 1);
+		} else {
+			bg_record->target_name =
+				xstrdup(bg_conf->slurm_user_name);
+		}
+
+
+		/* Don't reset these (boot_(state/count)), they will be
+		   reset when state changes, and needs to outlast a job
+		   allocation.
+		*/
+		/* bg_record->boot_state = 0; */
+		/* bg_record->boot_count = 0; */
+
+		last_bg_update = time(NULL);
+		/* Only remove from the job_running list if
+		   job_running == NO_JOB_RUNNING, since blocks in
+		   error state could also be in this list and we don't
+		   want to remove them.
+		*/
+		if (bg_record->job_running == NO_JOB_RUNNING)
+			if (remove_from_bg_list(bg_lists->job_running,
+					       bg_record)
+			   == SLURM_SUCCESS) {
+				num_unused_cpus += bg_record->cpu_cnt;
+			}
+	} else {
+		error("No block given to reset");
+		rc = SLURM_ERROR;
+	}
+
+	return rc;
+}
+
 /************************* local functions ***************************/
 
 /* block_state_mutex should be locked before calling */
@@ -1573,12 +1549,12 @@ static int _check_all_blocks_error(int node_inx, time_t event_time,
 	itr = list_iterator_create(bg_lists->main);
 	while ((bg_record = list_next(itr))) {
 		/* only look at other nodes in error state */
-		if (bg_record->state != RM_PARTITION_ERROR)
+		if (!(bg_record->state & BG_BLOCK_ERROR_FLAG))
 			continue;
-		if (!bit_test(bg_record->bitmap, node_inx))
+		if (!bit_test(bg_record->mp_bitmap, node_inx))
 			continue;
-		if (bg_record->cpu_cnt >= bg_conf->cpus_per_bp) {
-			total_cpus = bg_conf->cpus_per_bp;
+		if (bg_record->cpu_cnt >= bg_conf->cpus_per_mp) {
+			total_cpus = bg_conf->cpus_per_mp;
 			break;
 		} else
 			total_cpus += bg_record->cpu_cnt;
@@ -1590,23 +1566,21 @@ static int _check_all_blocks_error(int node_inx, time_t event_time,
 
 	if (send_node.cpus) {
 		if (!reason)
-			reason = "update block: setting partial node down.";
-		if (!node_ptr->reason)
-			node_ptr->reason = xstrdup(reason);
-		node_ptr->reason_time = event_time;
-		node_ptr->reason_uid = slurm_get_slurm_user_id();
+			reason = "update_block: setting partial node down.";
 
+		if (!node_ptr->reason
+		    || !strncmp(node_ptr->reason, "update_block", 12)) {
+			xfree(node_ptr->reason);
+			node_ptr->reason = xstrdup(reason);
+			node_ptr->reason_time = event_time;
+			node_ptr->reason_uid = slurm_get_slurm_user_id();
+		}
 		send_node.node_state = NODE_STATE_ERROR;
 		rc = clusteracct_storage_g_node_down(acct_db_conn,
 						     &send_node, event_time,
 						     reason,
 						     node_ptr->reason_uid);
 	} else {
-		if (node_ptr->reason)
-			xfree(node_ptr->reason);
-		node_ptr->reason_time = 0;
-		node_ptr->reason_uid = NO_VAL;
-
 		send_node.node_state = NODE_STATE_IDLE;
 		rc = clusteracct_storage_g_node_up(acct_db_conn,
 						   &send_node, event_time);
@@ -1626,7 +1600,7 @@ static int _set_block_nodes_accounting(bg_record_t *bg_record, char *reason)
 	int i = 0;
 
 	for(i = 0; i < node_record_count; i++) {
-		if (!bit_test(bg_record->bitmap, i))
+		if (!bit_test(bg_record->mp_bitmap, i))
 			continue;
 		rc = _check_all_blocks_error(i, now, reason);
 	}
@@ -1634,77 +1608,104 @@ static int _set_block_nodes_accounting(bg_record_t *bg_record, char *reason)
 	return rc;
 }
 
-static int _addto_node_list(bg_record_t *bg_record, int *start, int *end)
+static void _append_ba_mps(List my_list, int dim,
+			   uint16_t *start, uint16_t *end, uint16_t *coords)
 {
-	int node_count=0;
-	int x,y,z;
-	char node_name_tmp[255];
-	ba_node_t *ba_node = NULL;
-
-	if ((start[X] < 0) || (start[Y] < 0) || (start[Z] < 0)) {
-		fatal("bluegene.conf starting coordinate is invalid: %d%d%d",
-		      start[X], start[Y], start[Z]);
+	ba_mp_t *curr_mp;
+
+	if (dim > SYSTEM_DIMENSIONS)
+		return;
+	if (dim < SYSTEM_DIMENSIONS) {
+		for (coords[dim] = start[dim];
+		     coords[dim] <= end[dim];
+		     coords[dim]++) {
+			/* handle the outter dims here */
+			_append_ba_mps(my_list, dim+1, start, end, coords);
+		}
+		return;
 	}
-	if ((end[X] >= DIM_SIZE[X]) || (end[Y] >= DIM_SIZE[Y])
-	    ||  (end[Z] >= DIM_SIZE[Z])) {
-		fatal("bluegene.conf matrix size exceeds space defined in "
-		      "slurm.conf %c%c%cx%d%d%d => %c%c%c",
-		      alpha_num[start[X]], alpha_num[start[Y]],
-		      alpha_num[start[Z]],
-		      end[X], end[Y], end[Z],
-		      alpha_num[DIM_SIZE[X]], alpha_num[DIM_SIZE[Y]],
-		      alpha_num[DIM_SIZE[Z]]);
+	curr_mp = ba_copy_mp(coord2ba_mp(coords));
+	if (curr_mp) {
+		curr_mp->used = 1;
+		list_append(my_list, curr_mp);
 	}
-	debug3("adding bps: %c%c%cx%c%c%c",
-	       alpha_num[start[X]], alpha_num[start[Y]], alpha_num[start[Z]],
-	       alpha_num[end[X]], alpha_num[end[Y]], alpha_num[end[Z]]);
-	debug3("slurm.conf:    %c%c%c",
-	       alpha_num[DIM_SIZE[X]], alpha_num[DIM_SIZE[Y]],
-	       alpha_num[DIM_SIZE[Z]]);
-
-	for (x = start[X]; x <= end[X]; x++) {
-		for (y = start[Y]; y <= end[Y]; y++) {
-			for (z = start[Z]; z <= end[Z]; z++) {
-
-				snprintf(node_name_tmp, sizeof(node_name_tmp),
-					 "%s%c%c%c",
-					 bg_conf->slurm_node_prefix,
-					 alpha_num[x], alpha_num[y],
-					 alpha_num[z]);
-
-				ba_node = ba_copy_node(
-					&ba_system_ptr->grid[x][y][z]);
-				ba_node->used = 1;
-				list_append(bg_record->bg_block_list, ba_node);
-				node_count++;
+}
+
+static void _addto_mp_list(bg_record_t *bg_record,
+			   uint16_t *start, uint16_t *end)
+{
+	uint16_t coords[SYSTEM_DIMENSIONS];
+	int dim;
+	static int *cluster_dims = NULL;
+	static char start_char[SYSTEM_DIMENSIONS+1],
+		end_char[SYSTEM_DIMENSIONS+1],
+		dim_char[SYSTEM_DIMENSIONS+1];
+
+	if (!cluster_dims) {
+		/* do some setup that only needs to happen once. */
+		cluster_dims = select_g_ba_get_dims();
+		memset(start_char, 0, sizeof(start_char));
+		memset(end_char, 0, sizeof(end_char));
+		memset(dim_char, 0, sizeof(dim_char));
+		for (dim = 0; dim<SYSTEM_DIMENSIONS; dim++)
+			dim_char[dim] = alpha_num[cluster_dims[dim]];
+	}
+
+	for (dim = 0; dim<SYSTEM_DIMENSIONS; dim++) {
+		if ((int16_t)start[dim] < 0) {
+			for (dim = 0; dim<SYSTEM_DIMENSIONS; dim++)
+				start_char[dim] = alpha_num[start[dim]];
+			fatal("bluegene.conf starting coordinate "
+			      "is invalid: %s",
+			      start_char);
+		}
+
+		if (end[dim] >= cluster_dims[dim]) {
+			for (dim = 0; dim<SYSTEM_DIMENSIONS; dim++) {
+				start_char[dim] = alpha_num[start[dim]];
+				end_char[dim] = alpha_num[end[dim]];
+				dim_char[dim] = alpha_num[cluster_dims[dim]];
 			}
+			fatal("bluegene.conf matrix size exceeds space "
+			      "defined in "
+			      "slurm.conf %sx%s => %s",
+			      start_char, end_char, dim_char);
+		}
+	}
+	if (bg_conf->slurm_debug_level >= LOG_LEVEL_DEBUG3) {
+		for (dim = 0; dim<SYSTEM_DIMENSIONS; dim++) {
+			start_char[dim] = alpha_num[start[dim]];
+			end_char[dim] = alpha_num[end[dim]];
 		}
+		debug3("adding mps: %sx%s", start_char, end_char);
+		debug3("slurm.conf:    %s", dim_char);
 	}
-	return node_count;
+	_append_ba_mps(bg_record->ba_mp_list, 0, start, end, coords);
+
 }
 
-static int _ba_node_cmpf_inc(ba_node_t *node_a, ba_node_t *node_b)
+static int _coord_cmpf_inc(uint16_t *coord_a, uint16_t *coord_b, int dim)
 {
-	if (node_a->coord[X] < node_b->coord[X])
+	if (dim >= SYSTEM_DIMENSIONS)
+		return 0;
+	else if (coord_a[dim] < coord_b[dim])
 		return -1;
-	else if (node_a->coord[X] > node_b->coord[X])
+	else if (coord_a[dim] > coord_b[dim])
 		return 1;
 
-	if (node_a->coord[Y] < node_b->coord[Y])
-		return -1;
-	else if (node_a->coord[Y] > node_b->coord[Y])
-		return 1;
+	return _coord_cmpf_inc(coord_a, coord_b, dim+1);
 
-	if (node_a->coord[Z] < node_b->coord[Z])
-		return -1;
-	else if (node_a->coord[Z] > node_b->coord[Z])
-		return 1;
+}
 
-	error("You have the node %c%c%c in the list twice",
-	      alpha_num[node_a->coord[X]],
-	      alpha_num[node_a->coord[Y]],
-	      alpha_num[node_a->coord[Z]]);
-	return 0;
+static int _ba_mp_cmpf_inc(ba_mp_t *mp_a, ba_mp_t *mp_b)
+{
+	int rc = _coord_cmpf_inc(mp_a->coord, mp_b->coord, 0);
+
+	if (!rc) {
+		error("You have the mp %s in the list twice",
+		      mp_a->coord_str);
+	}
+	return rc;
 }
 
 
diff --git a/src/plugins/select/bluegene/bg_record_functions.h b/src/plugins/select/bluegene/bg_record_functions.h
new file mode 100644
index 000000000..867be5ece
--- /dev/null
+++ b/src/plugins/select/bluegene/bg_record_functions.h
@@ -0,0 +1,90 @@
+/*****************************************************************************\
+ *  bg_record_functions.h - header for creating blocks in a static environment.
+ *
+ *  $Id: bg_record_functions.h 12954 2008-01-04 20:37:49Z da $
+ *****************************************************************************
+ *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Danny Auble <da@llnl.gov>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifndef _BLUEGENE_BG_RECORD_FUNCTIONS_H_
+#define _BLUEGENE_BG_RECORD_FUNCTIONS_H_
+
+#if HAVE_CONFIG_H
+#  include "config.h"
+#endif
+
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <pwd.h>
+
+#include "src/common/xstring.h"
+#include "src/common/bitstring.h"
+#include "src/common/hostlist.h"
+#include "src/common/list.h"
+#include "src/common/macros.h"
+#include "src/common/node_select.h"
+#include "src/common/parse_time.h"
+#include "src/slurmctld/slurmctld.h"
+#include "ba_common.h"
+
+/* Log a bg_record's contents */
+extern void print_bg_record(bg_record_t *record);
+extern void destroy_bg_record(void *object);
+extern void process_nodes(bg_record_t *bg_reord, bool startup);
+extern List copy_bg_list(List in_list);
+extern void copy_bg_record(bg_record_t *fir_record, bg_record_t *sec_record);
+extern int bg_record_cmpf_inc(bg_record_t *rec_a, bg_record_t *rec_b);
+extern int bg_record_sort_aval_inc(bg_record_t* rec_a, bg_record_t* rec_b);
+
+/* change username of a block bg_record_t target_name needs to be
+   updated before call of function.
+*/
+extern int set_block_user(bg_record_t *bg_record);
+extern int update_block_user(bg_record_t *bg_block_id, int set);
+extern void requeue_and_error(bg_record_t *bg_record, char *reason);
+
+extern int add_bg_record(List records, List *used_nodes, select_ba_request_t *blockreq,
+			 bool no_check, bitoff_t io_start);
+extern int handle_small_record_request(List records, select_ba_request_t *blockreq,
+				       bg_record_t *bg_record, bitoff_t start);
+
+extern int format_node_name(bg_record_t *bg_record, char *buf, int buf_size);
+extern int down_nodecard(char *bp_name, bitoff_t io_start,
+			 bool slurmctld_locked);
+extern int up_nodecard(char *bp_name, bitstr_t *ionode_bitmap);
+extern int put_block_in_error_state(bg_record_t *bg_record, char *reason);
+extern int resume_block(bg_record_t *bg_record);
+extern int bg_reset_block(bg_record_t *bg_record);
+
+#endif /* _BLUEGENE_BG_RECORD_FUNCTIONS_H_ */
diff --git a/src/plugins/select/bluegene/bg_status.c b/src/plugins/select/bluegene/bg_status.c
new file mode 100644
index 000000000..3f4429e6c
--- /dev/null
+++ b/src/plugins/select/bluegene/bg_status.c
@@ -0,0 +1,322 @@
+/*****************************************************************************\
+ *  bg_status.c
+ *
+ *****************************************************************************
+ *  Copyright (C) 2011 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Danny Auble <da@llnl.gov>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#include "bg_core.h"
+#include "src/slurmctld/proc_req.h"
+#include "src/slurmctld/trigger_mgr.h"
+#include "src/slurmctld/locks.h"
+
+#define RETRY_BOOT_COUNT 3
+
+typedef struct {
+	int jobid;
+} kill_job_struct_t;
+
+static void _destroy_kill_struct(void *object);
+
+static void _destroy_kill_struct(void *object)
+{
+	kill_job_struct_t *freeit = (kill_job_struct_t *)object;
+
+	if (freeit) {
+		xfree(freeit);
+	}
+}
+
+static int _block_is_deallocating(bg_record_t *bg_record, List kill_job_list)
+{
+	int jobid = bg_record->job_running;
+	char *user_name = NULL;
+
+	if (bg_record->modifying)
+		return SLURM_SUCCESS;
+
+	user_name = xstrdup(bg_conf->slurm_user_name);
+	if (bridge_block_remove_all_users(bg_record, NULL) == REMOVE_USER_ERR) {
+		error("Something happened removing users from block %s",
+		      bg_record->bg_block_id);
+	}
+
+	if (!bg_record->target_name) {
+		error("Target Name was not set for block %s.",
+		      bg_record->bg_block_id);
+		bg_record->target_name = xstrdup(bg_record->user_name);
+	}
+
+	if (!bg_record->user_name) {
+		error("User Name was not set for block %s.",
+		      bg_record->bg_block_id);
+		bg_record->user_name = xstrdup(user_name);
+	}
+
+	if (bg_record->boot_state) {
+		error("State went to free on a boot for block %s.",
+		      bg_record->bg_block_id);
+	} else if (jobid > NO_JOB_RUNNING) {
+		if (kill_job_list) {
+			kill_job_struct_t *freeit =
+				(kill_job_struct_t *)
+				xmalloc(sizeof(freeit));
+			freeit->jobid = jobid;
+			list_push(kill_job_list, freeit);
+		}
+
+		error("Block %s was in a ready state "
+		      "for user %s but is being freed. "
+		      "Job %d was lost.",
+		      bg_record->bg_block_id,
+		      bg_record->user_name,
+		      jobid);
+	} else {
+		debug("Block %s was in a ready state "
+		      "but is being freed. No job running.",
+		      bg_record->bg_block_id);
+	}
+
+	if (remove_from_bg_list(bg_lists->job_running, bg_record)
+	    == SLURM_SUCCESS)
+		num_unused_cpus += bg_record->cpu_cnt;
+	remove_from_bg_list(bg_lists->booted, bg_record);
+
+	xfree(user_name);
+
+	return SLURM_SUCCESS;
+}
+
+extern int bg_status_update_block_state(bg_record_t *bg_record,
+					uint16_t state,
+					List kill_job_list)
+{
+	bool skipped_dealloc = false;
+	kill_job_struct_t *freeit = NULL;
+	int updated = 0;
+	uint16_t real_state = bg_record->state & (~BG_BLOCK_ERROR_FLAG);
+
+	if (real_state == state)
+		return 0;
+
+	debug("state of Block %s was %s and now is %s",
+	      bg_record->bg_block_id,
+	      bg_block_state_string(bg_record->state),
+	      bg_block_state_string(state));
+
+	/*
+	  check to make sure block went
+	  through freeing correctly
+	*/
+	if ((real_state != BG_BLOCK_TERM
+	     && !(bg_record->state & BG_BLOCK_ERROR_FLAG))
+	    && state == BG_BLOCK_FREE)
+		skipped_dealloc = 1;
+	else if ((real_state == BG_BLOCK_INITED)
+		 && (state == BG_BLOCK_BOOTING)) {
+		/* This means the user did a reboot through
+		   mpirun but we missed the state
+		   change */
+		debug("Block %s skipped rebooting, "
+		      "but it really is.  "
+		      "Setting target_name back to %s",
+		      bg_record->bg_block_id,
+		      bg_record->user_name);
+		xfree(bg_record->target_name);
+		bg_record->target_name = xstrdup(bg_record->user_name);
+	} else if ((real_state == BG_BLOCK_TERM)
+		   && (state == BG_BLOCK_BOOTING))
+		/* This is a funky state IBM says
+		   isn't a bug, but all their
+		   documentation says this doesn't
+		   happen, but IBM says oh yeah, you
+		   weren't really suppose to notice
+		   that. So we will just skip this
+		   state and act like this didn't happen. */
+		goto nochange_state;
+	real_state = state;
+	if (bg_record->state & BG_BLOCK_ERROR_FLAG)
+		state |= BG_BLOCK_ERROR_FLAG;
+
+	bg_record->state = state;
+
+	if (real_state == BG_BLOCK_TERM || skipped_dealloc)
+		_block_is_deallocating(bg_record, kill_job_list);
+	else if (real_state == BG_BLOCK_BOOTING) {
+		debug("Setting bootflag for %s", bg_record->bg_block_id);
+		bg_record->boot_state = 1;
+	} else if (real_state == BG_BLOCK_FREE) {
+		if (remove_from_bg_list(bg_lists->job_running, bg_record)
+		    == SLURM_SUCCESS)
+			num_unused_cpus += bg_record->cpu_cnt;
+		remove_from_bg_list(bg_lists->booted,
+				    bg_record);
+	} else if (real_state & BG_BLOCK_ERROR_FLAG) {
+		if (bg_record->boot_state)
+			error("Block %s in an error state while booting.",
+			      bg_record->bg_block_id);
+		else
+			error("Block %s in an error state.",
+			      bg_record->bg_block_id);
+		remove_from_bg_list(bg_lists->booted, bg_record);
+		trigger_block_error();
+	} else if (real_state == BG_BLOCK_INITED) {
+		if (!block_ptr_exist_in_list(bg_lists->booted, bg_record))
+			list_push(bg_lists->booted, bg_record);
+	}
+	updated = 1;
+nochange_state:
+
+	/* check the boot state */
+	debug3("boot state for block %s is %d",
+	       bg_record->bg_block_id, bg_record->boot_state);
+	if (bg_record->boot_state) {
+		if (bg_record->state & BG_BLOCK_ERROR_FLAG) {
+			/* If we get an error on boot that
+			 * means it is a transparent L3 error
+			 * and should be trying to fix
+			 * itself.  If this is the case we
+			 * just hang out waiting for the state
+			 * to go to free where we will try to
+			 * boot again below.
+			 */
+			return updated;
+		}
+
+		switch (real_state) {
+		case BG_BLOCK_BOOTING:
+			debug3("checking to make sure user %s "
+			       "is the user.",
+			       bg_record->target_name);
+
+			if (update_block_user(bg_record, 0) == 1)
+				last_bg_update = time(NULL);
+			if (bg_record->job_ptr) {
+				bg_record->job_ptr->job_state |=
+					JOB_CONFIGURING;
+				last_job_update = time(NULL);
+			}
+			break;
+		case BG_BLOCK_FREE:
+			if (bg_record->boot_count < RETRY_BOOT_COUNT) {
+				bridge_block_boot(bg_record);
+
+				if (bg_record->magic == BLOCK_MAGIC) {
+					debug("boot count for block %s is %d",
+					      bg_record->bg_block_id,
+					      bg_record->boot_count);
+					bg_record->boot_count++;
+				}
+			} else {
+				char *reason = (char *)
+					"status_check: Boot fails ";
+
+				error("Couldn't boot Block %s for user %s",
+				      bg_record->bg_block_id,
+				      bg_record->target_name);
+
+				slurm_mutex_unlock(&block_state_mutex);
+				requeue_and_error(bg_record, reason);
+				slurm_mutex_lock(&block_state_mutex);
+
+				bg_record->boot_state = 0;
+				bg_record->boot_count = 0;
+				if (remove_from_bg_list(
+					    bg_lists->job_running, bg_record)
+				    == SLURM_SUCCESS)
+					num_unused_cpus += bg_record->cpu_cnt;
+
+				remove_from_bg_list(bg_lists->booted,
+						    bg_record);
+			}
+			break;
+		case BG_BLOCK_INITED:
+			debug("block %s is ready.",
+			      bg_record->bg_block_id);
+			if (bg_record->job_ptr) {
+				bg_record->job_ptr->job_state &=
+					(~JOB_CONFIGURING);
+				last_job_update = time(NULL);
+			}
+			/* boot flags are reset here */
+			if (kill_job_list &&
+			    set_block_user(bg_record) == SLURM_ERROR) {
+				freeit = (kill_job_struct_t *)
+					xmalloc(sizeof(kill_job_struct_t));
+				freeit->jobid = bg_record->job_running;
+				list_push(kill_job_list, freeit);
+			}
+			break;
+		case BG_BLOCK_TERM:
+			debug2("Block %s is in a deallocating state "
+			       "during a boot.  Doing nothing until "
+			       "free state.",
+			       bg_record->bg_block_id);
+			break;
+		case BG_BLOCK_REBOOTING:
+			debug2("Block %s is rebooting.",
+			       bg_record->bg_block_id);
+			break;
+		default:
+			debug("Hey the state of block "
+			      "%s is %d(%s) doing nothing.",
+			      bg_record->bg_block_id,
+			      real_state,
+			      bg_block_state_string(bg_record->state));
+			break;
+		}
+	}
+
+	return updated;
+}
+
+extern List bg_status_create_kill_job_list(void)
+{
+	return list_create(_destroy_kill_struct);
+}
+
+extern void bg_status_process_kill_job_list(List kill_job_list)
+{
+	kill_job_struct_t *freeit = NULL;
+
+	if (!kill_job_list)
+		return;
+
+	/* kill all the jobs from unexpectedly freed blocks */
+	while ((freeit = list_pop(kill_job_list))) {
+		debug2("Trying to requeue job %u", freeit->jobid);
+		bg_requeue_job(freeit->jobid, 0);
+		_destroy_kill_struct(freeit);
+	}
+}
diff --git a/src/plugins/select/bgq/bgq.h b/src/plugins/select/bluegene/bg_status.h
similarity index 70%
rename from src/plugins/select/bgq/bgq.h
rename to src/plugins/select/bluegene/bg_status.h
index bf374ce5d..cce5e3231 100644
--- a/src/plugins/select/bgq/bgq.h
+++ b/src/plugins/select/bluegene/bg_status.h
@@ -1,13 +1,13 @@
 /*****************************************************************************\
- *  bgq.h - hearder file for the Blue Gene/Q plugin.
+ *  bg_status.h
+ *
  *****************************************************************************
- *  Copyright (C) 2010 Lawrence Livermore National Security.
+ *  Copyright (C) 2011 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
- *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -35,38 +35,19 @@
  *  with SLURM; if not, write to the Free Software Foundation, Inc.,
  *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
 \*****************************************************************************/
+#ifndef _BG_STATUS_H_
+#define _BG_STATUS_H_
 
-#ifndef _BGQ_H_
-#define _BGQ_H_
-
-#include <iostream>
-
-#ifdef HAVE_CONFIG_H
-#  include "config.h"
-#  if HAVE_STDINT_H
-#    include <stdint.h>
-#  endif
-#  if HAVE_INTTYPES_H
-#    include <inttypes.h>
-#  endif
-#endif
+#include "bg_core.h"
 
-#include <stdio.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <unistd.h>
-#include <slurm/slurm.h>
-#include <slurm/slurm_errno.h>
+extern int bg_status_update_block_state(bg_record_t *bg_record,
+					uint16_t state,
+					List kill_job_list);
+extern List bg_status_create_kill_job_list(void);
+extern void bg_status_process_kill_job_list(List kill_job_list);
 
-#ifdef WITH_PTHREADS
-#  include <pthread.h>
-#endif				/* WITH_PTHREADS */
+/* defined in the various bridge_status' */
+extern int bridge_block_check_mp_states(char *bg_block_id,
+					bool slurmctld_locked);
 
-extern "C" {
-
-#include "src/common/slurm_xlator.h"	/* Must be first */
-#include "src/common/macros.h"
-#include "src/slurmctld/slurmctld.h"
-
-}
-#endif /* _BGQ_H_ */
+#endif
diff --git a/src/plugins/select/bluegene/plugin/bg_record_functions.h b/src/plugins/select/bluegene/bg_structs.h
similarity index 54%
rename from src/plugins/select/bluegene/plugin/bg_record_functions.h
rename to src/plugins/select/bluegene/bg_structs.h
index b384b9c7d..0d189b0b5 100644
--- a/src/plugins/select/bluegene/plugin/bg_record_functions.h
+++ b/src/plugins/select/bluegene/bg_structs.h
@@ -1,14 +1,13 @@
 /*****************************************************************************\
- *  bg_record_functions.h - header for creating blocks in a static environment.
+ *  bg_structs.h
  *
- *  $Id: bg_record_functions.h 12954 2008-01-04 20:37:49Z da $
  *****************************************************************************
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2011 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -37,57 +36,87 @@
  *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
 \*****************************************************************************/
 
-#ifndef _BLUEGENE_BG_RECORD_FUNCTIONS_H_
-#define _BLUEGENE_BG_RECORD_FUNCTIONS_H_
+#ifndef _BG_STRUCTS_H_
+#define _BG_STRUCTS_H_
+
+#include "bg_enums.h"
 
 #if HAVE_CONFIG_H
 #  include "config.h"
 #endif
 
-#include <stdlib.h>
-#include <sys/stat.h>
-#include <pwd.h>
-
-#include "src/common/bitstring.h"
-#include "src/common/hostlist.h"
+#include "slurm.h"
 #include "src/common/list.h"
-#include "src/common/macros.h"
-#include "src/common/node_select.h"
-#include "src/common/parse_time.h"
-#include "src/slurmctld/slurmctld.h"
+#include "src/common/bitstring.h"
 
-#include "../block_allocator/block_allocator.h"
+typedef struct {
+	List blrts_list;
+	char *bridge_api_file;
+	uint16_t bridge_api_verb;
+	uint16_t cpu_ratio;
+	uint32_t cpus_per_mp;
+	char *default_blrtsimage;
+	char *default_linuximage;
+	char *default_mloaderimage;
+	char *default_ramdiskimage;
+	uint16_t deny_pass;
+	double io_ratio;
+	uint16_t ionode_cnode_cnt;
+	uint16_t ionodes_per_mp;
+	bg_layout_t layout_mode;
+	List linux_list;
+	List mloader_list;
+	uint16_t mp_cnode_cnt;
+	uint16_t mp_nodecard_cnt;
+	double nc_ratio;
+	uint16_t nodecard_cnode_cnt;
+	uint16_t nodecard_ionode_cnt;
+	uint16_t quarter_cnode_cnt;
+	uint16_t quarter_ionode_cnt;
+	List ramdisk_list;
+	uint32_t slurm_debug_flags;
+	uint32_t slurm_debug_level;
+	char *slurm_node_prefix;
+	char *slurm_user_name;
+	uint32_t smallest_block;
+} bg_config_t;
 
-#define BLOCK_MAGIC 0x3afd
+typedef struct {
+	List booted;         /* blocks that are booted */
+	List job_running;    /* jobs running in these blocks */
+	List main;	    /* List of configured BG blocks */
+	List valid_small32;
+	List valid_small64;
+	List valid_small128;
+	List valid_small256;
+} bg_lists_t;
 
 typedef struct bg_record {
-	rm_partition_t *bg_block;       /* structure to hold info from db2 */
-	pm_partition_id_t bg_block_id;	/* ID returned from MMCS	*/
-	List bg_block_list;             /* node list of blocks in block */
-	bitstr_t *bitmap;               /* bitmap to check the name
-					   of block */
-#ifdef HAVE_BGL
+	void *bg_block;                 /* needed for L/P systems */
+	char *bg_block_id;     	        /* ID returned from MMCS */
+	List ba_mp_list;                /* List of midplanes in block */
 	char *blrtsimage;               /* BlrtsImage for this block */
-#endif
 	int boot_count;                 /* number of attemts boot attempts */
 	int boot_state;                 /* check to see if boot failed.
 					   -1 = fail,
 					   0 = not booting,
 					   1 = booting */
-	int bp_count;                   /* size */
-	rm_connection_type_t conn_type; /* MESH or Torus or NAV */
+	uint32_t cnode_cnt;             /* count of cnodes per block */
+	uint16_t conn_type[SYSTEM_DIMENSIONS];  /* MESH or Torus or NAV */
 	uint32_t cpu_cnt;               /* count of cpus per block */
 	int free_cnt;                   /* How many are trying
 					   to free this block at the
 					   same time */
 	bool full_block;                /* whether or not block is the full
 					   block */
-	uint16_t geo[HIGHEST_DIMENSIONS];  /* geometry */
-	char *ionodes; 		        /* String of ionodes in block
-					 * NULL if not a small block*/
+	uint16_t geo[SYSTEM_DIMENSIONS];  /* geometry */
 	bitstr_t *ionode_bitmap;        /* for small blocks bitmap to
 					   keep track which ionodes we
 					   are on.  NULL if not a small block*/
+	char *ionode_str;               /* String of ionodes in block
+					 * NULL if not a small block*/
+	List job_list;                  /* List of jobs running on a
+					   small block */
 	struct job_record *job_ptr;	/* pointer to job running on
 					 * block or NULL if no job */
 	int job_running;                /* job id of job running of if
@@ -100,19 +129,22 @@ typedef struct bg_record {
 	int modifying;                  /* flag to say the block is
 					   being modified or not at
 					   job launch usually */
-	char *nodes;			/* String of nodes in block */
-	uint32_t node_cnt;              /* count of cnodes per block */
-#ifdef HAVE_BGL
-	rm_partition_mode_t node_use;	/* either COPROCESSOR or VIRTUAL */
-#endif
+	bitstr_t *mp_bitmap;            /* bitmap to check the midplanes
+					   of block */
+	int mp_count;                   /* size */
+	char *mp_str;   		/* String of midplanes in block */
+	bitstr_t *mp_used_bitmap;       /* cnodes used in this bitmap */
+	char *mp_used_str;   		/* String of midplanes used in block */
+	uint16_t node_use;      	/* either COPROCESSOR or VIRTUAL */
 	struct bg_record *original;     /* if this is a copy this is a
 					   pointer to the original */
 	char *ramdiskimage;             /* RamDiskImage/IoloadImg for
 					 * this block */
 	char *reason;                   /* reason block is in error state */
-	rm_partition_state_t state;     /* Current state of the block */
-	uint16_t start[HIGHEST_DIMENSIONS];  /* start node */
-	uint32_t switch_count;          /* number of switches used. */
+	uint16_t state;                 /* Current state of the block */
+	uint16_t start[SYSTEM_DIMENSIONS];  /* start node */
+	uint32_t switch_count;          /* number of switches
+					 * used. On L/P */
 	char *target_name;		/* when a block is freed this
 					   is the name of the user we
 					   want on the block */
@@ -120,37 +152,4 @@ typedef struct bg_record {
 	uid_t user_uid;   		/* Owner of block uid	*/
 } bg_record_t;
 
-/* Log a bg_record's contents */
-extern void print_bg_record(bg_record_t *record);
-extern void destroy_bg_record(void *object);
-extern int block_exist_in_list(List my_list, bg_record_t *bg_record);
-extern int block_ptr_exist_in_list(List my_list, bg_record_t *bg_record);
-extern void process_nodes(bg_record_t *bg_reord, bool startup);
-extern List copy_bg_list(List in_list);
-extern void copy_bg_record(bg_record_t *fir_record, bg_record_t *sec_record);
-extern int bg_record_cmpf_inc(bg_record_t *rec_a, bg_record_t *rec_b);
-extern int bg_record_sort_aval_inc(bg_record_t* rec_a, bg_record_t* rec_b);
-
-/* return bg_record from a bg_list */
-extern bg_record_t *find_bg_record_in_list(List my_list, char *bg_block_id);
-
-/* change username of a block bg_record_t target_name needs to be
-   updated before call of function.
-*/
-extern int update_block_user(bg_record_t *bg_block_id, int set);
-extern void requeue_and_error(bg_record_t *bg_record, char *reason);
-
-extern int add_bg_record(List records, List used_nodes, blockreq_t *blockreq,
-			 bool no_check, bitoff_t io_start);
-extern int handle_small_record_request(List records, blockreq_t *blockreq,
-				       bg_record_t *bg_record, bitoff_t start);
-
-extern int format_node_name(bg_record_t *bg_record, char *buf, int buf_size);
-extern int down_nodecard(char *bp_name, bitoff_t io_start,
-			 bool slurmctld_locked);
-extern int up_nodecard(char *bp_name, bitstr_t *ionode_bitmap);
-extern int put_block_in_error_state(bg_record_t *bg_record,
-				    int state, char *reason);
-extern int resume_block(bg_record_t *bg_record);
-
-#endif /* _BLUEGENE_BG_RECORD_FUNCTIONS_H_ */
+#endif
diff --git a/src/plugins/select/bluegene/bl/Makefile.am b/src/plugins/select/bluegene/bl/Makefile.am
new file mode 100644
index 000000000..3a93f161e
--- /dev/null
+++ b/src/plugins/select/bluegene/bl/Makefile.am
@@ -0,0 +1,14 @@
+# Makefile.am for bridge_linker on a bluegene system
+
+AUTOMAKE_OPTIONS = foreign
+CLEANFILES = core.*
+
+INCLUDES = -I$(top_srcdir) $(BG_INCLUDES)
+
+# making a .la
+
+noinst_LTLIBRARIES = libbridge_linker.la
+libbridge_linker_la_SOURCES = bridge_linker.c bridge_status.c \
+			bridge_switch_connections.c
+
+libbridge_linker_la_LDFLAGS = $(LIB_LDFLAGS) -lm
diff --git a/src/plugins/select/bluegene/bl/Makefile.in b/src/plugins/select/bluegene/bl/Makefile.in
new file mode 100644
index 000000000..2d85e8de3
--- /dev/null
+++ b/src/plugins/select/bluegene/bl/Makefile.in
@@ -0,0 +1,603 @@
+# Makefile.in generated by automake 1.11.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+# 2003, 2004, 2005, 2006, 2007, 2008, 2009  Free Software Foundation,
+# Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# Makefile.am for bridge_linker on a bluegene system
+
+VPATH = @srcdir@
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+target_triplet = @target@
+subdir = src/plugins/select/bluegene/bl
+DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
+	$(top_srcdir)/auxdir/libtool.m4 \
+	$(top_srcdir)/auxdir/ltoptions.m4 \
+	$(top_srcdir)/auxdir/ltsugar.m4 \
+	$(top_srcdir)/auxdir/ltversion.m4 \
+	$(top_srcdir)/auxdir/lt~obsolete.m4 \
+	$(top_srcdir)/auxdir/slurm.m4 \
+	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
+	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
+	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
+	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
+	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_databases.m4 \
+	$(top_srcdir)/auxdir/x_ac_debug.m4 \
+	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
+	$(top_srcdir)/auxdir/x_ac_federation.m4 \
+	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
+	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
+	$(top_srcdir)/auxdir/x_ac_munge.m4 \
+	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_pam.m4 \
+	$(top_srcdir)/auxdir/x_ac_printf_null.m4 \
+	$(top_srcdir)/auxdir/x_ac_ptrace.m4 \
+	$(top_srcdir)/auxdir/x_ac_readline.m4 \
+	$(top_srcdir)/auxdir/x_ac_setpgrp.m4 \
+	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
+	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
+	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
+	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
+	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+	$(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+LTLIBRARIES = $(noinst_LTLIBRARIES)
+libbridge_linker_la_LIBADD =
+am_libbridge_linker_la_OBJECTS = bridge_linker.lo bridge_status.lo \
+	bridge_switch_connections.lo
+libbridge_linker_la_OBJECTS = $(am_libbridge_linker_la_OBJECTS)
+libbridge_linker_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
+	$(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
+	$(libbridge_linker_la_LDFLAGS) $(LDFLAGS) -o $@
+DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm
+depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp
+am__depfiles_maybe = depfiles
+am__mv = mv -f
+COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
+	$(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+	--mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+	$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+CCLD = $(CC)
+LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+	--mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \
+	$(LDFLAGS) -o $@
+SOURCES = $(libbridge_linker_la_SOURCES)
+DIST_SOURCES = $(libbridge_linker_la_SOURCES)
+ETAGS = etags
+CTAGS = ctags
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AR = @AR@
+AUTHD_CFLAGS = @AUTHD_CFLAGS@
+AUTHD_LIBS = @AUTHD_LIBS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
+BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
+BLUEGENE_LOADED = @BLUEGENE_LOADED@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CMD_LDFLAGS = @CMD_LDFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DSYMUTIL = @DSYMUTIL@
+DUMPBIN = @DUMPBIN@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+ELAN_LIBS = @ELAN_LIBS@
+EXEEXT = @EXEEXT@
+FEDERATION_LDFLAGS = @FEDERATION_LDFLAGS@
+FGREP = @FGREP@
+GREP = @GREP@
+GTK_CFLAGS = @GTK_CFLAGS@
+GTK_LIBS = @GTK_LIBS@
+HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@
+HAVEPGCONFIG = @HAVEPGCONFIG@
+HAVE_AIX = @HAVE_AIX@
+HAVE_ELAN = @HAVE_ELAN@
+HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
+HAVE_OPENSSL = @HAVE_OPENSSL@
+HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
+HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
+HWLOC_LDFLAGS = @HWLOC_LDFLAGS@
+HWLOC_LIBS = @HWLOC_LIBS@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+LD = @LD@
+LDFLAGS = @LDFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIB_LDFLAGS = @LIB_LDFLAGS@
+LIPO = @LIPO@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MKDIR_P = @MKDIR_P@
+MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@
+MUNGE_LDFLAGS = @MUNGE_LDFLAGS@
+MUNGE_LIBS = @MUNGE_LIBS@
+MYSQL_CFLAGS = @MYSQL_CFLAGS@
+MYSQL_LIBS = @MYSQL_LIBS@
+NCURSES = @NCURSES@
+NM = @NM@
+NMEDIT = @NMEDIT@
+NUMA_LIBS = @NUMA_LIBS@
+OBJDUMP = @OBJDUMP@
+OBJEXT = @OBJEXT@
+OTOOL = @OTOOL@
+OTOOL64 = @OTOOL64@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PAM_DIR = @PAM_DIR@
+PAM_LIBS = @PAM_LIBS@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PGSQL_CFLAGS = @PGSQL_CFLAGS@
+PGSQL_LIBS = @PGSQL_LIBS@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROCTRACKDIR = @PROCTRACKDIR@
+PROJECT = @PROJECT@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+RANLIB = @RANLIB@
+READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
+RELEASE = @RELEASE@
+SED = @SED@
+SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
+SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SLURMCTLD_PORT = @SLURMCTLD_PORT@
+SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
+SLURMDBD_PORT = @SLURMDBD_PORT@
+SLURMD_PORT = @SLURMD_PORT@
+SLURM_API_AGE = @SLURM_API_AGE@
+SLURM_API_CURRENT = @SLURM_API_CURRENT@
+SLURM_API_MAJOR = @SLURM_API_MAJOR@
+SLURM_API_REVISION = @SLURM_API_REVISION@
+SLURM_API_VERSION = @SLURM_API_VERSION@
+SLURM_MAJOR = @SLURM_MAJOR@
+SLURM_MICRO = @SLURM_MICRO@
+SLURM_MINOR = @SLURM_MINOR@
+SLURM_PREFIX = @SLURM_PREFIX@
+SLURM_VERSION_NUMBER = @SLURM_VERSION_NUMBER@
+SLURM_VERSION_STRING = @SLURM_VERSION_STRING@
+SO_LDFLAGS = @SO_LDFLAGS@
+SSL_CPPFLAGS = @SSL_CPPFLAGS@
+SSL_LDFLAGS = @SSL_LDFLAGS@
+SSL_LIBS = @SSL_LIBS@
+STRIP = @STRIP@
+UTIL_LIBS = @UTIL_LIBS@
+VERSION = @VERSION@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+lt_ECHO = @lt_ECHO@
+lua_CFLAGS = @lua_CFLAGS@
+lua_LIBS = @lua_LIBS@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target = @target@
+target_alias = @target_alias@
+target_cpu = @target_cpu@
+target_os = @target_os@
+target_vendor = @target_vendor@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+AUTOMAKE_OPTIONS = foreign
+CLEANFILES = core.*
+INCLUDES = -I$(top_srcdir) $(BG_INCLUDES)
+
+# making a .la
+noinst_LTLIBRARIES = libbridge_linker.la
+libbridge_linker_la_SOURCES = bridge_linker.c bridge_status.c \
+			bridge_switch_connections.c
+
+libbridge_linker_la_LDFLAGS = $(LIB_LDFLAGS) -lm
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .c .lo .o .obj
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am  $(am__configure_deps)
+	@for dep in $?; do \
+	  case '$(am__configure_deps)' in \
+	    *$$dep*) \
+	      ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+	        && { if test -f $@; then exit 0; else break; fi; }; \
+	      exit 1;; \
+	  esac; \
+	done; \
+	echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/plugins/select/bluegene/bl/Makefile'; \
+	$(am__cd) $(top_srcdir) && \
+	  $(AUTOMAKE) --foreign src/plugins/select/bluegene/bl/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+	@case '$?' in \
+	  *config.status*) \
+	    cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+	  *) \
+	    echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+	    cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+	esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+clean-noinstLTLIBRARIES:
+	-test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES)
+	@list='$(noinst_LTLIBRARIES)'; for p in $$list; do \
+	  dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \
+	  test "$$dir" != "$$p" || dir=.; \
+	  echo "rm -f \"$${dir}/so_locations\""; \
+	  rm -f "$${dir}/so_locations"; \
+	done
+libbridge_linker.la: $(libbridge_linker_la_OBJECTS) $(libbridge_linker_la_DEPENDENCIES) 
+	$(libbridge_linker_la_LINK)  $(libbridge_linker_la_OBJECTS) $(libbridge_linker_la_LIBADD) $(LIBS)
+
+mostlyclean-compile:
+	-rm -f *.$(OBJEXT)
+
+distclean-compile:
+	-rm -f *.tab.c
+
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/bridge_linker.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/bridge_status.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/bridge_switch_connections.Plo@am__quote@
+
+.c.o:
+@am__fastdepCC_TRUE@	$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(COMPILE) -c $<
+
+.c.obj:
+@am__fastdepCC_TRUE@	$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(COMPILE) -c `$(CYGPATH_W) '$<'`
+
+.c.lo:
+@am__fastdepCC_TRUE@	$(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LTCOMPILE) -c -o $@ $<
+
+mostlyclean-libtool:
+	-rm -f *.lo
+
+clean-libtool:
+	-rm -rf .libs _libs
+
+ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
+	list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+	unique=`for i in $$list; do \
+	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+	  done | \
+	  $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+	      END { if (nonempty) { for (i in files) print i; }; }'`; \
+	mkid -fID $$unique
+tags: TAGS
+
+TAGS:  $(HEADERS) $(SOURCES)  $(TAGS_DEPENDENCIES) \
+		$(TAGS_FILES) $(LISP)
+	set x; \
+	here=`pwd`; \
+	list='$(SOURCES) $(HEADERS)  $(LISP) $(TAGS_FILES)'; \
+	unique=`for i in $$list; do \
+	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+	  done | \
+	  $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+	      END { if (nonempty) { for (i in files) print i; }; }'`; \
+	shift; \
+	if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+	  test -n "$$unique" || unique=$$empty_fix; \
+	  if test $$# -gt 0; then \
+	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	      "$$@" $$unique; \
+	  else \
+	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	      $$unique; \
+	  fi; \
+	fi
+ctags: CTAGS
+CTAGS:  $(HEADERS) $(SOURCES)  $(TAGS_DEPENDENCIES) \
+		$(TAGS_FILES) $(LISP)
+	list='$(SOURCES) $(HEADERS)  $(LISP) $(TAGS_FILES)'; \
+	unique=`for i in $$list; do \
+	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+	  done | \
+	  $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+	      END { if (nonempty) { for (i in files) print i; }; }'`; \
+	test -z "$(CTAGS_ARGS)$$unique" \
+	  || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+	     $$unique
+
+GTAGS:
+	here=`$(am__cd) $(top_builddir) && pwd` \
+	  && $(am__cd) $(top_srcdir) \
+	  && gtags -i $(GTAGS_ARGS) "$$here"
+
+distclean-tags:
+	-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+	@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	list='$(DISTFILES)'; \
+	  dist_files=`for file in $$list; do echo $$file; done | \
+	  sed -e "s|^$$srcdirstrip/||;t" \
+	      -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+	case $$dist_files in \
+	  */*) $(MKDIR_P) `echo "$$dist_files" | \
+			   sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+			   sort -u` ;; \
+	esac; \
+	for file in $$dist_files; do \
+	  if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+	  if test -d $$d/$$file; then \
+	    dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+	    if test -d "$(distdir)/$$file"; then \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+	      cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+	  else \
+	    test -f "$(distdir)/$$file" \
+	    || cp -p $$d/$$file "$(distdir)/$$file" \
+	    || exit 1; \
+	  fi; \
+	done
+check-am: all-am
+check: check-am
+all-am: Makefile $(LTLIBRARIES)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+	$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	  install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	  `test -z '$(STRIP)' || \
+	    echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
+mostlyclean-generic:
+
+clean-generic:
+	-test -z "$(CLEANFILES)" || rm -f $(CLEANFILES)
+
+distclean-generic:
+	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+	-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+	@echo "This command is intended for maintainers to use"
+	@echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \
+	mostlyclean-am
+
+distclean: distclean-am
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+distclean-am: clean-am distclean-compile distclean-generic \
+	distclean-tags
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-compile mostlyclean-generic \
+	mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \
+	clean-libtool clean-noinstLTLIBRARIES ctags distclean \
+	distclean-compile distclean-generic distclean-libtool \
+	distclean-tags distdir dvi dvi-am html html-am info info-am \
+	install install-am install-data install-data-am install-dvi \
+	install-dvi-am install-exec install-exec-am install-html \
+	install-html-am install-info install-info-am install-man \
+	install-pdf install-pdf-am install-ps install-ps-am \
+	install-strip installcheck installcheck-am installdirs \
+	maintainer-clean maintainer-clean-generic mostlyclean \
+	mostlyclean-compile mostlyclean-generic mostlyclean-libtool \
+	pdf pdf-am ps ps-am tags uninstall uninstall-am
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/src/plugins/select/bluegene/bl/bridge_linker.c b/src/plugins/select/bluegene/bl/bridge_linker.c
new file mode 100644
index 000000000..06ac3f731
--- /dev/null
+++ b/src/plugins/select/bluegene/bl/bridge_linker.c
@@ -0,0 +1,2522 @@
+/*****************************************************************************\
+ *  bridge_linker.c
+ *
+ *****************************************************************************
+ *  Copyright (C) 2004 The Regents of the University of California.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Dan Phung <phung4@llnl.gov>, Danny Auble <da@llnl.gov>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+
+#include "../ba/block_allocator.h"
+#include "../bridge_linker.h"
+#include "src/common/uid.h"
+#include "bridge_status.h"
+#include "bridge_switch_connections.h"
+
+#define MAX_ADD_RETRY 2
+
+#if defined HAVE_BG_FILES
+typedef struct {
+	/* all the rm functions */
+	status_t (*set_serial)(const rm_serial_t serial);
+	status_t (*get_bg)(my_bluegene_t **bg);
+	status_t (*free_bg)(my_bluegene_t *bg);
+#ifdef HAVE_BGP
+	status_t (*new_ionode)(rm_ionode_t **ionode);
+	status_t (*free_ionode)(rm_ionode_t *ionode);
+#endif
+	status_t (*add_partition)(rm_partition_t *partition);
+	status_t (*get_partition)(pm_partition_id_t pid,
+				  rm_partition_t **partition);
+	status_t (*get_partition_info)(pm_partition_id_t pid,
+				       rm_partition_t **partition);
+	status_t (*modify_partition)(pm_partition_id_t pid,
+				     enum rm_modify_op op, const void *data);
+	status_t (*set_part_owner)(pm_partition_id_t pid, const char *name);
+	status_t (*add_part_user)(pm_partition_id_t pid, const char *name);
+	status_t (*remove_part_user)(pm_partition_id_t pid, const char *name);
+	status_t (*remove_partition)(pm_partition_id_t pid);
+	status_t (*get_partitions)(rm_partition_state_flag_t flag,
+				   rm_partition_list_t **part_list);
+	status_t (*get_partitions_info)(rm_partition_state_flag_t flag,
+					rm_partition_list_t **part_list);
+	status_t (*get_job)(db_job_id_t dbJobId, rm_job_t **job);
+	status_t (*get_jobs)(rm_job_state_flag_t flag, rm_job_list_t **jobs);
+	status_t (*get_nodecards)(rm_bp_id_t bpid,
+				  rm_nodecard_list_t **nc_list);
+	status_t (*new_nodecard)(rm_nodecard_t **nodecard);
+	status_t (*free_nodecard)(rm_nodecard_t *nodecard);
+	status_t (*new_partition)(rm_partition_t **partition);
+	status_t (*free_partition)(rm_partition_t *partition);
+	status_t (*free_job)(rm_job_t *job);
+	status_t (*free_partition_list)(rm_partition_list_t *part_list);
+	status_t (*free_job_list)(rm_job_list_t *job_list);
+	status_t (*free_nodecard_list)(rm_nodecard_list_t *nc_list);
+	status_t (*get_data)(rm_element_t* element,
+			     enum rm_specification field, void *data);
+	status_t (*set_data)(rm_element_t* element,
+			     enum rm_specification field, void *data);
+
+	/* all the jm functions */
+	status_t (*signal_job)(db_job_id_t jid, rm_signal_t sig);
+
+	/* all the pm functions */
+	status_t (*create_partition)(pm_partition_id_t pid);
+	status_t (*destroy_partition)(pm_partition_id_t pid);
+
+	/* set say message stuff */
+	void (*set_log_params)(FILE * stream, unsigned int level);
+
+} bridge_api_t;
+
+pthread_mutex_t api_file_mutex = PTHREAD_MUTEX_INITIALIZER;
+bridge_api_t bridge_api;
+#endif
+
+static bool initialized = false;
+bool have_db2 = true;
+void *handle = NULL;
+
+#if defined HAVE_BG_FILES
+/* translation from the enum to the actual port number */
+static int _port_enum(int port)
+{
+	switch(port) {
+	case RM_PORT_S0:
+		return 0;
+		break;
+	case RM_PORT_S1:
+		return 1;
+		break;
+	case RM_PORT_S2:
+		return 2;
+		break;
+	case RM_PORT_S3:
+		return 3;
+		break;
+	case RM_PORT_S4:
+		return 4;
+		break;
+	case RM_PORT_S5:
+		return 5;
+		break;
+	default:
+		return -1;
+	}
+}
+
+static int _bg_errtrans(int in)
+{
+	switch (in) {
+	case STATUS_OK:
+		return SLURM_SUCCESS;
+	case PARTITION_NOT_FOUND:
+		return BG_ERROR_BLOCK_NOT_FOUND;
+	case INCOMPATIBLE_STATE:
+		return BG_ERROR_INVALID_STATE;
+	case CONNECTION_ERROR:
+		return BG_ERROR_CONNECTION_ERROR;
+	case JOB_NOT_FOUND:
+		return BG_ERROR_JOB_NOT_FOUND;
+	case BP_NOT_FOUND:
+		return BG_ERROR_MP_NOT_FOUND;
+	case SWITCH_NOT_FOUND:
+		return BG_ERROR_SWITCH_NOT_FOUND;
+#ifndef HAVE_BGL
+	case PARTITION_ALREADY_DEFINED:
+		return BG_ERROR_BLOCK_ALREADY_DEFINED;
+#endif
+	case JOB_ALREADY_DEFINED:
+		return BG_ERROR_JOB_ALREADY_DEFINED;
+	case INTERNAL_ERROR:
+		return BG_ERROR_INTERNAL_ERROR;
+	case INVALID_INPUT:
+		return BG_ERROR_INVALID_INPUT;
+	case INCONSISTENT_DATA:
+		return BG_ERROR_INCONSISTENT_DATA;
+	default:
+		break;
+	}
+	return SLURM_ERROR;
+}
+
+static status_t _get_job(db_job_id_t dbJobId, rm_job_t **job)
+{
+	int rc = BG_ERROR_CONNECTION_ERROR;
+	if (!bridge_init(NULL))
+		return rc;
+
+	slurm_mutex_lock(&api_file_mutex);
+	rc = _bg_errtrans((*(bridge_api.get_job))(dbJobId, job));
+	slurm_mutex_unlock(&api_file_mutex);
+	return rc;
+
+}
+
+static status_t _get_jobs(rm_job_state_flag_t flag, rm_job_list_t **jobs)
+{
+	int rc = BG_ERROR_CONNECTION_ERROR;
+	if (!bridge_init(NULL))
+		return rc;
+
+	slurm_mutex_lock(&api_file_mutex);
+	rc = _bg_errtrans((*(bridge_api.get_jobs))(flag, jobs));
+	slurm_mutex_unlock(&api_file_mutex);
+	return rc;
+}
+
+static status_t _free_job(rm_job_t *job)
+{
+	int rc = BG_ERROR_CONNECTION_ERROR;
+	if (!bridge_init(NULL))
+		return rc;
+
+	slurm_mutex_lock(&api_file_mutex);
+	rc = _bg_errtrans((*(bridge_api.free_job))(job));
+	slurm_mutex_unlock(&api_file_mutex);
+	return rc;
+
+}
+
+static status_t _free_job_list(rm_job_list_t *job_list)
+{
+	int rc = BG_ERROR_CONNECTION_ERROR;
+	if (!bridge_init(NULL))
+		return rc;
+
+	slurm_mutex_lock(&api_file_mutex);
+	rc = _bg_errtrans((*(bridge_api.free_job_list))(job_list));
+	slurm_mutex_unlock(&api_file_mutex);
+	return rc;
+
+}
+
+static status_t _signal_job(db_job_id_t jid, rm_signal_t sig)
+{
+	int rc = BG_ERROR_CONNECTION_ERROR;
+	if (!bridge_init(NULL))
+		return rc;
+
+	slurm_mutex_lock(&api_file_mutex);
+	rc = _bg_errtrans((*(bridge_api.signal_job))(jid, sig));
+	slurm_mutex_unlock(&api_file_mutex);
+	return rc;
+
+}
+
+static status_t _remove_block_user(pm_partition_id_t pid,
+				   const char *name)
+{
+	int rc = BG_ERROR_CONNECTION_ERROR;
+	if (!bridge_init(NULL))
+		return rc;
+
+	slurm_mutex_lock(&api_file_mutex);
+	rc = _bg_errtrans((*(bridge_api.remove_part_user))(pid, name));
+	slurm_mutex_unlock(&api_file_mutex);
+	return rc;
+
+}
+
+static status_t _new_block(rm_partition_t **partition)
+{
+	int rc = BG_ERROR_CONNECTION_ERROR;
+	if (!bridge_init(NULL))
+		return rc;
+
+	slurm_mutex_lock(&api_file_mutex);
+	rc = _bg_errtrans((*(bridge_api.new_partition))(partition));
+	slurm_mutex_unlock(&api_file_mutex);
+	return rc;
+}
+
+static status_t _add_block(rm_partition_t *partition)
+{
+	int rc = BG_ERROR_CONNECTION_ERROR;
+	if (!bridge_init(NULL))
+		return rc;
+
+	slurm_mutex_lock(&api_file_mutex);
+	rc = _bg_errtrans((*(bridge_api.add_partition))(partition));
+	slurm_mutex_unlock(&api_file_mutex);
+	return rc;
+}
+
+
+/* Kill a job and remove its record from MMCS */
+static int _remove_job(db_job_id_t job_id, char *block_id)
+{
+	int rc;
+	int count = 0;
+	rm_job_t *job_rec = NULL;
+	rm_job_state_t job_state;
+	bool is_history = false;
+
+	debug("removing job %d from MMCS on block %s", job_id, block_id);
+	while (1) {
+		if (count)
+			sleep(POLL_INTERVAL);
+		count++;
+
+		/* Find the job */
+		if ((rc = _get_job(job_id, &job_rec)) != SLURM_SUCCESS) {
+
+			if (rc == BG_ERROR_JOB_NOT_FOUND) {
+				debug("job %d removed from MMCS", job_id);
+				return SLURM_SUCCESS;
+			}
+
+			error("bridge_get_job(%d): %s", job_id,
+			      bg_err_str(rc));
+			continue;
+		}
+
+		if ((rc = bridge_get_data(job_rec, RM_JobState, &job_state))
+		    != SLURM_SUCCESS) {
+			(void) _free_job(job_rec);
+			if (rc == BG_ERROR_JOB_NOT_FOUND) {
+				debug("job %d not found in MMCS", job_id);
+				return SLURM_SUCCESS;
+			}
+
+			error("bridge_get_data(RM_JobState) for jobid=%d "
+			      "%s", job_id, bg_err_str(rc));
+			continue;
+		}
+
+		/* If this job is in the history table we
+		   should just exit here since it is marked
+		   incorrectly */
+		if ((rc = bridge_get_data(job_rec, RM_JobInHist,
+					  &is_history))
+		    != SLURM_SUCCESS) {
+			(void) _free_job(job_rec);
+			if (rc == BG_ERROR_JOB_NOT_FOUND) {
+				debug("job %d removed from MMCS", job_id);
+				return SLURM_SUCCESS;
+			}
+
+			error("bridge_get_data(RM_JobInHist) for jobid=%d "
+			      "%s", job_id, bg_err_str(rc));
+			continue;
+		}
+
+		if ((rc = _free_job(job_rec)) != SLURM_SUCCESS)
+			error("bridge_free_job: %s", bg_err_str(rc));
+
+		debug2("job %d on block %s is in state %d history %d",
+		       job_id, block_id, job_state, is_history);
+
+		/* check the state and process accordingly */
+		if (is_history) {
+			debug2("Job %d on block %s isn't in the "
+			       "active job table anymore, final state was %d",
+			       job_id, block_id, job_state);
+			return SLURM_SUCCESS;
+		} else if (job_state == RM_JOB_TERMINATED)
+			return SLURM_SUCCESS;
+		else if (job_state == RM_JOB_DYING) {
+			if (count > MAX_POLL_RETRIES)
+				error("Job %d on block %s isn't dying, "
+				      "trying for %d seconds", job_id,
+				      block_id, count*POLL_INTERVAL);
+			continue;
+		} else if (job_state == RM_JOB_ERROR) {
+			error("job %d on block %s is in a error state.",
+			      job_id, block_id);
+
+			//free_bg_block();
+			return SLURM_SUCCESS;
+		}
+
+		/* we have been told the next 2 lines do the same
+		 * thing, but I don't believe it to be true.  In most
+		 * cases when you do a signal of SIGTERM the mpirun
+		 * process gets killed with a SIGTERM.  In the case of
+		 * bridge_cancel_job it always gets killed with a
+		 * SIGKILL.  From IBM's point of view that is a bad
+		 * deally, so we are going to use signal ;).  Sending
+		 * a SIGKILL will kill the mpirun front end process,
+		 * and if you kill that jobs will never get cleaned up and
+		 * you end up with ciod unreacahble on the next job.
+		 */
+
+//		 rc = bridge_cancel_job(job_id);
+		rc = _signal_job(job_id, SIGTERM);
+
+		if (rc != SLURM_SUCCESS) {
+			if (rc == BG_ERROR_JOB_NOT_FOUND) {
+				debug("job %d on block %s removed from MMCS",
+				      job_id, block_id);
+				return SLURM_SUCCESS;
+			}
+			if (rc == BG_ERROR_INVALID_STATE)
+				debug("job %d on block %s is in an "
+				      "INCOMPATIBLE_STATE",
+				      job_id, block_id);
+			else
+				error("bridge_signal_job(%d): %s", job_id,
+				      bg_err_str(rc));
+		} else if (count > MAX_POLL_RETRIES)
+			error("Job %d on block %s is in state %d and "
+			      "isn't dying, and doesn't appear to be "
+			      "responding to SIGTERM, trying for %d seconds",
+			      job_id, block_id, job_state, count*POLL_INTERVAL);
+
+	}
+
+	error("Failed to remove job %d from MMCS", job_id);
+	return BG_ERROR_INTERNAL_ERROR;
+}
+#endif
+
+static void _remove_jobs_on_block_and_reset(rm_job_list_t *job_list,
+					    int job_cnt, char *block_id)
+{
+	bg_record_t *bg_record = NULL;
+	int job_remove_failed = 0;
+
+#if defined HAVE_BG_FILES && defined HAVE_BG_L_P
+	rm_element_t *job_elem = NULL;
+	pm_partition_id_t job_block;
+	db_job_id_t job_id;
+	int i, rc;
+#endif
+
+	if (!job_list)
+		job_cnt = 0;
+
+	if (!block_id) {
+		error("_remove_jobs_on_block_and_reset: no block name given");
+		return;
+	}
+
+#if defined HAVE_BG_FILES && defined HAVE_BG_L_P
+	for (i=0; i<job_cnt; i++) {
+		if (i) {
+			if ((rc = bridge_get_data(
+				     job_list, RM_JobListNextJob,
+				     &job_elem)) != SLURM_SUCCESS) {
+				error("bridge_get_data"
+				      "(RM_JobListNextJob): %s",
+				      bg_err_str(rc));
+				continue;
+			}
+		} else {
+			if ((rc = bridge_get_data(
+				     job_list, RM_JobListFirstJob,
+				     &job_elem)) != SLURM_SUCCESS) {
+				error("bridge_get_data"
+				      "(RM_JobListFirstJob): %s",
+				      bg_err_str(rc));
+				continue;
+			}
+		}
+
+		if (!job_elem) {
+			error("No Job Elem breaking out job count = %d", i);
+			break;
+		}
+		if ((rc = bridge_get_data(job_elem, RM_JobPartitionID,
+					  &job_block))
+		    != SLURM_SUCCESS) {
+			error("bridge_get_data(RM_JobPartitionID) %s: %s",
+			      job_block, bg_err_str(rc));
+			continue;
+		}
+
+		if (!job_block) {
+			error("No blockID returned from Database");
+			continue;
+		}
+
+		debug2("looking at block %s looking for %s",
+		       job_block, block_id);
+
+		if (strcmp(job_block, block_id)) {
+			free(job_block);
+			continue;
+		}
+
+		free(job_block);
+
+		if ((rc = bridge_get_data(job_elem, RM_JobDBJobID, &job_id))
+		    != SLURM_SUCCESS) {
+			error("bridge_get_data(RM_JobDBJobID): %s",
+			      bg_err_str(rc));
+			continue;
+		}
+		debug2("got job_id %d",job_id);
+		if ((rc = _remove_job(job_id, block_id))
+		    == BG_ERROR_INTERNAL_ERROR) {
+			job_remove_failed = 1;
+			break;
+		}
+	}
+#else
+	/* Simpulate better job completion since on a real system it
+	 * could take up minutes to kill a job. */
+	if (job_cnt)
+		sleep(2);
+#endif
+	/* remove the block's users */
+	slurm_mutex_lock(&block_state_mutex);
+	bg_record = find_bg_record_in_list(bg_lists->main, block_id);
+	if (bg_record) {
+		debug("got the record %s user is %s",
+		      bg_record->bg_block_id,
+		      bg_record->user_name);
+
+		if (job_remove_failed) {
+			if (bg_record->mp_str)
+				slurm_drain_nodes(
+					bg_record->mp_str,
+					"_term_agent: Couldn't remove job",
+					slurm_get_slurm_user_id());
+			else
+				error("Block %s doesn't have a node list.",
+				      block_id);
+		}
+
+		bg_reset_block(bg_record);
+	} else if (bg_conf->layout_mode == LAYOUT_DYNAMIC) {
+		debug2("Hopefully we are destroying this block %s "
+		       "since it isn't in the bg_lists->main",
+		       block_id);
+	} else if (job_cnt) {
+		error("Could not find block %s previously assigned to job.  "
+		      "If this is happening at startup and you just changed "
+		      "your bluegene.conf this is expected.  Else you should "
+		      "probably restart your slurmctld since this shouldn't "
+		      "happen outside of that.",
+		      block_id);
+	}
+	slurm_mutex_unlock(&block_state_mutex);
+
+}
+
+/**
+ * initialize the BG block in the resource manager
+ */
+static void _pre_allocate(bg_record_t *bg_record)
+{
+#if defined HAVE_BG_FILES
+	int rc;
+	int send_psets=bg_conf->ionodes_per_mp;
+	rm_connection_type_t conn_type = bg_record->conn_type[0];
+#ifdef HAVE_BGL
+	if ((rc = bridge_set_data(bg_record->bg_block, RM_PartitionBlrtsImg,
+				  bg_record->blrtsimage)) != SLURM_SUCCESS)
+		error("bridge_set_data(RM_PartitionBlrtsImg): %s",
+		      bg_err_str(rc));
+
+	if ((rc = bridge_set_data(bg_record->bg_block, RM_PartitionLinuxImg,
+				  bg_record->linuximage)) != SLURM_SUCCESS)
+		error("bridge_set_data(RM_PartitionLinuxImg): %s",
+		      bg_err_str(rc));
+
+	if ((rc = bridge_set_data(bg_record->bg_block, RM_PartitionRamdiskImg,
+				  bg_record->ramdiskimage)) != SLURM_SUCCESS)
+		error("bridge_set_data(RM_PartitionRamdiskImg): %s",
+		      bg_err_str(rc));
+#else
+	struct tm my_tm;
+	struct timeval my_tv;
+
+	if ((rc = bridge_set_data(bg_record->bg_block,
+				  RM_PartitionCnloadImg,
+				  bg_record->linuximage)) != SLURM_SUCCESS)
+		error("bridge_set_data(RM_PartitionLinuxCnloadImg): %s",
+		      bg_err_str(rc));
+
+	if ((rc = bridge_set_data(bg_record->bg_block, RM_PartitionIoloadImg,
+				  bg_record->ramdiskimage)) != SLURM_SUCCESS)
+		error("bridge_set_data(RM_PartitionIoloadImg): %s",
+		      bg_err_str(rc));
+
+	gettimeofday(&my_tv, NULL);
+	localtime_r(&my_tv.tv_sec, &my_tm);
+	bg_record->bg_block_id = xstrdup_printf(
+		"RMP%2.2d%2.2s%2.2d%2.2d%2.2d%3.3ld",
+		my_tm.tm_mday, mon_abbr(my_tm.tm_mon),
+		my_tm.tm_hour, my_tm.tm_min, my_tm.tm_sec, my_tv.tv_usec/1000);
+	if ((rc = bridge_set_data(bg_record->bg_block, RM_PartitionID,
+				  bg_record->bg_block_id)) != SLURM_SUCCESS)
+		error("bridge_set_data(RM_PartitionID): %s",
+		      bg_err_str(rc));
+#endif
+	if ((rc = bridge_set_data(bg_record->bg_block, RM_PartitionMloaderImg,
+				  bg_record->mloaderimage)) != SLURM_SUCCESS)
+		error("bridge_set_data(RM_PartitionMloaderImg): %s",
+		      bg_err_str(rc));
+
+	/* Don't send a * uint16_t into this it messes things up. */
+	if ((rc = bridge_set_data(bg_record->bg_block, RM_PartitionConnection,
+				  &conn_type)) != SLURM_SUCCESS)
+		error("bridge_set_data(RM_PartitionConnection): %s",
+		      bg_err_str(rc));
+
+	/* rc = bg_conf->mp_cnode_cnt/bg_record->cnode_cnt; */
+/* 	if (rc > 1) */
+/* 		send_psets = bg_conf->ionodes_per_mp/rc; */
+
+	if ((rc = bridge_set_data(bg_record->bg_block, RM_PartitionPsetsPerBP,
+				  &send_psets)) != SLURM_SUCCESS)
+		error("bridge_set_data(RM_PartitionPsetsPerBP): %s",
+		      bg_err_str(rc));
+
+	if ((rc = bridge_set_data(bg_record->bg_block, RM_PartitionUserName,
+				  bg_conf->slurm_user_name))
+	    != SLURM_SUCCESS)
+		error("bridge_set_data(RM_PartitionUserName): %s",
+		      bg_err_str(rc));
+
+#endif
+}
+
+/**
+ * add the block record to the DB
+ */
+static int _post_allocate(bg_record_t *bg_record)
+{
+	int rc = SLURM_SUCCESS;
+#if defined HAVE_BG_FILES
+	int i;
+	pm_partition_id_t block_id;
+	uid_t my_uid;
+
+	/* Add partition record to the DB */
+	debug2("adding block");
+
+	for(i=0;i<MAX_ADD_RETRY; i++) {
+		if ((rc = _add_block(bg_record->bg_block))
+		    != SLURM_SUCCESS) {
+			error("bridge_add_block(): %s", bg_err_str(rc));
+			rc = SLURM_ERROR;
+		} else {
+			rc = SLURM_SUCCESS;
+			break;
+		}
+		sleep(3);
+	}
+	if (rc == SLURM_ERROR) {
+		info("going to free it");
+		if ((rc = bridge_free_block(bg_record->bg_block))
+		    != SLURM_SUCCESS)
+			error("bridge_free_block(): %s", bg_err_str(rc));
+		fatal("couldn't add last block.");
+	}
+	debug2("done adding");
+
+	/* Get back the new block id */
+	if ((rc = bridge_get_data(bg_record->bg_block, RM_PartitionID,
+				  &block_id))
+	    != SLURM_SUCCESS) {
+		error("bridge_get_data(RM_PartitionID): %s",
+		      bg_err_str(rc));
+		bg_record->bg_block_id = xstrdup("UNKNOWN");
+	} else {
+		if (!block_id) {
+			error("No Block ID was returned from database");
+			return SLURM_ERROR;
+		}
+		bg_record->bg_block_id = xstrdup(block_id);
+
+		free(block_id);
+
+		xfree(bg_record->target_name);
+
+
+		bg_record->target_name =
+			xstrdup(bg_conf->slurm_user_name);
+
+		xfree(bg_record->user_name);
+		bg_record->user_name =
+			xstrdup(bg_conf->slurm_user_name);
+
+		if (uid_from_string (bg_record->user_name, &my_uid) < 0)
+			error("uid_from_string(%s): %m", bg_record->user_name);
+		else
+			bg_record->user_uid = my_uid;
+	}
+	/* We are done with the block */
+	if ((rc = bridge_free_block(bg_record->bg_block)) != SLURM_SUCCESS)
+		error("bridge_free_block(): %s", bg_err_str(rc));
+#else
+	/* We are just looking for a real number here no need for a
+	   base conversion
+	*/
+	if (!bg_record->bg_block_id) {
+		struct tm my_tm;
+		struct timeval my_tv;
+		gettimeofday(&my_tv, NULL);
+		localtime_r(&my_tv.tv_sec, &my_tm);
+		bg_record->bg_block_id = xstrdup_printf(
+			"RMP%2.2d%2.2s%2.2d%2.2d%2.2d%3.3ld",
+			my_tm.tm_mday, mon_abbr(my_tm.tm_mon),
+			my_tm.tm_hour, my_tm.tm_min, my_tm.tm_sec,
+			my_tv.tv_usec/1000);
+		/* Since we divide by 1000 here we need to sleep that
+		   long to get a unique id. It takes longer than this
+		   in a real system so we don't worry about it. */
+		usleep(1000);
+	}
+#endif
+
+	return rc;
+}
+
+#if defined HAVE_BG_FILES
+
+static int _set_ionodes(bg_record_t *bg_record, int io_start, int io_nodes)
+{
+	char bitstring[BITSIZE];
+
+	if (!bg_record)
+		return SLURM_ERROR;
+
+	bg_record->ionode_bitmap = bit_alloc(bg_conf->ionodes_per_mp);
+	/* Set the correct ionodes being used in this block */
+	bit_nset(bg_record->ionode_bitmap, io_start, io_start+io_nodes);
+	bit_fmt(bitstring, BITSIZE, bg_record->ionode_bitmap);
+	bg_record->ionode_str = xstrdup(bitstring);
+	return SLURM_SUCCESS;
+}
+
+static int _get_syms(int n_syms, const char *names[], void *ptrs[])
+{
+        int i, count;
+#ifdef HAVE_BGL
+#ifdef BG_DB2_SO
+	void *db_handle = NULL;
+	db_handle = dlopen (BG_DB2_SO, RTLD_LAZY);
+	if (!db_handle) {
+		have_db2 = false;
+		debug("%s", dlerror());
+		return 0;
+	}
+	dlclose(db_handle);
+#else
+	fatal("No BG_DB2_SO is set, can't run.");
+#endif
+#endif // HAVE_BGL
+
+#ifdef BG_BRIDGE_SO
+	handle = dlopen (BG_BRIDGE_SO, RTLD_LAZY);
+	if (!handle) {
+		have_db2 = false;
+		debug("%s", dlerror());
+		return 0;
+	}
+#else
+	fatal("No BG_BRIDGE_SO is set, can't run.");
+#endif
+
+	dlerror();    /* Clear any existing error */
+        count = 0;
+        for ( i = 0; i < n_syms; ++i ) {
+                ptrs[i] = dlsym(handle, names[i]);
+                if (ptrs[i]) {
+			++count;
+		} else
+			fatal("Can't find %s in api", names[i]);
+	}
+        return count;
+}
+
+static int _block_get_and_set_mps(bg_record_t *bg_record)
+{
+	int rc, i, j;
+	int cnt = 0;
+	int switch_cnt = 0;
+	rm_switch_t *curr_switch = NULL;
+	rm_BP_t *curr_mp = NULL;
+	char *switchid = NULL;
+	rm_connection_t curr_conn;
+	int dim;
+	ba_mp_t *ba_node = NULL;
+	ba_switch_t *ba_switch = NULL;
+	ba_mp_t *ba_mp = NULL;
+	ListIterator itr = NULL;
+	rm_partition_t *block_ptr = (rm_partition_t *)bg_record->bg_block;
+
+	debug2("getting info for block %s", bg_record->bg_block_id);
+
+	if ((rc = bridge_get_data(block_ptr, RM_PartitionSwitchNum,
+				  &switch_cnt)) != SLURM_SUCCESS) {
+		error("bridge_get_data(RM_PartitionSwitchNum): %s",
+		      bg_err_str(rc));
+		goto end_it;
+	}
+	if (!switch_cnt) {
+		debug3("no switch_cnt");
+		if ((rc = bridge_get_data(block_ptr,
+					  RM_PartitionFirstBP,
+					  &curr_mp))
+		    != SLURM_SUCCESS) {
+			error("bridge_get_data: "
+			      "RM_PartitionFirstBP: %s",
+			      bg_err_str(rc));
+			goto end_it;
+		}
+		if ((rc = bridge_get_data(curr_mp, RM_BPID, &switchid))
+		    != SLURM_SUCCESS) {
+			error("bridge_get_data: RM_SwitchBPID: %s",
+			      bg_err_str(rc));
+			goto end_it;
+		}
+
+		ba_mp = loc2ba_mp(switchid);
+		if (!ba_mp) {
+			error("find_bp_loc: bpid %s not known", switchid);
+			goto end_it;
+		}
+		ba_node = ba_copy_mp(ba_mp);
+		ba_setup_mp(ba_node, 0, 0);
+		ba_node->used = BA_MP_USED_TRUE;
+		if (!bg_record->ba_mp_list)
+			bg_record->ba_mp_list = list_create(destroy_ba_mp);
+		list_push(bg_record->ba_mp_list, ba_node);
+		return SLURM_SUCCESS;
+	}
+	for (i=0; i<switch_cnt; i++) {
+		if (i) {
+			if ((rc = bridge_get_data(block_ptr,
+						  RM_PartitionNextSwitch,
+						  &curr_switch))
+			    != SLURM_SUCCESS) {
+				error("bridge_get_data: "
+				      "RM_PartitionNextSwitch: %s",
+				      bg_err_str(rc));
+				goto end_it;
+			}
+		} else {
+			if ((rc = bridge_get_data(block_ptr,
+						  RM_PartitionFirstSwitch,
+						  &curr_switch))
+			    != SLURM_SUCCESS) {
+				error("bridge_get_data: "
+				      "RM_PartitionFirstSwitch: %s",
+				      bg_err_str(rc));
+				goto end_it;
+			}
+		}
+		if ((rc = bridge_get_data(curr_switch, RM_SwitchDim, &dim))
+		    != SLURM_SUCCESS) {
+			error("bridge_get_data: RM_SwitchDim: %s",
+			      bg_err_str(rc));
+			goto end_it;
+		}
+		if ((rc = bridge_get_data(curr_switch, RM_SwitchBPID,
+					  &switchid))
+		    != SLURM_SUCCESS) {
+			error("bridge_get_data: RM_SwitchBPID: %s",
+			      bg_err_str(rc));
+			goto end_it;
+		}
+
+		ba_mp = loc2ba_mp(switchid);
+		if (!ba_mp) {
+			error("find_bp_loc: bpid %s not known", switchid);
+			goto end_it;
+		}
+
+		if ((rc = bridge_get_data(curr_switch, RM_SwitchConnNum, &cnt))
+		    != SLURM_SUCCESS) {
+			error("bridge_get_data: RM_SwitchBPID: %s",
+			      bg_err_str(rc));
+			goto end_it;
+		}
+		debug2("switch id = %s dim %d conns = %d",
+		       switchid, dim, cnt);
+
+		if (bg_record->ba_mp_list) {
+			itr = list_iterator_create(bg_record->ba_mp_list);
+			while ((ba_node = list_next(itr))) {
+				if (ba_node->coord[X] == ba_mp->coord[X] &&
+				    ba_node->coord[Y] == ba_mp->coord[Y] &&
+				    ba_node->coord[Z] == ba_mp->coord[Z])
+					break;	/* we found it */
+			}
+			list_iterator_destroy(itr);
+		}
+
+		if (!ba_node) {
+			ba_node = ba_copy_mp(ba_mp);
+			ba_setup_mp(ba_node, 0, 0);
+			if (!bg_record->ba_mp_list)
+				bg_record->ba_mp_list =
+					list_create(destroy_ba_mp);
+			list_push(bg_record->ba_mp_list, ba_node);
+		}
+		ba_switch = &ba_node->axis_switch[dim];
+		for (j=0; j<cnt; j++) {
+			if (j) {
+				if ((rc = bridge_get_data(
+					     curr_switch,
+					     RM_SwitchNextConnection,
+					     &curr_conn))
+				    != SLURM_SUCCESS) {
+					error("bridge_get_data: "
+					      "RM_SwitchNextConnection: %s",
+					      bg_err_str(rc));
+					goto end_it;
+				}
+			} else {
+				if ((rc = bridge_get_data(
+					     curr_switch,
+					     RM_SwitchFirstConnection,
+					     &curr_conn))
+				    != SLURM_SUCCESS) {
+					error("bridge_get_data: "
+					      "RM_SwitchFirstConnection: %s",
+					      bg_err_str(rc));
+					goto end_it;
+				}
+			}
+
+			if (curr_conn.p1 == 1 && dim == X) {
+				if (ba_node->used) {
+					debug("I have already been to "
+					      "this node %s",
+					      ba_node->coord_str);
+					goto end_it;
+				}
+				ba_node->used = true;
+			}
+			debug3("connection going from %d -> %d",
+			       curr_conn.p1, curr_conn.p2);
+
+			if (ba_switch->int_wire[curr_conn.p1].used) {
+				debug("%s dim %d port %d "
+				      "is already in use",
+				      ba_node->coord_str,
+				      dim,
+				      curr_conn.p1);
+				goto end_it;
+			}
+			ba_switch->int_wire[curr_conn.p1].used = 1;
+			ba_switch->int_wire[curr_conn.p1].port_tar
+				= curr_conn.p2;
+
+			if (ba_switch->int_wire[curr_conn.p2].used) {
+				debug("%s dim %d port %d "
+				      "is already in use",
+				      ba_node->coord_str,
+				      dim,
+				      curr_conn.p2);
+				goto end_it;
+			}
+			ba_switch->int_wire[curr_conn.p2].used = 1;
+			ba_switch->int_wire[curr_conn.p2].port_tar
+				= curr_conn.p1;
+		}
+	}
+	return SLURM_SUCCESS;
+end_it:
+	if (bg_record->ba_mp_list) {
+		list_destroy(bg_record->ba_mp_list);
+		bg_record->ba_mp_list = NULL;
+	}
+	return SLURM_ERROR;
+}
+
+static bg_record_t *_translate_object_to_block(rm_partition_t *block_ptr,
+					       char *bg_block_id)
+{
+	int mp_cnt, i, nc_cnt, io_cnt, rc;
+	rm_element_t *mp_ptr = NULL;
+	rm_bp_id_t mpid;
+	char node_name_tmp[255], *user_name = NULL;
+
+	ba_mp_t *ba_mp = NULL;
+	char *tmp_char = NULL;
+
+	rm_nodecard_t *ncard = NULL;
+	int nc_id, io_start;
+
+	bool small = false;
+	hostlist_t hostlist;		/* expanded form of hosts */
+	bg_record_t *bg_record = (bg_record_t *)xmalloc(sizeof(bg_record_t));
+
+	bg_record->magic = BLOCK_MAGIC;
+	bg_record->bg_block = block_ptr;
+	bg_record->bg_block_id = xstrdup(bg_block_id);
+
+	/* we don't need anything else since we are just getting rid
+	   of the thing.
+	*/
+	if (!bg_recover)
+		return bg_record;
+
+#ifndef HAVE_BGL
+	if ((rc = bridge_get_data(block_ptr, RM_PartitionSize, &mp_cnt))
+	    != SLURM_SUCCESS) {
+		error("bridge_get_data(RM_PartitionSize): %s",
+		      bg_err_str(rc));
+		goto end_it;
+	}
+
+	if (mp_cnt==0) {
+		error("it appear we have 0 cnodes in block %s", bg_block_id);
+		goto end_it;
+	}
+	bg_record->cnode_cnt = mp_cnt;
+	bg_record->cpu_cnt = bg_conf->cpu_ratio * bg_record->cnode_cnt;
+#endif
+
+	if ((rc = bridge_get_data(block_ptr, RM_PartitionBPNum, &mp_cnt))
+	    != SLURM_SUCCESS) {
+		error("bridge_get_data(RM_BPNum): %s",
+		      bg_err_str(rc));
+		goto end_it;
+	}
+
+	if (mp_cnt==0) {
+		error("it appear we have 0 Midplanes in block %s", bg_block_id);
+		goto end_it;
+	}
+	bg_record->mp_count = mp_cnt;
+
+	debug3("has %d MPs", bg_record->mp_count);
+
+	if ((rc = bridge_get_data(block_ptr, RM_PartitionSwitchNum,
+				  &bg_record->switch_count))
+	    != SLURM_SUCCESS) {
+		error("bridge_get_data(RM_PartitionSwitchNum): %s",
+		      bg_err_str(rc));
+		goto end_it;
+	}
+
+	if ((rc = bridge_get_data(block_ptr, RM_PartitionSmall,
+				  &small))
+	    != SLURM_SUCCESS) {
+		error("bridge_get_data(RM_PartitionSmall): %s",
+		      bg_err_str(rc));
+		goto end_it;
+	}
+
+	if (small) {
+		if ((rc = bridge_get_data(block_ptr,
+					  RM_PartitionOptions,
+					  &tmp_char))
+		    != SLURM_SUCCESS) {
+			error("bridge_get_data(RM_PartitionOptions): "
+			      "%s", bg_err_str(rc));
+			goto end_it;
+		} else if (tmp_char) {
+			switch(tmp_char[0]) {
+			case 's':
+				bg_record->conn_type[0] = SELECT_HTC_S;
+				break;
+			case 'd':
+				bg_record->conn_type[0] = SELECT_HTC_D;
+				break;
+			case 'v':
+				bg_record->conn_type[0] = SELECT_HTC_V;
+				break;
+			case 'l':
+				bg_record->conn_type[0] = SELECT_HTC_L;
+				break;
+			default:
+				bg_record->conn_type[0] = SELECT_SMALL;
+				break;
+			}
+
+			free(tmp_char);
+		} else
+			bg_record->conn_type[0] = SELECT_SMALL;
+
+		if ((rc = bridge_get_data(block_ptr,
+					  RM_PartitionFirstNodeCard,
+					  &ncard))
+		    != SLURM_SUCCESS) {
+			error("bridge_get_data("
+			      "RM_PartitionFirstNodeCard): %s",
+			      bg_err_str(rc));
+			goto end_it;
+		}
+
+		if ((rc = bridge_get_data(block_ptr,
+					  RM_PartitionNodeCardNum,
+					  &nc_cnt))
+		    != SLURM_SUCCESS) {
+			error("bridge_get_data("
+			      "RM_PartitionNodeCardNum): %s",
+			      bg_err_str(rc));
+			goto end_it;
+		}
+#ifdef HAVE_BGL
+		/* Translate nodecard count to ionode count */
+		if ((io_cnt = nc_cnt * bg_conf->io_ratio))
+			io_cnt--;
+
+		nc_id = 0;
+		if (nc_cnt == 1)
+			bridge_find_nodecard_num(
+				block_ptr, ncard, &nc_id);
+
+		bg_record->cnode_cnt =
+			nc_cnt * bg_conf->nodecard_cnode_cnt;
+		bg_record->cpu_cnt =
+			bg_conf->cpu_ratio * bg_record->cnode_cnt;
+
+		if ((rc = bridge_get_data(ncard,
+					  RM_NodeCardQuarter,
+					  &io_start))
+		    != SLURM_SUCCESS) {
+			error("bridge_get_data(CardQuarter): %d",rc);
+			goto end_it;
+		}
+		io_start *= bg_conf->quarter_ionode_cnt;
+		io_start += bg_conf->nodecard_ionode_cnt * (nc_id%4);
+#else
+		/* Translate nodecard count to ionode count */
+		if ((io_cnt = nc_cnt * bg_conf->io_ratio))
+			io_cnt--;
+
+		if ((rc = bridge_get_data(ncard,
+					  RM_NodeCardID,
+					  &tmp_char))
+		    != SLURM_SUCCESS) {
+			error("bridge_get_data(RM_NodeCardID): %d",rc);
+			goto end_it;
+		}
+
+		if (!tmp_char)
+			goto end_it;
+
+		/* From the first nodecard id we can figure
+		   out where to start from with the alloc of ionodes.
+		*/
+		nc_id = atoi((char*)tmp_char+1);
+		free(tmp_char);
+		io_start = nc_id * bg_conf->io_ratio;
+		if (bg_record->cnode_cnt <
+		    bg_conf->nodecard_cnode_cnt) {
+			rm_ionode_t *ionode;
+
+			/* figure out the ionode we are using */
+			if ((rc = bridge_get_data(
+				     ncard,
+				     RM_NodeCardFirstIONode,
+				     &ionode)) != SLURM_SUCCESS) {
+				error("bridge_get_data("
+				      "RM_NodeCardFirstIONode): %d",
+				      rc);
+				goto end_it;
+			}
+			if ((rc = bridge_get_data(ionode,
+						  RM_IONodeID,
+						  &tmp_char))
+			    != SLURM_SUCCESS) {
+				error("bridge_get_data("
+				      "RM_NodeCardIONodeNum): %s",
+				      bg_err_str(rc));
+				rc = SLURM_ERROR;
+				goto end_it;
+			}
+
+			if (!tmp_char)
+				goto end_it;
+			/* just add the ionode num to the
+			 * io_start */
+			io_start += atoi((char*)tmp_char+1);
+			free(tmp_char);
+			/* make sure io_cnt is 0 since we are only
+			 * using 1 ionode */
+			io_cnt = 0;
+		}
+#endif
+		if (_set_ionodes(bg_record, io_start, io_cnt)
+		    == SLURM_ERROR)
+			error("couldn't create ionode_bitmap "
+			      "for ionodes %d to %d",
+			      io_start, io_start+io_cnt);
+		debug3("%s uses ionodes %s",
+		       bg_record->bg_block_id,
+		       bg_record->ionode_str);
+	} else {
+		rm_connection_type_t conn_type;
+#ifdef HAVE_BGL
+		bg_record->cpu_cnt = bg_conf->cpus_per_mp
+			* bg_record->mp_count;
+		bg_record->cnode_cnt =  bg_conf->mp_cnode_cnt
+			* bg_record->mp_count;
+#endif
+		/* Don't pass a pointer to uint16_t into this; it messes things up. */
+		if ((rc = bridge_get_data(block_ptr,
+					  RM_PartitionConnection,
+					  &conn_type))
+		    != SLURM_SUCCESS) {
+			error("bridge_get_data"
+			      "(RM_PartitionConnection): %s",
+			      bg_err_str(rc));
+			goto end_it;
+		}
+		bg_record->conn_type[0] = conn_type;
+		/* Set the bitmap blank here if it is a full
+		   node we don't want anything set we also
+		   don't want the bg_record->ionodes set.
+		*/
+		bg_record->ionode_bitmap =
+			bit_alloc(bg_conf->ionodes_per_mp);
+	}
+
+	_block_get_and_set_mps(bg_record);
+
+	if (!bg_record->ba_mp_list)
+		fatal("couldn't get the wiring info for block %s",
+		      bg_record->bg_block_id);
+
+	hostlist = hostlist_create(NULL);
+
+	for (i=0; i<mp_cnt; i++) {
+		if (i) {
+			if ((rc = bridge_get_data(block_ptr,
+						  RM_PartitionNextBP,
+						  &mp_ptr))
+			    != SLURM_SUCCESS) {
+				error("bridge_get_data(RM_NextBP): %s",
+				      bg_err_str(rc));
+				rc = SLURM_ERROR;
+				break;
+			}
+		} else {
+			if ((rc = bridge_get_data(block_ptr,
+						  RM_PartitionFirstBP,
+						  &mp_ptr))
+			    != SLURM_SUCCESS) {
+				error("bridge_get_data"
+				      "(RM_FirstBP): %s",
+				      bg_err_str(rc));
+				rc = SLURM_ERROR;
+				break;
+			}
+		}
+		if ((rc = bridge_get_data(mp_ptr, RM_BPID, &mpid))
+		    != SLURM_SUCCESS) {
+			error("bridge_get_data(RM_BPID): %s",
+			      bg_err_str(rc));
+			rc = SLURM_ERROR;
+			break;
+		}
+
+		if (!mpid) {
+			error("No MP ID was returned from database");
+			goto end_it;
+		}
+
+		ba_mp = loc2ba_mp(mpid);
+
+		if (!ba_mp) {
+			fatal("Could not find coordinates for "
+			      "MP ID %s", (char *) mpid);
+		}
+		free(mpid);
+
+
+		snprintf(node_name_tmp,
+			 sizeof(node_name_tmp),
+			 "%s%s",
+			 bg_conf->slurm_node_prefix,
+			 ba_mp->coord_str);
+
+
+		hostlist_push(hostlist, node_name_tmp);
+	}
+	bg_record->mp_str = hostlist_ranged_string_xmalloc(hostlist);
+	hostlist_destroy(hostlist);
+	debug3("got nodes of %s", bg_record->mp_str);
+	// need to get the 000x000 range for nodes
+	// also need to get coords
+
+#ifdef HAVE_BGL
+	if ((rc = bridge_get_data(block_ptr, RM_PartitionMode,
+				  &bg_record->node_use))
+	    != SLURM_SUCCESS) {
+		error("bridge_get_data(RM_PartitionMode): %s",
+		      bg_err_str(rc));
+	}
+#endif
+	process_nodes(bg_record, true);
+#ifdef HAVE_BGL
+	/* get the images of the block */
+	if ((rc = bridge_get_data(block_ptr,
+				  RM_PartitionBlrtsImg,
+				  &user_name))
+	    != SLURM_SUCCESS) {
+		error("bridge_get_data(RM_PartitionBlrtsImg): %s",
+		      bg_err_str(rc));
+		goto end_it;
+	}
+	if (!user_name) {
+		error("No BlrtsImg was returned from database");
+		goto end_it;
+	}
+	bg_record->blrtsimage = xstrdup(user_name);
+
+	if ((rc = bridge_get_data(block_ptr,
+				  RM_PartitionLinuxImg,
+				  &user_name))
+	    != SLURM_SUCCESS) {
+		error("bridge_get_data(RM_PartitionLinuxImg): %s",
+		      bg_err_str(rc));
+		goto end_it;
+	}
+	if (!user_name) {
+		error("No LinuxImg was returned from database");
+		goto end_it;
+	}
+	bg_record->linuximage = xstrdup(user_name);
+
+	if ((rc = bridge_get_data(block_ptr,
+				  RM_PartitionRamdiskImg,
+				  &user_name))
+	    != SLURM_SUCCESS) {
+		error("bridge_get_data(RM_PartitionRamdiskImg): %s",
+		      bg_err_str(rc));
+		goto end_it;
+	}
+	if (!user_name) {
+		error("No RamdiskImg was returned from database");
+		goto end_it;
+	}
+	bg_record->ramdiskimage = xstrdup(user_name);
+
+#else
+	if ((rc = bridge_get_data(block_ptr,
+				  RM_PartitionCnloadImg,
+				  &user_name))
+	    != SLURM_SUCCESS) {
+		error("bridge_get_data(RM_PartitionCnloadImg): %s",
+		      bg_err_str(rc));
+		goto end_it;
+	}
+	if (!user_name) {
+		error("No CnloadImg was returned from database");
+		goto end_it;
+	}
+	bg_record->linuximage = xstrdup(user_name);
+
+	if ((rc = bridge_get_data(block_ptr,
+				  RM_PartitionIoloadImg,
+				  &user_name))
+	    != SLURM_SUCCESS) {
+		error("bridge_get_data(RM_PartitionIoloadImg): %s",
+		      bg_err_str(rc));
+		goto end_it;
+	}
+	if (!user_name) {
+		error("No IoloadImg was returned from database");
+		goto end_it;
+	}
+	bg_record->ramdiskimage = xstrdup(user_name);
+
+#endif
+	if ((rc = bridge_get_data(block_ptr,
+				  RM_PartitionMloaderImg,
+				  &user_name))
+	    != SLURM_SUCCESS) {
+		error("bridge_get_data(RM_PartitionMloaderImg): %s",
+		      bg_err_str(rc));
+		goto end_it;
+	}
+	if (!user_name) {
+		error("No MloaderImg was returned from database");
+		goto end_it;
+	}
+	bg_record->mloaderimage = xstrdup(user_name);
+	/* This needs to happen or it will be trash after the
+	   free_block_list */
+	bg_record->bg_block = NULL;
+
+	return bg_record;
+end_it:
+	error("Something bad happened with load of %s", bg_block_id);
+	if (bg_recover) {
+		error("Can't use %s not adding", bg_block_id);
+		destroy_bg_record(bg_record);
+		bg_record = NULL;
+	}
+	return bg_record;
+}
+#endif
+
+extern int bridge_init(char *properties_file)
+{
+#ifdef HAVE_BG_FILES
+	static const char *syms[] = {
+		"rm_set_serial",
+#ifdef HAVE_BGP
+		"rm_get_BG",
+		"rm_free_BG",
+		"rm_new_ionode",
+		"rm_free_ionode",
+#else
+		"rm_get_BGL",
+		"rm_free_BGL",
+#endif
+		"rm_add_partition",
+		"rm_get_partition",
+		"rm_get_partition_info",
+		"rm_modify_partition",
+		"rm_set_part_owner",
+		"rm_add_part_user",
+		"rm_remove_part_user",
+		"rm_remove_partition",
+		"rm_get_partitions",
+		"rm_get_partitions_info",
+		"rm_get_job",
+		"rm_get_jobs",
+		"rm_get_nodecards",
+		"rm_new_nodecard",
+		"rm_free_nodecard",
+		"rm_new_partition",
+		"rm_free_partition",
+		"rm_free_job",
+		"rm_free_partition_list",
+		"rm_free_job_list",
+		"rm_free_nodecard_list",
+		"rm_get_data",
+		"rm_set_data",
+		"jm_signal_job",
+		"pm_create_partition",
+		"pm_destroy_partition",
+		"setSayMessageParams"
+	};
+	int n_syms;
+	int rc;
+
+	if (initialized)
+		return 1;
+
+	n_syms = sizeof( syms ) / sizeof( char * );
+
+	initialized = true;
+	if (!_get_syms(n_syms, syms, (void **) &bridge_api))
+		return 0;
+#ifdef BG_SERIAL
+	debug("setting the serial to %s", BG_SERIAL);
+	slurm_mutex_lock(&api_file_mutex);
+	rc = _bg_errtrans((*(bridge_api.set_serial))(BG_SERIAL));
+	slurm_mutex_unlock(&api_file_mutex);
+	debug2("done %d", rc);
+#else
+	fatal("No BG_SERIAL is set, can't run.");
+#endif
+	bridge_status_init();
+#endif
+	return 1;
+
+}
+
+extern int bridge_fini()
+{
+	if (handle)
+		dlclose(handle);
+	bridge_status_fini();
+
+	initialized = false;
+
+	return SLURM_SUCCESS;
+}
+
+extern int bridge_get_size(int *size)
+{
+#ifdef HAVE_BG_FILES
+	rm_size3D_t mp_size;
+	int rc = SLURM_ERROR;
+
+	if (!bg)
+		return rc;
+
+	if ((rc = bridge_get_data(bg, RM_Msize, &mp_size)) != SLURM_SUCCESS) {
+		error("bridge_get_data(RM_Msize): %d", rc);
+		return rc;
+	}
+
+	size[X] = mp_size.X;
+	size[Y] = mp_size.Y;
+	size[Z] = mp_size.Z;
+
+#endif /* HAVE_BG_FILES */
+	return SLURM_SUCCESS;
+}
+
+extern int bridge_setup_system()
+{
+#if defined HAVE_BG_FILES
+	static bool inited = false;
+	int rc;
+	rm_BP_t *my_mp = NULL;
+	int mp_num, i;
+	char *mp_id = NULL;
+	rm_location_t mp_loc;
+	ba_mp_t *curr_mp;
+
+	if (inited)
+		return SLURM_SUCCESS;
+
+	if (!bridge_init(NULL))
+		return SLURM_ERROR;
+
+	inited = true;
+
+	if (!have_db2) {
+		error("Can't access DB2 library, run from service node");
+		return -1;
+	}
+
+#ifdef HAVE_BGL
+	if (!getenv("DB2INSTANCE") || !getenv("VWSPATH")) {
+		error("Missing DB2INSTANCE or VWSPATH env var.  "
+		      "Execute 'db2profile'");
+		return -1;
+	}
+#endif
+
+	if (!bg) {
+		if ((rc = bridge_get_bg(&bg)) != SLURM_SUCCESS) {
+			error("bridge_get_BG(): %d", rc);
+			return -1;
+		}
+	}
+
+	if ((rc = bridge_get_data(bg, RM_BPNum, &mp_num)) != SLURM_SUCCESS) {
+		error("bridge_get_data(RM_BPNum): %d", rc);
+		mp_num = 0;
+	}
+
+	for (i=0; i<mp_num; i++) {
+
+		if (i) {
+			if ((rc = bridge_get_data(bg, RM_NextBP, &my_mp))
+			    != SLURM_SUCCESS) {
+				error("bridge_get_data(RM_NextBP): %d", rc);
+				break;
+			}
+		} else {
+			if ((rc = bridge_get_data(bg, RM_FirstBP, &my_mp))
+			    != SLURM_SUCCESS) {
+				error("bridge_get_data(RM_FirstBP): %d", rc);
+				break;
+			}
+		}
+
+		if ((rc = bridge_get_data(my_mp, RM_BPID, &mp_id))
+		    != SLURM_SUCCESS) {
+			error("bridge_get_data(RM_BPID): %d", rc);
+			continue;
+		}
+
+		if (!mp_id) {
+			error("No BP ID was returned from database");
+			continue;
+		}
+
+		if ((rc = bridge_get_data(my_mp, RM_BPLoc, &mp_loc))
+		    != SLURM_SUCCESS) {
+			error("bridge_get_data(RM_BPLoc): %d", rc);
+			continue;
+		}
+
+		if (mp_loc.X > DIM_SIZE[X]
+		    || mp_loc.Y > DIM_SIZE[Y]
+		    || mp_loc.Z > DIM_SIZE[Z]) {
+			error("This location %c%c%c is not possible "
+			      "in our system %c%c%c",
+			      alpha_num[mp_loc.X],
+			      alpha_num[mp_loc.Y],
+			      alpha_num[mp_loc.Z],
+			      alpha_num[DIM_SIZE[X]],
+			      alpha_num[DIM_SIZE[Y]],
+			      alpha_num[DIM_SIZE[Z]]);
+			return 0;
+		}
+
+		curr_mp = &ba_main_grid[mp_loc.X][mp_loc.Y][mp_loc.Z];
+		curr_mp->loc = xstrdup(mp_id);
+
+		free(mp_id);
+	}
+#endif
+
+	return SLURM_SUCCESS;
+}
+
+extern int bridge_block_create(bg_record_t *bg_record)
+{
+	int rc = SLURM_SUCCESS;
+
+#if defined HAVE_BG_FILES
+	_new_block((rm_partition_t **)&bg_record->bg_block);
+#endif
+	_pre_allocate(bg_record);
+
+	if (bg_record->cpu_cnt < bg_conf->cpus_per_mp)
+		rc = configure_small_block(bg_record);
+	else
+		rc = configure_block_switches(bg_record);
+
+	if (rc == SLURM_SUCCESS)
+		rc = _post_allocate(bg_record);
+
+	return rc;
+}
+
+extern int bridge_block_boot(bg_record_t *bg_record)
+{
+#if defined HAVE_BG_FILES
+	int rc = BG_ERROR_CONNECTION_ERROR;
+	if (!bridge_init(NULL))
+		return rc;
+
+	slurm_mutex_lock(&api_file_mutex);
+	if ((rc = _bg_errtrans((*(bridge_api.set_part_owner))(
+				  bg_record->bg_block_id,
+				  bg_conf->slurm_user_name)))
+	    != SLURM_SUCCESS) {
+		error("bridge_set_block_owner(%s,%s): %s",
+		      bg_record->bg_block_id,
+		      bg_conf->slurm_user_name,
+		      bg_err_str(rc));
+		slurm_mutex_unlock(&api_file_mutex);
+		return rc;
+	}
+
+	rc = _bg_errtrans((*(bridge_api.create_partition))
+			  (bg_record->bg_block_id));
+	/* if (rc == BG_ERROR_INVALID_STATE) */
+	/* 	rc = BG_ERROR_BOOT_ERROR; */
+
+	slurm_mutex_unlock(&api_file_mutex);
+	return rc;
+#else
+	info("block %s is ready", bg_record->bg_block_id);
+	if (!block_ptr_exist_in_list(bg_lists->booted, bg_record))
+	 	list_push(bg_lists->booted, bg_record);
+	bg_record->state = BG_BLOCK_INITED;
+	last_bg_update = time(NULL);
+	return SLURM_SUCCESS;
+#endif
+}
+
+extern int bridge_block_free(bg_record_t *bg_record)
+{
+#if defined HAVE_BG_FILES
+	int rc = BG_ERROR_CONNECTION_ERROR;
+	if (!bridge_init(NULL))
+		return rc;
+
+	slurm_mutex_lock(&api_file_mutex);
+	rc = _bg_errtrans((*(bridge_api.destroy_partition))
+			  (bg_record->bg_block_id));
+	slurm_mutex_unlock(&api_file_mutex);
+	return rc;
+#else
+	return SLURM_SUCCESS;
+#endif
+}
+
+extern int bridge_block_remove(bg_record_t *bg_record)
+{
+#if defined HAVE_BG_FILES
+	int rc = BG_ERROR_CONNECTION_ERROR;
+	if (!bridge_init(NULL))
+		return rc;
+
+	slurm_mutex_lock(&api_file_mutex);
+	rc = _bg_errtrans((*(bridge_api.remove_partition))
+			  (bg_record->bg_block_id));
+	slurm_mutex_unlock(&api_file_mutex);
+	return rc;
+#else
+	return SLURM_SUCCESS;
+#endif
+}
+
+extern int bridge_block_add_user(bg_record_t *bg_record, char *user_name)
+{
+#if defined HAVE_BG_FILES
+	int rc = BG_ERROR_CONNECTION_ERROR;
+	if (!bridge_init(NULL))
+		return rc;
+
+	slurm_mutex_lock(&api_file_mutex);
+	rc = _bg_errtrans((*(bridge_api.add_part_user))
+			  (bg_record->bg_block_id, user_name));
+	slurm_mutex_unlock(&api_file_mutex);
+	return rc;
+#else
+	return SLURM_SUCCESS;
+#endif
+}
+
+extern int bridge_block_remove_user(bg_record_t *bg_record, char *user_name)
+{
+#if defined HAVE_BG_FILES
+	int rc = BG_ERROR_CONNECTION_ERROR;
+	if (!bridge_init(NULL))
+		return rc;
+
+	slurm_mutex_lock(&api_file_mutex);
+	rc = _bg_errtrans((*(bridge_api.remove_part_user))
+			  (bg_record->bg_block_id, user_name));
+	slurm_mutex_unlock(&api_file_mutex);
+	return rc;
+#else
+	return SLURM_SUCCESS;
+#endif
+}
+
+extern int bridge_block_remove_all_users(bg_record_t *bg_record,
+					 char *user_name)
+{
+	int returnc = REMOVE_USER_NONE;
+#ifdef HAVE_BG_FILES
+	char *user;
+	rm_partition_t *block_ptr = NULL;
+	int rc, i, user_count;
+
+	/* We can't use bridge_get_block_info here because users are
+	   filled in there.  This function is very slow but necessary
+	   here to get the correct block count and the users. */
+	if ((rc = bridge_get_block(bg_record->bg_block_id, &block_ptr))
+	    != SLURM_SUCCESS) {
+		if (rc == BG_ERROR_INCONSISTENT_DATA
+		    && bg_conf->layout_mode == LAYOUT_DYNAMIC)
+			return REMOVE_USER_FOUND;
+
+		error("bridge_get_block(%s): %s",
+		      bg_record->bg_block_id,
+		      bg_err_str(rc));
+		return REMOVE_USER_ERR;
+	}
+
+	if ((rc = bridge_get_data(block_ptr, RM_PartitionUsersNum,
+				  &user_count))
+	    != SLURM_SUCCESS) {
+		error("bridge_get_data(RM_PartitionUsersNum): %s",
+		      bg_err_str(rc));
+		returnc = REMOVE_USER_ERR;
+		user_count = 0;
+	} else
+		if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE)
+			info("got %d users for %s", user_count,
+			     bg_record->bg_block_id);
+	for(i=0; i<user_count; i++) {
+		if (i) {
+			if ((rc = bridge_get_data(block_ptr,
+						  RM_PartitionNextUser,
+						  &user))
+			    != SLURM_SUCCESS) {
+				error("bridge_get_data"
+				      "(RM_PartitionNextUser): %s",
+				      bg_err_str(rc));
+				returnc = REMOVE_USER_ERR;
+				break;
+			}
+		} else {
+			if ((rc = bridge_get_data(block_ptr,
+						  RM_PartitionFirstUser,
+						  &user))
+			    != SLURM_SUCCESS) {
+				error("bridge_get_data"
+				      "(RM_PartitionFirstUser): %s",
+				      bg_err_str(rc));
+				returnc = REMOVE_USER_ERR;
+				break;
+			}
+		}
+		if (!user) {
+			error("No user was returned from database");
+			continue;
+		}
+		if (!strcmp(user, bg_conf->slurm_user_name)) {
+			free(user);
+			continue;
+		}
+
+		if (user_name) {
+			if (!strcmp(user, user_name)) {
+				returnc = REMOVE_USER_FOUND;
+				free(user);
+				continue;
+			}
+		}
+
+		info("Removing user %s from Block %s",
+		     user, bg_record->bg_block_id);
+		if ((rc = _remove_block_user(bg_record->bg_block_id, user))
+		    != SLURM_SUCCESS) {
+			debug("user %s isn't on block %s",
+			      user,
+			      bg_record->bg_block_id);
+		}
+		free(user);
+	}
+	if ((rc = bridge_free_block(block_ptr)) != SLURM_SUCCESS) {
+		error("bridge_free_block(): %s", bg_err_str(rc));
+	}
+#endif
+	return returnc;
+}
+
+/*
+ * Download from MMCS the initial BG block information
+ */
+extern int bridge_blocks_load_curr(List curr_block_list)
+{
+	int rc = SLURM_SUCCESS;
+#if defined HAVE_BG_FILES
+
+	int mp_cnt;
+	rm_partition_t *block_ptr = NULL;
+	char *user_name = NULL;
+	bg_record_t *bg_record = NULL;
+	uid_t my_uid;
+
+	int block_number, block_count;
+	char *bg_block_id = NULL;
+
+	rm_partition_list_t *block_list = NULL;
+	rm_partition_state_flag_t state = PARTITION_ALL_FLAG;
+
+	bridge_setup_system();
+
+	if (bg_recover) {
+		if ((rc = bridge_get_blocks(state, &block_list))
+		    != SLURM_SUCCESS) {
+			error("2 rm_get_blocks(): %s", bg_err_str(rc));
+			return SLURM_ERROR;
+		}
+	} else {
+		if ((rc = bridge_get_blocks_info(state, &block_list))
+		    != SLURM_SUCCESS) {
+			error("2 rm_get_blocks_info(): %s", bg_err_str(rc));
+			return SLURM_ERROR;
+		}
+	}
+
+	if ((rc = bridge_get_data(block_list, RM_PartListSize, &block_count))
+	    != SLURM_SUCCESS) {
+		error("bridge_get_data(RM_PartListSize): %s",
+		      bg_err_str(rc));
+		block_count = 0;
+	}
+
+	info("querying the system for existing blocks");
+	for(block_number=0; block_number<block_count; block_number++) {
+		int state;
+		if (block_number) {
+			if ((rc = bridge_get_data(block_list,
+						  RM_PartListNextPart,
+						  &block_ptr))
+			    != SLURM_SUCCESS) {
+				error("bridge_get_data"
+				      "(RM_PartListNextPart): %s",
+				      bg_err_str(rc));
+				break;
+			}
+		} else {
+			if ((rc = bridge_get_data(block_list,
+						  RM_PartListFirstPart,
+						  &block_ptr))
+			    != SLURM_SUCCESS) {
+				error("bridge_get_data"
+				      "(RM_PartListFirstPart): %s",
+				      bg_err_str(rc));
+				break;
+			}
+		}
+
+		if ((rc = bridge_get_data(block_ptr, RM_PartitionID,
+					  &bg_block_id))
+		    != SLURM_SUCCESS) {
+			error("bridge_get_data(RM_PartitionID): %s",
+			      bg_err_str(rc));
+			continue;
+		}
+
+		if (!bg_block_id) {
+			error("No Block ID was returned from database");
+			continue;
+		}
+
+		if (strncmp("RMP", bg_block_id, 3)) {
+			free(bg_block_id);
+			continue;
+		}
+
+		/* find BG Block record */
+		if (!(bg_record = find_bg_record_in_list(
+			      curr_block_list, bg_block_id))) {
+			info("%s not found in the state file, adding",
+			     bg_block_id);
+			bg_record = _translate_object_to_block(
+				block_ptr, bg_block_id);
+			if (bg_record)
+				list_push(curr_block_list, bg_record);
+		}
+		free(bg_block_id);
+		bg_record->modifying = 1;
+		/* New BG Block record */
+
+		bg_record->job_running = NO_JOB_RUNNING;
+		if ((rc = bridge_get_data(block_ptr, RM_PartitionState,
+					  &state))
+		    != SLURM_SUCCESS) {
+			error("bridge_get_data(RM_PartitionState): %s",
+			      bg_err_str(rc));
+			continue;
+		} else if (state == BG_BLOCK_BOOTING)
+			bg_record->boot_state = 1;
+		if (bg_record->state & BG_BLOCK_ERROR_FLAG)
+			state |= BG_BLOCK_ERROR_FLAG;
+		bg_record->state = state;
+		debug3("Block %s is in state %s",
+		       bg_record->bg_block_id,
+		       bg_block_state_string(bg_record->state));
+
+		if ((rc = bridge_get_data(block_ptr, RM_PartitionUsersNum,
+					  &mp_cnt)) != SLURM_SUCCESS) {
+			error("bridge_get_data(RM_PartitionUsersNum): %s",
+			      bg_err_str(rc));
+			continue;
+		}
+
+		xfree(bg_record->user_name);
+		xfree(bg_record->target_name);
+
+		if (mp_cnt==0) {
+			bg_record->user_name =
+				xstrdup(bg_conf->slurm_user_name);
+			bg_record->target_name =
+				xstrdup(bg_conf->slurm_user_name);
+		} else {
+			user_name = NULL;
+			if ((rc = bridge_get_data(block_ptr,
+						  RM_PartitionFirstUser,
+						  &user_name))
+			    != SLURM_SUCCESS) {
+				error("bridge_get_data"
+				      "(RM_PartitionFirstUser): %s",
+				      bg_err_str(rc));
+				continue;
+			}
+			if (!user_name) {
+				error("No user name was "
+				      "returned from database");
+				continue;
+			}
+			bg_record->user_name = xstrdup(user_name);
+
+			if (!bg_record->boot_state)
+				bg_record->target_name =
+					xstrdup(bg_conf->slurm_user_name);
+			else
+				bg_record->target_name = xstrdup(user_name);
+			free(user_name);
+		}
+		if (uid_from_string (bg_record->user_name, &my_uid)<0){
+			error("uid_from_string(%s): %m",
+			      bg_record->user_name);
+		} else {
+			bg_record->user_uid = my_uid;
+		}
+	}
+	bridge_free_block_list(block_list);
+#endif
+	return rc;
+}
+
+extern void bridge_reset_block_list(List block_list)
+{
+	ListIterator itr = NULL;
+	bg_record_t *bg_record = NULL;
+	rm_job_list_t *job_list = NULL;
+	int jobs = 0;
+
+#if defined HAVE_BG_FILES
+	int live_states, rc;
+#endif
+
+	if (!block_list)
+		return;
+
+#if defined HAVE_BG_FILES
+	debug2("getting the job info");
+	live_states = JOB_ALL_FLAG
+		& (~JOB_TERMINATED_FLAG)
+		& (~JOB_KILLED_FLAG)
+		& (~JOB_ERROR_FLAG);
+
+	if ((rc = _get_jobs(live_states, &job_list)) != SLURM_SUCCESS) {
+		error("bridge_get_jobs(): %s", bg_err_str(rc));
+
+		return;
+	}
+
+	if ((rc = bridge_get_data(job_list, RM_JobListSize, &jobs))
+	    != SLURM_SUCCESS) {
+		error("bridge_get_data(RM_JobListSize): %s", bg_err_str(rc));
+		jobs = 0;
+	}
+	debug2("job count %d",jobs);
+#endif
+	itr = list_iterator_create(block_list);
+	while ((bg_record = list_next(itr))) {
+		info("Queue clearing of users of BG block %s",
+		     bg_record->bg_block_id);
+#ifndef HAVE_BG_FILES
+		/* simulate jobs running and need to be cleared from MMCS */
+		if (bg_record->job_ptr)
+			jobs = 1;
+#endif
+		_remove_jobs_on_block_and_reset(job_list, jobs,
+						bg_record->bg_block_id);
+	}
+	list_iterator_destroy(itr);
+
+#if defined HAVE_BG_FILES
+	if ((rc = _free_job_list(job_list)) != SLURM_SUCCESS)
+		error("bridge_free_job_list(): %s", bg_err_str(rc));
+#endif
+}
+
+extern void bridge_block_post_job(char *bg_block_id)
+{
+	int jobs = 0;
+	rm_job_list_t *job_list = NULL;
+
+#if defined HAVE_BG_FILES
+	int live_states, rc;
+
+	debug2("getting the job info");
+	live_states = JOB_ALL_FLAG
+		& (~JOB_TERMINATED_FLAG)
+		& (~JOB_KILLED_FLAG)
+		& (~JOB_ERROR_FLAG);
+
+	if ((rc = _get_jobs(live_states, &job_list)) != SLURM_SUCCESS) {
+		error("bridge_get_jobs(): %s", bg_err_str(rc));
+
+		return;
+	}
+
+	if ((rc = bridge_get_data(job_list, RM_JobListSize, &jobs))
+	    != SLURM_SUCCESS) {
+		error("bridge_get_data(RM_JobListSize): %s",
+		      bg_err_str(rc));
+		jobs = 0;
+	}
+	debug2("job count %d",jobs);
+#endif
+	_remove_jobs_on_block_and_reset(job_list, jobs,	bg_block_id);
+
+#if defined HAVE_BG_FILES
+	if ((rc = _free_job_list(job_list)) != SLURM_SUCCESS)
+		error("bridge_free_job_list(): %s", bg_err_str(rc));
+#endif
+}
+
+#if defined HAVE_BG_FILES
+extern status_t bridge_get_bg(my_bluegene_t **bg)
+{
+	int rc = BG_ERROR_CONNECTION_ERROR;
+	if (!bridge_init(NULL))
+		return rc;
+
+	slurm_mutex_lock(&api_file_mutex);
+	rc = _bg_errtrans((*(bridge_api.get_bg))(bg));
+	slurm_mutex_unlock(&api_file_mutex);
+	return rc;
+}
+
+extern status_t bridge_free_bg(my_bluegene_t *bg)
+{
+	int rc = BG_ERROR_CONNECTION_ERROR;
+	if (!bridge_init(NULL))
+		return rc;
+
+	slurm_mutex_lock(&api_file_mutex);
+	rc = _bg_errtrans((*(bridge_api.free_bg))(bg));
+	slurm_mutex_unlock(&api_file_mutex);
+	return rc;
+
+}
+
+extern int bridge_set_log_params(char *api_file_name, unsigned int level)
+{
+	static FILE *fp = NULL;
+        FILE *fp2 = NULL;
+	int rc = SLURM_SUCCESS;
+
+	if (!bridge_init(NULL))
+		return SLURM_ERROR;
+
+	slurm_mutex_lock(&api_file_mutex);
+	if (fp)
+		fp2 = fp;
+
+	fp = fopen(api_file_name, "a");
+
+	if (fp == NULL) {
+		error("can't open file for bridgeapi.log at %s: %m",
+		      api_file_name);
+		rc = SLURM_ERROR;
+		goto end_it;
+	}
+
+
+	(*(bridge_api.set_log_params))(fp, level);
+	/* In the libraries linked to from the bridge there are stderr
+	   messages sent that we would miss unless we dup this to the
+	   log */
+	//(void)dup2(fileno(fp), STDERR_FILENO);
+
+	if (fp2)
+		fclose(fp2);
+end_it:
+	slurm_mutex_unlock(&api_file_mutex);
+	return rc;
+}
+
+extern status_t bridge_get_data(rm_element_t* element,
+				enum rm_specification field, void *data)
+{
+	int rc = BG_ERROR_CONNECTION_ERROR;
+	int *state = (int *) data;
+	rm_connection_t *curr_conn = (rm_connection_t *)data;
+
+	if (!bridge_init(NULL))
+		return rc;
+
+	slurm_mutex_lock(&api_file_mutex);
+	rc = _bg_errtrans((*(bridge_api.get_data))(element, field, data));
+
+	/* Since these like to change from system to system, we have a
+	   nice enum that doesn't in bg_enums.h, convert now. */
+	switch (field) {
+	case RM_PartitionState:
+		state = (int *) data;
+		switch (*state) {
+		case RM_PARTITION_FREE:
+			*state = BG_BLOCK_FREE;
+			break;
+		case RM_PARTITION_CONFIGURING:
+			*state = BG_BLOCK_BOOTING;
+			break;
+#ifdef HAVE_BGL
+		case RM_PARTITION_BUSY:
+			*state = BG_BLOCK_BUSY;
+			break;
+#else
+		case RM_PARTITION_REBOOTING:
+			*state = BG_BLOCK_REBOOTING;
+			break;
+#endif
+		case RM_PARTITION_READY:
+			*state = BG_BLOCK_INITED;
+			break;
+		case RM_PARTITION_DEALLOCATING:
+			*state = BG_BLOCK_TERM;
+			break;
+		case RM_PARTITION_ERROR:
+			*state = BG_BLOCK_ERROR_FLAG;
+			break;
+		case RM_PARTITION_NAV:
+			*state = BG_BLOCK_NAV;
+			break;
+		default:
+			break;
+		}
+		break;
+	case RM_PartitionOptions:
+		break;
+#ifdef HAVE_BGL
+	case RM_PartitionMode:
+		break;
+#endif
+	case RM_SwitchFirstConnection:
+	case RM_SwitchNextConnection:
+		curr_conn = (rm_connection_t *)data;
+		switch(curr_conn->p1) {
+		case RM_PORT_S1:
+			curr_conn->p1 = 1;
+			break;
+		case RM_PORT_S2:
+			curr_conn->p1 = 2;
+			break;
+		case RM_PORT_S4:
+			curr_conn->p1 = 4;
+			break;
+		default:
+			error("1 unknown port %d",
+			      _port_enum(curr_conn->p1));
+			return SLURM_ERROR;
+		}
+
+		switch(curr_conn->p2) {
+		case RM_PORT_S0:
+			curr_conn->p2 = 0;
+			break;
+		case RM_PORT_S3:
+			curr_conn->p2 = 3;
+			break;
+		case RM_PORT_S5:
+			curr_conn->p2 = 5;
+			break;
+		default:
+			error("2 unknown port %d",
+			      _port_enum(curr_conn->p2));
+			return SLURM_ERROR;
+		}
+		break;
+	case RM_PortID:
+		state = (int *) data;
+		(*state) = _port_enum(*state);
+		break;
+	default:
+		break;
+	}
+
+	slurm_mutex_unlock(&api_file_mutex);
+	return rc;
+
+}
+
+extern status_t bridge_set_data(rm_element_t* element,
+				enum rm_specification field, void *data)
+{
+	int rc = BG_ERROR_CONNECTION_ERROR;
+	if (!bridge_init(NULL))
+		return rc;
+
+	slurm_mutex_lock(&api_file_mutex);
+	rc = _bg_errtrans((*(bridge_api.set_data))(element, field, data));
+	slurm_mutex_unlock(&api_file_mutex);
+	return rc;
+
+}
+
+extern status_t bridge_free_nodecard_list(rm_nodecard_list_t *nc_list)
+{
+	int rc = BG_ERROR_CONNECTION_ERROR;
+	if (!bridge_init(NULL))
+		return rc;
+
+	slurm_mutex_lock(&api_file_mutex);
+	rc = _bg_errtrans((*(bridge_api.free_nodecard_list))(nc_list));
+	slurm_mutex_unlock(&api_file_mutex);
+	return rc;
+
+}
+
+extern status_t bridge_free_block(rm_partition_t *partition)
+{
+	int rc = BG_ERROR_CONNECTION_ERROR;
+	if (!bridge_init(NULL))
+		return rc;
+
+	slurm_mutex_lock(&api_file_mutex);
+	rc = _bg_errtrans((*(bridge_api.free_partition))(partition));
+	slurm_mutex_unlock(&api_file_mutex);
+	return rc;
+
+}
+
+extern status_t bridge_block_modify(char *bg_block_id,
+				    int op, const void *data)
+{
+	int rc = BG_ERROR_CONNECTION_ERROR;
+	if (!bridge_init(NULL))
+		return rc;
+
+	slurm_mutex_lock(&api_file_mutex);
+	rc = _bg_errtrans((*(bridge_api.modify_partition))
+			  (bg_block_id, op, data));
+	slurm_mutex_unlock(&api_file_mutex);
+	return rc;
+
+}
+
+extern status_t bridge_get_block(char *bg_block_id,
+				 rm_partition_t **partition)
+{
+	int rc = BG_ERROR_CONNECTION_ERROR;
+	if (!bridge_init(NULL))
+		return rc;
+
+	slurm_mutex_lock(&api_file_mutex);
+	rc = _bg_errtrans((*(bridge_api.get_partition))
+			  (bg_block_id, partition));
+	slurm_mutex_unlock(&api_file_mutex);
+	return rc;
+
+}
+
+extern status_t bridge_get_block_info(char *bg_block_id,
+				      rm_partition_t **partition)
+{
+	int rc = BG_ERROR_CONNECTION_ERROR;
+	if (!bridge_init(NULL))
+		return rc;
+
+	/* this is here to make sure we don't lock up things with
+	   polling and the long running get_BG call */
+	rc = pthread_mutex_trylock(&api_file_mutex);
+	if (rc == EBUSY)
+		return rc;
+	else if (rc) {
+		errno = rc;
+		error("%s:%d %s: pthread_mutex_trylock(): %m",
+		      __FILE__, __LINE__, __CURRENT_FUNC__);
+	}
+
+	//slurm_mutex_lock(&api_file_mutex);
+	rc = _bg_errtrans((*(bridge_api.get_partition_info))
+			  (bg_block_id, partition));
+	slurm_mutex_unlock(&api_file_mutex);
+	return rc;
+
+}
+
+extern status_t bridge_get_blocks(rm_partition_state_flag_t flag,
+				  rm_partition_list_t **part_list)
+{
+	int rc = BG_ERROR_CONNECTION_ERROR;
+	if (!bridge_init(NULL))
+		return rc;
+
+	slurm_mutex_lock(&api_file_mutex);
+	rc = _bg_errtrans((*(bridge_api.get_partitions))(flag, part_list));
+	slurm_mutex_unlock(&api_file_mutex);
+	return rc;
+
+}
+
+extern status_t bridge_get_blocks_info(rm_partition_state_flag_t flag,
+				       rm_partition_list_t **part_list)
+{
+	int rc = BG_ERROR_CONNECTION_ERROR;
+	if (!bridge_init(NULL))
+		return rc;
+
+	slurm_mutex_lock(&api_file_mutex);
+	rc = _bg_errtrans((*(bridge_api.get_partitions_info))(flag, part_list));
+	slurm_mutex_unlock(&api_file_mutex);
+	return rc;
+
+}
+
+extern status_t bridge_free_block_list(rm_partition_list_t *part_list)
+{
+	int rc = BG_ERROR_CONNECTION_ERROR;
+	if (!bridge_init(NULL))
+		return rc;
+
+	slurm_mutex_lock(&api_file_mutex);
+	rc = _bg_errtrans((*(bridge_api.free_partition_list))(part_list));
+	slurm_mutex_unlock(&api_file_mutex);
+	return rc;
+
+}
+
+extern status_t bridge_new_nodecard(rm_nodecard_t **nodecard)
+{
+	int rc = BG_ERROR_CONNECTION_ERROR;
+	if (!bridge_init(NULL))
+		return rc;
+
+	slurm_mutex_lock(&api_file_mutex);
+	rc = _bg_errtrans((*(bridge_api.new_nodecard))(nodecard));
+	slurm_mutex_unlock(&api_file_mutex);
+	return rc;
+
+}
+
+extern status_t bridge_free_nodecard(rm_nodecard_t *nodecard)
+{
+	int rc = BG_ERROR_CONNECTION_ERROR;
+	if (!bridge_init(NULL))
+		return rc;
+
+	slurm_mutex_lock(&api_file_mutex);
+	rc = _bg_errtrans((*(bridge_api.free_nodecard))(nodecard));
+	slurm_mutex_unlock(&api_file_mutex);
+	return rc;
+
+}
+
+extern status_t bridge_get_nodecards(rm_bp_id_t bpid,
+				     rm_nodecard_list_t **nc_list)
+{
+	int rc = BG_ERROR_CONNECTION_ERROR;
+	if (!bridge_init(NULL))
+		return rc;
+
+	slurm_mutex_lock(&api_file_mutex);
+	rc = _bg_errtrans((*(bridge_api.get_nodecards))(bpid, nc_list));
+	slurm_mutex_unlock(&api_file_mutex);
+	return rc;
+
+}
+
+#ifdef HAVE_BGP
+extern status_t bridge_new_ionode(rm_ionode_t **ionode)
+{
+	int rc = BG_ERROR_CONNECTION_ERROR;
+	if (!bridge_init(NULL))
+		return rc;
+
+	slurm_mutex_lock(&api_file_mutex);
+	rc = _bg_errtrans((*(bridge_api.new_ionode))(ionode));
+	slurm_mutex_unlock(&api_file_mutex);
+	return rc;
+
+}
+
+extern status_t bridge_free_ionode(rm_ionode_t *ionode)
+{
+	int rc = BG_ERROR_CONNECTION_ERROR;
+	if (!bridge_init(NULL))
+		return rc;
+
+	slurm_mutex_lock(&api_file_mutex);
+	rc = _bg_errtrans((*(bridge_api.free_ionode))(ionode));
+	slurm_mutex_unlock(&api_file_mutex);
+	return rc;
+
+}
+#else
+extern int bridge_find_nodecard_num(rm_partition_t *block_ptr,
+				    rm_nodecard_t *ncard,
+				    int *nc_id)
+{
+	char *my_card_name = NULL;
+	char *card_name = NULL;
+	rm_bp_id_t mp_id = NULL;
+	int num = 0;
+	int i=0;
+	int rc;
+	rm_nodecard_list_t *ncard_list = NULL;
+	rm_BP_t *curr_mp = NULL;
+	rm_nodecard_t *ncard2;
+
+	xassert(block_ptr);
+	xassert(nc_id);
+
+	if ((rc = bridge_get_data(ncard,
+				  RM_NodeCardID,
+				  &my_card_name))
+	    != SLURM_SUCCESS) {
+		error("bridge_get_data(RM_NodeCardID): %s",
+		      bg_err_str(rc));
+	}
+
+	if ((rc = bridge_get_data(block_ptr,
+				  RM_PartitionFirstBP,
+				  &curr_mp))
+	    != SLURM_SUCCESS) {
+		error("bridge_get_data(RM_PartitionFirstBP): %s",
+		      bg_err_str(rc));
+	}
+	if ((rc = bridge_get_data(curr_mp, RM_BPID, &mp_id))
+	    != SLURM_SUCCESS) {
+		error("bridge_get_data(RM_BPID): %d", rc);
+		return SLURM_ERROR;
+	}
+
+	if ((rc = bridge_get_nodecards(mp_id, &ncard_list))
+	    != SLURM_SUCCESS) {
+		error("bridge_get_nodecards(%s): %d",
+		      mp_id, rc);
+		free(mp_id);
+		return SLURM_ERROR;
+	}
+	free(mp_id);
+	if ((rc = bridge_get_data(ncard_list, RM_NodeCardListSize, &num))
+	    != SLURM_SUCCESS) {
+		error("bridge_get_data(RM_NodeCardListSize): %s",
+		      bg_err_str(rc));
+		return SLURM_ERROR;
+	}
+
+	for(i=0; i<num; i++) {
+		if (i) {
+			if ((rc =
+			     bridge_get_data(ncard_list,
+					     RM_NodeCardListNext,
+					     &ncard2)) != SLURM_SUCCESS) {
+				error("bridge_get_data"
+				      "(RM_NodeCardListNext): %s",
+				      bg_err_str(rc));
+				rc = SLURM_ERROR;
+				goto cleanup;
+			}
+		} else {
+			if ((rc = bridge_get_data(ncard_list,
+						  RM_NodeCardListFirst,
+						  &ncard2)) != SLURM_SUCCESS) {
+				error("bridge_get_data"
+				      "(RM_NodeCardListFirst: %s",
+				      bg_err_str(rc));
+				rc = SLURM_ERROR;
+				goto cleanup;
+			}
+		}
+		if ((rc = bridge_get_data(ncard2,
+					  RM_NodeCardID,
+					  &card_name)) != SLURM_SUCCESS) {
+			error("bridge_get_data(RM_NodeCardID: %s",
+			      bg_err_str(rc));
+			rc = SLURM_ERROR;
+			goto cleanup;
+		}
+		if (strcmp(my_card_name, card_name)) {
+			free(card_name);
+			continue;
+		}
+		free(card_name);
+		(*nc_id) = i;
+		break;
+	}
+cleanup:
+	free(my_card_name);
+	return SLURM_SUCCESS;
+}
+#endif
+
+#endif /* HAVE_BG_FILES */
diff --git a/src/plugins/select/bluegene/plugin/state_test.c b/src/plugins/select/bluegene/bl/bridge_status.c
similarity index 59%
rename from src/plugins/select/bluegene/plugin/state_test.c
rename to src/plugins/select/bluegene/bl/bridge_status.c
index 52e0cadb6..b957ed6fb 100644
--- a/src/plugins/select/bluegene/plugin/state_test.c
+++ b/src/plugins/select/bluegene/bl/bridge_status.c
@@ -1,16 +1,14 @@
 /*****************************************************************************\
- *  state_test.c - Test state of Bluegene base partitions and switches.
- *  DRAIN nodes in SLURM that are not usable.
+ *  bridge_status.c - bluegene block information from the db2 database.
  *
  *  $Id$
  *****************************************************************************
- *  Copyright (C) 2004-2007 The Regents of the University of California.
- *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
+ *  Copyright (C) 2004-2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
- *  Written by Dan Phung <phung4@llnl.gov> and Morris Jette <jette1@llnl.gov>
+ *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -45,16 +43,28 @@
 
 #include <stdio.h>
 #include <string.h>
-#include <slurm/slurm.h>
 
-#include "src/common/log.h"
+#include "slurm/slurm.h"
+#include "../ba/block_allocator.h"
+#include "bridge_status.h"
+#include "../bg_status.h"
+
 #include "src/slurmctld/proc_req.h"
 #include "src/slurmctld/slurmctld.h"
-#include "bluegene.h"
 
-#define BUFSIZE 4096
+static List kill_job_list = NULL;
+static bool bridge_status_inited = false;
+
+#define MMCS_POLL_TIME 30	/* seconds between poll of MMCS for
+				 * down switches and nodes */
+#define BG_POLL_TIME 1	        /* seconds between poll of state
+				 * change in bg blocks */
+
+static pthread_t block_thread = 0;
+static pthread_t state_thread = 0;
+static pthread_mutex_t thread_flag_mutex = PTHREAD_MUTEX_INITIALIZER;
 
-#ifdef HAVE_BG_FILES
+#if defined HAVE_BG_FILES
 
 /* Find the specified BlueGene node ID and drain it from SLURM */
 static void _configure_node_down(rm_bp_id_t bp_id, my_bluegene_t *my_bg)
@@ -66,7 +76,7 @@ static void _configure_node_down(rm_bp_id_t bp_id, my_bluegene_t *my_bg)
 	rm_BP_state_t bp_state;
 	char bg_down_node[128];
 
-	if ((rc = bridge_get_data(my_bg, RM_BPNum, &bp_num)) != STATUS_OK) {
+	if ((rc = bridge_get_data(my_bg, RM_BPNum, &bp_num)) != SLURM_SUCCESS) {
 		error("bridge_get_data(RM_BPNum): %s", bg_err_str(rc));
 		bp_num = 0;
 	}
@@ -74,7 +84,7 @@ static void _configure_node_down(rm_bp_id_t bp_id, my_bluegene_t *my_bg)
 	for (i=0; i<bp_num; i++) {
 		if (i) {
 			if ((rc = bridge_get_data(my_bg, RM_NextBP, &my_bp))
-			    != STATUS_OK) {
+			    != SLURM_SUCCESS) {
 				error("bridge_get_data(RM_NextBP): %s",
 				      bg_err_str(rc));
 				continue;
@@ -82,7 +92,7 @@ static void _configure_node_down(rm_bp_id_t bp_id, my_bluegene_t *my_bg)
 		} else {
 			if ((rc = bridge_get_data(my_bg, RM_FirstBP, &my_bp))
 			    !=
-			    STATUS_OK) {
+			    SLURM_SUCCESS) {
 				error("bridge_get_data(RM_FirstBP): %s",
 				      bg_err_str(rc));
 				continue;
@@ -90,8 +100,9 @@ static void _configure_node_down(rm_bp_id_t bp_id, my_bluegene_t *my_bg)
 		}
 
 		if ((rc = bridge_get_data(my_bp, RM_BPID, &bpid))
-		    != STATUS_OK) {
-			error("bridge_get_data(RM_BPID): %s", bg_err_str(rc));
+		    != SLURM_SUCCESS) {
+			error("bridge_get_data(RM_BPID): %s",
+			      bg_err_str(rc));
 			continue;
 		}
 
@@ -107,7 +118,7 @@ static void _configure_node_down(rm_bp_id_t bp_id, my_bluegene_t *my_bg)
 		free(bpid);
 
 		if ((rc = bridge_get_data(my_bp, RM_BPState, &bp_state))
-		    != STATUS_OK) {
+		    != SLURM_SUCCESS) {
 			error("bridge_get_data(RM_BPState): %s",
 			      bg_err_str(rc));
 			continue;
@@ -116,8 +127,9 @@ static void _configure_node_down(rm_bp_id_t bp_id, my_bluegene_t *my_bg)
 			continue;
 
 		if ((rc = bridge_get_data(my_bp, RM_BPLoc, &bp_loc))
-		    != STATUS_OK) {
-			error("bridge_get_data(RM_BPLoc): %s", bg_err_str(rc));
+		    != SLURM_SUCCESS) {
+			error("bridge_get_data(RM_BPLoc): %s",
+			      bg_err_str(rc));
 			continue;
 		}
 
@@ -157,7 +169,7 @@ static char *_get_bp_node_name(rm_BP_t *bp_ptr)
 	errno = SLURM_SUCCESS;
 
 	if ((rc = bridge_get_data(bp_ptr, RM_BPLoc, &bp_loc))
-	    != STATUS_OK) {
+	    != SLURM_SUCCESS) {
 		error("bridge_get_data(RM_BPLoc): %s", bg_err_str(rc));
 		errno = SLURM_ERROR;
 		return NULL;
@@ -195,7 +207,7 @@ static int _test_nodecard_state(rm_nodecard_t *ncard, int nc_id,
 
 	if ((rc = bridge_get_data(ncard,
 				  RM_NodeCardState,
-				  &state)) != STATUS_OK) {
+				  &state)) != SLURM_SUCCESS) {
 		error("bridge_get_data(RM_NodeCardState): %s",
 		      bg_err_str(rc));
 		return SLURM_ERROR;
@@ -206,20 +218,20 @@ static int _test_nodecard_state(rm_nodecard_t *ncard, int nc_id,
 
 	if ((rc = bridge_get_data(ncard,
 				  RM_NodeCardID,
-				  &nc_name)) != STATUS_OK) {
+				  &nc_name)) != SLURM_SUCCESS) {
 		error("bridge_get_data(RM_NodeCardID): %s", bg_err_str(rc));
 		return SLURM_ERROR;
 	}
 
 	if (!nc_name) {
-		error("We didn't get an RM_NodeCardID but rc was STATUS_OK?");
+		error("We didn't get an RM_NodeCardID but rc was SLURM_SUCCESS?");
 		return SLURM_ERROR;
 	}
 
 #ifdef HAVE_BGL
 	if ((rc = bridge_get_data(ncard,
 				  RM_NodeCardQuarter,
-				  &io_start)) != STATUS_OK) {
+				  &io_start)) != SLURM_SUCCESS) {
 		error("bridge_get_data(CardQuarter): %s", bg_err_str(rc));
 		rc = SLURM_ERROR;
 		goto clean_up;
@@ -238,7 +250,7 @@ static int _test_nodecard_state(rm_nodecard_t *ncard, int nc_id,
 	   state.  To avoid getting a bunch of warnings here just
 	   skip over the ones missing.
 	*/
-	if (io_start >= bg_conf->numpsets) {
+	if (io_start >= bg_conf->ionodes_per_mp) {
 		rc = SLURM_SUCCESS;
 		if (state == RM_NODECARD_MISSING) {
 			debug3("Nodecard %s is missing",
@@ -247,15 +259,15 @@ static int _test_nodecard_state(rm_nodecard_t *ncard, int nc_id,
 			error("We don't have the system configured "
 			      "for this nodecard %s, we only have "
 			      "%d ionodes and this starts at %d",
-			      nc_name, io_start, bg_conf->numpsets);
+			      nc_name, bg_conf->ionodes_per_mp, io_start);
 		}
 		goto clean_up;
 	}
 
 	/* if (!ionode_bitmap) */
-	/* 	ionode_bitmap = bit_alloc(bg_conf->numpsets); */
+	/* 	ionode_bitmap = bit_alloc(bg_conf->ionodes_per_mp); */
 	/* info("setting %s start %d of %d", */
-	/*      nc_name,  io_start, bg_conf->numpsets); */
+	/*      nc_name,  io_start, bg_conf->ionodes_per_mp); */
 	/* bit_nset(ionode_bitmap, io_start, io_start+io_cnt); */
 
 	/* we have to handle each nodecard separately to make
@@ -304,14 +316,14 @@ static int _test_down_nodecards(rm_BP_t *bp_ptr, bool slurmctld_locked)
 /* 		io_cnt--; */
 
 	if ((rc = bridge_get_data(bp_ptr, RM_BPID, &bp_id))
-	    != STATUS_OK) {
+	    != SLURM_SUCCESS) {
 		error("bridge_get_data(RM_BPID): %s",
 		      bg_err_str(rc));
 		return SLURM_ERROR;
 	}
 
 	if ((rc = bridge_get_nodecards(bp_id, &ncard_list))
-	    != STATUS_OK) {
+	    != SLURM_SUCCESS) {
 		error("bridge_get_nodecards(%s): %d",
 		      bp_id, rc);
 		rc = SLURM_ERROR;
@@ -327,7 +339,7 @@ static int _test_down_nodecards(rm_BP_t *bp_ptr, bool slurmctld_locked)
 	}
 
 	if ((rc = bridge_get_data(ncard_list, RM_NodeCardListSize, &num))
-	    != STATUS_OK) {
+	    != SLURM_SUCCESS) {
 		error("bridge_get_data(RM_NodeCardListSize): %s",
 		      bg_err_str(rc));
 		rc = SLURM_ERROR;
@@ -338,7 +350,7 @@ static int _test_down_nodecards(rm_BP_t *bp_ptr, bool slurmctld_locked)
 		if (i) {
 			if ((rc = bridge_get_data(ncard_list,
 						  RM_NodeCardListNext,
-						  &ncard)) != STATUS_OK) {
+						  &ncard)) != SLURM_SUCCESS) {
 				error("bridge_get_data"
 				      "(RM_NodeCardListNext): %s",
 				      bg_err_str(rc));
@@ -348,7 +360,7 @@ static int _test_down_nodecards(rm_BP_t *bp_ptr, bool slurmctld_locked)
 		} else {
 			if ((rc = bridge_get_data(ncard_list,
 						  RM_NodeCardListFirst,
-						  &ncard)) != STATUS_OK) {
+						  &ncard)) != SLURM_SUCCESS) {
 				error("bridge_get_data"
 				      "(RM_NodeCardListFirst: %s",
 				      bg_err_str(rc));
@@ -384,12 +396,12 @@ static int _test_down_nodecards(rm_BP_t *bp_ptr, bool slurmctld_locked)
 /* 			if (bg_record->job_running != BLOCK_ERROR_STATE) */
 /* 				continue; */
 
-/* 			if (!bit_test(bg_record->bitmap, bp_bit)) */
+/* 			if (!bit_test(bg_record->mp_bitmap, bp_bit)) */
 /* 				continue; */
 /* 			info("bringing %s back to service", */
 /* 			     bg_record->bg_block_id); */
 /* 			bg_record->job_running = NO_JOB_RUNNING; */
-/* 			bg_record->state = RM_PARTITION_FREE; */
+/* 			bg_record->state = BG_BLOCK_FREE; */
 /* 			last_bg_update = time(NULL); */
 /* 		} */
 /* 		list_iterator_destroy(itr); */
@@ -431,21 +443,21 @@ static void _test_down_nodes(my_bluegene_t *my_bg)
 	rm_BP_t *my_bp;
 
 	debug2("Running _test_down_nodes");
-	if ((rc = bridge_get_data(my_bg, RM_BPNum, &bp_num)) != STATUS_OK) {
+	if ((rc = bridge_get_data(my_bg, RM_BPNum, &bp_num)) != SLURM_SUCCESS) {
 		error("bridge_get_data(RM_BPNum): %s", bg_err_str(rc));
 		bp_num = 0;
 	}
 	for (i=0; i<bp_num; i++) {
 		if (i) {
 			if ((rc = bridge_get_data(my_bg, RM_NextBP, &my_bp))
-			    != STATUS_OK) {
+			    != SLURM_SUCCESS) {
 				error("bridge_get_data(RM_NextBP): %s",
 				      bg_err_str(rc));
 				continue;
 			}
 		} else {
 			if ((rc = bridge_get_data(my_bg, RM_FirstBP, &my_bp))
-			    != STATUS_OK) {
+			    != SLURM_SUCCESS) {
 				error("bridge_get_data(RM_FirstBP): %s",
 				      bg_err_str(rc));
 				continue;
@@ -467,7 +479,7 @@ static void _test_down_switches(my_bluegene_t *my_bg)
 
 	debug2("Running _test_down_switches");
 	if ((rc = bridge_get_data(my_bg, RM_SwitchNum, &switch_num))
-	    != STATUS_OK) {
+	    != SLURM_SUCCESS) {
 		error("bridge_get_data(RM_SwitchNum): %s", bg_err_str(rc));
 		switch_num = 0;
 	}
@@ -475,7 +487,7 @@ static void _test_down_switches(my_bluegene_t *my_bg)
 		if (i) {
 			if ((rc = bridge_get_data(my_bg, RM_NextSwitch,
 						  &my_switch))
-			    != STATUS_OK) {
+			    != SLURM_SUCCESS) {
 				error("bridge_get_data(RM_NextSwitch): %s",
 				      bg_err_str(rc));
 				continue;
@@ -483,7 +495,7 @@ static void _test_down_switches(my_bluegene_t *my_bg)
 		} else {
 			if ((rc = bridge_get_data(my_bg, RM_FirstSwitch,
 						  &my_switch))
-			    != STATUS_OK) {
+			    != SLURM_SUCCESS) {
 				error("bridge_get_data(RM_FirstSwitch): %s",
 				      bg_err_str(rc));
 				continue;
@@ -491,7 +503,7 @@ static void _test_down_switches(my_bluegene_t *my_bg)
 		}
 
 		if ((rc = bridge_get_data(my_switch, RM_SwitchState,
-					  &switch_state)) != STATUS_OK) {
+					  &switch_state)) != SLURM_SUCCESS) {
 			error("bridge_get_data(RM_SwitchState): %s",
 			      bg_err_str(rc));
 			continue;
@@ -499,7 +511,7 @@ static void _test_down_switches(my_bluegene_t *my_bg)
 		if (switch_state == RM_SWITCH_UP)
 			continue;
 		if ((rc = bridge_get_data(my_switch, RM_SwitchBPID, &bp_id))
-		    != STATUS_OK) {
+		    != SLURM_SUCCESS) {
 			error("bridge_get_data(RM_SwitchBPID): %s",
 			      bg_err_str(rc));
 			continue;
@@ -516,35 +528,18 @@ static void _test_down_switches(my_bluegene_t *my_bg)
 }
 #endif
 
-/* Determine if specific slurm node is already in DOWN or DRAIN state */
-extern int node_already_down(char *node_name)
-{
-	struct node_record *node_ptr = find_node_record(node_name);
-
-	if (node_ptr) {
-		if (IS_NODE_DRAIN(node_ptr))
-			return 2;
-		else if (IS_NODE_DOWN(node_ptr))
-			return 1;
-		else
-			return 0;
-	}
-
-	return 0;
-}
-
 /*
  * Search MMCS for failed switches and nodes. Failed resources are DRAINED in
  * SLURM. This relies upon rm_get_BG(), which is slow (10+ seconds) so run
  * this test infrequently.
  */
-extern void test_mmcs_failures(void)
+static void _test_mmcs_failures(void)
 {
-#ifdef HAVE_BG_FILES
+#if defined HAVE_BG_FILES
 	my_bluegene_t *local_bg;
 	int rc;
 
-	if ((rc = bridge_get_bg(&local_bg)) != STATUS_OK) {
+	if ((rc = bridge_get_bg(&local_bg)) != SLURM_SUCCESS) {
 
 		error("bridge_get_BG(): %s", bg_err_str(rc));
 		return;
@@ -553,21 +548,382 @@ extern void test_mmcs_failures(void)
 
 	_test_down_switches(local_bg);
 	_test_down_nodes(local_bg);
-	if ((rc = bridge_free_bg(local_bg)) != STATUS_OK)
+	if ((rc = bridge_free_bg(local_bg)) != SLURM_SUCCESS)
 		error("bridge_free_BG(): %s", bg_err_str(rc));
 #endif
 }
 
+static int _do_block_poll(void)
+{
+	int updated = 0;
+#if defined HAVE_BG_FILES
+	int rc;
+	rm_partition_t *block_ptr = NULL;
+#ifdef HAVE_BGL
+	rm_partition_mode_t node_use;
+#endif
+	rm_partition_state_t state;
+	char *name = NULL;
+	bg_record_t *bg_record = NULL;
+	ListIterator itr = NULL;
+
+	if (!bg_lists->main)
+		return updated;
+
+	slurm_mutex_lock(&block_state_mutex);
+	itr = list_iterator_create(bg_lists->main);
+	while ((bg_record = (bg_record_t *) list_next(itr)) != NULL) {
+		if (bg_record->magic != BLOCK_MAGIC) {
+			/* block is gone */
+			list_remove(itr);
+			continue;
+		} else if (!bg_record->bg_block_id)
+			continue;
+
+		name = bg_record->bg_block_id;
+		if ((rc = bridge_get_block_info(name, &block_ptr))
+		    != SLURM_SUCCESS) {
+			if (bg_conf->layout_mode == LAYOUT_DYNAMIC) {
+				switch(rc) {
+				case BG_ERROR_INCONSISTENT_DATA:
+					debug2("got inconsistent data when "
+					       "querying block %s", name);
+					continue;
+					break;
+				case BG_ERROR_BLOCK_NOT_FOUND:
+					debug("block %s not found, removing "
+					      "from slurm", name);
+					list_remove(itr);
+					destroy_bg_record(bg_record);
+					continue;
+					break;
+				default:
+					break;
+				}
+			}
+
+			/* If the call was busy, just skip this
+			   iteration.  It usually means something like
+			   rm_get_BG was called which can be a very
+			   long call */
+			if (rc == EBUSY) {
+				debug5("lock was busy, aborting");
+				break;
+			}
+
+			error("bridge_get_block_info(%s): %s",
+			      name,
+			      bg_err_str(rc));
+			continue;
+		}
+
+#ifdef HAVE_BGL
+		if ((rc = bridge_get_data(block_ptr, RM_PartitionMode,
+					  &node_use))
+		    != SLURM_SUCCESS) {
+			error("bridge_get_data(RM_PartitionMode): %s",
+			      bg_err_str(rc));
+			if (!updated)
+				updated = -1;
+			goto next_block;
+		} else if (bg_record->node_use != node_use) {
+			debug("node_use of Block %s was %d "
+			      "and now is %d",
+			      bg_record->bg_block_id,
+			      bg_record->node_use,
+			      node_use);
+			bg_record->node_use = node_use;
+			updated = 1;
+		}
+#else
+		if ((bg_record->cnode_cnt < bg_conf->mp_cnode_cnt)
+		    || (bg_conf->mp_cnode_cnt == bg_conf->nodecard_cnode_cnt)) {
+			char *mode = NULL;
+			uint16_t conn_type = SELECT_SMALL;
+			if ((rc = bridge_get_data(block_ptr,
+						  RM_PartitionOptions,
+						  &mode))
+			    != SLURM_SUCCESS) {
+				error("bridge_get_data(RM_PartitionOptions): "
+				      "%s", bg_err_str(rc));
+				if (!updated)
+					updated = -1;
+				goto next_block;
+			} else if (mode) {
+				switch(mode[0]) {
+				case 's':
+					conn_type = SELECT_HTC_S;
+					break;
+				case 'd':
+					conn_type = SELECT_HTC_D;
+					break;
+				case 'v':
+					conn_type = SELECT_HTC_V;
+					break;
+				case 'l':
+					conn_type = SELECT_HTC_L;
+					break;
+				default:
+					conn_type = SELECT_SMALL;
+					break;
+				}
+				free(mode);
+			}
+
+			if (bg_record->conn_type[0] != conn_type) {
+				debug("mode of small Block %s was %u "
+				      "and now is %u",
+				      bg_record->bg_block_id,
+				      bg_record->conn_type[0],
+				      conn_type);
+				bg_record->conn_type[0] = conn_type;
+				updated = 1;
+			}
+		}
+#endif
+		if ((rc = bridge_get_data(block_ptr, RM_PartitionState,
+					  &state))
+		    != SLURM_SUCCESS) {
+			error("bridge_get_data(RM_PartitionState): %s",
+			      bg_err_str(rc));
+			if (!updated)
+				updated = -1;
+			goto next_block;
+		} else if (bg_status_update_block_state(
+				   bg_record, state, kill_job_list) == 1)
+			updated = 1;
+
+	next_block:
+		if ((rc = bridge_free_block(block_ptr))
+		    != SLURM_SUCCESS) {
+			error("bridge_free_block(): %s",
+			      bg_err_str(rc));
+		}
+	}
+	list_iterator_destroy(itr);
+	slurm_mutex_unlock(&block_state_mutex);
+
+	bg_status_process_kill_job_list(kill_job_list);
+
+#endif
+	return updated;
+}
+
+/*
+ * block_agent - thread periodically updates status of
+ * bluegene blocks.
+ *
+ */
+static void *_block_state_agent(void *args)
+{
+	static time_t last_bg_test;
+	int rc;
+	time_t now = time(NULL);
+
+	last_bg_test = now - BG_POLL_TIME;
+	while (bridge_status_inited) {
+		if (difftime(now, last_bg_test) >= BG_POLL_TIME) {
+			if (!bridge_status_inited) /* don't bother */
+				break;	/* quit now */
+			if (blocks_are_created) {
+				last_bg_test = now;
+				if ((rc = _do_block_poll()) == 1)
+					last_bg_update = now;
+				else if (rc == -1)
+					error("Error with update_block_list");
+			}
+		}
+
+		sleep(1);
+		now = time(NULL);
+	}
+	return NULL;
+}
+
+/*
+ * state_agent - thread periodically updates status of
+ * bluegene nodes.
+ *
+ */
+static void *_mp_state_agent(void *args)
+{
+	static time_t last_mmcs_test;
+	time_t now = time(NULL);
+
+	last_mmcs_test = now - MMCS_POLL_TIME;
+	while (bridge_status_inited) {
+		if (difftime(now, last_mmcs_test) >= MMCS_POLL_TIME) {
+			if (!bridge_status_inited) /* don't bother */
+				break; 	/* quit now */
+			if (blocks_are_created) {
+				/* can run for a while so set the
+				 * time after the call so there is
+				 * always MMCS_POLL_TIME between
+				 * calls */
+				_test_mmcs_failures();
+				last_mmcs_test = time(NULL);
+			}
+		}
+
+		sleep(1);
+		now = time(NULL);
+	}
+	return NULL;
+}
+
+extern int bridge_status_init(void)
+{
+	pthread_attr_t attr;
+
+	if (bridge_status_inited)
+		return SLURM_ERROR;
+
+	bridge_status_inited = true;
+	if (!kill_job_list)
+		kill_job_list = bg_status_create_kill_job_list();
+
+	pthread_mutex_lock(&thread_flag_mutex);
+	if (block_thread) {
+		debug2("Bluegene threads already running, not starting "
+		       "another");
+		pthread_mutex_unlock(&thread_flag_mutex);
+		return SLURM_ERROR;
+	}
+
+	slurm_attr_init(&attr);
+	/* since we do a join on this later we don't make it detached */
+	if (pthread_create(&block_thread, &attr, _block_state_agent, NULL))
+		error("Failed to create block_agent thread");
+	slurm_attr_init(&attr);
+	/* since we do a join on this later we don't make it detached */
+	if (pthread_create(&state_thread, &attr, _mp_state_agent, NULL))
+		error("Failed to create state_agent thread");
+	pthread_mutex_unlock(&thread_flag_mutex);
+	slurm_attr_destroy(&attr);
+
+	return SLURM_SUCCESS;
+}
+
+extern int bridge_status_fini(void)
+{
+	bridge_status_inited = false;
+	pthread_mutex_lock(&thread_flag_mutex);
+	if ( block_thread ) {
+		verbose("Bluegene select plugin shutting down");
+		pthread_join(block_thread, NULL);
+		block_thread = 0;
+	}
+	if ( state_thread ) {
+		pthread_join(state_thread, NULL);
+		state_thread = 0;
+	}
+	pthread_mutex_unlock(&thread_flag_mutex);
+
+	return SLURM_SUCCESS;
+}
+
+/* This needs to have block_state_mutex locked before hand. */
+extern int bridge_status_update_block_list_state(List block_list)
+{
+	int updated = 0;
+#if defined HAVE_BG_FILES
+	int rc;
+	rm_partition_t *block_ptr = NULL;
+	rm_partition_state_t state;
+	uint16_t real_state;
+	char *name = NULL;
+	bg_record_t *bg_record = NULL;
+	ListIterator itr = NULL;
+
+	itr = list_iterator_create(block_list);
+	while ((bg_record = (bg_record_t *) list_next(itr)) != NULL) {
+		if (bg_record->magic != BLOCK_MAGIC) {
+			/* block is gone */
+			list_remove(itr);
+			continue;
+		} else if (!bg_record->bg_block_id)
+			continue;
+
+		name = bg_record->bg_block_id;
+		real_state = bg_record->state & (~BG_BLOCK_ERROR_FLAG);
+		if ((rc = bridge_get_block_info(name, &block_ptr))
+		    != SLURM_SUCCESS) {
+			if (bg_conf->layout_mode == LAYOUT_DYNAMIC) {
+				switch(rc) {
+				case BG_ERROR_INCONSISTENT_DATA:
+					debug2("got inconsistent data when "
+					       "querying block %s", name);
+					continue;
+					break;
+				case BG_ERROR_BLOCK_NOT_FOUND:
+					debug("block %s not found, removing "
+					      "from slurm", name);
+					/* Just set to free,
+					   everything will be cleaned
+					   up outside this.
+					*/
+					bg_record->state = BG_BLOCK_FREE;
+					continue;
+					break;
+				default:
+					break;
+				}
+			}
+			/* If the call was busy, just skip this
+			   iteration.  It usually means something like
+			   rm_get_BG was called which can be a very
+			   long call */
+			if (rc == EBUSY) {
+				debug5("lock was busy, aborting");
+				break;
+			}
+
+			error("bridge_get_block_info(%s): %s",
+			      name,
+			      bg_err_str(rc));
+			continue;
+		}
+
+		if ((rc = bridge_get_data(block_ptr, RM_PartitionState,
+					  &state))
+		    != SLURM_SUCCESS) {
+			error("bridge_get_data(RM_PartitionState): %s",
+			      bg_err_str(rc));
+			updated = -1;
+			goto next_block;
+		} else if (real_state != state) {
+			debug("freeing state of Block %s was %d and now is %d",
+			      bg_record->bg_block_id,
+			      bg_record->state,
+			      state);
+
+			if (bg_record->state & BG_BLOCK_ERROR_FLAG)
+				state |= BG_BLOCK_ERROR_FLAG;
+			bg_record->state = state;
+			updated = 1;
+		}
+	next_block:
+		if ((rc = bridge_free_block(block_ptr))
+		    != SLURM_SUCCESS) {
+			error("bridge_free_block(): %s",
+			      bg_err_str(rc));
+		}
+	}
+	list_iterator_destroy(itr);
+#endif
+	return updated;
+}
 
 /*
  * This could potentially lock the node lock in the slurmctld with
  * slurm_drain_node, so if slurmctld_locked is called we will call the
  * drainning function without locking the lock again.
  */
-extern int check_block_bp_states(char *bg_block_id, bool slurmctld_locked)
+extern int bridge_block_check_mp_states(char *bg_block_id,
+					bool slurmctld_locked)
 {
 	int rc = SLURM_SUCCESS;
-#ifdef HAVE_BG_FILES
+#if defined HAVE_BG_FILES
 	rm_partition_t *block_ptr = NULL;
 	rm_BP_t *bp_ptr = NULL;
 	int cnt = 0;
@@ -580,7 +936,7 @@ extern int check_block_bp_states(char *bg_block_id, bool slurmctld_locked)
 	if (!bg_block_id)
 		return SLURM_SUCCESS;
 
-	if ((rc = bridge_get_block(bg_block_id, &block_ptr)) != STATUS_OK) {
+	if ((rc = bridge_get_block(bg_block_id, &block_ptr)) != SLURM_SUCCESS) {
 		error("Block %s doesn't exist.", bg_block_id);
 		rc = SLURM_ERROR;
 
@@ -589,7 +945,7 @@ extern int check_block_bp_states(char *bg_block_id, bool slurmctld_locked)
 
 
 	if ((rc = bridge_get_data(block_ptr, RM_PartitionSmall, &small))
-	    != STATUS_OK) {
+	    != SLURM_SUCCESS) {
 		error("bridge_get_data(RM_PartitionSmall): %s",
 		      bg_err_str(rc));
 		rc = SLURM_ERROR;
@@ -607,7 +963,7 @@ extern int check_block_bp_states(char *bg_block_id, bool slurmctld_locked)
 		if ((rc = bridge_get_data(block_ptr,
 					  RM_PartitionNodeCardNum,
 					  &cnt))
-		    != STATUS_OK) {
+		    != SLURM_SUCCESS) {
 			error("bridge_get_data(RM_PartitionNodeCardNum): %s",
 			      bg_err_str(rc));
 			rc = SLURM_ERROR;
@@ -617,7 +973,7 @@ extern int check_block_bp_states(char *bg_block_id, bool slurmctld_locked)
 		if ((rc = bridge_get_data(block_ptr,
 					  RM_PartitionFirstBP,
 					  &bp_ptr))
-		    != STATUS_OK) {
+		    != SLURM_SUCCESS) {
 			error("bridge_get_data(RM_FirstBP): %s",
 			      bg_err_str(rc));
 			rc = SLURM_ERROR;
@@ -636,7 +992,7 @@ extern int check_block_bp_states(char *bg_block_id, bool slurmctld_locked)
 					     block_ptr,
 					     RM_PartitionNextNodeCard,
 					     &ncard))
-				    != STATUS_OK) {
+				    != SLURM_SUCCESS) {
 					error("bridge_get_data("
 					      "RM_PartitionNextNodeCard): %s",
 					      bg_err_str(rc));
@@ -648,7 +1004,7 @@ extern int check_block_bp_states(char *bg_block_id, bool slurmctld_locked)
 					     block_ptr,
 					     RM_PartitionFirstNodeCard,
 					     &ncard))
-				    != STATUS_OK) {
+				    != SLURM_SUCCESS) {
 					error("bridge_get_data("
 					      "RM_PartitionFirstNodeCard): %s",
 					      bg_err_str(rc));
@@ -657,7 +1013,7 @@ extern int check_block_bp_states(char *bg_block_id, bool slurmctld_locked)
 				}
 			}
 #ifdef HAVE_BGL
-			find_nodecard_num(block_ptr, ncard, &nc_id);
+			bridge_find_nodecard_num(block_ptr, ncard, &nc_id);
 #endif
 			/* If we find any nodecards in an error state just
 			   break here since we are seeing if we can run.  If
@@ -678,7 +1034,7 @@ extern int check_block_bp_states(char *bg_block_id, bool slurmctld_locked)
 	   nodecards on each midplane.
 	*/
 	if ((rc = bridge_get_data(block_ptr, RM_PartitionBPNum, &cnt))
-	    != STATUS_OK) {
+	    != SLURM_SUCCESS) {
 		error("bridge_get_data(RM_BPNum): %s", bg_err_str(rc));
 		rc = SLURM_ERROR;
 		goto cleanup;
@@ -689,7 +1045,7 @@ extern int check_block_bp_states(char *bg_block_id, bool slurmctld_locked)
 			if ((rc = bridge_get_data(block_ptr,
 						  RM_PartitionNextBP,
 						  &bp_ptr))
-			    != STATUS_OK) {
+			    != SLURM_SUCCESS) {
 				error("bridge_get_data(RM_NextBP): %s",
 				      bg_err_str(rc));
 				rc = SLURM_ERROR;
@@ -699,7 +1055,7 @@ extern int check_block_bp_states(char *bg_block_id, bool slurmctld_locked)
 			if ((rc = bridge_get_data(block_ptr,
 						  RM_PartitionFirstBP,
 						  &bp_ptr))
-			    != STATUS_OK) {
+			    != SLURM_SUCCESS) {
 				error("bridge_get_data(RM_FirstBP): %s",
 				      bg_err_str(rc));
 				rc = SLURM_ERROR;
diff --git a/src/plugins/select/bluegene/bl/bridge_status.h b/src/plugins/select/bluegene/bl/bridge_status.h
new file mode 100644
index 000000000..39cbb995d
--- /dev/null
+++ b/src/plugins/select/bluegene/bl/bridge_status.h
@@ -0,0 +1,48 @@
+/*****************************************************************************\
+ *  bridge_status.h
+ *
+ *****************************************************************************
+ *  Copyright (C) 2011 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Danny Auble <da@llnl.gov>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifndef _BRIDGE_STATUS_H_
+#define _BRIDGE_STATUS_H_
+
+extern int bridge_status_init(void);
+extern int bridge_status_fini(void);
+
+/* This needs to have block_state_mutex locked before hand. */
+extern int bridge_status_update_block_list_state(List block_list);
+
+#endif
diff --git a/src/plugins/select/bluegene/plugin/bg_switch_connections.c b/src/plugins/select/bluegene/bl/bridge_switch_connections.c
similarity index 78%
rename from src/plugins/select/bluegene/plugin/bg_switch_connections.c
rename to src/plugins/select/bluegene/bl/bridge_switch_connections.c
index 50c40e327..549eeb6fa 100644
--- a/src/plugins/select/bluegene/plugin/bg_switch_connections.c
+++ b/src/plugins/select/bluegene/bl/bridge_switch_connections.c
@@ -9,7 +9,7 @@
  *  Written by Dan Phung <phung4@llnl.gov> and Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -38,14 +38,13 @@
  *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
 \*****************************************************************************/
 
-#include "bluegene.h"
+#include "bridge_switch_connections.h"
 
-
-#ifdef HAVE_BG_FILES
-static int _get_bp_by_location(my_bluegene_t* my_bg,
+#if defined HAVE_BG_FILES
+static int _get_mp_by_location(my_bluegene_t* my_bg,
 			       uint16_t* curr_coord,
-			       rm_BP_t** bp);
-static int _get_switches_by_bpid(my_bluegene_t* my_bg, const char *bpid,
+			       rm_BP_t** mp);
+static int _get_switches_by_mpid(my_bluegene_t* my_bg, const char *mpid,
 				 rm_switch_t **curr_switch);
 
 //static int _set_switch(rm_switch_t* curr_switch, ba_connection_t *int_wire);
@@ -53,46 +52,49 @@ static int _add_switch_conns(rm_switch_t* curr_switch,
 			     ba_switch_t *ba_switch);
 #endif
 
-static int _used_switches(ba_node_t *ba_node);
+static int _used_switches(ba_mp_t *ba_node);
 
 /**
  * this is just stupid.  there are some implicit rules for where
- * "NextBP" goes to, but we don't know, so we have to do this.
+ * "NextMP" goes to, but we don't know, so we have to do this.
  */
-#ifdef HAVE_BG_FILES
-static int _get_bp_by_location(my_bluegene_t* my_bg, uint16_t* curr_coord,
-			       rm_BP_t** bp)
+#if defined HAVE_BG_FILES
+static int _get_mp_by_location(my_bluegene_t* my_bg, uint16_t* curr_coord,
+			       rm_BP_t** mp)
 {
-	static int bp_num = 0;
+	static int mp_num = 0;
 	int i, rc;
 	rm_location_t loc;
 
-	if (!bp_num) {
-		if ((rc = bridge_get_data(my_bg, RM_BPNum, &bp_num))
-		    != STATUS_OK) {
-			fatal("bridge_get_data: RM_BPNum: %s", bg_err_str(rc));
+	if (!mp_num) {
+		if ((rc = bridge_get_data(my_bg, RM_BPNum, &mp_num))
+		    != SLURM_SUCCESS) {
+			fatal("bridge_get_data: RM_BPNum: %s",
+			      bg_err_str(rc));
 			return SLURM_ERROR;
 		}
 	}
 
-	for (i=0; i<bp_num; i++){
+	for (i=0; i<mp_num; i++) {
 		if (i) {
-			if ((rc = bridge_get_data(my_bg, RM_NextBP, bp))
-			    != STATUS_OK) {
+			if ((rc = bridge_get_data(my_bg, RM_NextBP, mp))
+			    != SLURM_SUCCESS) {
 				fatal("bridge_get_data: RM_NextBP: %s",
 				      bg_err_str(rc));
 				return SLURM_ERROR;
 			}
 		} else {
-			if ((rc = bridge_get_data(my_bg, RM_FirstBP, bp))
-			    != STATUS_OK) {
+			if ((rc = bridge_get_data(my_bg, RM_FirstBP, mp))
+			    != SLURM_SUCCESS) {
 				fatal("bridge_get_data: RM_FirstBP: %s",
 				      bg_err_str(rc));
 				return SLURM_ERROR;
 			}
 		}
-		if ((rc = bridge_get_data(*bp, RM_BPLoc, &loc)) != STATUS_OK) {
-			fatal("bridge_get_data: RM_BPLoc: %s", bg_err_str(rc));
+		if ((rc = bridge_get_data(*mp, RM_BPLoc, &loc))
+		    != SLURM_SUCCESS) {
+			fatal("bridge_get_data: RM_BPLoc: %s",
+			      bg_err_str(rc));
 			return SLURM_ERROR;
 		}
 
@@ -103,23 +105,23 @@ static int _get_bp_by_location(my_bluegene_t* my_bg, uint16_t* curr_coord,
 		}
 	}
 
-	// error("_get_bp_by_location: could not find specified bp.");
+	// error("_get_mp_by_location: could not find specified mp.");
 	return SLURM_ERROR;
 }
 
-static int _get_switches_by_bpid(
-	my_bluegene_t* my_bg, const char *bpid,
+static int _get_switches_by_mpid(
+	my_bluegene_t* my_bg, const char *mpid,
 	rm_switch_t *coord_switch[SYSTEM_DIMENSIONS])
 {
 	static int switch_num = 0;
 	rm_switch_t *curr_switch = NULL;
 	int i, rc;
-	int found_bpid = 0;
-	char *curr_bpid = NULL;
+	int found_mpid = 0;
+	char *curr_mpid = NULL;
 
 	if (!switch_num) {
 		if ((rc = bridge_get_data(my_bg, RM_SwitchNum, &switch_num))
-		    != STATUS_OK) {
+		    != SLURM_SUCCESS) {
 			fatal("bridge_get_data: RM_SwitchNum: %s",
 			      bg_err_str(rc));
 			return SLURM_ERROR;
@@ -130,7 +132,7 @@ static int _get_switches_by_bpid(
 		if (i) {
 			if ((rc = bridge_get_data(my_bg, RM_NextSwitch,
 						  &curr_switch))
-			    != STATUS_OK) {
+			    != SLURM_SUCCESS) {
 				fatal("bridge_get_data"
 				      "(RM_NextSwitch): %s",
 				      bg_err_str(rc));
@@ -138,32 +140,32 @@ static int _get_switches_by_bpid(
 		} else {
 			if ((rc = bridge_get_data(my_bg, RM_FirstSwitch,
 						  &curr_switch))
-			    != STATUS_OK) {
+			    != SLURM_SUCCESS) {
 				fatal("bridge_get_data"
 				      "(RM_FirstSwitch): %s",
 				      bg_err_str(rc));
 			}
 		}
 		if ((rc = bridge_get_data(curr_switch, RM_SwitchBPID,
-					  &curr_bpid)) != STATUS_OK) {
+					  &curr_mpid)) != SLURM_SUCCESS) {
 			fatal("bridge_get_data: RM_SwitchBPID: %s",
 			      bg_err_str(rc));
 		}
 
-		if (!curr_bpid) {
+		if (!curr_mpid) {
 			error("No BP ID was returned from database");
 			continue;
 		}
 
-		if (!strcasecmp((char *)bpid, (char *)curr_bpid)) {
-			coord_switch[found_bpid] = curr_switch;
-			found_bpid++;
-			if (found_bpid==SYSTEM_DIMENSIONS) {
-				free(curr_bpid);
+		if (!strcasecmp((char *)mpid, (char *)curr_mpid)) {
+			coord_switch[found_mpid] = curr_switch;
+			found_mpid++;
+			if (found_mpid==SYSTEM_DIMENSIONS) {
+				free(curr_mpid);
 				return SLURM_SUCCESS;
 			}
 		}
-		free(curr_bpid);
+		free(curr_mpid);
 	}
 	return SLURM_ERROR;
 }
@@ -232,7 +234,7 @@ static int _add_switch_conns(rm_switch_t* curr_switch,
 	i = list_count(conn_list);
 	if (i) {
 		if ((rc = bridge_set_data(curr_switch, RM_SwitchConnNum, &i))
-		    != STATUS_OK) {
+		    != SLURM_SUCCESS) {
 			fatal("bridge_set_data: RM_SwitchConnNum: %s",
 			      bg_err_str(rc));
 
@@ -252,7 +254,7 @@ static int _add_switch_conns(rm_switch_t* curr_switch,
 				     curr_switch,
 				     RM_SwitchFirstConnection,
 				     conn_ptr))
-			    != STATUS_OK) {
+			    != SLURM_SUCCESS) {
 				fatal("bridge_set_data"
 				      "(RM_SwitchFirstConnection): "
 				      "%s",
@@ -266,7 +268,7 @@ static int _add_switch_conns(rm_switch_t* curr_switch,
 				     curr_switch,
 				     RM_SwitchNextConnection,
 				     conn_ptr))
-			    != STATUS_OK) {
+			    != SLURM_SUCCESS) {
 				fatal("bridge_set_data"
 				      "(RM_SwitchNextConnection): %s",
 				      bg_err_str(rc));
@@ -282,7 +284,7 @@ static int _add_switch_conns(rm_switch_t* curr_switch,
 }
 #endif
 
-static int _used_switches(ba_node_t* ba_node)
+static int _used_switches(ba_mp_t* ba_node)
 {
 	/* max number of connections in a switch */
 	int num_connections = 3;
@@ -333,11 +335,11 @@ static int _used_switches(ba_node_t* ba_node)
 extern int configure_small_block(bg_record_t *bg_record)
 {
 	int rc = SLURM_SUCCESS;
-#ifdef HAVE_BG_FILES
+#if defined HAVE_BG_FILES
 	bool small = true;
-	ba_node_t* ba_node = NULL;
-	rm_BP_t *curr_bp = NULL;
-	rm_bp_id_t bp_id = NULL;
+	ba_mp_t* ba_node = NULL;
+	rm_BP_t *curr_mp = NULL;
+	rm_bp_id_t mp_id = NULL;
 #ifndef HAVE_BGL
 	rm_nodecard_id_t nc_char = NULL;
 #endif
@@ -346,27 +348,27 @@ extern int configure_small_block(bg_record_t *bg_record)
 	rm_nodecard_t *ncard;
 	rm_nodecard_list_t *ncard_list = NULL;
 	int num, i;
-	int use_nc[bg_conf->bp_nodecard_cnt];
+	int use_nc[bg_conf->mp_nodecard_cnt];
 	double nc_pos = 0;
 #endif
 	xassert(bg_record->ionode_bitmap);
-	if (bg_record->bp_count != 1) {
-		error("Requesting small block with %d bps, needs to be 1.",
-		      bg_record->bp_count);
+	if (bg_record->mp_count != 1) {
+		error("Requesting small block with %d mps, needs to be 1.",
+		      bg_record->mp_count);
 		return SLURM_ERROR;
 	}
 /* 	info("configuring small block on ionodes %s out of %d ncs",  */
-/* 	     bg_record->ionodes, bg_conf->bp_nodecard_cnt); */
-#ifdef HAVE_BG_FILES
+/* 	     bg_record->ionodes, bg_conf->mp_nodecard_cnt); */
+#if defined HAVE_BG_FILES
 	/* set that we are doing a small block */
 	if ((rc = bridge_set_data(bg_record->bg_block, RM_PartitionSmall,
-				  &small)) != STATUS_OK) {
+				  &small)) != SLURM_SUCCESS) {
 
 		fatal("bridge_set_data(RM_PartitionPsetsPerBP): %s",
 		      bg_err_str(rc));
 	}
 
-	num_ncards = bg_record->node_cnt/bg_conf->nodecard_node_cnt;
+	num_ncards = bg_record->cnode_cnt/bg_conf->nodecard_cnode_cnt;
 	if (num_ncards < 1) {
 		num_ncards = 1;
 		sub_nodecard = 1;
@@ -375,7 +377,7 @@ extern int configure_small_block(bg_record_t *bg_record)
 
 	/* find out how many nodecards to get for each ionode */
 
-	for(i = 0; i<bg_conf->numpsets; i++) {
+	for(i = 0; i<bg_conf->ionodes_per_mp; i++) {
 		if (bit_test(bg_record->ionode_bitmap, i)) {
 			if (bg_conf->nc_ratio > 1) {
 				int j=0;
@@ -393,25 +395,25 @@ extern int configure_small_block(bg_record_t *bg_record)
 	if ((rc = bridge_set_data(bg_record->bg_block,
 				  RM_PartitionNodeCardNum,
 				  &num_ncards))
-	    != STATUS_OK) {
+	    != SLURM_SUCCESS) {
 
 		fatal("bridge_set_data: RM_PartitionBPNum: %s",
 		      bg_err_str(rc));
 	}
 
-	ba_node = list_peek(bg_record->bg_block_list);
+	ba_node = list_peek(bg_record->ba_mp_list);
 
-	if (_get_bp_by_location(bg, ba_node->coord, &curr_bp)
+	if (_get_mp_by_location(bg, ba_node->coord, &curr_mp)
 	    == SLURM_ERROR) {
-		fatal("_get_bp_by_location()");
+		fatal("_get_mp_by_location()");
 	}
 
-	/* Set the one BP */
+	/* Set the one MP */
 
 	if ((rc = bridge_set_data(bg_record->bg_block,
 				  RM_PartitionBPNum,
-				  &bg_record->bp_count))
-	    != STATUS_OK) {
+				  &bg_record->mp_count))
+	    != SLURM_SUCCESS) {
 
 		fatal("bridge_set_data: RM_PartitionBPNum: %s",
 		      bg_err_str(rc));
@@ -419,8 +421,8 @@ extern int configure_small_block(bg_record_t *bg_record)
 	}
 	if ((rc = bridge_set_data(bg_record->bg_block,
 				  RM_PartitionFirstBP,
-				  curr_bp))
-	    != STATUS_OK) {
+				  curr_mp))
+	    != SLURM_SUCCESS) {
 
 		fatal("bridge_set_data("
 		      "BRIDGE_PartitionFirstBP): %s",
@@ -429,31 +431,31 @@ extern int configure_small_block(bg_record_t *bg_record)
 	}
 
 
-	/* find the bp_id of the bp to get the small32 */
-	if ((rc = bridge_get_data(curr_bp, RM_BPID, &bp_id))
-	    != STATUS_OK) {
+	/* find the mp_id of the mp to get the small32 */
+	if ((rc = bridge_get_data(curr_mp, RM_BPID, &mp_id))
+	    != SLURM_SUCCESS) {
 		error("bridge_get_data(): %d", rc);
 		return SLURM_ERROR;
 	}
 
 
-	if (!bp_id) {
-		error("No BP ID was returned from database");
+	if (!mp_id) {
+		error("No MP ID was returned from database");
 		return SLURM_ERROR;
 	}
 
-	if ((rc = bridge_get_nodecards(bp_id, &ncard_list))
-	    != STATUS_OK) {
+	if ((rc = bridge_get_nodecards(mp_id, &ncard_list))
+	    != SLURM_SUCCESS) {
 		error("bridge_get_nodecards(%s): %d",
-		      bp_id, rc);
-		free(bp_id);
+		      mp_id, rc);
+		free(mp_id);
 		return SLURM_ERROR;
 	}
-	free(bp_id);
+	free(mp_id);
 
 
 	if ((rc = bridge_get_data(ncard_list, RM_NodeCardListSize, &num))
-	    != STATUS_OK) {
+	    != SLURM_SUCCESS) {
 		error("bridge_get_data(RM_NodeCardListSize): %s",
 		      bg_err_str(rc));
 		return SLURM_ERROR;
@@ -461,14 +463,14 @@ extern int configure_small_block(bg_record_t *bg_record)
 	if (num_ncards > num) {
 		error("You requested more (%d > %d) nodecards "
 		      "than are available on this block %s",
-		      num_ncards, num, bg_record->nodes);
+		      num_ncards, num, bg_record->mp_str);
 	}
 
 	for(i=0; i<num; i++) {
 		if (i) {
 			if ((rc = bridge_get_data(ncard_list,
 						  RM_NodeCardListNext,
-						  &ncard)) != STATUS_OK) {
+						  &ncard)) != SLURM_SUCCESS) {
 				error("bridge_get_data"
 				      "(RM_NodeCardListNext): %s",
 				      bg_err_str(rc));
@@ -478,7 +480,7 @@ extern int configure_small_block(bg_record_t *bg_record)
 		} else {
 			if ((rc = bridge_get_data(ncard_list,
 						  RM_NodeCardListFirst,
-						  &ncard)) != STATUS_OK) {
+						  &ncard)) != SLURM_SUCCESS) {
 				error("bridge_get_data"
 				      "(RM_NodeCardListFirst): %s",
 				      bg_err_str(rc));
@@ -498,7 +500,7 @@ extern int configure_small_block(bg_record_t *bg_record)
 #else
 		if ((rc = bridge_get_data(ncard,
 					  RM_NodeCardID,
-					  &nc_char)) != STATUS_OK) {
+					  &nc_char)) != SLURM_SUCCESS) {
 			error("bridge_get_data(RM_NodeCardID): %s",
 			      bg_err_str(rc));
 			rc = SLURM_ERROR;
@@ -522,7 +524,8 @@ extern int configure_small_block(bg_record_t *bg_record)
 			rm_ionode_t *ionode;
 			char *ionode_id = "J00";
 
-			if ((rc = bridge_new_nodecard(&ncard)) != STATUS_OK) {
+			if ((rc = bridge_new_nodecard(&ncard))
+			    != SLURM_SUCCESS) {
 				error("bridge_new_nodecard(): %s",
 				      bg_err_str(rc));
 				rc = SLURM_ERROR;
@@ -532,7 +535,7 @@ extern int configure_small_block(bg_record_t *bg_record)
 			if ((rc = bridge_set_data(ncard,
 						  RM_NodeCardID,
 						  nc_char))
-			    != STATUS_OK) {
+			    != SLURM_SUCCESS) {
 				error("bridge_set_data("
 				      "RM_NodeCardID): %s",
 				      bg_err_str(rc));
@@ -543,7 +546,7 @@ extern int configure_small_block(bg_record_t *bg_record)
 			if ((rc = bridge_set_data(ncard,
 						  RM_NodeCardIONodeNum,
 						  &sub_nodecard))
-			    != STATUS_OK) {
+			    != SLURM_SUCCESS) {
 				error("bridge_set_data("
 				      "RM_NodeCardIONodeNum): %s",
 				      bg_err_str(rc));
@@ -551,7 +554,8 @@ extern int configure_small_block(bg_record_t *bg_record)
 				goto cleanup;
 			}
 
-			if ((rc = bridge_new_ionode(&ionode)) != STATUS_OK) {
+			if ((rc = bridge_new_ionode(&ionode))
+			    != SLURM_SUCCESS) {
 				error("bridge_new_ionode(): %s",
 				      bg_err_str(rc));
 				rc = SLURM_ERROR;
@@ -564,7 +568,7 @@ extern int configure_small_block(bg_record_t *bg_record)
 			if ((rc = bridge_set_data(ionode,
 						  RM_IONodeID,
 						  ionode_id))
-			    != STATUS_OK) {
+			    != SLURM_SUCCESS) {
 				error("bridge_set_data("
 				      "RM_NodeCardIONodeNum): %s",
 				      bg_err_str(rc));
@@ -575,7 +579,7 @@ extern int configure_small_block(bg_record_t *bg_record)
 			if ((rc = bridge_set_data(ncard,
 						  RM_NodeCardFirstIONode,
 						  ionode))
-			    != STATUS_OK) {
+			    != SLURM_SUCCESS) {
 				error("bridge_set_data("
 				      "RM_NodeCardFirstIONode): %s",
 				      bg_err_str(rc));
@@ -583,7 +587,8 @@ extern int configure_small_block(bg_record_t *bg_record)
 				goto cleanup;
 			}
 
-			if ((rc = bridge_free_ionode(ionode)) != STATUS_OK) {
+			if ((rc = bridge_free_ionode(ionode))
+			    != SLURM_SUCCESS) {
 				error("bridge_free_ionode(): %s",
 				      bg_err_str(rc));
 				rc = SLURM_ERROR;
@@ -597,7 +602,7 @@ extern int configure_small_block(bg_record_t *bg_record)
 			if ((rc = bridge_set_data(bg_record->bg_block,
 						  RM_PartitionNextNodeCard,
 						  ncard))
-			    != STATUS_OK) {
+			    != SLURM_SUCCESS) {
 
 				error("bridge_set_data("
 				      "RM_PartitionNextNodeCard): %s",
@@ -609,7 +614,7 @@ extern int configure_small_block(bg_record_t *bg_record)
 			if ((rc = bridge_set_data(bg_record->bg_block,
 						  RM_PartitionFirstNodeCard,
 						  ncard))
-			    != STATUS_OK) {
+			    != SLURM_SUCCESS) {
 
 				error("bridge_set_data("
 				      "RM_PartitionFirstNodeCard): %s",
@@ -622,7 +627,8 @@ extern int configure_small_block(bg_record_t *bg_record)
 		nc_count++;
 #ifndef HAVE_BGL
 		if (sub_nodecard) {
-			if ((rc = bridge_free_nodecard(ncard)) != STATUS_OK) {
+			if ((rc = bridge_free_nodecard(ncard))
+			    != SLURM_SUCCESS) {
 				error("bridge_free_nodecard(): %s",
 				      bg_err_str(rc));
 				rc = SLURM_ERROR;
@@ -634,13 +640,16 @@ extern int configure_small_block(bg_record_t *bg_record)
 			break;
 	}
 cleanup:
-	if ((rc = bridge_free_nodecard_list(ncard_list)) != STATUS_OK) {
+	if ((rc = bridge_free_nodecard_list(ncard_list)) != SLURM_SUCCESS) {
 		error("bridge_free_nodecard_list(): %s", bg_err_str(rc));
 		return SLURM_ERROR;
 	}
+
 #endif
 	if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_WIRES)
 		info("making the small block");
+	if (rc != SLURM_ERROR)
+		rc = SLURM_SUCCESS;
 	return rc;
 }
 
@@ -651,35 +660,35 @@ extern int configure_block_switches(bg_record_t * bg_record)
 {
 	int rc = SLURM_SUCCESS;
 	ListIterator itr;
-	ba_node_t* ba_node = NULL;
-#ifdef HAVE_BG_FILES
-	char *bpid = NULL;
-	int first_bp=1;
+	ba_mp_t *ba_node = NULL;
+#if defined HAVE_BG_FILES
+	char *mpid = NULL;
+	int first_mp=1;
 	int first_switch=1;
 	int i = 0;
-	rm_BP_t *curr_bp = NULL;
+	rm_BP_t *curr_mp = NULL;
 	rm_switch_t *coord_switch[SYSTEM_DIMENSIONS];
 #endif
-	if (!bg_record->bg_block_list) {
+	if (!bg_record->ba_mp_list) {
 		error("There was no block_list given, can't create block");
 		return SLURM_ERROR;
 	}
 
 	bg_record->switch_count = 0;
-	bg_record->bp_count = 0;
+	bg_record->mp_count = 0;
 
-	itr = list_iterator_create(bg_record->bg_block_list);
+	itr = list_iterator_create(bg_record->ba_mp_list);
 	while ((ba_node = list_next(itr))) {
 		if (ba_node->used) {
-			bg_record->bp_count++;
+			bg_record->mp_count++;
 		}
 		bg_record->switch_count += _used_switches(ba_node);
 	}
-#ifdef HAVE_BG_FILES
+#if defined HAVE_BG_FILES
 	if ((rc = bridge_set_data(bg_record->bg_block,
 				  RM_PartitionBPNum,
-				  &bg_record->bp_count))
-	    != STATUS_OK) {
+				  &bg_record->mp_count))
+	    != SLURM_SUCCESS) {
 		fatal("bridge_set_data: RM_PartitionBPNum: %s",
 		      bg_err_str(rc));
 		rc = SLURM_ERROR;
@@ -689,7 +698,7 @@ extern int configure_block_switches(bg_record_t * bg_record)
 	if ((rc = bridge_set_data(bg_record->bg_block,
 				  RM_PartitionSwitchNum,
 				  &bg_record->switch_count))
-	    != STATUS_OK) {
+	    != SLURM_SUCCESS) {
 		fatal("bridge_set_data: RM_PartitionSwitchNum: %s",
 		      bg_err_str(rc));
 		rc = SLURM_ERROR;
@@ -698,14 +707,14 @@ extern int configure_block_switches(bg_record_t * bg_record)
 	}
 #endif
 	if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_WIRES)
-		info("BP count %d", bg_record->bp_count);
+		info("MP count %d", bg_record->mp_count);
 	if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_WIRES)
 		info("switch count %d", bg_record->switch_count);
 
 	list_iterator_reset(itr);
 	while ((ba_node = list_next(itr))) {
-#ifdef HAVE_BG_FILES
-		if (_get_bp_by_location(bg, ba_node->coord, &curr_bp)
+#if defined HAVE_BG_FILES
+		if (_get_mp_by_location(bg, ba_node->coord, &curr_mp)
 		    == SLURM_ERROR) {
 			rc = SLURM_ERROR;
 			goto cleanup;
@@ -724,23 +733,23 @@ extern int configure_block_switches(bg_record_t * bg_record)
 				     alpha_num[ba_node->coord[X]],
 				     alpha_num[ba_node->coord[Y]],
 				     alpha_num[ba_node->coord[Z]]);
-#ifdef HAVE_BG_FILES
-			if (first_bp){
+#if defined HAVE_BG_FILES
+			if (first_mp){
 				if ((rc = bridge_set_data(bg_record->bg_block,
 							  RM_PartitionFirstBP,
-							  curr_bp))
-				    != STATUS_OK) {
+							  curr_mp))
+				    != SLURM_SUCCESS) {
 					list_iterator_destroy(itr);
 					fatal("bridge_set_data("
 					      "RM_PartitionFirstBP): %s",
 					      bg_err_str(rc));
 				}
-				first_bp = 0;
+				first_mp = 0;
 			} else {
 				if ((rc = bridge_set_data(bg_record->bg_block,
 							  RM_PartitionNextBP,
-							  curr_bp))
-				    != STATUS_OK) {
+							  curr_mp))
+				    != SLURM_SUCCESS) {
 					list_iterator_destroy(itr);
 					fatal("bridge_set_data"
 					      "(RM_PartitionNextBP): %s",
@@ -749,24 +758,25 @@ extern int configure_block_switches(bg_record_t * bg_record)
 			}
 #endif
 		}
-#ifdef HAVE_BG_FILES
-		if ((rc = bridge_get_data(curr_bp, RM_BPID, &bpid))
-		    != STATUS_OK) {
+#if defined HAVE_BG_FILES
+		if ((rc = bridge_get_data(curr_mp, RM_BPID, &mpid))
+		    != SLURM_SUCCESS) {
 			list_iterator_destroy(itr);
-			fatal("bridge_get_data: RM_BPID: %s", bg_err_str(rc));
+			fatal("bridge_get_data: RM_BPID: %s",
+			      bg_err_str(rc));
 		}
 
-		if (!bpid) {
+		if (!mpid) {
 			error("No BP ID was returned from database");
 			continue;
 		}
-		if (_get_switches_by_bpid(bg, bpid, coord_switch)
+		if (_get_switches_by_mpid(bg, mpid, coord_switch)
 		    != SLURM_SUCCESS) {
-			error("Didn't get all the switches for bp %s", bpid);
-			free(bpid);
+			error("Didn't get all the switches for mp %s", mpid);
+			free(mpid);
 			continue;
 		}
-		free(bpid);
+		free(mpid);
 		for(i=0; i<SYSTEM_DIMENSIONS; i++) {
 			if (_add_switch_conns(coord_switch[i],
 					      &ba_node->axis_switch[i])
@@ -779,7 +789,7 @@ extern int configure_block_switches(bg_record_t * bg_record)
 						     bg_record->bg_block,
 						     RM_PartitionFirstSwitch,
 						     coord_switch[i]))
-					    != STATUS_OK) {
+					    != SLURM_SUCCESS) {
 						fatal("bridge_set_data("
 						      "RM_PartitionFirst"
 						      "Switch): %s",
@@ -792,7 +802,7 @@ extern int configure_block_switches(bg_record_t * bg_record)
 						     bg_record->bg_block,
 						     RM_PartitionNextSwitch,
 						     coord_switch[i]))
-					    != STATUS_OK) {
+					    != SLURM_SUCCESS) {
 						fatal("bridge_set_data("
 						      "RM_PartitionNext"
 						      "Switch): %s",
@@ -804,7 +814,7 @@ extern int configure_block_switches(bg_record_t * bg_record)
 #endif
 	}
 	rc = SLURM_SUCCESS;
-#ifdef HAVE_BG_FILES
+#if defined HAVE_BG_FILES
 cleanup:
 #endif
 	return rc;
diff --git a/src/plugins/select/bluegene/plugin/bg_block_info.h b/src/plugins/select/bluegene/bl/bridge_switch_connections.h
similarity index 77%
rename from src/plugins/select/bluegene/plugin/bg_block_info.h
rename to src/plugins/select/bluegene/bl/bridge_switch_connections.h
index ea1751fcf..c58344ae1 100644
--- a/src/plugins/select/bluegene/plugin/bg_block_info.h
+++ b/src/plugins/select/bluegene/bl/bridge_switch_connections.h
@@ -1,12 +1,15 @@
 /*****************************************************************************\
- *  bg_part_info.h - header for blue gene partition information.
+ *  bridge_switch_connections.h - Blue Gene switch management functions,
+ *  establish switch connections
+ *
+ *  $Id: bridge_switch_connections.c -1   $
  *****************************************************************************
  *  Copyright (C) 2004 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
- *  Written by Danny Auble <da@llnl.gov>
+ *  Written by Dan Phung <phung4@llnl.gov> and Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -35,15 +38,13 @@
  *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
 \*****************************************************************************/
 
-#ifndef _BG_PART_INFO_H_
-#define _BG_PART_INFO_H_
+#ifndef _BRIDGE_SWITCH_CONN_H_
+#define _BRIDGE_SWITCH_CONN_H_
+
+#include "../bg_core.h"
+#include "../ba/block_allocator.h"
 
-#include "bluegene.h"
+extern int configure_small_block(bg_record_t *bg_record);
+extern int configure_block_switches(bg_record_t * bg_record);
 
-/*****************************************************/
-extern int block_ready(struct job_record *job_ptr);
-extern void pack_block(bg_record_t *bg_record, Buf buffer,
-		       uint16_t protocol_version);
-extern int update_block_list();
-extern int update_block_list_state(List block_list);
-#endif /* _BG_PART_INFO_H_ */
+#endif
diff --git a/src/plugins/select/bluegene/bl_bgq/Makefile.am b/src/plugins/select/bluegene/bl_bgq/Makefile.am
new file mode 100644
index 000000000..041336610
--- /dev/null
+++ b/src/plugins/select/bluegene/bl_bgq/Makefile.am
@@ -0,0 +1,15 @@
+# Makefile.am for bridge_linker on a bgq system
+
+AUTOMAKE_OPTIONS = foreign
+CLEANFILES = core.*
+
+INCLUDES = -I$(top_srcdir) $(BG_INCLUDES)
+
+# making a .la
+
+noinst_LTLIBRARIES = libbridge_linker.la
+libbridge_linker_la_SOURCES   = bridge_linker.cc \
+				bridge_helper.cc bridge_helper.h \
+				bridge_status.cc bridge_status.h
+
+libbridge_linker_la_LDFLAGS = $(LIB_LDFLAGS)
diff --git a/src/plugins/select/bgq/Makefile.in b/src/plugins/select/bluegene/bl_bgq/Makefile.in
similarity index 81%
rename from src/plugins/select/bgq/Makefile.in
rename to src/plugins/select/bluegene/bl_bgq/Makefile.in
index 1b57ddc0c..9429e4136 100644
--- a/src/plugins/select/bgq/Makefile.in
+++ b/src/plugins/select/bluegene/bl_bgq/Makefile.in
@@ -15,7 +15,7 @@
 
 @SET_MAKE@
 
-# Makefile for select/bgq plugin
+# Makefile.am for bridge_linker on a bgq system
 
 VPATH = @srcdir@
 pkgdatadir = $(datadir)/@PACKAGE@
@@ -37,7 +37,7 @@ POST_UNINSTALL = :
 build_triplet = @build@
 host_triplet = @host@
 target_triplet = @target@
-subdir = src/plugins/select/bgq
+subdir = src/plugins/select/bluegene/bl_bgq
 DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in
 ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
 am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -81,35 +83,14 @@ mkinstalldirs = $(install_sh) -d
 CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h
 CONFIG_CLEAN_FILES =
 CONFIG_CLEAN_VPATH_FILES =
-am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
-am__vpath_adj = case $$p in \
-    $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
-    *) f=$$p;; \
-  esac;
-am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
-am__install_max = 40
-am__nobase_strip_setup = \
-  srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
-am__nobase_strip = \
-  for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
-am__nobase_list = $(am__nobase_strip_setup); \
-  for p in $$list; do echo "$$p $$p"; done | \
-  sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
-  $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
-    if (++n[$$2] == $(am__install_max)) \
-      { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
-    END { for (dir in files) print dir, files[dir] }'
-am__base_list = \
-  sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
-  sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
-am__installdirs = "$(DESTDIR)$(pkglibdir)"
-LTLIBRARIES = $(pkglib_LTLIBRARIES)
-select_bgq_la_LIBADD =
-am_select_bgq_la_OBJECTS = select_bgq.lo
-select_bgq_la_OBJECTS = $(am_select_bgq_la_OBJECTS)
-select_bgq_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \
+LTLIBRARIES = $(noinst_LTLIBRARIES)
+libbridge_linker_la_LIBADD =
+am_libbridge_linker_la_OBJECTS = bridge_linker.lo bridge_helper.lo \
+	bridge_status.lo
+libbridge_linker_la_OBJECTS = $(am_libbridge_linker_la_OBJECTS)
+libbridge_linker_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \
 	$(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \
-	$(CXXFLAGS) $(select_bgq_la_LDFLAGS) $(LDFLAGS) -o $@
+	$(CXXFLAGS) $(libbridge_linker_la_LDFLAGS) $(LDFLAGS) -o $@
 DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm
 depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp
 am__depfiles_maybe = depfiles
@@ -132,8 +113,8 @@ CCLD = $(CC)
 LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
 	--mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \
 	$(LDFLAGS) -o $@
-SOURCES = $(select_bgq_la_SOURCES)
-DIST_SOURCES = $(select_bgq_la_SOURCES)
+SOURCES = $(libbridge_linker_la_SOURCES)
+DIST_SOURCES = $(libbridge_linker_la_SOURCES)
 ETAGS = etags
 CTAGS = ctags
 DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
@@ -147,7 +128,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -158,7 +142,7 @@ CCDEPMODE = @CCDEPMODE@
 CFLAGS = @CFLAGS@
 CMD_LDFLAGS = @CMD_LDFLAGS@
 CPP = @CPP@
-CPPFLAGS = -DBLUEGENE_CONFIG_FILE=\"$(sysconfdir)/bluegene.conf\"
+CPPFLAGS = @CPPFLAGS@
 CXX = @CXX@
 CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
@@ -184,6 +168,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -241,6 +226,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -276,6 +262,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
@@ -329,13 +316,16 @@ top_build_prefix = @top_build_prefix@
 top_builddir = @top_builddir@
 top_srcdir = @top_srcdir@
 AUTOMAKE_OPTIONS = foreign
-PLUGIN_FLAGS = -module -avoid-version --export-dynamic
-INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common $(BG_INCLUDES)
-pkglib_LTLIBRARIES = select_bgq.la
+CLEANFILES = core.*
+INCLUDES = -I$(top_srcdir) $(BG_INCLUDES)
 
-# Linear node selection plugin.
-select_bgq_la_SOURCES = select_bgq.cc bgq.h
-select_bgq_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
+# making a .la
+noinst_LTLIBRARIES = libbridge_linker.la
+libbridge_linker_la_SOURCES = bridge_linker.cc \
+				bridge_helper.cc bridge_helper.h \
+				bridge_status.cc bridge_status.h
+
+libbridge_linker_la_LDFLAGS = $(LIB_LDFLAGS)
 all: all-am
 
 .SUFFIXES:
@@ -349,9 +339,9 @@ $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am  $(am__confi
 	      exit 1;; \
 	  esac; \
 	done; \
-	echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/plugins/select/bgq/Makefile'; \
+	echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/plugins/select/bluegene/bl_bgq/Makefile'; \
 	$(am__cd) $(top_srcdir) && \
-	  $(AUTOMAKE) --foreign src/plugins/select/bgq/Makefile
+	  $(AUTOMAKE) --foreign src/plugins/select/bluegene/bl_bgq/Makefile
 .PRECIOUS: Makefile
 Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
 	@case '$?' in \
@@ -370,39 +360,17 @@ $(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
 $(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
 	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
 $(am__aclocal_m4_deps):
-install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES)
-	@$(NORMAL_INSTALL)
-	test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)"
-	@list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \
-	list2=; for p in $$list; do \
-	  if test -f $$p; then \
-	    list2="$$list2 $$p"; \
-	  else :; fi; \
-	done; \
-	test -z "$$list2" || { \
-	  echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pkglibdir)'"; \
-	  $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pkglibdir)"; \
-	}
-
-uninstall-pkglibLTLIBRARIES:
-	@$(NORMAL_UNINSTALL)
-	@list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \
-	for p in $$list; do \
-	  $(am__strip_dir) \
-	  echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$f'"; \
-	  $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$f"; \
-	done
 
-clean-pkglibLTLIBRARIES:
-	-test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES)
-	@list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \
+clean-noinstLTLIBRARIES:
+	-test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES)
+	@list='$(noinst_LTLIBRARIES)'; for p in $$list; do \
 	  dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \
 	  test "$$dir" != "$$p" || dir=.; \
 	  echo "rm -f \"$${dir}/so_locations\""; \
 	  rm -f "$${dir}/so_locations"; \
 	done
-select_bgq.la: $(select_bgq_la_OBJECTS) $(select_bgq_la_DEPENDENCIES) 
-	$(select_bgq_la_LINK) -rpath $(pkglibdir) $(select_bgq_la_OBJECTS) $(select_bgq_la_LIBADD) $(LIBS)
+libbridge_linker.la: $(libbridge_linker_la_OBJECTS) $(libbridge_linker_la_DEPENDENCIES) 
+	$(libbridge_linker_la_LINK)  $(libbridge_linker_la_OBJECTS) $(libbridge_linker_la_LIBADD) $(LIBS)
 
 mostlyclean-compile:
 	-rm -f *.$(OBJEXT)
@@ -410,7 +378,9 @@ mostlyclean-compile:
 distclean-compile:
 	-rm -f *.tab.c
 
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/select_bgq.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/bridge_helper.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/bridge_linker.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/bridge_status.Plo@am__quote@
 
 .cc.o:
 @am__fastdepCXX_TRUE@	$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
@@ -525,9 +495,6 @@ check-am: all-am
 check: check-am
 all-am: Makefile $(LTLIBRARIES)
 installdirs:
-	for dir in "$(DESTDIR)$(pkglibdir)"; do \
-	  test -z "$$dir" || $(MKDIR_P) "$$dir"; \
-	done
 install: install-am
 install-exec: install-exec-am
 install-data: install-data-am
@@ -545,6 +512,7 @@ install-strip:
 mostlyclean-generic:
 
 clean-generic:
+	-test -z "$(CLEANFILES)" || rm -f $(CLEANFILES)
 
 distclean-generic:
 	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
@@ -555,7 +523,7 @@ maintainer-clean-generic:
 	@echo "it deletes files that may require special tools to rebuild."
 clean: clean-am
 
-clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \
+clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \
 	mostlyclean-am
 
 distclean: distclean-am
@@ -582,7 +550,7 @@ install-dvi: install-dvi-am
 
 install-dvi-am:
 
-install-exec-am: install-pkglibLTLIBRARIES
+install-exec-am:
 
 install-html: install-html-am
 
@@ -622,23 +590,22 @@ ps: ps-am
 
 ps-am:
 
-uninstall-am: uninstall-pkglibLTLIBRARIES
+uninstall-am:
 
 .MAKE: install-am install-strip
 
 .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \
-	clean-libtool clean-pkglibLTLIBRARIES ctags distclean \
+	clean-libtool clean-noinstLTLIBRARIES ctags distclean \
 	distclean-compile distclean-generic distclean-libtool \
 	distclean-tags distdir dvi dvi-am html html-am info info-am \
 	install install-am install-data install-data-am install-dvi \
 	install-dvi-am install-exec install-exec-am install-html \
 	install-html-am install-info install-info-am install-man \
-	install-pdf install-pdf-am install-pkglibLTLIBRARIES \
-	install-ps install-ps-am install-strip installcheck \
-	installcheck-am installdirs maintainer-clean \
-	maintainer-clean-generic mostlyclean mostlyclean-compile \
-	mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \
-	tags uninstall uninstall-am uninstall-pkglibLTLIBRARIES
+	install-pdf install-pdf-am install-ps install-ps-am \
+	install-strip installcheck installcheck-am installdirs \
+	maintainer-clean maintainer-clean-generic mostlyclean \
+	mostlyclean-compile mostlyclean-generic mostlyclean-libtool \
+	pdf pdf-am ps ps-am tags uninstall uninstall-am
 
 
 # Tell versions [3.59,3.63) of GNU make to not export all variables.
diff --git a/src/plugins/select/bluegene/bl_bgq/bridge_helper.cc b/src/plugins/select/bluegene/bl_bgq/bridge_helper.cc
new file mode 100644
index 000000000..67d5503c2
--- /dev/null
+++ b/src/plugins/select/bluegene/bl_bgq/bridge_helper.cc
@@ -0,0 +1,318 @@
+/*****************************************************************************\
+ *  bridge_helper.h
+ *
+ *****************************************************************************
+ *  Copyright (C) 2011 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Danny Auble <da@llnl.gov>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#include "bridge_helper.h"
+
+#ifdef HAVE_BG_FILES
+extern int bridge_handle_database_errors(
+	const char *function, const uint32_t err)
+{
+	int rc = SLURM_ERROR;
+
+	switch (err) {
+	case bgsched::DatabaseErrors::DatabaseError:
+		error("%s: Can't access to the database!", function);
+		break;
+	case bgsched::DatabaseErrors::OperationFailed:
+		error("%s: Database option Failed!", function);
+		break;
+	case bgsched::DatabaseErrors::InvalidKey:
+		error("%s: Database Invalid Key.", function);
+		break;
+	case bgsched::DatabaseErrors::DataNotFound:
+		error("%s: Data not found error.", function);
+		break;
+	case bgsched::DatabaseErrors::DuplicateEntry:
+		error("%s: We got a duplicate entry?", function);
+		break;
+	case bgsched::DatabaseErrors::XmlError:
+		error("%s: XML Error?", function);
+		break;
+	case bgsched::DatabaseErrors::ConnectionError:
+		error("%s: Can't connect to the database!", function);
+		break;
+	default:
+		error("%s: Unexpected Database exception value %d",
+		      function, err);
+	}
+	return rc;
+}
+
+extern int bridge_handle_init_errors(
+	const char *function, const uint32_t err)
+{
+	int rc = SLURM_ERROR;
+
+	switch (err) {
+	case bgsched::InitializationErrors::DatabaseInitializationFailed:
+		error("%s: Database Init failed.", function);
+		break;
+	case bgsched::InitializationErrors::MalformedPropertiesFile:
+		error("%s: Malformated Properties File.", function);
+		break;
+	case bgsched::InitializationErrors::PropertiesNotFound:
+		error("%s: Can't locate Properties File.", function);
+		break;
+	default:
+		error("%s: Unexpected Initialization exception value %d",
+		      function, err);
+	}
+	return rc;
+}
+
+extern int bridge_handle_input_errors(const char *function, const uint32_t err,
+				      bg_record_t *bg_record)
+{
+	int rc = SLURM_ERROR;
+
+	/* Not real errors */
+	switch (err) {
+	case bgsched::InputErrors::InvalidMidplaneCoordinates:
+		error("%s: Invalid midplane coodinates given.", function);
+		break;
+	case bgsched::InputErrors::InvalidLocationString:
+		error("%s: Invalid location given.", function);
+		break;
+	case bgsched::InputErrors::InvalidBlockSize:
+		error("%s: Invalid Block Size.", function);
+		break;
+	case bgsched::InputErrors::InvalidBlockName:
+		/* Not real error */
+		rc = BG_ERROR_BLOCK_NOT_FOUND;
+		error("%s: Bad block name %s!",
+		      function, bg_record->bg_block_id);
+		break;
+	case bgsched::InputErrors::InvalidBlockDescription:
+		error("%s: Invalid Block Description (%s).", function,
+		      bg_record->bg_block_id);
+		break;
+	case bgsched::InputErrors::InvalidBlockOptions:
+		error("%s: Invalid Block Options (%s).", function,
+		      bg_record->bg_block_id);
+		break;
+	case bgsched::InputErrors::InvalidBlockBootOptions:
+		error("%s: Invalid Block boot options (%s).", function,
+		      bg_record->bg_block_id);
+		break;
+	case bgsched::InputErrors::InvalidBlockMicroLoaderImage:
+		error("%s: Invalid Block microloader image (%s).", function,
+		      bg_record->bg_block_id);
+		break;
+	case bgsched::InputErrors::InvalidBlockNodeConfiguration:
+		error("%s: Invalid Block Node Configuration (%s).", function,
+		      bg_record->bg_block_id);
+		break;
+	case bgsched::InputErrors::InvalidBlockInfo:
+		error("%s: Invalid Block Info (%s).", function,
+		      bg_record->bg_block_id);
+		break;
+	case bgsched::InputErrors::InvalidNodeBoards:
+		error("%s: Invalid Node Boards.", function);
+		break;
+	case bgsched::InputErrors::InvalidDimension:
+		error("%s: Invalid Dimensions.", function);
+		break;
+	case bgsched::InputErrors::InvalidNodeBoardCount:
+		error("%s: Invalid NodeBoard count.", function);
+		break;
+	case bgsched::InputErrors::InvalidMidplanes:
+		error("%s: Invalid midplanes given.", function);
+		break;
+	case bgsched::InputErrors::InvalidPassthroughMidplanes:
+		error("%s: Invalid passthrough midplanes given.", function);
+		break;
+	case bgsched::InputErrors::InvalidConnectivity:
+		error("%s: Invalid connectivity given.", function);
+		break;
+	case bgsched::InputErrors::BlockNotFound:
+		/* Not real error */
+		rc = BG_ERROR_BLOCK_NOT_FOUND;
+		debug2("%s: Unknown block %s!",
+		       function, bg_record->bg_block_id);
+		break;
+	case bgsched::InputErrors::BlockNotAdded:
+		error("%s: For some reason the block was not added.", function);
+		break;
+	case bgsched::InputErrors::BlockNotCreated:
+		error("%s: can not create block from input arguments",
+		      function);
+		break;
+	case bgsched::InputErrors::InvalidUser:
+		error("%s: Invalid User given.", function);
+		break;
+	default:
+		error("%s: Unexpected Input exception value %d",
+		      function, err);
+		rc = SLURM_ERROR;
+	}
+	if (bg_record && (rc == SLURM_SUCCESS)) {
+		/* Make sure we set this to free since if it isn't in
+		   the system and we are waiting for it to be free, we
+		   will be waiting around for a long time ;).
+		*/
+		bg_record->state = BG_BLOCK_FREE;
+	}
+	return rc;
+}
+
+extern int bridge_handle_internal_errors(
+	const char *function, const uint32_t err)
+{
+	int rc = SLURM_ERROR;
+
+	switch (err) {
+	case bgsched::InternalErrors::XMLParseError:
+		error("%s: XML Parse Error.", function);
+		break;
+	case bgsched::InternalErrors::InconsistentDataError:
+		error("%s: Inconsistent Data Error.", function);
+		break;
+	case bgsched::InternalErrors::UnexpectedError:
+		error("%s: Unexpected Error returned.", function);
+		break;
+	default:
+		error("%s: Unexpected Internal exception value %d",
+		      function, err);
+	}
+	return rc;
+}
+
+extern int bridge_handle_runtime_errors(const char *function,
+					const uint32_t err,
+					bg_record_t *bg_record)
+{
+	int rc = SLURM_ERROR;
+
+	switch (err) {
+	case bgsched::RuntimeErrors::BlockBootError:
+		error("%s: Error booting block %s.", function,
+		      bg_record->bg_block_id);
+		rc = BG_ERROR_BOOT_ERROR;
+		break;
+	case bgsched::RuntimeErrors::BlockFreeError:
+		/* not a real error */
+		rc = BG_ERROR_INVALID_STATE;
+		debug2("%s: Error freeing block %s.", function,
+		       bg_record->bg_block_id);
+		break;
+	case bgsched::RuntimeErrors::BlockCreateError:
+		error("%s: Error creating block %s.", function,
+		      bg_record->bg_block_id);
+		break;
+	case bgsched::RuntimeErrors::BlockAddError:
+		error("%s: Error Setting block %s owner.", function,
+		      bg_record->bg_block_id);
+		break;
+	case bgsched::RuntimeErrors::InvalidBlockState:
+		/* not a real error */
+		rc = BG_ERROR_INVALID_STATE;
+		error("%s: Error can't perform task with block %s in state %s",
+		      function, bg_record->bg_block_id,
+		      bg_block_state_string(bg_record->state));
+		break;
+	case bgsched::RuntimeErrors::DimensionOutOfRange:
+	 	error("%s: Dimension out of Range.", function);
+	        break;
+	case bgsched::RuntimeErrors::AuthorityError:
+	 	error("%s: Authority Error.", function);
+	        break;
+	default:
+		error("%s: Unexpected Runtime exception value %d.",
+		      function, err);
+	}
+	return rc;
+}
+
+extern uint16_t bridge_translate_status(bgsched::Block::Status state_in)
+{
+	switch (state_in) {
+	case Block::Allocated:
+		return BG_BLOCK_ALLOCATED;
+		break;
+	case Block::Booting:
+		return BG_BLOCK_BOOTING;
+		break;
+	case Block::Free:
+		return BG_BLOCK_FREE;
+		break;
+	case Block::Initialized:
+		return BG_BLOCK_INITED;
+		break;
+	case Block::Terminating:
+		return BG_BLOCK_TERM;
+		break;
+	default:
+		return BG_BLOCK_ERROR_FLAG;
+		break;
+	}
+	error("unknown block state %d", state_in);
+	return BG_BLOCK_NAV;
+}
+
+extern uint16_t bridge_translate_switch_usage(bgsched::Switch::InUse usage_in)
+{
+	switch (usage_in) {
+	case Switch::NotInUse:
+		return BG_SWITCH_NONE;
+		break;
+	case Switch::IncludedBothPortsInUse:
+		return BG_SWITCH_TORUS;
+		break;
+	case Switch::IncludedOutputPortInUse:
+		return (BG_SWITCH_OUT | BG_SWITCH_OUT_PASS);
+		break;
+	case Switch::IncludedInputPortInUse:
+		return (BG_SWITCH_IN | BG_SWITCH_IN_PASS);
+		break;
+	case Switch::Wrapped:
+		return BG_SWITCH_WRAPPED;
+		break;
+	case Switch::Passthrough:
+		return BG_SWITCH_PASS;
+		break;
+	case Switch::WrappedPassthrough:
+		return BG_SWITCH_WRAPPED_PASS;
+		break;
+	default:
+		error("unknown switch usage %d", usage_in);
+		break;
+	}
+
+	return BG_SWITCH_NONE;
+}
+#endif
diff --git a/src/plugins/select/bluegene/bl_bgq/bridge_helper.h b/src/plugins/select/bluegene/bl_bgq/bridge_helper.h
new file mode 100644
index 000000000..51955c4c1
--- /dev/null
+++ b/src/plugins/select/bluegene/bl_bgq/bridge_helper.h
@@ -0,0 +1,82 @@
+/*****************************************************************************\
+ *  bridge_helper.h
+ *
+ *****************************************************************************
+ *  Copyright (C) 2011 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Danny Auble <da@llnl.gov>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifndef _BRIDGE_HELPER_H_
+#define _BRIDGE_HELPER_H_
+
+extern "C" {
+#include "../bg_enums.h"
+#include "../bg_record_functions.h"
+}
+
+#ifdef HAVE_BG_FILES
+
+#include <bgsched/DatabaseException.h>
+#include <bgsched/InitializationException.h>
+#include <bgsched/InputException.h>
+#include <bgsched/InternalException.h>
+#include <bgsched/RuntimeException.h>
+#include <bgsched/Switch.h>
+#include <bgsched/bgsched.h>
+#include <bgsched/Block.h>
+#include <bgsched/core/core.h>
+
+#include <boost/foreach.hpp>
+
+using namespace std;
+using namespace bgsched;
+using namespace bgsched::core;
+
+extern int bridge_handle_database_errors(
+	const char *function, const uint32_t err);
+extern int bridge_handle_init_errors(
+	const char *function, const uint32_t err);
+extern int bridge_handle_input_errors(const char *function, const uint32_t err,
+				      bg_record_t *bg_record);
+extern int bridge_handle_internal_errors(
+	const char *function, const uint32_t err);
+extern int bridge_handle_runtime_errors(const char *function,
+					const uint32_t err,
+					bg_record_t *bg_record);
+
+extern uint16_t bridge_translate_status(bgsched::Block::Status state_in);
+extern uint16_t bridge_translate_switch_usage(bgsched::Switch::InUse usage_in);
+
+#endif
+
+#endif
diff --git a/src/plugins/select/bluegene/bl_bgq/bridge_linker.cc b/src/plugins/select/bluegene/bl_bgq/bridge_linker.cc
new file mode 100644
index 000000000..560f361a3
--- /dev/null
+++ b/src/plugins/select/bluegene/bl_bgq/bridge_linker.cc
@@ -0,0 +1,1067 @@
+/*****************************************************************************\
+ *  bridge_linker.cc
+ *
+ *****************************************************************************
+ *  Copyright (C) 2011 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Danny Auble <da@llnl.gov>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#if HAVE_CONFIG_H
+/* needed to figure out if HAVE_BG_FILES is set */
+#  include "config.h"
+#endif
+
+#ifdef HAVE_BG_FILES
+/* These need to be the first declared since on line 187 of
+ * /bgsys/drivers/ppcfloor/extlib/include/log4cxx/helpers/transcoder.h
+ * there is a nice generic BUFSIZE declared and the BUFSIZE declared
+ * elsewhere in SLURM will cause errors when compiling.
+ */
+#include <log4cxx/fileappender.h>
+#include <log4cxx/logger.h>
+#include <log4cxx/patternlayout.h>
+
+#endif
+
+extern "C" {
+#include "../ba_bgq/block_allocator.h"
+#include "../bg_record_functions.h"
+#include "src/common/parse_time.h"
+#include "src/common/uid.h"
+}
+
+#include "bridge_status.h"
+
+/* local vars */
+//static pthread_mutex_t api_file_mutex = PTHREAD_MUTEX_INITIALIZER;
+static bool initialized = false;
+
+
+#ifdef HAVE_BG_FILES
+
+static void _setup_ba_mp(ComputeHardware::ConstPtr bgq, ba_mp_t *ba_mp)
+{
+	// int i;
+	Coordinates::Coordinates coords(ba_mp->coord[A], ba_mp->coord[X],
+					ba_mp->coord[Y], ba_mp->coord[Z]);
+	Midplane::ConstPtr mp_ptr;
+	int i;
+
+	try {
+		mp_ptr = bgq->getMidplane(coords);
+	} catch (const bgsched::InputException& err) {
+		int rc = bridge_handle_input_errors(
+			"ComputeHardware::getMidplane",
+			err.getError().toValue(), NULL);
+		if (rc != SLURM_SUCCESS)
+			return;
+	}
+
+	ba_mp->loc = xstrdup(mp_ptr->getLocation().c_str());
+
+	ba_mp->nodecard_loc =
+		(char **)xmalloc(sizeof(char *) * bg_conf->mp_nodecard_cnt);
+	for (i=0; i<bg_conf->mp_nodecard_cnt; i++) {
+		NodeBoard::ConstPtr nodeboard = mp_ptr->getNodeBoard(i);
+		ba_mp->nodecard_loc[i] =
+			xstrdup(nodeboard->getLocation().c_str());
+	}
+}
+
+static bg_record_t * _translate_object_to_block(const Block::Ptr &block_ptr)
+{
+	bg_record_t *bg_record = (bg_record_t *)xmalloc(sizeof(bg_record_t));
+	Block::Midplanes midplane_vec;
+	hostlist_t hostlist;
+	char *node_char = NULL;
+	char mp_str[256];
+
+	bg_record->magic = BLOCK_MAGIC;
+	bg_record->bg_block_id = xstrdup(block_ptr->getName().c_str());
+	bg_record->cnode_cnt = block_ptr->getComputeNodeCount();
+	bg_record->cpu_cnt = bg_conf->cpu_ratio * bg_record->cnode_cnt;
+
+	if (block_ptr->isSmall()) {
+		char bitstring[BITSIZE];
+		int io_cnt, io_start, len;
+		Block::NodeBoards nodeboards =
+			block_ptr->getNodeBoards();
+		int nb_cnt = nodeboards.size();
+		std::string nb_name = *(nodeboards.begin());
+
+		if ((io_cnt = nb_cnt * bg_conf->io_ratio))
+			io_cnt--;
+
+		/* From the first nodecard id we can figure
+		   out where to start from with the alloc of ionodes.
+		*/
+		len = nb_name.length()-2;
+		io_start = atoi((char*)nb_name.c_str()+len) * bg_conf->io_ratio;
+
+		bg_record->ionode_bitmap = bit_alloc(bg_conf->ionodes_per_mp);
+		/* Set the correct ionodes being used in this block */
+		bit_nset(bg_record->ionode_bitmap,
+			 io_start, io_start+io_cnt);
+		bit_fmt(bitstring, BITSIZE, bg_record->ionode_bitmap);
+		bg_record->ionode_str = xstrdup(bitstring);
+		debug3("%s uses ionodes %s",
+		       bg_record->bg_block_id,
+		       bg_record->ionode_str);
+		bg_record->conn_type[0] = SELECT_SMALL;
+	} else {
+		for (Dimension dim=Dimension::A; dim<=Dimension::D; dim++) {
+			bg_record->conn_type[dim] =
+				block_ptr->isTorus(dim) ?
+				SELECT_TORUS : SELECT_MESH;
+		}
+		/* Set the bitmap blank here if it is a full
+		   node we don't want anything set we also
+		   don't want the bg_record->ionode_str set.
+		*/
+		bg_record->ionode_bitmap =
+			bit_alloc(bg_conf->ionodes_per_mp);
+	}
+
+	hostlist = hostlist_create(NULL);
+	midplane_vec = block_ptr->getMidplanes();
+	BOOST_FOREACH(const std::string midplane, midplane_vec) {
+		char temp[256];
+		ba_mp_t *curr_mp = loc2ba_mp((char *)midplane.c_str());
+		if (!curr_mp) {
+			error("Unknown midplane for %s",
+			      midplane.c_str());
+			continue;
+		}
+		snprintf(temp, sizeof(temp), "%s%s",
+			 bg_conf->slurm_node_prefix,
+			 curr_mp->coord_str);
+
+		hostlist_push(hostlist, temp);
+	}
+	bg_record->mp_str = hostlist_ranged_string_xmalloc(hostlist);
+	hostlist_destroy(hostlist);
+	debug3("got nodes of %s", bg_record->mp_str);
+
+	process_nodes(bg_record, true);
+
+	reset_ba_system(true);
+	if (ba_set_removable_mps(bg_record->mp_bitmap, 1) != SLURM_SUCCESS)
+		fatal("It doesn't seem we have a bitmap for %s",
+		      bg_record->bg_block_id);
+
+	if (bg_record->ba_mp_list)
+		list_flush(bg_record->ba_mp_list);
+	else
+		bg_record->ba_mp_list = list_create(destroy_ba_mp);
+
+	node_char = set_bg_block(bg_record->ba_mp_list,
+				 bg_record->start,
+				 bg_record->geo,
+				 bg_record->conn_type);
+	ba_reset_all_removed_mps();
+	if (!node_char)
+		fatal("I was unable to make the requested block.");
+
+	snprintf(mp_str, sizeof(mp_str), "%s%s",
+		 bg_conf->slurm_node_prefix,
+		 node_char);
+
+	xfree(node_char);
+	if (strcmp(mp_str, bg_record->mp_str)) {
+		fatal("Couldn't make unknown block %s in our wiring.  "
+		      "Something is wrong with our algo.  Remove this block "
+		      "to continue (found %s, but allocated %s) "
+		      "YOU MUST COLDSTART",
+		      bg_record->bg_block_id, mp_str, bg_record->mp_str);
+	}
+
+	return bg_record;
+}
+#endif
+
+static int _block_wait_for_jobs(char *bg_block_id)
+{
+#ifdef HAVE_BG_FILES
+	std::vector<Job::ConstPtr> job_vec;
+	JobFilter job_filter;
+	JobFilter::Statuses job_statuses;
+#endif
+
+	if (!bridge_init(NULL))
+		return SLURM_ERROR;
+
+	if (!bg_block_id) {
+		error("no block name given");
+		return SLURM_ERROR;
+	}
+
+#ifdef HAVE_BG_FILES
+
+	job_filter.setComputeBlockName(bg_block_id);
+
+	/* I think these are all the states we need. */
+	job_statuses.insert(Job::Setup);
+	job_statuses.insert(Job::Loading);
+	job_statuses.insert(Job::Starting);
+	job_statuses.insert(Job::Running);
+	job_statuses.insert(Job::Cleanup);
+	job_filter.setStatuses(&job_statuses);
+
+	while (1) {
+		job_vec = getJobs(job_filter);
+		if (job_vec.empty())
+			return SLURM_SUCCESS;
+
+		BOOST_FOREACH(const Job::ConstPtr& job_ptr, job_vec) {
+			debug("waiting on mmcs job %lu to finish on block %s",
+			      job_ptr->getId(), bg_block_id);
+		}
+		sleep(POLL_INTERVAL);
+	}
+#endif
+	return SLURM_SUCCESS;
+}
+
+static void _remove_jobs_on_block_and_reset(char *block_id)
+{
+	bg_record_t *bg_record = NULL;
+	int job_remove_failed = 0;
+
+	if (!block_id) {
+		error("_remove_jobs_on_block_and_reset: no block name given");
+		return;
+	}
+
+	if (_block_wait_for_jobs(block_id) != SLURM_SUCCESS)
+		job_remove_failed = 1;
+
+	/* remove the block's users */
+	slurm_mutex_lock(&block_state_mutex);
+	bg_record = find_bg_record_in_list(bg_lists->main, block_id);
+	if (bg_record) {
+		debug("got the record %s user is %s",
+		      bg_record->bg_block_id,
+		      bg_record->user_name);
+
+		if (job_remove_failed) {
+			if (bg_record->mp_str)
+				slurm_drain_nodes(
+					bg_record->mp_str,
+					(char *)
+					"_term_agent: Couldn't remove job",
+					slurm_get_slurm_user_id());
+			else
+				error("Block %s doesn't have a node list.",
+				      block_id);
+		}
+
+		bg_reset_block(bg_record);
+	} else if (bg_conf->layout_mode == LAYOUT_DYNAMIC) {
+		debug2("Hopefully we are destroying this block %s "
+		       "since it isn't in the bg_lists->main",
+		       block_id);
+	}
+
+	slurm_mutex_unlock(&block_state_mutex);
+
+}
+
+extern int bridge_init(char *properties_file)
+{
+	if (initialized)
+		return 1;
+
+	if (bg_recover == NOT_FROM_CONTROLLER)
+		return 0;
+
+#ifdef HAVE_BG_FILES
+	if (!properties_file)
+		properties_file = (char *)"";
+	bgsched::init(properties_file);
+#endif
+	bridge_status_init();
+	initialized = true;
+
+	return 1;
+}
+
+extern int bridge_fini()
+{
+	initialized = false;
+	if (bg_recover != NOT_FROM_CONTROLLER)
+		bridge_status_fini();
+
+	return SLURM_SUCCESS;
+}
+
+extern int bridge_get_size(int *size)
+{
+	if (!bridge_init(NULL))
+		return SLURM_ERROR;
+#ifdef HAVE_BG_FILES
+	memset(size, 0, sizeof(int) * SYSTEM_DIMENSIONS);
+
+	Coordinates bgq_size = core::getMachineSize();
+	for (int dim=0; dim< SYSTEM_DIMENSIONS; dim++)
+		size[dim] = bgq_size[dim];
+#endif
+
+	return SLURM_SUCCESS;
+}
+
+extern int bridge_setup_system()
+{
+	static bool inited = false;
+
+	if (inited)
+		return SLURM_SUCCESS;
+
+	if (!bridge_init(NULL))
+		return SLURM_ERROR;
+
+	inited = true;
+#ifdef HAVE_BG_FILES
+	ComputeHardware::ConstPtr bgq = getComputeHardware();
+
+	for (int a = 0; a < DIM_SIZE[A]; a++)
+		for (int x = 0; x < DIM_SIZE[X]; x++)
+			for (int y = 0; y < DIM_SIZE[Y]; y++)
+				for (int z = 0; z < DIM_SIZE[Z]; z++)
+					_setup_ba_mp(
+						bgq, &ba_main_grid[a][x][y][z]);
+#endif
+
+	return SLURM_SUCCESS;
+}
+
+extern int bridge_block_create(bg_record_t *bg_record)
+{
+	int rc = SLURM_SUCCESS;
+
+#ifdef HAVE_BG_FILES
+	Block::Ptr block_ptr;
+	Block::Midplanes midplanes;
+	Block::NodeBoards nodecards;
+        Block::PassthroughMidplanes pt_midplanes;
+        Block::DimensionConnectivity conn_type;
+	Midplane::Ptr midplane;
+	Dimension dim;
+	ba_mp_t *ba_mp = NULL;
+#endif
+
+	if (!bridge_init(NULL))
+		return SLURM_ERROR;
+
+	if (!bg_record->ba_mp_list || !list_count(bg_record->ba_mp_list)) {
+		error("There are no midplanes in this block?");
+		return SLURM_ERROR;
+	}
+
+	if (!bg_record->bg_block_id) {
+		struct tm my_tm;
+		struct timeval my_tv;
+		/* set up a common unique name */
+		gettimeofday(&my_tv, NULL);
+		localtime_r(&my_tv.tv_sec, &my_tm);
+		bg_record->bg_block_id = xstrdup_printf(
+			"RMP%2.2d%2.2s%2.2d%2.2d%2.2d%3.3ld",
+			my_tm.tm_mday, mon_abbr(my_tm.tm_mon),
+			my_tm.tm_hour, my_tm.tm_min, my_tm.tm_sec,
+			my_tv.tv_usec/1000);
+#ifndef HAVE_BG_FILES
+		/* Since we divide by 1000 here we need to sleep that
+		   long to get a unique id. It takes longer than this
+		   in a real system so we don't worry about it. */
+		usleep(1000);
+#endif
+	}
+
+
+#ifdef HAVE_BG_FILES
+	if (bg_record->conn_type[0] == SELECT_SMALL) {
+		bool use_nc[bg_conf->mp_nodecard_cnt];
+		int i, nc_pos = 0, num_ncards = 0;
+
+		num_ncards = bg_record->cnode_cnt/bg_conf->nodecard_cnode_cnt;
+		if (num_ncards < 1) {
+			error("You have to have at least 1 nodecard to make "
+			      "a small block I got %d/%d = %d",
+			      bg_record->cnode_cnt, bg_conf->nodecard_cnode_cnt,
+			      num_ncards);
+			return SLURM_ERROR;
+		}
+		memset(use_nc, 0, sizeof(use_nc));
+
+		/* find out how many nodecards to get for each ionode */
+		for (i = 0; i<bg_conf->ionodes_per_mp; i++) {
+			if (bit_test(bg_record->ionode_bitmap, i)) {
+				for (int j=0; j<bg_conf->nc_ratio; j++)
+					use_nc[nc_pos+j] = 1;
+			}
+			nc_pos += bg_conf->nc_ratio;
+		}
+		// char tmp_char[256];
+		// format_node_name(bg_record, tmp_char, sizeof(tmp_char));
+		// info("creating %s %s", bg_record->bg_block_id, tmp_char);
+		ba_mp = (ba_mp_t *)list_peek(bg_record->ba_mp_list);
+		/* Since the nodeboard locations aren't set up in the
+		   copy of this pointer we need to go out a get the
+		   real one from the system and use it.
+		*/
+		ba_mp = coord2ba_mp(ba_mp->coord);
+		for (i=0; i<bg_conf->mp_nodecard_cnt; i++) {
+			if (use_nc[i] && ba_mp)
+				nodecards.push_back(ba_mp->nodecard_loc[i]);
+		}
+
+		try {
+			block_ptr = Block::create(nodecards);
+		} catch (const bgsched::InputException& err) {
+			rc = bridge_handle_input_errors(
+				"Block::createSmallBlock",
+				err.getError().toValue(),
+				bg_record);
+			if (rc != SLURM_SUCCESS)
+				return rc;
+		}
+	} else {
+		ListIterator itr = list_iterator_create(bg_record->ba_mp_list);
+		while ((ba_mp = (ba_mp_t *)list_next(itr))) {
+			/* Since the midplane locations aren't set up in the
+			   copy of this pointer we need to go out a get the
+			   real one from the system and use it.
+			*/
+			ba_mp_t *main_mp = coord2ba_mp(ba_mp->coord);
+			if (!main_mp)
+				continue;
+			info("got %s(%s) %d", main_mp->coord_str,
+			     main_mp->loc, ba_mp->used);
+			if (ba_mp->used)
+				midplanes.push_back(main_mp->loc);
+			else
+				pt_midplanes.push_back(main_mp->loc);
+		}
+		list_iterator_destroy(itr);
+
+		for (dim=Dimension::A; dim<=Dimension::D; dim++) {
+			switch (bg_record->conn_type[dim]) {
+			case SELECT_MESH:
+				conn_type[dim] = Block::Connectivity::Mesh;
+				break;
+			case SELECT_TORUS:
+			default:
+				conn_type[dim] = Block::Connectivity::Torus;
+				break;
+			}
+		}
+		try {
+			block_ptr = Block::create(midplanes,
+						  pt_midplanes, conn_type);
+		} catch (const bgsched::InputException& err) {
+			rc = bridge_handle_input_errors(
+				"Block::create",
+				err.getError().toValue(),
+				bg_record);
+			if (rc != SLURM_SUCCESS) {
+				assert(0);
+				return rc;
+			}
+		}
+	}
+
+	info("block created correctly");
+	block_ptr->setName(bg_record->bg_block_id);
+	block_ptr->setMicroLoaderImage(bg_record->mloaderimage);
+
+	try {
+		block_ptr->add("");
+		// block_ptr->addUser(bg_record->bg_block_id,
+		// 		   bg_record->user_name);
+		//info("got past add");
+	} catch (const bgsched::InputException& err) {
+		rc = bridge_handle_input_errors("Block::add",
+						err.getError().toValue(),
+						bg_record);
+		if (rc != SLURM_SUCCESS)
+			return rc;
+	} catch (const bgsched::RuntimeException& err) {
+		rc = bridge_handle_runtime_errors("Block::add",
+						  err.getError().toValue(),
+						  bg_record);
+		if (rc != SLURM_SUCCESS)
+			return rc;
+	} catch (...) {
+                error("Unknown error from Block::Add().");
+		rc = SLURM_ERROR;
+	}
+
+#endif
+
+	return rc;
+}
+
+/*
+ * Boot a block. Block state expected to be FREE upon entry.
+ * NOTE: This function does not wait for the boot to complete.
+ * the slurm prolog script needs to perform the waiting.
+ * NOTE: block_state_mutex needs to be locked before entering.
+ */
+/* Boot (initialize) the given block.
+ *
+ * bg_record must be a valid block record with bg_block_id set; the
+ * caller holds block_state_mutex (see the NOTE above).  Verifies the
+ * block's I/O connectivity, ensures only the SLURM user is on the
+ * block, then requests the boot.  Returns SLURM_SUCCESS, SLURM_ERROR,
+ * a translated bgsched error code, or BG_ERROR_NO_IOBLOCK_CONNECTED.
+ */
+extern int bridge_block_boot(bg_record_t *bg_record)
+{
+	int rc = SLURM_SUCCESS;
+
+	/* Validate the pointer BEFORE dereferencing it: the original
+	 * code read bg_record->magic first and only then tested
+	 * bg_record for NULL, which crashes on a NULL bg_record. */
+	if (!bg_record || !bg_record->bg_block_id)
+		return SLURM_ERROR;
+
+	if (bg_record->magic != BLOCK_MAGIC) {
+		error("boot_block: magic was bad");
+		return SLURM_ERROR;
+	}
+
+	if (!bridge_init(NULL))
+		return SLURM_ERROR;
+
+#ifdef HAVE_BG_FILES
+	/* Lets see if we are connected to the IO. */
+	try {
+		uint32_t avail, unavail;
+		Block::checkIOLinksSummary(bg_record->bg_block_id,
+					   &avail, &unavail);
+	} catch (const bgsched::DatabaseException& err) {
+		rc = bridge_handle_database_errors("Block::checkIOLinksSummary",
+						   err.getError().toValue());
+		if (rc != SLURM_SUCCESS)
+			return rc;
+	} catch (const bgsched::InputException& err) {
+		rc = bridge_handle_input_errors("Block::checkIOLinksSummary",
+						err.getError().toValue(),
+						bg_record);
+		if (rc != SLURM_SUCCESS)
+			return rc;
+	} catch (const bgsched::InternalException& err) {
+		rc = bridge_handle_internal_errors("Block::checkIOLinksSummary",
+						err.getError().toValue());
+		if (rc != SLURM_SUCCESS)
+			return rc;
+	} catch (...) {
+                error("checkIOLinksSummary request failed ... continuing.");
+		rc = SLURM_ERROR;
+	}
+
+	try {
+		std::vector<std::string> mp_vec;
+		if (!Block::isIOConnected(bg_record->bg_block_id, &mp_vec)) {
+			error("block %s is not IOConnected, "
+			      "contact your admin. Midplanes not "
+			      "connected are ...", bg_record->bg_block_id);
+			BOOST_FOREACH(const std::string& mp, mp_vec) {
+				error("%s", mp.c_str());
+			}
+			return BG_ERROR_NO_IOBLOCK_CONNECTED;
+		}
+	} catch (const bgsched::DatabaseException& err) {
+		rc = bridge_handle_database_errors("Block::isIOConnected",
+						   err.getError().toValue());
+		if (rc != SLURM_SUCCESS)
+			return rc;
+	} catch (const bgsched::InputException& err) {
+		rc = bridge_handle_input_errors("Block::isIOConnected",
+						err.getError().toValue(),
+						bg_record);
+		if (rc != SLURM_SUCCESS)
+			return rc;
+	} catch (const bgsched::InternalException& err) {
+		rc = bridge_handle_internal_errors("Block::isIOConnected",
+						err.getError().toValue());
+		if (rc != SLURM_SUCCESS)
+			return rc;
+	} catch (...) {
+                error("isIOConnected request failed ... continuing.");
+		rc = SLURM_ERROR;
+	}
+
+	/* Clear all other users, then put the SLURM user on the block. */
+	if ((rc = bridge_block_remove_all_users(
+		     bg_record, bg_conf->slurm_user_name)) == REMOVE_USER_ERR) {
+		error("bridge_block_remove_all_users: Something "
+		      "happened removing users from block %s",
+		      bg_record->bg_block_id);
+		return SLURM_ERROR;
+	} else if (rc == REMOVE_USER_NONE && bg_conf->slurm_user_name)
+		rc = bridge_block_add_user(bg_record, bg_conf->slurm_user_name);
+
+	if (rc != SLURM_SUCCESS)
+		return SLURM_ERROR;
+
+        try {
+		Block::initiateBoot(bg_record->bg_block_id);
+	} catch (const bgsched::RuntimeException& err) {
+		rc = bridge_handle_runtime_errors("Block::initiateBoot",
+						err.getError().toValue(),
+						bg_record);
+		if (rc != SLURM_SUCCESS)
+			return rc;
+	} catch (const bgsched::DatabaseException& err) {
+		rc = bridge_handle_database_errors("Block::initiateBoot",
+						   err.getError().toValue());
+		if (rc != SLURM_SUCCESS)
+			return rc;
+	} catch (const bgsched::InputException& err) {
+		rc = bridge_handle_input_errors("Block::initiateBoot",
+						err.getError().toValue(),
+						bg_record);
+		if (rc != SLURM_SUCCESS)
+			return rc;
+	} catch (...) {
+                error("Boot block request failed ... continuing.");
+		rc = SLURM_ERROR;
+	}
+	/* Set this here just to make sure we know we are supposed to
+	   be booting.  Just in case the block goes free before we
+	   notice we are configuring.
+	*/
+	bg_record->boot_state = BG_BLOCK_BOOTING;
+#else
+	/* Emulation mode: mark the block booted immediately. */
+	info("block %s is ready", bg_record->bg_block_id);
+	if (!block_ptr_exist_in_list(bg_lists->booted, bg_record))
+	 	list_push(bg_lists->booted, bg_record);
+	bg_record->state = BG_BLOCK_INITED;
+	last_bg_update = time(NULL);
+#endif
+	return rc;
+}
+
+/* Ask the control system to free (deallocate) the given block.
+ *
+ * Returns SLURM_SUCCESS, SLURM_ERROR on bad arguments or init
+ * failure, or whatever the bridge_handle_* translators map the
+ * bgsched exception to.  Without HAVE_BG_FILES (emulation) the
+ * block is simply marked free. */
+extern int bridge_block_free(bg_record_t *bg_record)
+{
+	int rc = SLURM_SUCCESS;
+	if (!bridge_init(NULL))
+		return SLURM_ERROR;
+
+	if (!bg_record || !bg_record->bg_block_id)
+		return SLURM_ERROR;
+
+	info("freeing block %s", bg_record->bg_block_id);
+
+#ifdef HAVE_BG_FILES
+	/* Translate each bgsched exception class into a SLURM return
+	 * code; a non-success translation aborts immediately, an
+	 * unknown exception is logged and reported as SLURM_ERROR. */
+	try {
+		Block::initiateFree(bg_record->bg_block_id);
+	} catch (const bgsched::RuntimeException& err) {
+		rc = bridge_handle_runtime_errors("Block::initiateFree",
+						  err.getError().toValue(),
+						  bg_record);
+		if (rc != SLURM_SUCCESS)
+			return rc;
+	} catch (const bgsched::DatabaseException& err2) {
+		rc = bridge_handle_database_errors("Block::initiateFree",
+						   err2.getError().toValue());
+		if (rc != SLURM_SUCCESS)
+			return rc;
+	} catch (const bgsched::InputException& err3) {
+		rc = bridge_handle_input_errors("Block::initiateFree",
+						err3.getError().toValue(),
+						bg_record);
+		if (rc != SLURM_SUCCESS)
+			return rc;
+	} catch(...) {
+                error("Free block request failed ... continuing.");
+		rc = SLURM_ERROR;
+	}
+#else
+	bg_record->state = BG_BLOCK_FREE;
+#endif
+	return rc;
+}
+
+/* Remove (delete) the block definition from the control system.
+ *
+ * Returns SLURM_SUCCESS, SLURM_ERROR on bad arguments or init
+ * failure, or a translated bgsched error code.  In emulation mode
+ * (no HAVE_BG_FILES) this is a no-op that returns success. */
+extern int bridge_block_remove(bg_record_t *bg_record)
+{
+	int rc = SLURM_SUCCESS;
+	if (!bridge_init(NULL))
+		return SLURM_ERROR;
+
+	if (!bg_record || !bg_record->bg_block_id)
+		return SLURM_ERROR;
+
+	info("removing block %s %p", bg_record->bg_block_id, bg_record);
+
+#ifdef HAVE_BG_FILES
+	try {
+		Block::remove(bg_record->bg_block_id);
+	} catch (const bgsched::RuntimeException& err) {
+		rc = bridge_handle_runtime_errors("Block::remove",
+						  err.getError().toValue(),
+						  bg_record);
+		if (rc != SLURM_SUCCESS)
+			return rc;
+	} catch (const bgsched::DatabaseException& err) {
+		rc = bridge_handle_database_errors("Block::remove",
+						   err.getError().toValue());
+		if (rc != SLURM_SUCCESS)
+			return rc;
+	} catch (const bgsched::InputException& err) {
+		rc = bridge_handle_input_errors("Block::remove",
+						err.getError().toValue(),
+						bg_record);
+		if (rc != SLURM_SUCCESS)
+			return rc;
+	} catch(...) {
+                error("Remove block request failed ... continuing.");
+		rc = SLURM_ERROR;
+	}
+#endif
+	return rc;
+}
+
+/* Add a user to the given block.
+ *
+ * Returns SLURM_SUCCESS, SLURM_ERROR on bad arguments or init
+ * failure, or a translated bgsched error code.  In emulation mode
+ * only the info message is produced. */
+extern int bridge_block_add_user(bg_record_t *bg_record, char *user_name)
+{
+	int rc = SLURM_SUCCESS;
+	if (!bridge_init(NULL))
+		return SLURM_ERROR;
+
+	if (!bg_record || !bg_record->bg_block_id || !user_name)
+		return SLURM_ERROR;
+
+	info("adding user %s to block %s", user_name, bg_record->bg_block_id);
+#ifdef HAVE_BG_FILES
+        try {
+		Block::addUser(bg_record->bg_block_id, user_name);
+	} catch (const bgsched::InputException& err) {
+		rc = bridge_handle_input_errors("Block::addUser",
+						err.getError().toValue(),
+						bg_record);
+		if (rc != SLURM_SUCCESS)
+			return rc;
+	} catch (const bgsched::RuntimeException& err) {
+		rc = bridge_handle_runtime_errors("Block::addUser",
+						  err.getError().toValue(),
+						  bg_record);
+		if (rc != SLURM_SUCCESS)
+			return rc;
+	} catch(...) {
+                error("Add block user request failed ... continuing.");
+		rc = SLURM_ERROR;
+	}
+#endif
+	return rc;
+}
+
+/* Remove a single user from a block.
+ *
+ * Returns SLURM_SUCCESS on success, SLURM_ERROR on bad arguments or
+ * init failure, a translated bgsched error code, or REMOVE_USER_ERR
+ * when the request fails for an unknown reason. */
+extern int bridge_block_remove_user(bg_record_t *bg_record, char *user_name)
+{
+	int rc = SLURM_SUCCESS;
+
+	if (!bridge_init(NULL))
+		return SLURM_ERROR;
+	if (!bg_record || !bg_record->bg_block_id || !user_name)
+		return SLURM_ERROR;
+
+	info("removing user %s from block %s",
+	     user_name, bg_record->bg_block_id);
+#ifdef HAVE_BG_FILES
+	try {
+		Block::removeUser(bg_record->bg_block_id, user_name);
+	} catch (const bgsched::InputException& exc) {
+		rc = bridge_handle_input_errors("Block::removeUser",
+						exc.getError().toValue(),
+						bg_record);
+		if (rc != SLURM_SUCCESS)
+			return rc;
+	} catch (const bgsched::RuntimeException& exc) {
+		rc = bridge_handle_runtime_errors("Block::removeUser",
+						  exc.getError().toValue(),
+						  bg_record);
+		if (rc != SLURM_SUCCESS)
+			return rc;
+	} catch (...) {
+		error("Remove block user request failed ... continuing.");
+		rc = REMOVE_USER_ERR;
+	}
+#endif
+	return rc;
+}
+
+/* Remove every user from a block except (optionally) user_name.
+ *
+ * Returns SLURM_SUCCESS when all other users were removed,
+ * REMOVE_USER_NONE when the block had no users to remove (or the
+ * user list could not be fetched), or an error code propagated from
+ * bridge_block_remove_user(). */
+extern int bridge_block_remove_all_users(bg_record_t *bg_record,
+					 char *user_name)
+{
+	int rc = SLURM_SUCCESS;
+#ifdef HAVE_BG_FILES
+	std::vector<std::string> vec;
+#endif
+
+	if (!bridge_init(NULL))
+		return SLURM_ERROR;
+
+	if (!bg_record || !bg_record->bg_block_id)
+		return SLURM_ERROR;
+
+#ifdef HAVE_BG_FILES
+	try {
+		vec = Block::getUsers(bg_record->bg_block_id);
+	} catch (const bgsched::InputException& err) {
+		bridge_handle_input_errors(
+			"Block::getUsers",
+			err.getError().toValue(), bg_record);
+		return REMOVE_USER_NONE;
+	} catch (const bgsched::RuntimeException& err) {
+		bridge_handle_runtime_errors(
+			"Block::getUsers",
+			err.getError().toValue(), bg_record);
+		return REMOVE_USER_NONE;
+	}
+
+	if (vec.empty())
+		return REMOVE_USER_NONE;
+
+	BOOST_FOREACH(const std::string& user, vec) {
+		/* Keep the user we were asked to preserve. */
+		if (user_name && (user == user_name))
+			continue;
+		/* Bug fixes vs. the original:
+		 * 1. remove the CURRENT user from the block, not
+		 *    user_name (which is the one being kept);
+		 * 2. parenthesize the assignment -- the original
+		 *    "(rc = f() != SLURM_SUCCESS)" stored the boolean
+		 *    comparison in rc instead of the return code. */
+		if ((rc = bridge_block_remove_user(
+			     bg_record, (char *)user.c_str()))
+		    != SLURM_SUCCESS)
+			break;
+	}
+
+#endif
+	return rc;
+}
+
+/* Query the control system for all existing blocks and merge them
+ * into curr_block_list.
+ *
+ * Only blocks whose names start with "RMP" (SLURM-created blocks)
+ * are considered.  Records not already in the list are created and
+ * appended; existing records get their state, user and mloader image
+ * refreshed.  Holds block_state_mutex for the duration of the merge.
+ * Returns SLURM_SUCCESS (errors are logged, not propagated). */
+extern int bridge_blocks_load_curr(List curr_block_list)
+{
+	int rc = SLURM_SUCCESS;
+#ifdef HAVE_BG_FILES
+	Block::Ptrs vec;
+	BlockFilter filter;
+	uid_t my_uid;
+	bg_record_t *bg_record = NULL;
+
+	info("querying the system for existing blocks");
+
+	/* Get the midplane info */
+	filter.setExtendedInfo(true);
+
+	vec = getBlocks(filter, BlockSort::AnyOrder);
+	if (vec.empty()) {
+		debug("No blocks in the current system");
+		return SLURM_SUCCESS;
+	}
+
+	slurm_mutex_lock(&block_state_mutex);
+
+	BOOST_FOREACH(const Block::Ptr &block_ptr, vec) {
+		/* NOTE(review): getName() is assumed to return a
+		 * reference that outlives this iteration; if it
+		 * returns by value this c_str() pointer dangles --
+		 * confirm against the bgsched API. */
+		const char *bg_block_id = block_ptr->getName().c_str();
+		uint16_t state;
+
+		/* Skip blocks that SLURM did not create. */
+		if (strncmp("RMP", bg_block_id, 3))
+			continue;
+
+		/* find BG Block record */
+		if (!(bg_record = find_bg_record_in_list(
+			      curr_block_list, bg_block_id))) {
+			info("%s not found in the state file, adding",
+			     bg_block_id);
+			bg_record = _translate_object_to_block(block_ptr);
+			slurm_list_append(curr_block_list, bg_record);
+		}
+		bg_record->modifying = 1;
+		/* If we are in error we really just want to get the
+		   new state.
+		*/
+		state = bridge_translate_status(
+			block_ptr->getStatus().toValue());
+		if (state == BG_BLOCK_BOOTING)
+			bg_record->boot_state = 1;
+
+		/* Preserve a previously-set error flag on the record. */
+		if (bg_record->state & BG_BLOCK_ERROR_FLAG)
+			state |= BG_BLOCK_ERROR_FLAG;
+		bg_record->state = state;
+
+		debug3("Block %s is in state %s",
+		       bg_record->bg_block_id,
+		       bg_block_state_string(bg_record->state));
+
+		bg_record->job_running = NO_JOB_RUNNING;
+
+		/* we are just going to go and destroy this block so
+		   just record the name and continue. */
+		if (!bg_recover)
+			continue;
+
+		bg_record->mloaderimage =
+			xstrdup(block_ptr->getMicroLoaderImage().c_str());
+
+
+		/* If a user is on the block this will be filled in */
+		xfree(bg_record->user_name);
+		xfree(bg_record->target_name);
+		if (block_ptr->getUser() != "")
+			bg_record->user_name =
+				xstrdup(block_ptr->getUser().c_str());
+
+		/* Fall back to the SLURM user when nobody is on it. */
+		if (!bg_record->user_name)
+			bg_record->user_name =
+				xstrdup(bg_conf->slurm_user_name);
+
+		if (!bg_record->boot_state)
+			bg_record->target_name =
+				xstrdup(bg_conf->slurm_user_name);
+		else
+			bg_record->target_name = xstrdup(bg_record->user_name);
+
+		if (uid_from_string(bg_record->user_name, &my_uid) < 0)
+			error("uid_from_string(%s): %m", bg_record->user_name);
+		else
+			bg_record->user_uid = my_uid;
+	}
+
+	slurm_mutex_unlock(&block_state_mutex);
+
+#endif
+	return rc;
+}
+
+/* Queue a job-removal/reset for every block in block_list.
+ * A NULL list is silently ignored. */
+extern void bridge_reset_block_list(List block_list)
+{
+	ListIterator iter;
+	bg_record_t *record;
+
+	if (!block_list)
+		return;
+
+	iter = list_iterator_create(block_list);
+	while ((record = (bg_record_t *) list_next(iter))) {
+		info("Queue clearing of users of BG block %s",
+		     record->bg_block_id);
+		_remove_jobs_on_block_and_reset(record->bg_block_id);
+	}
+	list_iterator_destroy(iter);
+}
+
+/* Post-job cleanup hook: remove any jobs still attached to the block
+ * and reset it.  Thin wrapper over _remove_jobs_on_block_and_reset(). */
+extern void bridge_block_post_job(char *bg_block_id)
+{
+	_remove_jobs_on_block_and_reset(bg_block_id);
+}
+
+/* Configure logging for the IBM scheduler APIs (log4cxx).
+ *
+ * level selects verbosity: 0 (off) through 7 (all); any other value
+ * falls back to Debug.  Output is appended to bg_conf->bridge_api_file.
+ * NOTE(review): the api_file_name parameter is currently unused --
+ * the log path always comes from bg_conf; confirm with callers
+ * before relying on it.
+ *
+ * Removed a large block of commented-out debug code that iterated
+ * over the levels -- it duplicated the switch below and misled
+ * readers. */
+extern int bridge_set_log_params(char *api_file_name, unsigned int level)
+{
+	if (!bridge_init(NULL))
+		return SLURM_ERROR;
+
+	if (!bg_conf->bridge_api_file)
+		return SLURM_SUCCESS;
+
+#ifdef HAVE_BG_FILES
+	// Scheduler APIs use the loggers under ibm.
+	log4cxx::LoggerPtr logger_ptr(log4cxx::Logger::getLogger("ibm"));
+	// Set the pattern for output.
+	log4cxx::LayoutPtr layout_ptr(
+		new log4cxx::PatternLayout(
+			"[%d{yyyy-MM-ddTHH:mm:ss}] %p: %c: %m [%t]%n"));
+	// Set the log file
+	log4cxx::AppenderPtr appender_ptr(
+		new log4cxx::FileAppender(layout_ptr,
+					  bg_conf->bridge_api_file));
+	log4cxx::LevelPtr level_ptr;
+
+	// Get rid of the console appender.
+	logger_ptr->removeAllAppenders();
+
+	// Map SLURM's numeric debug level onto a log4cxx level.
+	switch (level) {
+	case 0:
+		level_ptr = log4cxx::Level::getOff();
+		break;
+	case 1:
+		level_ptr = log4cxx::Level::getFatal();
+		break;
+	case 2:
+		level_ptr = log4cxx::Level::getError();
+		break;
+	case 3:
+		level_ptr = log4cxx::Level::getWarn();
+		break;
+	case 4:
+		level_ptr = log4cxx::Level::getInfo();
+		break;
+	case 5:
+		level_ptr = log4cxx::Level::getDebug();
+		break;
+	case 6:
+		level_ptr = log4cxx::Level::getTrace();
+		break;
+	case 7:
+		level_ptr = log4cxx::Level::getAll();
+		break;
+	default:
+		level_ptr = log4cxx::Level::getDebug();
+		break;
+	}
+	// Now set the level of debug
+	logger_ptr->setLevel(level_ptr);
+	// Add the appender to the ibm logger.
+	logger_ptr->addAppender(appender_ptr);
+
+#endif
+	return SLURM_SUCCESS;
+}
+
+
diff --git a/src/plugins/select/bluegene/bl_bgq/bridge_status.cc b/src/plugins/select/bluegene/bl_bgq/bridge_status.cc
new file mode 100644
index 000000000..0511dd73f
--- /dev/null
+++ b/src/plugins/select/bluegene/bl_bgq/bridge_status.cc
@@ -0,0 +1,550 @@
+/*****************************************************************************\
+ *  bridge_status.cc
+ *
+ *****************************************************************************
+ *  Copyright (C) 2011 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Danny Auble <da@llnl.gov>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+extern "C" {
+#include "../ba_bgq/block_allocator.h"
+#include "../bg_core.h"
+#include "../bg_status.h"
+#include "src/slurmctld/proc_req.h"
+#include "src/slurmctld/trigger_mgr.h"
+#include "src/slurmctld/locks.h"
+}
+
+#if defined HAVE_BG_FILES
+
+#include <bgsched/bgsched.h>
+#include <bgsched/Block.h>
+#include <bgsched/NodeBoard.h>
+#include <bgsched/Hardware.h>
+#include <bgsched/core/core.h>
+#include <boost/foreach.hpp>
+#include <bgsched/realtime/Client.h>
+#include <bgsched/realtime/ClientConfiguration.h>
+#include <bgsched/realtime/ClientEventListener.h>
+#include <bgsched/realtime/Filter.h>
+#include "bridge_status.h"
+
+#include <iostream>
+
+using namespace std;
+using namespace bgsched;
+using namespace bgsched::core;
+using namespace bgsched::realtime;
+#endif
+
+static bool bridge_status_inited = false;
+
+#if defined HAVE_BG_FILES
+
+/*
+ * Handle compute block status changes as a result of a block allocate.
+ */
+/*
+ * Handle compute block status changes as a result of a block allocate.
+ *
+ * Listener registered with the bgsched real-time client; each handler
+ * below is invoked by the real-time machinery when the corresponding
+ * hardware/block event arrives.
+ */
+typedef class event_handler: public bgsched::realtime::ClientEventListener {
+public:
+	/*
+	 *  Handle a block state changed real-time event.
+	 *  (No "virtual" keyword here, but it overrides the virtual
+	 *  base method, so dispatch is still virtual.)
+	 */
+	void handleBlockStateChangedRealtimeEvent(
+		const BlockStateChangedEventInfo& event);
+
+	/*
+	 *  Handle a midplane state changed real-time event.
+	 */
+	virtual void handleMidplaneStateChangedRealtimeEvent(
+		const MidplaneStateChangedEventInfo& event);
+
+	/*
+	 * Handle a switch state changed real-time event.
+	 */
+	virtual void handleSwitchStateChangedRealtimeEvent(
+		const SwitchStateChangedEventInfo& event);
+
+	/*
+	 * Handle a node board state changed real-time event.
+	 */
+	virtual void handleNodeBoardStateChangedRealtimeEvent(
+		const NodeBoardStateChangedEventInfo& event);
+
+	// /*
+	//  * Handle a cable state changed real-time event.
+	//  */
+	// virtual void handleCableStateChangedRealtimeEvent(
+	// 	const CableStateChangedEventInfo& event);
+
+} event_handler_t;
+
+/* Jobs queued for termination by the status handlers. */
+static List kill_job_list = NULL;
+/* Thread listening for real-time events from the control system. */
+static pthread_t real_time_thread;
+/* Fallback thread that polls while real-time events are unavailable. */
+static pthread_t poll_thread;
+/* Real-time client; allocated in bridge_status_init(). */
+static bgsched::realtime::Client *rt_client_ptr = NULL;
+/* Serializes the real-time and polling threads. */
+pthread_mutex_t rt_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+/* Drain the midplane owning a switch that left the Available state.
+ * dim is the switch dimension, mp_coords the midplane coordinate
+ * string appended to the node prefix to form the node name. */
+static void _handle_bad_switch(int dim, const char *mp_coords,
+			       EnumWrapper<Hardware::State> state)
+{
+	char bg_down_node[128];
+
+	assert(mp_coords);
+
+	snprintf(bg_down_node, sizeof(bg_down_node), "%s%s",
+		 bg_conf->slurm_node_prefix, mp_coords);
+
+	/* Already marked down -- nothing more to do. */
+	if (node_already_down(bg_down_node))
+		return;
+
+	error("Switch at dim '%d' on Midplane %s, state went to %d, "
+	      "marking midplane down.",
+	      dim, bg_down_node, state.toValue());
+	slurm_drain_nodes(bg_down_node,
+			  (char *)"select_bluegene: MMCS switch not UP",
+			  slurm_get_slurm_user_id());
+}
+
+/* Put the ionodes backed by a failed nodeboard into an error state.
+ *
+ * nb_name is the nodeboard id (e.g. "N03" -- the digits after the
+ * first character index it), mp_coords the owning midplane's
+ * coordinate string.  Nodeboards outside the configured ionode range
+ * (sub-midplane systems) are skipped. */
+static void _handle_bad_nodeboard(const char *nb_name, const char* mp_coords,
+				  EnumWrapper<Hardware::State> state)
+{
+	char bg_down_node[128];
+	int io_start;
+
+	assert(nb_name);
+	assert(mp_coords);
+
+	/* From the first nodecard id we can figure
+	   out where to start from with the alloc of ionodes.
+	*/
+	io_start = atoi((char*)nb_name+1);
+	io_start *= bg_conf->io_ratio;
+
+	/* On small systems with less than a midplane the
+	   database may see the nodecards there but in missing
+	   state.  To avoid getting a bunch of warnings here just
+	   skip over the ones missing.
+	*/
+	if (io_start >= bg_conf->ionodes_per_mp) {
+		if (state == Hardware::Missing)
+			debug3("Nodeboard %s is missing",
+			       nb_name);
+		else
+			error("We don't have the system configured "
+			      "for this nodecard %s, we only have "
+			      "%d ionodes and this starts at %d",
+			      nb_name, bg_conf->ionodes_per_mp, io_start);
+		return;
+	}
+
+	/* if (!ionode_bitmap) */
+	/* 	ionode_bitmap = bit_alloc(bg_conf->ionodes_per_mp); */
+	/* info("setting %s start %d of %d", */
+	/*      nb_name,  io_start, bg_conf->ionodes_per_mp); */
+	/* bit_nset(ionode_bitmap, io_start, io_start+io_cnt); */
+
+	/* we have to handle each nodecard separately to make
+	   sure we don't create holes in the system */
+	snprintf(bg_down_node, sizeof(bg_down_node), "%s%s",
+		 bg_conf->slurm_node_prefix, mp_coords);
+
+	if (down_nodecard(bg_down_node, io_start, 0) == SLURM_SUCCESS)
+		debug("nodeboard %s on %s is in an error state (%d)",
+		      nb_name, bg_down_node, state.toValue());
+	else
+		debug2("nodeboard %s on %s is in an error state (%d), "
+		       "but error was returned when trying to make it so",
+		       nb_name, bg_down_node, state.toValue());
+	return;
+}
+
+/* Real-time callback: a block changed state.  Look up the matching
+ * SLURM record, refresh its state (queueing job kills as needed), and
+ * bump last_bg_update so pollers notice the change. */
+void event_handler::handleBlockStateChangedRealtimeEvent(
+        const BlockStateChangedEventInfo& event)
+{
+	bg_record_t *bg_record = NULL;
+	/* NOTE(review): assumes getBlockName() returns a reference
+	 * that outlives this function; if it returns by value this
+	 * c_str() pointer dangles -- confirm against the bgsched API. */
+	const char *bg_block_id = event.getBlockName().c_str();
+
+	if (!bg_lists->main)
+		return;
+
+	slurm_mutex_lock(&block_state_mutex);
+	bg_record = find_bg_record_in_list(bg_lists->main, bg_block_id);
+	if (!bg_record) {
+		slurm_mutex_unlock(&block_state_mutex);
+		info("bridge_status: bg_record %s isn't in the main list",
+		     bg_block_id);
+		return;
+	}
+
+	bg_status_update_block_state(bg_record,
+				     bridge_translate_status(event.getStatus()),
+				     kill_job_list);
+
+	slurm_mutex_unlock(&block_state_mutex);
+
+	/* Kill-list processing happens outside the block lock. */
+	bg_status_process_kill_job_list(kill_job_list);
+
+	last_bg_update = time(NULL);
+}
+
+/* Real-time callback: a midplane changed state.
+ * Intentionally a no-op for now; kept so the listener satisfies the
+ * ClientEventListener interface. */
+void event_handler::handleMidplaneStateChangedRealtimeEvent(
+	const MidplaneStateChangedEventInfo& event)
+{
+//	const char *midplane = event.getMidplaneId().c_str();
+
+}
+
+/* Real-time callback: a switch changed state.
+ *
+ * If the switch returned to Available we only log it; otherwise the
+ * owning midplane is drained via _handle_bad_switch(). */
+void event_handler::handleSwitchStateChangedRealtimeEvent(
+	const SwitchStateChangedEventInfo& event)
+{
+	const char *mp_name = event.getMidplaneLocation().c_str();
+	int dim = event.getDimension();
+	ba_mp_t *ba_mp = loc2ba_mp(mp_name);
+
+	if (!ba_mp) {
+		error("Switch in dim '%d' on Midplane %s, state "
+		      "went from %d to %d, but is not in our system",
+		      dim, mp_name,
+		      event.getPreviousState(),
+		      event.getState());
+		/* Bug fix: the original fell through and dereferenced
+		 * the NULL ba_mp below.  Nothing more can be done for
+		 * a midplane we do not track. */
+		return;
+	}
+
+	if (event.getState() == Hardware::Available) {
+		/* Don't do anything, wait for admin to fix things,
+		 * just note things are better. */
+
+		info("Switch in dim '%u' on Midplane %s, "
+		     "has returned to service",
+		     dim, mp_name);
+		return;
+	}
+
+	/* Else mark the midplane down */
+	_handle_bad_switch(dim, ba_mp->coord_str, event.getState());
+
+	return;
+}
+
+/* Real-time callback: a nodeboard changed state.
+ *
+ * If the board returned to Available we only log it; otherwise its
+ * ionodes are put in an error state via _handle_bad_nodeboard(). */
+void event_handler::handleNodeBoardStateChangedRealtimeEvent(
+	const NodeBoardStateChangedEventInfo& event)
+{
+	/* Bug fix: substr() returns a temporary std::string; taking
+	 * c_str() from it directly (as the original did) left mp_name
+	 * and nb_name dangling once the temporaries were destroyed.
+	 * Keep named copies alive for the whole function instead. */
+	std::string mp_str = event.getLocation().substr(0, 6);
+	std::string nb_str = event.getLocation().substr(7, 3);
+	const char *mp_name = mp_str.c_str();
+	const char *nb_name = nb_str.c_str();
+	ba_mp_t *ba_mp = loc2ba_mp(mp_name);
+
+	if (!ba_mp) {
+		error("Nodeboard '%s' on Midplane %s, state went from %d to %d,"
+		      "but is not in our system",
+		      nb_name, mp_name,
+		      event.getPreviousState(),
+		      event.getState());
+		/* Bug fix: the original fell through and dereferenced
+		 * the NULL ba_mp below. */
+		return;
+	}
+
+	if (event.getState() == Hardware::Available) {
+		/* Don't do anything, wait for admin to fix things,
+		 * just note things are better. */
+
+		info("Nodeboard '%s' on Midplane %s(%s), "
+		     "has returned to service",
+		     nb_name, mp_name, ba_mp->coord_str);
+		return;
+	}
+
+	_handle_bad_nodeboard(nb_name, ba_mp->coord_str, event.getState());
+
+	return;
+}
+
+/* Connect the real-time client, retrying every sleep_value seconds
+ * until it succeeds or the plugin is shut down.  Returns
+ * SLURM_SUCCESS once connected, SLURM_ERROR if shutdown intervened. */
+static int _real_time_connect(void)
+{
+	int rc = SLURM_ERROR;
+	int count;
+	const int sleep_value = 5;
+
+	for (count = 0; bridge_status_inited && (rc != SLURM_SUCCESS);
+	     count++) {
+		try {
+			rt_client_ptr->connect();
+			rc = SLURM_SUCCESS;
+		} catch (...) {
+			rc = SLURM_ERROR;
+			error("couldn't connect to the real_time server, "
+			      "trying for %d seconds.", count * sleep_value);
+			sleep(sleep_value);
+		}
+	}
+
+	return rc;
+}
+
+/* Real-time listener thread.
+ *
+ * Registers a filter for nodeboard, switch and block events, then
+ * loops receiving messages while holding rt_mutex (so the polling
+ * thread stays idle whenever real-time is healthy).  On disconnect it
+ * releases the mutex -- letting _poll() take over -- and tries to
+ * reconnect.  Exits when bridge_status_inited goes false. */
+static void *_real_time(void *no_data)
+{
+	event_handler_t event_hand;
+	int rc = SLURM_SUCCESS;
+	bool failed = false;
+	Filter::BlockStatuses block_statuses;
+  	Filter rt_filter(Filter::createNone());
+
+	rt_filter.setNodeBoards(true);
+	rt_filter.setSwitches(true);
+	rt_filter.setBlocks(true);
+
+	/* Only these block states are interesting to SLURM. */
+	block_statuses.insert(Block::Free);
+	block_statuses.insert(Block::Booting);
+	block_statuses.insert(Block::Initialized);
+	block_statuses.insert(Block::Terminating);
+	rt_filter.setBlockStatuses(&block_statuses);
+
+ 	// rt_filter.get().setMidplanes(true);
+ 	// rt_filter.get().setCables(true);
+
+	rt_client_ptr->addListener(event_hand);
+
+	rc = _real_time_connect();
+
+	while (bridge_status_inited) {
+		bgsched::realtime::Filter::Id filter_id; // Assigned filter id
+
+		slurm_mutex_lock(&rt_mutex);
+		/* Re-check after acquiring the lock: fini() may have
+		 * flipped the flag while we were blocked. */
+		if (!bridge_status_inited) {
+			slurm_mutex_unlock(&rt_mutex);
+			break;
+		}
+
+		if (rc == SLURM_SUCCESS) {
+			rt_client_ptr->setFilter(rt_filter, &filter_id, NULL);
+			rt_client_ptr->requestUpdates(NULL);
+			/* Blocks here dispatching events until the
+			 * connection drops or fini() disconnects us. */
+			rt_client_ptr->receiveMessages(NULL, NULL, &failed);
+		} else
+			failed = true;
+
+		slurm_mutex_unlock(&rt_mutex);
+
+		if (bridge_status_inited && failed) {
+			error("Disconnected from real-time events. "
+			      "Will try to reconnect.");
+			rc = _real_time_connect();
+			if (rc == SLURM_SUCCESS) {
+				info("real-time server connected again");
+				failed = false;
+			}
+		}
+	}
+	return NULL;
+}
+
+/* Poll the control system for the state of every block SLURM knows
+ * about.  Blocks that no longer exist are dropped from bg_lists->main;
+ * state changes queue job kills and bump last_bg_update.  Holds
+ * block_state_mutex while walking the list. */
+static void _do_block_poll(void)
+{
+#if defined HAVE_BG_FILES
+	bg_record_t *bg_record;
+	ListIterator itr;
+	int updated = 0;
+
+	if (!bg_lists->main)
+		return;
+
+	slurm_mutex_lock(&block_state_mutex);
+	itr = list_iterator_create(bg_lists->main);
+	while ((bg_record = (bg_record_t *) list_next(itr))) {
+		BlockFilter filter;
+		Block::Ptrs vec;
+
+		/* Skip records that are invalid or have no id yet. */
+		if ((bg_record->magic != BLOCK_MAGIC)
+		    || !bg_record->bg_block_id)
+			continue;
+
+		filter.setName(string(bg_record->bg_block_id));
+
+		vec = getBlocks(filter, BlockSort::AnyOrder);
+		if (vec.empty()) {
+			debug("block %s not found, removing "
+			      "from slurm", bg_record->bg_block_id);
+			list_remove(itr);
+			destroy_bg_record(bg_record);
+			continue;
+		}
+		const Block::Ptr &block_ptr = *(vec.begin());
+
+		if (bg_status_update_block_state(
+			    bg_record,
+			    bridge_translate_status(
+				    block_ptr->getStatus().toValue()),
+			    kill_job_list))
+			updated = 1;
+	}
+	slurm_mutex_unlock(&block_state_mutex);
+
+	/* Kill-list processing happens outside the block lock. */
+	bg_status_process_kill_job_list(kill_job_list);
+
+	if (updated == 1)
+		last_bg_update = time(NULL);
+
+#endif
+}
+
+/* Inspect one midplane's hardware: check each of its nodeboards and
+ * each dimension's switch, flagging anything not Available via
+ * _handle_bad_nodeboard()/_handle_bad_switch().
+ * NOTE(review): the loop assumes 16 nodeboards per midplane --
+ * confirm this matches the hardware description the API exposes. */
+static void _handle_midplane_update(ComputeHardware::ConstPtr bgq,
+				    ba_mp_t *ba_mp)
+{
+	Coordinates::Coordinates coords(ba_mp->coord[A], ba_mp->coord[X],
+					ba_mp->coord[Y], ba_mp->coord[Z]);
+	Midplane::ConstPtr mp_ptr = bgq->getMidplane(coords);
+	int i;
+	Dimension dim;
+
+	for (i=0; i<16; i++) {
+		NodeBoard::ConstPtr nodeboard = mp_ptr->getNodeBoard(i);
+		if (nodeboard->getState() != Hardware::Available)
+			_handle_bad_nodeboard(
+				nodeboard->getLocation().substr(7,3).c_str(),
+				ba_mp->coord_str, nodeboard->getState());
+	}
+
+	for (dim=Dimension::A; dim<=Dimension::D; dim++) {
+		Switch::ConstPtr my_switch = mp_ptr->getSwitch(dim);
+		if (my_switch->getState() != Hardware::Available)
+			_handle_bad_switch(dim,
+					   ba_mp->coord_str,
+					   my_switch->getState());
+	}
+}
+
+/* Walk every midplane in the system grid and refresh its hardware
+ * status via _handle_midplane_update().  No-op until the block
+ * allocator grid (ba_main_grid) exists. */
+static void _do_hardware_poll(void)
+{
+#if defined HAVE_BG_FILES
+	if (!ba_main_grid)
+		return;
+
+	ComputeHardware::ConstPtr bgq = getComputeHardware();
+
+	for (int a = 0; a < DIM_SIZE[A]; a++)
+		for (int x = 0; x < DIM_SIZE[X]; x++)
+			for (int y = 0; y < DIM_SIZE[Y]; y++)
+				for (int z = 0; z < DIM_SIZE[Z]; z++)
+					_handle_midplane_update(
+						bgq, &ba_main_grid[a][x][y][z]);
+#endif
+}
+/* Polling fallback thread.
+ *
+ * Blocks on rt_mutex, which the real-time thread holds while it is
+ * healthy -- so this loop only makes progress when real-time events
+ * are unavailable.  Polls block state every second and hardware state
+ * every 30 seconds.  Exits when bridge_status_inited goes false. */
+static void *_poll(void *no_data)
+{
+	event_handler_t event_hand;
+	time_t last_ran = time(NULL);
+	time_t curr_time;
+
+	while (bridge_status_inited) {
+		//debug("polling waiting until realtime dies");
+		slurm_mutex_lock(&rt_mutex);
+		/* Re-check after acquiring the lock: fini() may have
+		 * flipped the flag while we were blocked. */
+		if (!bridge_status_inited) {
+			slurm_mutex_unlock(&rt_mutex);
+			break;
+		}
+		//debug("polling taking over, realtime is dead");
+		curr_time = time(NULL);
+		_do_block_poll();
+		/* only do every 30 seconds */
+		if ((curr_time - 30) >= last_ran)
+			_do_hardware_poll();
+
+		slurm_mutex_unlock(&rt_mutex);
+		last_ran = time(NULL);
+		sleep(1);
+	}
+	return NULL;
+}
+
+#endif
+
+/* Start the status machinery: create the kill-job list, the real-time
+ * client, and the real-time + polling threads.  Returns SLURM_ERROR
+ * if already initialized, SLURM_SUCCESS otherwise. */
+extern int bridge_status_init(void)
+{
+	if (bridge_status_inited)
+		return SLURM_ERROR;
+
+	bridge_status_inited = true;
+
+#if defined HAVE_BG_FILES
+	pthread_attr_t thread_attr;
+
+	if (!kill_job_list)
+		kill_job_list = bg_status_create_kill_job_list();
+
+	rt_client_ptr = new(bgsched::realtime::Client);
+
+	/* Fix: the original called slurm_attr_init() twice on the same
+	 * attr with only one destroy, leaking the first initialization
+	 * (re-initializing a live pthread_attr_t is undefined per
+	 * POSIX).  Destroy after each use instead. */
+	slurm_attr_init(&thread_attr);
+	if (pthread_create(&real_time_thread, &thread_attr, _real_time, NULL))
+		fatal("pthread_create error %m");
+	slurm_attr_destroy(&thread_attr);
+
+	slurm_attr_init(&thread_attr);
+	if (pthread_create(&poll_thread, &thread_attr, _poll, NULL))
+		fatal("pthread_create error %m");
+	slurm_attr_destroy(&thread_attr);
+#endif
+	return SLURM_SUCCESS;
+}
+
+/* Shut down the status machinery: disconnect the real-time client so
+ * receiveMessages() returns, then join both threads and release
+ * resources.  Returns SLURM_ERROR if not initialized. */
+extern int bridge_status_fini(void)
+{
+	if (!bridge_status_inited)
+		return SLURM_ERROR;
+
+	/* Flip the flag first so both thread loops exit. */
+	bridge_status_inited = false;
+#if defined HAVE_BG_FILES
+	/* make the rt connection end. */
+	rt_client_ptr->disconnect();
+
+	/* NOTE(review): kill_job_list is destroyed before the threads
+	 * are joined, and rt_mutex (a static PTHREAD_MUTEX_INITIALIZER
+	 * mutex) is destroyed below while a thread could still hold
+	 * it -- confirm the join ordering makes this safe. */
+	if (kill_job_list) {
+		list_destroy(kill_job_list);
+		kill_job_list = NULL;
+	}
+
+	if (real_time_thread) {
+		pthread_join(real_time_thread, NULL);
+		real_time_thread = 0;
+	}
+
+	if (poll_thread) {
+		pthread_join(poll_thread, NULL);
+		poll_thread = 0;
+	}
+	pthread_mutex_destroy(&rt_mutex);
+	delete(rt_client_ptr);
+#endif
+	return SLURM_SUCCESS;
+}
+
+/*
+ * This could potentially lock the node lock in the slurmctld with
+ * slurm_drain_node, so if slurmctld_locked is called we will call the
+ * drainning function without locking the lock again.
+ *
+ * Currently a stub: always reports success without inspecting any
+ * midplane state (both parameters are unused).
+ */
+extern int bridge_block_check_mp_states(char *bg_block_id,
+					bool slurmctld_locked)
+{
+	return SLURM_SUCCESS;
+}
diff --git a/src/plugins/select/bluegene/bl_bgq/bridge_status.h b/src/plugins/select/bluegene/bl_bgq/bridge_status.h
new file mode 100644
index 000000000..dcc8c5f23
--- /dev/null
+++ b/src/plugins/select/bluegene/bl_bgq/bridge_status.h
@@ -0,0 +1,47 @@
+/*****************************************************************************\
+ *  bridge_status.h
+ *
+ *****************************************************************************
+ *  Copyright (C) 2011 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Danny Auble <da@llnl.gov>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifndef _BRIDGE_STATUS_H_
+#define _BRIDGE_STATUS_H_
+
+#include "bridge_helper.h"
+
+extern int bridge_status_init(void);
+extern int bridge_status_fini(void);
+
+#endif
diff --git a/src/plugins/select/bluegene/block_allocator/Makefile.am b/src/plugins/select/bluegene/block_allocator/Makefile.am
deleted file mode 100644
index 3a1bf6708..000000000
--- a/src/plugins/select/bluegene/block_allocator/Makefile.am
+++ /dev/null
@@ -1,29 +0,0 @@
-# Makefile.am for bluegene_block_allocator
-
-AUTOMAKE_OPTIONS = foreign
-CLEANFILES = core.*
-
-INCLUDES = -I$(top_srcdir) $(BG_INCLUDES)
-
-# making a .la
-
-noinst_LTLIBRARIES = libbluegene_block_allocator.la
-libbluegene_block_allocator_la_SOURCES =    \
-	block_allocator.c bridge_linker.c block_allocator.h bridge_linker.h
-
-libbluegene_block_allocator_la_LDFLAGS        = \
-	$(LIB_LDFLAGS) -lm
-
-if BLUEGENE_LOADED
-
-#to build the debug executable
-noinst_PROGRAMS = wire_test
-
-wire_testSOURCES = wire_test.c block_allocator.h
-
-wire_test_LDADD = libbluegene_block_allocator.la \
-	$(top_builddir)/src/api/libslurm.o -ldl
-
-wire_test_LDFLAGS = -export-dynamic -lm $(CMD_LDFLAGS)
-
-endif
diff --git a/src/plugins/select/bluegene/block_allocator/block_allocator.h b/src/plugins/select/bluegene/block_allocator/block_allocator.h
deleted file mode 100644
index f612bade9..000000000
--- a/src/plugins/select/bluegene/block_allocator/block_allocator.h
+++ /dev/null
@@ -1,530 +0,0 @@
-/*****************************************************************************\
- *  block_allocator.h
- *
- *****************************************************************************
- *  Copyright (C) 2004 The Regents of the University of California.
- *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
- *  Written by Dan Phung <phung4@llnl.gov>, Danny Auble <da@llnl.gov>
- *
- *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
- *  Please also read the included file: DISCLAIMER.
- *
- *  SLURM is free software; you can redistribute it and/or modify it under
- *  the terms of the GNU General Public License as published by the Free
- *  Software Foundation; either version 2 of the License, or (at your option)
- *  any later version.
- *
- *  In addition, as a special exception, the copyright holders give permission
- *  to link the code of portions of this program with the OpenSSL library under
- *  certain conditions as described in each individual source file, and
- *  distribute linked combinations including the two. You must obey the GNU
- *  General Public License in all respects for all of the code used other than
- *  OpenSSL. If you modify file(s) with this exception, you may extend this
- *  exception to your version of the file(s), but you are not obligated to do
- *  so. If you do not wish to do so, delete this exception statement from your
- *  version.  If you delete this exception statement from all source files in
- *  the program, then also delete it here.
- *
- *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
- *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
- *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
- *  details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with SLURM; if not, write to the Free Software Foundation, Inc.,
- *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
-\*****************************************************************************/
-
-#ifndef _BLOCK_ALLOCATOR_H_
-#define _BLOCK_ALLOCATOR_H_
-
-#include "bridge_linker.h"
-
-// #define DEBUG_PA
-#define BIG_MAX 9999
-#define BUFSIZE 4096
-
-#define NUM_PORTS_PER_NODE 6
-
-#define PASS_DENY_X 0x0001
-#define PASS_DENY_Y 0x0002
-#define PASS_DENY_Z 0x0004
-#define PASS_DENY_ALL 0x00ff
-
-#define PASS_FOUND_X 0x0100
-#define PASS_FOUND_Y 0x0200
-#define PASS_FOUND_Z 0x0400
-#define PASS_FOUND_ANY 0xff00
-
-extern bool _initialized;
-
-enum {X, Y, Z};
-
-/* */
-
-/*
- * structure that holds switch path information for finding the wiring
- * path without setting the configuration.
- *
- * - dim      - Which Axis it is on
- * - geometry - node location
- * - in       - ingress port.
- * - out      - egress port.
- *
- */
-typedef struct {
-	int dim;
-	uint16_t geometry[HIGHEST_DIMENSIONS];
-	int in;
-	int out;
-} ba_path_switch_t;
-
-/*
- * structure that holds the configuration settings for each request
- */
-typedef struct {
-	bitstr_t *avail_node_bitmap;   /* pointer to available nodes */
-#ifdef HAVE_BGL
-	char *blrtsimage;              /* BlrtsImage for this block */
-#endif
-	int conn_type;                 /* mesh, torus, or small */
-	bool elongate;                 /* whether allow elongation or not */
-	int elongate_count;            /* place in elongate_geos list
-					  we are at */
-	List elongate_geos;            /* list of possible shapes of
-					  blocks. contains int* ptrs */
-	uint16_t geometry[HIGHEST_DIMENSIONS]; /* size of block in geometry */
-	char *linuximage;              /* LinuxImage for this block */
-	char *mloaderimage;            /* mloaderImage for this block */
-	uint16_t deny_pass;            /* PASSTHROUGH_FOUND is set if there are
-					  passthroughs in the block
-					  created you can deny
-					  passthroughs by setting the
-					  appropriate bits*/
-	int procs;                     /* Number of Real processors in
-					  block */
-	char *ramdiskimage;            /* RamDiskImage for this block */
-	bool rotate;                   /* whether allow elongation or not */
-	int rotate_count;              /* number of times rotated */
-	char *save_name;               /* name of blocks in midplanes */
-	int size;                      /* count of midplanes in block */
-	int small32;                   /* number of blocks using 32 cnodes in
-					* block, only used for small
-					* block creation */
-	int small128;                  /* number of blocks using 128 cnodes in
-					* block, only used for small
-					* block creation */
-#ifndef HAVE_BGL
-	int small16;                   /* number of blocks using 16 cnodes in
-					* block, only used for small
-					* block creation */
-	int small64;                   /* number of blocks using 64 cnodes in
-					* block, only used for small
-					* block creation */
-	int small256;                  /* number of blocks using 256 cnodes in
-					* block, only used for small
-					* block creation */
-#endif
-	uint16_t start[HIGHEST_DIMENSIONS]; /* where to start creation of
-					    block */
-	int start_req;                 /* state there was a start
-					  request */
-} ba_request_t;
-
-/* structure filled in from reading bluegene.conf file for block
- * creation */
-typedef struct {
-	char *block;                   /* Hostlist of midplanes in the
-					  block */
-	int conn_type;                 /* mesh, torus, or small */
-#ifdef HAVE_BGL
-	char *blrtsimage;              /* BlrtsImage for this block */
-#endif
-	char *linuximage;              /* LinuxImage for this block */
-	char *mloaderimage;            /* mloaderImage for this block */
-	char *ramdiskimage;            /* RamDiskImage for this block */
-	uint16_t small32;                   /* number of blocks using 32 cnodes in
-					* block, only used for small
-					* block creation */
-	uint16_t small128;             /* number of blocks using 128 cnodes in
-					* block, only used for small
-					* block creation */
-#ifndef HAVE_BGL
-	uint16_t small16;              /* number of blocks using 16 cnodes in
-					* block, only used for small
-					* block creation */
-	uint16_t small64;                   /* number of blocks using 64 cnodes in
-					* block, only used for small
-					* block creation */
-	uint16_t small256;             /* number of blocks using 256 cnodes in
-					* block, only used for small
-					* block creation */
-#endif
-} blockreq_t;
-
-/* structure filled in from reading bluegene.conf file for specifying
- * images */
-typedef struct {
-	bool def;                      /* Whether image is the default
-					  image or not */
-	List groups;                   /* list of groups able to use
-					* the image contains
-					* image_group_t's */
-	char *name;                    /* Name of image */
-} image_t;
-
-typedef struct {
-	char *name;
-	gid_t gid;
-} image_group_t;
-
-/*
- * structure that holds the configuration settings for each connection
- *
- * - node_tar - coords of where the next hop is externally
- *              interanlly - nothing.
- *              exteranlly - location of next hop.
- * - port_tar - which port the connection is going to
- *              interanlly - always going to something within the switch.
- *              exteranlly - always going to the next hop outside the switch.
- * - used     - weather or not the connection is used.
- *
- */
-typedef struct
-{
-	/* target label */
-	uint16_t node_tar[HIGHEST_DIMENSIONS];
-	/* target port */
-	int port_tar;
-	bool used;
-} ba_connection_t;
-
-/*
- * structure that holds the configuration settings for each switch
- * which pretty much means the wiring information
- * - int_wire - keeps details of where the wires are attached
- *   interanlly.
- * - ext_wire - keeps details of where the wires are attached
- *   exteranlly.
- *
- */
-typedef struct
-{
-	ba_connection_t int_wire[NUM_PORTS_PER_NODE];
-	ba_connection_t ext_wire[NUM_PORTS_PER_NODE];
-} ba_switch_t;
-
-/*
- * ba_node_t: node within the allocation system.
- */
-typedef struct {
-	/* a switch for each dimensions */
-	ba_switch_t axis_switch[HIGHEST_DIMENSIONS];
-	/* coordinates of midplane */
-	uint16_t coord[HIGHEST_DIMENSIONS];
-	/* color of letter used in smap */
-	int color;
-	/* midplane index used for easy look up of the miplane */
-	int index;
-	/* letter used in smap */
-	char letter;
-//	int phys_x;	// no longer needed
-	int state;
-	/* set if using this midplane in a block */
-	uint16_t used;
-} ba_node_t;
-
-typedef struct {
-	/* total number of procs on the system */
-	int num_of_proc;
-
-	/* made to hold info about a system, which right now is only a
-	 * grid of ba_nodes*/
-	ba_node_t ***grid;
-} ba_system_t;
-
-/* Used to Keep track of where the Base Blocks are at all times
-   Rack and Midplane is the bp_id and XYZ is the coords.
-*/
-typedef struct {
-	char *bp_id;
-	uint16_t coord[HIGHEST_DIMENSIONS];
-} ba_bp_map_t;
-
-/* Global */
-extern my_bluegene_t *bg;
-extern List bp_map_list; /* list used for conversion from XYZ to Rack
-			  * midplane */
-extern char letters[62]; /* complete list of letters used in smap */
-extern char colors[6]; /* index into colors used for smap */
-extern uint16_t DIM_SIZE[HIGHEST_DIMENSIONS]; /* how many midplanes in
-					  * each dimension */
-extern s_p_options_t bg_conf_file_options[]; /* used to parse the
-					      * bluegene.conf file. */
-extern uint16_t ba_deny_pass;
-extern ba_system_t *ba_system_ptr;
-
-/* must xfree return of this */
-extern char *ba_passthroughs_string(uint16_t passthrough);
-
-/* Parse a block request from the bluegene.conf file */
-extern int parse_blockreq(void **dest, slurm_parser_enum_t type,
-			  const char *key, const char *value,
-			  const char *line, char **leftover);
-
-extern void destroy_blockreq(void *ptr);
-
-/* Parse imagine information from blugene.conf file */
-extern int parse_image(void **dest, slurm_parser_enum_t type,
-		       const char *key, const char *value,
-		       const char *line, char **leftover);
-
-extern void destroy_image_group_list(void *ptr);
-extern void destroy_image(void *ptr);
-extern void destroy_ba_node(void *ptr);
-
-/*
- * create a block request.  Note that if the geometry is given,
- * then size is ignored.  If elongate is true, the algorithm will try
- * to fit that a block of cubic shape and then it will try other
- * elongated geometries.  (ie, 2x2x2 -> 4x2x1 -> 8x1x1).
- *
- * IN/OUT - ba_request: structure to allocate and fill in.
- *
- * ALL below IN's need to be set within the ba_request before the call
- * if you want them to be used.
- * ALL below OUT's are set and returned within the ba_request.
- * IN - avail_node_bitmap: bitmap of usable midplanes.
- * IN - blrtsimage: BlrtsImage for this block if not default
- * IN - conn_type: connection type of request (TORUS or MESH or SMALL)
- * IN - elongate: if true, will try to fit different geometries of
- *      same size requests
- * IN/OUT - geometry: requested/returned geometry of block
- * IN - linuximage: LinuxImage for this block if not default
- * IN - mloaderimage: MLoaderImage for this block if not default
- * OUT - passthroughs: if there were passthroughs used in the
- *       generation of the block.
- * IN - procs: Number of real processors requested
- * IN - RamDiskimage: RamDiskImage for this block if not default
- * IN - rotate: if true, allows rotation of block during fit
- * OUT - save_name: hostlist of midplanes used in block
- * IN/OUT - size: requested/returned count of midplanes in block
- * IN - start: geo location of where to start the allocation
- * IN - start_req: if set use the start variable to start at
- * return success of allocation/validation of params
- */
-extern int new_ba_request(ba_request_t* ba_request);
-
-/*
- * delete a block request
- */
-extern void delete_ba_request(void *arg);
-
-/*
- * empty a list that we don't want to destroy the memory of the
- * elements always returns 1
-*/
-extern int empty_null_destroy_list(void *arg, void *key);
-
-/*
- * print a block request
- */
-extern void print_ba_request(ba_request_t* ba_request);
-
-/*
- * Initialize internal structures by either reading previous block
- * configurations from a file or by running the graph solver.
- *
- * IN: node_info_msg_t * can be null,
- *     should be from slurm_load_node().
- * IN: load_bridge: whiether or not to get bridge information
- *
- * return: void.
- */
-extern void ba_init(node_info_msg_t *node_info_ptr, bool load_bridge);
-
-/* If emulating a system set up a known configuration for wires in a
- * system of the size given.
- * If a real bluegene system, query the system and get all wiring
- * information of the system.
- */
-extern void init_wires();
-
-/*
- * destroy all the internal (global) data structs.
- */
-extern void ba_fini();
-extern void set_ba_debug_flags(uint32_t debug_flags);
-
-/*
- * set the node in the internal configuration as in, or not in use,
- * along with the current state of the node.
- *
- * IN ba_node: ba_node_t to update state
- * IN state: new state of ba_node_t
- */
-extern void ba_update_node_state(ba_node_t *ba_node, uint16_t state);
-
-/*
- * copy info from a ba_node, a direct memcpy of the ba_node_t
- *
- * IN ba_node: node to be copied
- * Returned ba_node_t *: copied info must be freed with destroy_ba_node
- */
-extern ba_node_t *ba_copy_node(ba_node_t *ba_node);
-
-/*
- * copy the path of the nodes given
- *
- * IN nodes List of ba_node_t *'s: nodes to be copied
- * OUT dest_nodes List of ba_node_t *'s: filled in list of nodes
- * wiring.
- * Return on success SLURM_SUCCESS, on error SLURM_ERROR
- */
-extern int copy_node_path(List nodes, List *dest_nodes);
-
-/*
- * Try to allocate a block.
- *
- * IN - ba_request: allocation request
- * OUT - results: List of results of the allocation request.  Each
- * list entry will be a coordinate.  allocate_block will create the
- * list, but the caller must destroy it.
- *
- * return: success or error of request
- */
-extern int allocate_block(ba_request_t* ba_request, List results);
-
-/*
- * Admin wants to remove a previous allocation.
- * will allow Admin to delete a previous allocation retrival by letter code.
- */
-extern int remove_block(List nodes, int new_count, int conn_type);
-
-/*
- * Admin wants to change something about a previous allocation.
- * will allow Admin to change previous allocation by giving the
- * letter code for the allocation and the variable to alter
- * (Not currently used in the system, update this if it is)
- */
-extern int alter_block(List nodes, int conn_type);
-
-/*
- * After a block is deleted or altered following allocations must
- * be redone to make sure correct path will be used in the real system
- * (Not currently used in the system, update this if it is)
- */
-extern int redo_block(List nodes, uint16_t *geo, int conn_type, int new_count);
-
-/*
- * Used to set a block into a virtual system.  The system can be
- * cleared first and this function sets all the wires and midplanes
- * used in the nodelist given.  The nodelist is a list of ba_node_t's
- * that are already set up.  This is very handly to test if there are
- * any passthroughs used by one block when adding another block that
- * also uses those wires, and neither use any overlapping
- * midplanes. Doing a simple bitmap & will not reveal this.
- *
- * Returns SLURM_SUCCESS if nodelist fits into system without
- * conflict, and SLURM_ERROR if nodelist conflicts with something
- * already in the system.
- */
-extern int check_and_set_node_list(List nodes);
-
-/*
- * Used to find, and set up midplanes and the wires in the virtual
- * system and return them in List results
- *
- * IN/OUT results - a list with a NULL destroyer filled in with
- *        midplanes and wires set to create the block with the api. If
- *        only interested in the hostlist NULL can be excepted also.
- * IN start - where to start the allocation.
- * IN geometry - the requested geometry of the block.
- * IN conn_type - mesh, torus, or small.
- * RET char * - hostlist of midplanes results represent must be
- *     xfreed.  NULL on failure
- */
-extern char *set_bg_block(List results, uint16_t *start,
-			  uint16_t *geometry, int conn_type);
-
-/*
- * Resets the virtual system to a virgin state.  If track_down_nodes is set
- * then those midplanes are not set to idle, but kept in a down state.
- */
-extern int reset_ba_system(bool track_down_nodes);
-
-/*
- * Used to set all midplanes in a special used state except the ones
- * we are able to use in a new allocation.
- *
- * IN: hostlist of midplanes we do not want
- * RET: SLURM_SUCCESS on success, or SLURM_ERROR on error
- *
- * Note: Need to call reset_all_removed_bps before starting another
- * allocation attempt after
- */
-extern int removable_set_bps(char *bps);
-
-/*
- * Resets the virtual system to the pervious state before calling
- * removable_set_bps, or set_all_bps_except.
- */
-extern int reset_all_removed_bps();
-
-/*
- * IN: hostlist of midplanes we do not want
- * RET: SLURM_SUCCESS on success, or SLURM_ERROR on error
- *
- * Need to call rest_all_removed_bps before starting another
- * allocation attempt.  If possible use removable_set_bps since it is
- * faster. It does basically the opposite of this function. If you
- * have to come up with this list though it is faster to use this
- * function than if you have to call bitmap2node_name since that is slow.
- */
-extern int set_all_bps_except(char *bps);
-
-/*
- * set values of every grid point (used in smap)
- */
-extern void init_grid(node_info_msg_t *node_info_ptr);
-
-/*
- * Convert a BG API error code to a string
- * IN inx - error code from any of the BG Bridge APIs
- * RET - string describing the error condition
- */
-extern char *bg_err_str(status_t inx);
-
-/*
- * Set up the map for resolving
- */
-extern int set_bp_map(void);
-
-/*
- * find a base blocks bg location based on Rack Midplane name R000 not R00-M0
- */
-extern uint16_t *find_bp_loc(char* bp_id);
-
-/*
- * find a rack/midplace location based on XYZ coords
- */
-extern char *find_bp_rack_mid(char* xyz);
-
-/*
- * set the used wires in the virtual system for a block from the real system
- */
-extern int load_block_wiring(char *bg_block_id);
-
-/*
- * get the used wires for a block out of the database and return the
- * node list
- */
-extern List get_and_set_block_wiring(char *bg_block_id,
-				     rm_partition_t *block_ptr);
-
-/* make sure a node is in the system return 1 if it is 0 if not */
-extern int validate_coord(uint16_t *coord);
-
-
-#endif /* _BLOCK_ALLOCATOR_H_ */
diff --git a/src/plugins/select/bluegene/block_allocator/bridge_linker.c b/src/plugins/select/bluegene/block_allocator/bridge_linker.c
deleted file mode 100644
index 5d7a4f942..000000000
--- a/src/plugins/select/bluegene/block_allocator/bridge_linker.c
+++ /dev/null
@@ -1,750 +0,0 @@
-/*****************************************************************************\
- *  bridge_linker.c
- *
- *****************************************************************************
- *  Copyright (C) 2004 The Regents of the University of California.
- *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
- *  Written by Dan Phung <phung4@llnl.gov>, Danny Auble <da@llnl.gov>
- *
- *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
- *  Please also read the included file: DISCLAIMER.
- *
- *  SLURM is free software; you can redistribute it and/or modify it under
- *  the terms of the GNU General Public License as published by the Free
- *  Software Foundation; either version 2 of the License, or (at your option)
- *  any later version.
- *
- *  In addition, as a special exception, the copyright holders give permission
- *  to link the code of portions of this program with the OpenSSL library under
- *  certain conditions as described in each individual source file, and
- *  distribute linked combinations including the two. You must obey the GNU
- *  General Public License in all respects for all of the code used other than
- *  OpenSSL. If you modify file(s) with this exception, you may extend this
- *  exception to your version of the file(s), but you are not obligated to do
- *  so. If you do not wish to do so, delete this exception statement from your
- *  version.  If you delete this exception statement from all source files in
- *  the program, then also delete it here.
- *
- *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
- *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
- *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
- *  details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with SLURM; if not, write to the Free Software Foundation, Inc.,
- *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
-\*****************************************************************************/
-
-
-#include "bridge_linker.h"
-
-#ifdef HAVE_BG_FILES
-typedef struct {
-	/* all the rm functions */
-	status_t (*set_serial)(const rm_serial_t serial);
-	status_t (*get_bg)(my_bluegene_t **bg);
-	status_t (*add_partition)(rm_partition_t *partition);
-	status_t (*get_partition)(pm_partition_id_t pid,
-				  rm_partition_t **partition);
-	status_t (*get_partition_info)(pm_partition_id_t pid,
-				       rm_partition_t **partition);
-	status_t (*modify_partition)(pm_partition_id_t pid,
-				     enum rm_modify_op op, const void *data);
-	status_t (*set_part_owner)(pm_partition_id_t pid, const char *name);
-	status_t (*add_part_user)(pm_partition_id_t pid, const char *name);
-	status_t (*remove_part_user)(pm_partition_id_t pid, const char *name);
-	status_t (*remove_partition)(pm_partition_id_t pid);
-	status_t (*get_partitions)(rm_partition_state_flag_t flag,
-				   rm_partition_list_t **part_list);
-	status_t (*get_partitions_info)(rm_partition_state_flag_t flag,
-					rm_partition_list_t **part_list);
-	status_t (*get_job)(db_job_id_t dbJobId, rm_job_t **job);
-	status_t (*get_jobs)(rm_job_state_flag_t flag, rm_job_list_t **jobs);
-	status_t (*remove_job)(db_job_id_t jid);
-	status_t (*get_nodecards)(rm_bp_id_t bpid,
-				  rm_nodecard_list_t **nc_list);
-	status_t (*new_nodecard)(rm_nodecard_t **nodecard);
-	status_t (*free_nodecard)(rm_nodecard_t *nodecard);
-#ifndef HAVE_BGL
-	status_t (*new_ionode)(rm_ionode_t **ionode);
-	status_t (*free_ionode)(rm_ionode_t *ionode);
-#endif
-	status_t (*new_partition)(rm_partition_t **partition);
-	status_t (*free_partition)(rm_partition_t *partition);
-	status_t (*free_job)(rm_job_t *job);
-	status_t (*free_bg)(my_bluegene_t *bg);
-	status_t (*free_partition_list)(rm_partition_list_t *part_list);
-	status_t (*free_job_list)(rm_job_list_t *job_list);
-	status_t (*free_nodecard_list)(rm_nodecard_list_t *nc_list);
-	status_t (*get_data)(rm_element_t* element,
-			     enum rm_specification field, void *data);
-	status_t (*set_data)(rm_element_t* element,
-			     enum rm_specification field, void *data);
-
-	/* all the jm functions */
-	status_t (*signal_job)(db_job_id_t jid, rm_signal_t sig);
-	status_t (*cancel_job)(db_job_id_t jid);
-
-	/* all the pm functions */
-	status_t (*create_partition)(pm_partition_id_t pid);
-#ifndef HAVE_BGL
-	status_t (*reboot_partition)(pm_partition_id_t pid);
-#endif
-	status_t (*destroy_partition)(pm_partition_id_t pid);
-
-	/* set say message stuff */
-	void (*set_log_params)(FILE * stream, unsigned int level);
-
-} bridge_api_t;
-
-pthread_mutex_t api_file_mutex = PTHREAD_MUTEX_INITIALIZER;
-bridge_api_t bridge_api;
-bool initialized = false;
-bool have_db2 = true;
-void *handle = NULL;
-
-int _get_syms(int n_syms, const char *names[], void *ptrs[])
-{
-        int i, count;
-#ifdef HAVE_BGL
-#ifdef BG_DB2_SO
-	void *db_handle = NULL;
-	db_handle = dlopen (BG_DB2_SO, RTLD_LAZY);
-	if (!db_handle) {
-		have_db2 = false;
-		debug("%s", dlerror());
-		return 0;
-	}
-	dlclose(db_handle);
-#else
-	fatal("No BG_DB2_SO is set, can't run.");
-#endif
-#endif // HAVE_BGL
-
-#ifdef BG_BRIDGE_SO
-	handle = dlopen (BG_BRIDGE_SO, RTLD_LAZY);
-	if (!handle) {
-		have_db2 = false;
-		debug("%s", dlerror());
-		return 0;
-	}
-#else
-	fatal("No BG_BRIDGE_SO is set, can't run.");
-#endif
-
-	dlerror();    /* Clear any existing error */
-        count = 0;
-        for ( i = 0; i < n_syms; ++i ) {
-                ptrs[i] = dlsym(handle, names[i]);
-                if (ptrs[i]) {
-			++count;
-		} else
-			fatal("Can't find %s in api", names[i]);
-	}
-        return count;
-}
-
-
-
-extern int bridge_init()
-{
-#ifdef HAVE_BGL
-	static const char *syms[] = {
-		"rm_set_serial",
-		"rm_get_BGL",
-		"rm_add_partition",
-		"rm_get_partition",
-		"rm_get_partition_info",
-		"rm_modify_partition",
-		"rm_set_part_owner",
-		"rm_add_part_user",
-		"rm_remove_part_user",
-		"rm_remove_partition",
-		"rm_get_partitions",
-		"rm_get_partitions_info",
-		"rm_get_job",
-		"rm_get_jobs",
-		"rm_remove_job",
-		"rm_get_nodecards",
-		"rm_new_nodecard",
-		"rm_free_nodecard",
-		"rm_new_partition",
-		"rm_free_partition",
-		"rm_free_job",
-		"rm_free_BGL",
-		"rm_free_partition_list",
-		"rm_free_job_list",
-		"rm_free_nodecard_list",
-		"rm_get_data",
-		"rm_set_data",
-		"jm_signal_job",
-		"jm_cancel_job",
-		"pm_create_partition",
-		"pm_destroy_partition",
-		"setSayMessageParams"
-	};
-#else
-	static const char *syms[] = {
-		"rm_set_serial",
-		"rm_get_BG",
-		"rm_add_partition",
-		"rm_get_partition",
-		"rm_get_partition_info",
-		"rm_modify_partition",
-		"rm_set_part_owner",
-		"rm_add_part_user",
-		"rm_remove_part_user",
-		"rm_remove_partition",
-		"rm_get_partitions",
-		"rm_get_partitions_info",
-		"rm_get_job",
-		"rm_get_jobs",
-		"rm_remove_job",
-		"rm_get_nodecards",
-		"rm_new_nodecard",
-		"rm_free_nodecard",
-		"rm_new_ionode",
-		"rm_free_ionode",
-		"rm_new_partition",
-		"rm_free_partition",
-		"rm_free_job",
-		"rm_free_BG",
-		"rm_free_partition_list",
-		"rm_free_job_list",
-		"rm_free_nodecard_list",
-		"rm_get_data",
-		"rm_set_data",
-		"jm_signal_job",
-		"jm_cancel_job",
-		"pm_create_partition",
-		"pm_reboot_partition",
-		"pm_destroy_partition",
-		"setSayMessageParams"
-	};
-#endif
-	int n_syms;
-	int rc;
-
-	if (initialized)
-		return 1;
-
-	n_syms = sizeof( syms ) / sizeof( char * );
-
-	initialized = true;
-	if (!_get_syms(n_syms, syms, (void **) &bridge_api))
-		return 0;
-#ifdef BG_SERIAL
-	debug("setting the serial to %s", BG_SERIAL);
-	slurm_mutex_lock(&api_file_mutex);
-	rc = (*(bridge_api.set_serial))(BG_SERIAL);
-	slurm_mutex_unlock(&api_file_mutex);
-	debug2("done %d", rc);
-#else
-	fatal("No BG_SERIAL is set, can't run.");
-#endif
-	return 1;
-
-}
-
-extern int bridge_fini()
-{
-	if (handle)
-		dlclose(handle);
-	initialized = false;
-
-	return SLURM_SUCCESS;
-}
-
-extern status_t bridge_get_bg(my_bluegene_t **bg)
-{
-	int rc = CONNECTION_ERROR;
-	if (!bridge_init())
-		return rc;
-
-	slurm_mutex_lock(&api_file_mutex);
-	rc = (*(bridge_api.get_bg))(bg);
-	slurm_mutex_unlock(&api_file_mutex);
-	return rc;
-}
-
-extern status_t bridge_add_block(rm_partition_t *partition)
-{
-	int rc = CONNECTION_ERROR;
-	if (!bridge_init())
-		return rc;
-
-	slurm_mutex_lock(&api_file_mutex);
-	rc = (*(bridge_api.add_partition))(partition);
-	slurm_mutex_unlock(&api_file_mutex);
-	return rc;
-
-}
-
-extern status_t bridge_get_block(pm_partition_id_t pid,
-				 rm_partition_t **partition)
-{
-	int rc = CONNECTION_ERROR;
-	if (!bridge_init())
-		return rc;
-
-	slurm_mutex_lock(&api_file_mutex);
-	rc = (*(bridge_api.get_partition))(pid, partition);
-	slurm_mutex_unlock(&api_file_mutex);
-	return rc;
-
-}
-
-extern status_t bridge_get_block_info(pm_partition_id_t pid,
-				      rm_partition_t **partition)
-{
-	int rc = CONNECTION_ERROR;
-	if (!bridge_init())
-		return rc;
-
-	/* this is here to make sure we don't lock up things with
-	   polling and the long running get_BG call */
-	rc = pthread_mutex_trylock(&api_file_mutex);
-	if (rc == EBUSY)
-		return rc;
-	else if (rc) {
-		errno = rc;
-		error("%s:%d %s: pthread_mutex_trylock(): %m",
-		      __FILE__, __LINE__, __CURRENT_FUNC__);
-	}
-
-	//slurm_mutex_lock(&api_file_mutex);
-	rc = (*(bridge_api.get_partition_info))(pid, partition);
-	slurm_mutex_unlock(&api_file_mutex);
-	return rc;
-
-}
-
-extern status_t bridge_modify_block(pm_partition_id_t pid,
-				    enum rm_modify_op op, const void *data)
-{
-	int rc = CONNECTION_ERROR;
-	if (!bridge_init())
-		return rc;
-
-	slurm_mutex_lock(&api_file_mutex);
-	rc = (*(bridge_api.modify_partition))(pid, op, data);
-	slurm_mutex_unlock(&api_file_mutex);
-	return rc;
-
-}
-
-extern status_t bridge_set_block_owner(pm_partition_id_t pid, const char *name)
-{
-	int rc = CONNECTION_ERROR;
-	if (!bridge_init())
-		return rc;
-
-	slurm_mutex_lock(&api_file_mutex);
-	rc = (*(bridge_api.set_part_owner))(pid, name);
-	slurm_mutex_unlock(&api_file_mutex);
-	return rc;
-
-}
-
-extern status_t bridge_add_block_user(pm_partition_id_t pid, const char *name)
-{
-	int rc = CONNECTION_ERROR;
-	if (!bridge_init())
-		return rc;
-
-	slurm_mutex_lock(&api_file_mutex);
-	rc = (*(bridge_api.add_part_user))(pid, name);
-	slurm_mutex_unlock(&api_file_mutex);
-	return rc;
-
-}
-
-extern status_t bridge_remove_block_user(pm_partition_id_t pid,
-					 const char *name)
-{
-	int rc = CONNECTION_ERROR;
-	if (!bridge_init())
-		return rc;
-
-	slurm_mutex_lock(&api_file_mutex);
-	rc = (*(bridge_api.remove_part_user))(pid, name);
-	slurm_mutex_unlock(&api_file_mutex);
-	return rc;
-
-}
-
-extern status_t bridge_remove_block(pm_partition_id_t pid)
-{
-	int rc = CONNECTION_ERROR;
-	if (!bridge_init())
-		return rc;
-
-	slurm_mutex_lock(&api_file_mutex);
-	rc = (*(bridge_api.remove_partition))(pid);
-	slurm_mutex_unlock(&api_file_mutex);
-	return rc;
-
-}
-
-extern status_t bridge_get_blocks(rm_partition_state_flag_t flag,
-				  rm_partition_list_t **part_list)
-{
-	int rc = CONNECTION_ERROR;
-	if (!bridge_init())
-		return rc;
-
-	slurm_mutex_lock(&api_file_mutex);
-	rc = (*(bridge_api.get_partitions))(flag, part_list);
-	slurm_mutex_unlock(&api_file_mutex);
-	return rc;
-
-}
-
-extern status_t bridge_get_blocks_info(rm_partition_state_flag_t flag,
-				       rm_partition_list_t **part_list)
-{
-	int rc = CONNECTION_ERROR;
-	if (!bridge_init())
-		return rc;
-
-	slurm_mutex_lock(&api_file_mutex);
-	rc = (*(bridge_api.get_partitions_info))(flag, part_list);
-	slurm_mutex_unlock(&api_file_mutex);
-	return rc;
-
-}
-
-extern status_t bridge_get_job(db_job_id_t dbJobId, rm_job_t **job)
-{
-	int rc = CONNECTION_ERROR;
-	if (!bridge_init())
-		return rc;
-
-	slurm_mutex_lock(&api_file_mutex);
-	rc = (*(bridge_api.get_job))(dbJobId, job);
-	slurm_mutex_unlock(&api_file_mutex);
-	return rc;
-
-}
-
-extern status_t bridge_get_jobs(rm_job_state_flag_t flag, rm_job_list_t **jobs)
-{
-	int rc = CONNECTION_ERROR;
-	if (!bridge_init())
-		return rc;
-
-	slurm_mutex_lock(&api_file_mutex);
-	rc = (*(bridge_api.get_jobs))(flag, jobs);
-	slurm_mutex_unlock(&api_file_mutex);
-	return rc;
-
-}
-
-extern status_t bridge_remove_job(db_job_id_t jid)
-{
-	int rc = CONNECTION_ERROR;
-	if (!bridge_init())
-		return rc;
-
-	slurm_mutex_lock(&api_file_mutex);
-	rc = (*(bridge_api.remove_job))(jid);
-	slurm_mutex_unlock(&api_file_mutex);
-	return rc;
-
-}
-
-extern status_t bridge_get_nodecards(rm_bp_id_t bpid,
-				     rm_nodecard_list_t **nc_list)
-{
-	int rc = CONNECTION_ERROR;
-	if (!bridge_init())
-		return rc;
-
-	slurm_mutex_lock(&api_file_mutex);
-	rc = (*(bridge_api.get_nodecards))(bpid, nc_list);
-	slurm_mutex_unlock(&api_file_mutex);
-	return rc;
-
-}
-
-extern status_t bridge_new_nodecard(rm_nodecard_t **nodecard)
-{
-	int rc = CONNECTION_ERROR;
-	if (!bridge_init())
-		return rc;
-
-	slurm_mutex_lock(&api_file_mutex);
-	rc = (*(bridge_api.new_nodecard))(nodecard);
-	slurm_mutex_unlock(&api_file_mutex);
-	return rc;
-
-}
-
-extern status_t bridge_free_nodecard(rm_nodecard_t *nodecard)
-{
-	int rc = CONNECTION_ERROR;
-	if (!bridge_init())
-		return rc;
-
-	slurm_mutex_lock(&api_file_mutex);
-	rc = (*(bridge_api.free_nodecard))(nodecard);
-	slurm_mutex_unlock(&api_file_mutex);
-	return rc;
-
-}
-
-#ifndef HAVE_BGL
-extern status_t bridge_new_ionode(rm_ionode_t **ionode)
-{
-	int rc = CONNECTION_ERROR;
-	if (!bridge_init())
-		return rc;
-
-	slurm_mutex_lock(&api_file_mutex);
-	rc = (*(bridge_api.new_ionode))(ionode);
-	slurm_mutex_unlock(&api_file_mutex);
-	return rc;
-
-}
-
-extern status_t bridge_free_ionode(rm_ionode_t *ionode)
-{
-	int rc = CONNECTION_ERROR;
-	if (!bridge_init())
-		return rc;
-
-	slurm_mutex_lock(&api_file_mutex);
-	rc = (*(bridge_api.free_ionode))(ionode);
-	slurm_mutex_unlock(&api_file_mutex);
-	return rc;
-
-}
-#endif
-
-extern status_t bridge_new_block(rm_partition_t **partition)
-{
-	int rc = CONNECTION_ERROR;
-	if (!bridge_init())
-		return rc;
-
-	slurm_mutex_lock(&api_file_mutex);
-	rc = (*(bridge_api.new_partition))(partition);
-	slurm_mutex_unlock(&api_file_mutex);
-	return rc;
-
-}
-
-extern status_t bridge_free_block(rm_partition_t *partition)
-{
-	int rc = CONNECTION_ERROR;
-	if (!bridge_init())
-		return rc;
-
-	slurm_mutex_lock(&api_file_mutex);
-	rc = (*(bridge_api.free_partition))(partition);
-	slurm_mutex_unlock(&api_file_mutex);
-	return rc;
-
-}
-
-extern status_t bridge_free_job(rm_job_t *job)
-{
-	int rc = CONNECTION_ERROR;
-	if (!bridge_init())
-		return rc;
-
-	slurm_mutex_lock(&api_file_mutex);
-	rc = (*(bridge_api.free_job))(job);
-	slurm_mutex_unlock(&api_file_mutex);
-	return rc;
-
-}
-
-extern status_t bridge_free_bg(my_bluegene_t *bg)
-{
-	int rc = CONNECTION_ERROR;
-	if (!bridge_init())
-		return rc;
-
-	slurm_mutex_lock(&api_file_mutex);
-	rc = (*(bridge_api.free_bg))(bg);
-	slurm_mutex_unlock(&api_file_mutex);
-	return rc;
-
-}
-
-extern status_t bridge_free_block_list(rm_partition_list_t *part_list)
-{
-	int rc = CONNECTION_ERROR;
-	if (!bridge_init())
-		return rc;
-
-	slurm_mutex_lock(&api_file_mutex);
-	rc = (*(bridge_api.free_partition_list))(part_list);
-	slurm_mutex_unlock(&api_file_mutex);
-	return rc;
-
-}
-
-extern status_t bridge_free_job_list(rm_job_list_t *job_list)
-{
-	int rc = CONNECTION_ERROR;
-	if (!bridge_init())
-		return rc;
-
-	slurm_mutex_lock(&api_file_mutex);
-	rc = (*(bridge_api.free_job_list))(job_list);
-	slurm_mutex_unlock(&api_file_mutex);
-	return rc;
-
-}
-
-extern status_t bridge_free_nodecard_list(rm_nodecard_list_t *nc_list)
-{
-	int rc = CONNECTION_ERROR;
-	if (!bridge_init())
-		return rc;
-
-	slurm_mutex_lock(&api_file_mutex);
-	rc = (*(bridge_api.free_nodecard_list))(nc_list);
-	slurm_mutex_unlock(&api_file_mutex);
-	return rc;
-
-}
-
-extern status_t bridge_get_data(rm_element_t* element,
-				enum rm_specification field, void *data)
-{
-	int rc = CONNECTION_ERROR;
-	if (!bridge_init())
-		return rc;
-
-	slurm_mutex_lock(&api_file_mutex);
-	rc = (*(bridge_api.get_data))(element, field, data);
-	slurm_mutex_unlock(&api_file_mutex);
-	return rc;
-
-}
-
-extern status_t bridge_set_data(rm_element_t* element,
-				enum rm_specification field, void *data)
-{
-	int rc = CONNECTION_ERROR;
-	if (!bridge_init())
-		return rc;
-
-	slurm_mutex_lock(&api_file_mutex);
-	rc = (*(bridge_api.set_data))(element, field, data);
-	slurm_mutex_unlock(&api_file_mutex);
-	return rc;
-
-}
-
-/* all the jm functions */
-extern status_t bridge_signal_job(db_job_id_t jid, rm_signal_t sig)
-{
-	int rc = CONNECTION_ERROR;
-	if (!bridge_init())
-		return rc;
-
-	slurm_mutex_lock(&api_file_mutex);
-	rc = (*(bridge_api.signal_job))(jid, sig);
-	slurm_mutex_unlock(&api_file_mutex);
-	return rc;
-
-}
-
-extern status_t bridge_cancel_job(db_job_id_t jid)
-{
-	int rc = CONNECTION_ERROR;
-	if (!bridge_init())
-		return rc;
-
-	slurm_mutex_lock(&api_file_mutex);
-	rc = (*(bridge_api.cancel_job))(jid);
-	slurm_mutex_unlock(&api_file_mutex);
-	return rc;
-
-}
-
-/* all the pm functions */
-extern status_t bridge_create_block(pm_partition_id_t pid)
-{
-	int rc = CONNECTION_ERROR;
-	if (!bridge_init())
-		return rc;
-
-	slurm_mutex_lock(&api_file_mutex);
-	rc = (*(bridge_api.create_partition))(pid);
-	slurm_mutex_unlock(&api_file_mutex);
-	return rc;
-
-}
-
-#ifndef HAVE_BGL
-extern status_t bridge_reboot_block(pm_partition_id_t pid)
-{
-	int rc = CONNECTION_ERROR;
-	if (!bridge_init())
-		return rc;
-
-	slurm_mutex_lock(&api_file_mutex);
-	rc = (*(bridge_api.reboot_partition))(pid);
-	slurm_mutex_unlock(&api_file_mutex);
-	return rc;
-
-}
-#endif
-
-extern status_t bridge_destroy_block(pm_partition_id_t pid)
-{
-	int rc = CONNECTION_ERROR;
-	if (!bridge_init())
-		return rc;
-
-	slurm_mutex_lock(&api_file_mutex);
-	rc = (*(bridge_api.destroy_partition))(pid);
-	slurm_mutex_unlock(&api_file_mutex);
-	return rc;
-
-}
-
-extern int bridge_set_log_params(char *api_file_name, unsigned int level)
-{
-	static FILE *fp = NULL;
-        FILE *fp2 = NULL;
-	int rc = SLURM_SUCCESS;
-
-	if (!bridge_init())
-		return SLURM_ERROR;
-
-	slurm_mutex_lock(&api_file_mutex);
-	if (fp)
-		fp2 = fp;
-
-	fp = fopen(api_file_name, "a");
-
-	if (fp == NULL) {
-		error("can't open file for bridgeapi.log at %s: %m",
-		      api_file_name);
-		rc = SLURM_ERROR;
-		goto end_it;
-	}
-
-
-	(*(bridge_api.set_log_params))(fp, level);
-	/* In the libraries linked to from the bridge there are stderr
-	   messages send which we would miss unless we dup this to the
-	   log */
-	//(void)dup2(fileno(fp), STDERR_FILENO);
-
-	if (fp2)
-		fclose(fp2);
-end_it:
-	slurm_mutex_unlock(&api_file_mutex);
-	return rc;
-}
-#endif /* HAVE_BG_FILES */
-
-
diff --git a/src/plugins/select/bluegene/block_allocator/bridge_linker.h b/src/plugins/select/bluegene/bridge_linker.h
similarity index 66%
rename from src/plugins/select/bluegene/block_allocator/bridge_linker.h
rename to src/plugins/select/bluegene/bridge_linker.h
index 236b2739e..eda7cdcc9 100644
--- a/src/plugins/select/bluegene/block_allocator/bridge_linker.h
+++ b/src/plugins/select/bluegene/bridge_linker.h
@@ -2,12 +2,12 @@
  *  bridge_linker.h
  *
  *****************************************************************************
- *  Copyright (C) 2004 The Regents of the University of California.
+ *  Copyright (C) 2011 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
- *  Written by Dan Phung <phung4@llnl.gov>, Danny Auble <da@llnl.gov>
+ *  Written by Danny Auble <da@llnl.gov>
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -50,6 +50,13 @@
 #  include "config.h"
 #endif
 
+#include <dlfcn.h>
+
+#ifdef WITH_PTHREADS
+#  include <pthread.h>
+#endif				/* WITH_PTHREADS */
+
+#include "src/common/node_select.h"
 #include "src/common/read_config.h"
 #include "src/common/parse_spec.h"
 #include "src/slurmctld/proc_req.h"
@@ -58,73 +65,84 @@
 #include "src/common/bitstring.h"
 #include "src/common/xstring.h"
 #include "src/common/xmalloc.h"
-#include "src/plugins/select/bluegene/wrap_rm_api.h"
-#include <dlfcn.h>
+#include "bg_list_functions.h"
+#include "bg_enums.h"
 
-#ifdef WITH_PTHREADS
-#  include <pthread.h>
-#endif				/* WITH_PTHREADS */
+#define MAX_POLL_RETRIES    220
+#define POLL_INTERVAL        3
 
-#ifdef HAVE_BG_FILES
-extern bool have_db2;
-extern int bridge_init();
+/* Global variables */
+extern bg_config_t *bg_conf;
+extern bg_lists_t *bg_lists;
+extern time_t last_bg_update;
+extern pthread_mutex_t block_state_mutex;
+extern pthread_mutex_t request_list_mutex;
+extern int blocks_are_created;
+extern int num_unused_cpus;
+
+extern int bridge_init(char *properties_file);
 extern int bridge_fini();
 
+extern int bridge_get_size(int *size);
+extern int bridge_setup_system();
+
+extern int bridge_block_create(bg_record_t *bg_record);
+
+/*
+ * Boot a block. Block state expected to be FREE upon entry.
+ * NOTE: This function does not wait for the boot to complete.
+ * the slurm prolog script needs to perform the waiting.
+ * NOTE: block_state_mutex needs to be locked before entering.
+ */
+extern int bridge_block_boot(bg_record_t *bg_record);
+extern int bridge_block_free(bg_record_t *bg_record);
+extern int bridge_block_remove(bg_record_t *bg_record);
+
+extern int bridge_block_add_user(bg_record_t *bg_record, char *user_name);
+extern int bridge_block_remove_user(bg_record_t *bg_record, char *user_name);
+extern int bridge_block_remove_all_users(bg_record_t *bg_record,
+					 char *user_name);
+
+extern int bridge_blocks_load_curr(List curr_block_list);
+
+extern void bridge_reset_block_list(List block_list);
+extern void bridge_block_post_job(char *bg_block_id);
+extern int bridge_set_log_params(char *api_file_name, unsigned int level);
+
+#if defined HAVE_BG_FILES && defined HAVE_BG_L_P
+extern bool have_db2;
+
 extern status_t bridge_get_bg(my_bluegene_t **bg);
-extern status_t bridge_add_block(rm_partition_t *partition);
-extern status_t bridge_get_block(pm_partition_id_t pid,
+extern status_t bridge_free_bg(my_bluegene_t *bg);
+extern status_t bridge_get_data(rm_element_t* element,
+				enum rm_specification field, void *data);
+extern status_t bridge_set_data(rm_element_t* element,
+				enum rm_specification field, void *data);
+extern status_t bridge_free_nodecard_list(rm_nodecard_list_t *nc_list);
+extern status_t bridge_free_block(rm_partition_t *partition);
+extern status_t bridge_block_modify(char *bg_block_id,
+				    int op, const void *data);
+extern status_t bridge_get_block(char *bg_block_id,
 				 rm_partition_t **partition);
-extern status_t bridge_get_block_info(pm_partition_id_t pid,
+extern status_t bridge_get_block_info(char *bg_block_id,
 				      rm_partition_t **partition);
-extern status_t bridge_modify_block(pm_partition_id_t pid,
-				    enum rm_modify_op op, const void *data);
-extern status_t bridge_set_block_owner(pm_partition_id_t, const char *);
-extern status_t bridge_add_block_user(pm_partition_id_t, const char *);
-extern status_t bridge_remove_block_user(pm_partition_id_t, const char *);
-extern status_t bridge_remove_block(pm_partition_id_t pid);
 extern status_t bridge_get_blocks(rm_partition_state_flag_t flag,
 				  rm_partition_list_t **part_list);
 extern status_t bridge_get_blocks_info(rm_partition_state_flag_t flag,
 				       rm_partition_list_t **part_list);
-extern status_t bridge_get_job(db_job_id_t dbJobId, rm_job_t **job);
-extern status_t bridge_get_jobs(rm_job_state_flag_t flag,
-				rm_job_list_t **jobs);
-extern status_t bridge_remove_job(db_job_id_t jid);
-extern status_t bridge_get_nodecards(rm_bp_id_t bpid,
-				     rm_nodecard_list_t **nc_list);
+extern status_t bridge_free_block_list(rm_partition_list_t *part_list);
 extern status_t bridge_new_nodecard(rm_nodecard_t **nodecard);
 extern status_t bridge_free_nodecard(rm_nodecard_t *nodecard);
-#ifndef HAVE_BGL
+extern status_t bridge_get_nodecards(rm_bp_id_t bpid,
+				     rm_nodecard_list_t **nc_list);
+#ifdef HAVE_BGP
 extern status_t bridge_new_ionode(rm_ionode_t **ionode);
 extern status_t bridge_free_ionode(rm_ionode_t *ionode);
+#else
+extern int bridge_find_nodecard_num(rm_partition_t *block_ptr,
+				    rm_nodecard_t *ncard,
+				    int *nc_id);
 #endif
-extern status_t bridge_new_block(rm_partition_t **partition);
-extern status_t bridge_free_block(rm_partition_t *partition);
-extern status_t bridge_free_job(rm_job_t *job);
-extern status_t bridge_free_bg(my_bluegene_t *bg);
-extern status_t bridge_free_block_list(rm_partition_list_t *part_list);
-extern status_t bridge_free_job_list(rm_job_list_t *job_list);
-extern status_t bridge_free_nodecard_list(rm_nodecard_list_t *nc_list);
-extern status_t bridge_get_data(rm_element_t* element,
-				enum rm_specification field, void *data);
-extern status_t bridge_set_data(rm_element_t* element,
-				enum rm_specification field, void *data);
-
-/* all the jm functions */
-extern status_t bridge_signal_job(db_job_id_t, rm_signal_t);
-extern status_t bridge_cancel_job(db_job_id_t);
-
-/* all the pm functions */
-extern status_t bridge_create_block(pm_partition_id_t pid);
-
-#ifndef HAVE_BGL
-extern status_t bridge_reboot_block(pm_partition_id_t pid);
-#endif
-
-extern status_t bridge_destroy_block(pm_partition_id_t pid);
-
-/* say message */
-
-extern int bridge_set_log_params(char *api_file_name, unsigned int level);
 #endif /* HAVE_BG_FILES */
+
 #endif /* _BRIDGE_LINKER_H_ */
diff --git a/src/plugins/select/bluegene/configure_api.c b/src/plugins/select/bluegene/configure_api.c
new file mode 100644
index 000000000..83e3590e3
--- /dev/null
+++ b/src/plugins/select/bluegene/configure_api.c
@@ -0,0 +1,371 @@
+/*****************************************************************************\
+ *  configure_api.c
+ *
+ *****************************************************************************
+ *  Copyright (C) 2011 SchedMD LLC.
+ *  Written by Danny Auble <da@schedmd.com>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#include "configure_api.h"
+#include "src/common/plugin.h"
+#include "src/common/plugrack.h"
+#include "src/common/slurm_protocol_api.h"
+#include "src/common/xstring.h"
+
+typedef struct {
+	void (*ba_init)                (node_info_msg_t *node_info_ptr,
+					bool load_bridge);
+	void (*ba_fini)                (void);
+	void (*ba_setup_wires)         (void);
+	void (*reset_ba_system)        (bool track_down_mps);
+	void (*destroy_ba_mp)          (void *ptr);
+	char *(*ba_passthroughs_string)(uint16_t passthrough);
+	void (*ba_update_mp_state)     (ba_mp_t *ba_mp, uint16_t state);
+	int (*ba_set_removable_mps)    (bitstr_t *bitmap, bool except);
+	int (*ba_reset_all_removed_mps)(void);
+	int (*new_ba_request)          (select_ba_request_t* ba_request);
+	int (*allocate_block)          (select_ba_request_t* ba_request,
+				        List results);
+	int (*remove_block)            (List mps, bool is_small);
+	ba_mp_t *(*str2ba_mp)          (const char *coords);
+	ba_mp_t *(*loc2ba_mp)          (const char *mp_id);
+	ba_mp_t *(*coord2ba_mp)        (const uint16_t *coord);
+	char *(*give_geo)              (uint16_t *int_geo, int dims,
+					bool with_sep);
+	s_p_hashtbl_t *(*config_make_tbl)(char *filename);
+	void (*set_ba_debug_flags)     (uint32_t debug_flags);
+} bg_configure_api_ops_t;
+
+typedef struct bg_configure_context {
+	char	       	*type;
+	plugrack_t     	plugin_list;
+	plugin_handle_t	cur_plugin;
+	int		bg_configure_errno;
+	bg_configure_api_ops_t ops;
+} bg_configure_context_t;
+
+static bg_configure_context_t *bg_configure_context = NULL;
+static pthread_mutex_t	       bg_configure_context_lock =
+	PTHREAD_MUTEX_INITIALIZER;
+
+static bg_configure_api_ops_t *_get_ops(bg_configure_context_t *c)
+{
+	/*
+	 * Must be synchronized with bg_configure_api_ops_t above.
+	 */
+	static const char *syms[] = {
+		"ba_init",
+		"ba_fini",
+		"ba_setup_wires",
+		"reset_ba_system",
+		"destroy_ba_mp",
+		"ba_passthroughs_string",
+		"ba_update_mp_state",
+		"ba_set_removable_mps",
+		"ba_reset_all_removed_mps",
+		"new_ba_request",
+		"allocate_block",
+		"remove_block",
+		"str2ba_mp",
+		"loc2ba_mp",
+		"coord2ba_mp",
+		"give_geo",
+		"config_make_tbl",
+		"set_ba_debug_flags",
+	};
+	int n_syms = sizeof(syms) / sizeof(char *);
+
+	/* Find the correct plugin. */
+	c->cur_plugin = plugin_load_and_link(c->type, n_syms, syms,
+					     (void **) &c->ops);
+	if (c->cur_plugin != PLUGIN_INVALID_HANDLE)
+		return &c->ops;
+
+	if(errno != EPLUGIN_NOTFOUND) {
+		error("Couldn't load specified plugin name for %s: %s",
+		      c->type, plugin_strerror(errno));
+		return NULL;
+	}
+
+	error("Couldn't find the specified plugin name for %s "
+	      "looking at all files",
+	      c->type);
+
+	/* Get plugin list. */
+	if ( c->plugin_list == NULL ) {
+		char *plugin_dir;
+		c->plugin_list = plugrack_create();
+		if ( c->plugin_list == NULL ) {
+			error( "cannot create plugin manager" );
+			return NULL;
+		}
+		plugrack_set_major_type(c->plugin_list, "select");
+		plugrack_set_paranoia(c->plugin_list,
+				      PLUGRACK_PARANOIA_NONE,
+				      0);
+		plugin_dir = slurm_get_plugin_dir();
+		plugrack_read_dir(c->plugin_list, plugin_dir);
+		xfree(plugin_dir);
+	}
+
+	c->cur_plugin = plugrack_use_by_type(c->plugin_list, c->type);
+	if ( c->cur_plugin == PLUGIN_INVALID_HANDLE ) {
+		error( "cannot find accounting_storage plugin for %s",
+		       c->type );
+		return NULL;
+	}
+
+	/* Dereference the API. */
+	if ( plugin_get_syms(c->cur_plugin,
+			     n_syms,
+			     syms,
+			     (void **) &c->ops ) < n_syms) {
+		error("incomplete select plugin detected");
+		return NULL;
+	}
+
+	return &c->ops;
+}
+
+/*
+ * Destroy a node selection context
+ */
+static int _context_destroy(bg_configure_context_t *c)
+{
+	int rc = SLURM_SUCCESS;
+	/*
+	 * Must check return code here because plugins might still
+	 * be loaded and active.
+	 */
+	if (c->plugin_list) {
+		if (plugrack_destroy(c->plugin_list) != SLURM_SUCCESS)
+			rc = SLURM_ERROR;
+	} else {
+		plugin_unload(c->cur_plugin);
+	}
+
+	xfree(c->type);
+
+	return rc;
+}
+
+
+extern int bg_configure_init(void)
+{
+	int rc = SLURM_SUCCESS;
+	slurm_mutex_lock(&bg_configure_context_lock);
+
+	if (bg_configure_context)
+		goto done;
+
+	bg_configure_context = xmalloc(sizeof(bg_configure_context_t));
+	bg_configure_context->type = xstrdup("select/bluegene");
+	bg_configure_context->cur_plugin = PLUGIN_INVALID_HANDLE;
+	bg_configure_context->bg_configure_errno = SLURM_SUCCESS;
+
+	if (!_get_ops(bg_configure_context)) {
+		error("cannot resolve select plugin operations for configure");
+		_context_destroy(bg_configure_context);
+		bg_configure_context = NULL;
+		rc = SLURM_ERROR;
+	}
+
+done:
+	slurm_mutex_unlock(&bg_configure_context_lock);
+	return rc;
+
+}
+
+extern int bg_configure_fini(void)
+{
+	int rc = SLURM_SUCCESS;
+
+	slurm_mutex_lock(&bg_configure_context_lock);
+	if (!bg_configure_context)
+		goto fini;
+
+	rc = _context_destroy(bg_configure_context);
+	bg_configure_context = NULL;
+fini:
+	slurm_mutex_unlock(&bg_configure_context_lock);
+	return rc;
+}
+
+extern void bg_configure_ba_init(
+	node_info_msg_t *node_info_ptr, bool load_bridge)
+{
+	if (bg_configure_init() < 0)
+		return;
+
+	(*(bg_configure_context->ops.ba_init))(node_info_ptr, load_bridge);
+}
+
+extern void bg_configure_ba_fini(void)
+{
+	if (bg_configure_init() < 0)
+		return;
+
+	(*(bg_configure_context->ops.ba_fini))();
+}
+
+extern void bg_configure_ba_setup_wires(void)
+{
+	if (bg_configure_init() < 0)
+		return;
+
+	(*(bg_configure_context->ops.ba_setup_wires))();
+}
+
+extern void bg_configure_reset_ba_system(bool track_down_mps)
+{
+	if (bg_configure_init() < 0)
+		return;
+
+	(*(bg_configure_context->ops.reset_ba_system))(track_down_mps);
+}
+
+extern void bg_configure_destroy_ba_mp(void *ptr)
+{
+	if (bg_configure_init() < 0)
+		return;
+
+	(*(bg_configure_context->ops.destroy_ba_mp))(ptr);
+}
+
+extern char *bg_configure_ba_passthroughs_string(uint16_t passthrough)
+{
+	if (bg_configure_init() < 0)
+		return NULL;
+
+	return (*(bg_configure_context->ops.ba_passthroughs_string))
+		(passthrough);
+}
+
+extern void bg_configure_ba_update_mp_state(ba_mp_t *ba_mp, uint16_t state)
+{
+	if (bg_configure_init() < 0)
+		return;
+
+	(*(bg_configure_context->ops.ba_update_mp_state))(ba_mp, state);
+}
+
+extern int bg_configure_ba_set_removable_mps(bitstr_t *bitmap, bool except)
+{
+	if (bg_configure_init() < 0)
+		return SLURM_ERROR;
+
+	return (*(bg_configure_context->ops.ba_set_removable_mps))
+		(bitmap, except);
+}
+
+extern int bg_configure_ba_reset_all_removed_mps(void)
+{
+	if (bg_configure_init() < 0)
+		return SLURM_ERROR;
+
+	return (*(bg_configure_context->ops.ba_reset_all_removed_mps))();
+}
+
+
+extern int bg_configure_new_ba_request(select_ba_request_t* ba_request)
+{
+	if (bg_configure_init() < 0)
+		return SLURM_ERROR;
+
+	return (*(bg_configure_context->ops.new_ba_request))(ba_request);
+}
+
+extern int bg_configure_allocate_block(
+	select_ba_request_t* ba_request, List results)
+{
+	if (bg_configure_init() < 0)
+		return SLURM_ERROR;
+
+	return (*(bg_configure_context->ops.allocate_block))
+		(ba_request, results);
+}
+
+extern int bg_configure_remove_block(List mps, bool is_small)
+{
+	if (bg_configure_init() < 0)
+		return SLURM_ERROR;
+
+	return (*(bg_configure_context->ops.remove_block))(mps, is_small);
+}
+
+extern ba_mp_t *bg_configure_str2ba_mp(const char *coords)
+{
+	if (bg_configure_init() < 0)
+		return NULL;
+
+	return (*(bg_configure_context->ops.str2ba_mp))(coords);
+}
+
+extern ba_mp_t *bg_configure_loc2ba_mp(const char *mp_id)
+{
+	if (bg_configure_init() < 0)
+		return NULL;
+
+	return (*(bg_configure_context->ops.loc2ba_mp))(mp_id);
+}
+
+extern ba_mp_t *bg_configure_coord2ba_mp(const uint16_t *coord)
+{
+	if (bg_configure_init() < 0)
+		return NULL;
+
+	return (*(bg_configure_context->ops.coord2ba_mp))(coord);
+}
+
+extern char *bg_configure_give_geo(uint16_t *int_geo, int dims, bool with_sep)
+{
+	if (bg_configure_init() < 0)
+		return NULL;
+
+	return (*(bg_configure_context->ops.give_geo))(int_geo, dims, with_sep);
+}
+
+extern s_p_hashtbl_t *bg_configure_config_make_tbl(char *filename)
+{
+	if (bg_configure_init() < 0)
+		return NULL;
+
+	return (*(bg_configure_context->ops.config_make_tbl))(filename);
+}
+
+extern void ba_configure_set_ba_debug_flags(uint32_t debug_flags)
+{
+	if (bg_configure_init() < 0)
+		return;
+
+	(*(bg_configure_context->ops.set_ba_debug_flags))(debug_flags);
+}
diff --git a/src/plugins/select/bluegene/configure_api.h b/src/plugins/select/bluegene/configure_api.h
new file mode 100644
index 000000000..86e240ccf
--- /dev/null
+++ b/src/plugins/select/bluegene/configure_api.h
@@ -0,0 +1,182 @@
+/*****************************************************************************\
+ *  configure_api.h
+ *
+ *****************************************************************************
+ *  Copyright (C) 2011 SchedMD LLC.
+ *  Written by Danny Auble <da@schedmd.com>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifndef _BG_CONFIGURE_API_H_
+#define _BG_CONFIGURE_API_H_
+
+#include "slurm/slurm.h"
+#include "slurm/slurm_errno.h"
+
+#include "src/common/list.h"
+#include "src/common/node_select.h"
+#include "src/common/parse_spec.h"
+
+#include "ba_common.h"
+
+extern int bg_configure_init(void);
+extern int bg_configure_fini(void);
+
+/*
+ * Initialize internal structures by either reading previous block
+ * configurations from a file or by running the graph solver.
+ *
+ * IN: node_info_msg_t * can be null,
+ *     should be from slurm_load_node().
+ * IN: load_bridge: whiether or not to get bridge information
+ *
+ * return: void.
+ */
+extern void bg_configure_ba_init(
+	node_info_msg_t *node_info_ptr, bool load_bridge);
+
+/*
+ * destroy all the internal (global) data structs.
+ */
+extern void bg_configure_ba_fini(void);
+
+/* Setup the wires on the system and the structures needed to create
+ * blocks.  This should be called before trying to create blocks.
+ */
+extern void bg_configure_ba_setup_wires(void);
+
+/*
+ * Resets the virtual system to a virgin state.  If track_down_mps is set
+ * then those midplanes are not set to idle, but kept in a down state.
+ */
+extern void bg_configure_reset_ba_system(bool track_down_mps);
+
+extern void bg_configure_destroy_ba_mp(void *ptr);
+
+/* Convert PASS_FOUND_* into equivalent string
+ * Caller MUST xfree() the returned value */
+extern char *bg_configure_ba_passthroughs_string(uint16_t passthrough);
+
+/*
+ * set the mp in the internal configuration as in, or not in use,
+ * along with the current state of the mp.
+ *
+ * IN ba_mp: ba_mp_t to update state
+ * IN state: new state of ba_mp_t
+ */
+extern void bg_configure_ba_update_mp_state(ba_mp_t *ba_mp, uint16_t state);
+
+/*
+ * Used to set all midplanes in a special used state except the ones
+ * we are able to use in a new allocation.
+ *
+ * IN: bitmap of midplanes we do or do not want
+ * IN: except - If true set all midplanes not set in the bitmap else
+ *              set all midplanes that are set in the bitmap.
+ * RET: SLURM_SUCCESS on success, or SLURM_ERROR on error
+ *
+ * Note: Need to call ba_reset_all_removed_mps before starting another
+ * allocation attempt after
+ */
+extern int bg_configure_ba_set_removable_mps(bitstr_t *bitmap, bool except);
+
+/*
+ * Resets the virtual system to the pervious state before calling
+ * ba_set_removable_mps.
+ */
+extern int bg_configure_ba_reset_all_removed_mps(void);
+
+/*
+ * create a block request.  Note that if the geometry is given,
+ * then size is ignored.  If elongate is true, the algorithm will try
+ * to fit that a block of cubic shape and then it will try other
+ * elongated geometries.  (ie, 2x2x2 -> 4x2x1 -> 8x1x1).
+ *
+ * IN/OUT - ba_request: structure to allocate and fill in.
+ *
+ * ALL below IN's need to be set within the ba_request before the call
+ * if you want them to be used.
+ * ALL below OUT's are set and returned within the ba_request.
+ * IN - avail_mp_bitmap: bitmap of usable midplanes.
+ * IN - blrtsimage: BlrtsImage for this block if not default
+ * IN - conn_type: connection type of request (TORUS or MESH or SMALL)
+ * IN - elongate: if true, will try to fit different geometries of
+ *      same size requests
+ * IN/OUT - geometry: requested/returned geometry of block
+ * IN - linuximage: LinuxImage for this block if not default
+ * IN - mloaderimage: MLoaderImage for this block if not default
+ * OUT - passthroughs: if there were passthroughs used in the
+ *       generation of the block.
+ * IN - procs: Number of real processors requested
+ * IN - RamDiskimage: RamDiskImage for this block if not default
+ * IN - rotate: if true, allows rotation of block during fit
+ * OUT - save_name: hostlist of midplanes used in block
+ * IN/OUT - size: requested/returned count of midplanes in block
+ * IN - start: geo location of where to start the allocation
+ * IN - start_req: if set use the start variable to start at
+ * return success of allocation/validation of params
+ */
+extern int bg_configure_new_ba_request(select_ba_request_t* ba_request);
+
+/*
+ * Try to allocate a block.
+ *
+ * IN - ba_request: allocation request
+ * OUT - results: List of results of the allocation request.  Each
+ * list entry will be a coordinate.  allocate_block will create the
+ * list, but the caller must destroy it.
+ *
+ * return: success or error of request
+ */
+extern int bg_configure_allocate_block(
+	select_ba_request_t* ba_request, List results);
+
+/*
+ * Admin wants to remove a previous allocation.
+ * will allow Admin to delete a previous allocation retrival by letter code.
+ */
+extern int bg_configure_remove_block(List mps, bool is_small);
+
+/* translate a string of at least AXYZ into a ba_mp_t ptr */
+extern ba_mp_t *bg_configure_str2ba_mp(const char *coords);
+/*
+ * find a base blocks bg location (rack/midplane)
+ */
+extern ba_mp_t *bg_configure_loc2ba_mp(const char* mp_id);
+
+extern ba_mp_t *bg_configure_coord2ba_mp(const uint16_t *coord);
+extern char *bg_configure_give_geo(uint16_t *int_geo, int dims, bool with_sep);
+
+extern s_p_hashtbl_t *bg_configure_config_make_tbl(char *filename);
+
+extern void ba_configure_set_ba_debug_flags(uint32_t debug_flags);
+
+#endif
diff --git a/src/plugins/select/bluegene/plugin/libsched_if64.c b/src/plugins/select/bluegene/libsched_if64.c
similarity index 93%
rename from src/plugins/select/bluegene/plugin/libsched_if64.c
rename to src/plugins/select/bluegene/libsched_if64.c
index 086856bb1..82a0121ab 100644
--- a/src/plugins/select/bluegene/plugin/libsched_if64.c
+++ b/src/plugins/select/bluegene/libsched_if64.c
@@ -4,12 +4,13 @@
  *
  *  $Id: bluegene.c 9169 2006-09-05 17:26:51Z jette $
  *****************************************************************************
- *  Copyright (C) 2004-2006 The Regents of the University of California.
+ *  Copyright (C) 2004-2007 The Regents of the University of California.
+ *  Copyright (C) 2008-2011 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <auble1@llnl.gov> et. al.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/select/bluegene/plugin/Makefile.am b/src/plugins/select/bluegene/plugin/Makefile.am
deleted file mode 100644
index f294a30b8..000000000
--- a/src/plugins/select/bluegene/plugin/Makefile.am
+++ /dev/null
@@ -1,82 +0,0 @@
-# Makefile for select/bluegene plugin
-
-CPPFLAGS = -DBLUEGENE_CONFIG_FILE=\"$(sysconfdir)/bluegene.conf\"
-AUTOMAKE_OPTIONS = foreign
-
-PLUGIN_FLAGS = -module -avoid-version --export-dynamic -lm
-
-INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common $(BG_INCLUDES)
-
-if BLUEGENE_LOADED
-
-if BGL_LOADED
-pkglib_LTLIBRARIES = select_bluegene.la libsched_if64.la
-else
-pkglib_LTLIBRARIES = select_bluegene.la libsched_if.la
-endif
-
-# Blue Gene node selection plugin.
-select_bluegene_la_SOURCES = select_bluegene.c \
-				bg_boot_time.h \
-				bg_job_place.c bg_job_place.h \
-				bg_job_run.c bg_job_run.h \
-				bg_block_info.c bg_block_info.h \
-				bg_record_functions.c bg_record_functions.h \
-				bluegene.c bluegene.h \
-				state_test.c state_test.h \
-				bg_switch_connections.c \
-				block_sys.c \
-				dynamic_block.c dynamic_block.h \
-				defined_block.c defined_block.h \
-				jobinfo.c jobinfo.h\
-				nodeinfo.c nodeinfo.h\
-				../wrap_rm_api.h
-
-select_bluegene_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
-
-select_bluegene_la_LIBADD  = ../block_allocator/libbluegene_block_allocator.la
-
-
-# MPIRUN dynamic lib.
-if BGL_LOADED
-libsched_if64_la_SOURCES = libsched_if64.c
-libsched_if64_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
-else
-libsched_if_la_SOURCES = libsched_if64.c
-libsched_if_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
-endif
-
-sbin_PROGRAMS = slurm_prolog slurm_epilog sfree
-
-convenience_libs = \
-	$(top_builddir)/src/api/libslurm.o -ldl
-
-sfree_LDADD = $(convenience_libs)
-slurm_prolog_LDADD = $(convenience_libs)
-slurm_epilog_LDADD = $(convenience_libs)
-sfree_SOURCES = sfree.c sfree.h opts.c \
-		../block_allocator/bridge_linker.c \
-		../block_allocator/bridge_linker.h
-slurm_prolog_SOURCES = slurm_prolog.c
-slurm_epilog_SOURCES = slurm_epilog.c
-sfree_LDFLAGS = -export-dynamic -lm $(CMD_LDFLAGS)
-slurm_prolog_LDFLAGS = -export-dynamic $(CMD_LDFLAGS)
-slurm_epilog_LDFLAGS = -export-dynamic $(CMD_LDFLAGS)
-
-force:
-$(select_bluegene_la_LIBADD) $(sfree_LDADD) : force
-	@cd `dirname $@` && $(MAKE) `basename $@`
-
-else
-# These are needed for pack/unpack of structures for cross-cluster stuff
-
-pkglib_LTLIBRARIES = select_bluegene.la
-
-select_bluegene_la_SOURCES = select_bluegene.c \
-			jobinfo.c jobinfo.h\
-			nodeinfo.c nodeinfo.h\
-			../wrap_rm_api.h
-select_bluegene_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
-
-
-endif
diff --git a/src/plugins/select/bluegene/plugin/Manifest b/src/plugins/select/bluegene/plugin/Manifest
deleted file mode 100644
index 950074a36..000000000
--- a/src/plugins/select/bluegene/plugin/Manifest
+++ /dev/null
@@ -1,21 +0,0 @@
-Filename			: description
-README				: description of the plugin and 
-                                  configuration file
-Makefile.am			: autoconf Makefile 
-bluegene.conf			: sample configuration file
-select_bluegene.c		: API for the select_plugin
-bluegene.c			: component used for parsing config 
-                                  file, determining where jobs will 
-                                  and other plugin logic.
-bluegene.h			: header file
-block_sys.c			: component used for wiring up the 
-				  blocks
-block_sys.h			: header file
-bg_switch_connections.c		: interface for connecting wires in
-				  the bluegene system
-bg_switch_connections.h		: header file
-bg_block_info.c			: functions for updating states of bluegene
-				  blocks
-bg_job_run.c			: controls all states of the job
-bg_job_place.c			: determines which bg_block to run in
-
diff --git a/src/plugins/select/bluegene/plugin/bg_block_info.c b/src/plugins/select/bluegene/plugin/bg_block_info.c
deleted file mode 100644
index 5dca2c699..000000000
--- a/src/plugins/select/bluegene/plugin/bg_block_info.c
+++ /dev/null
@@ -1,706 +0,0 @@
-/*****************************************************************************\
- *  bg_block_info.c - bluegene block information from the db2 database.
- *
- *  $Id$
- *****************************************************************************
- *  Copyright (C) 2004-2006 The Regents of the University of California.
- *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
- *  Written by Danny Auble <da@llnl.gov>
- *
- *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
- *  Please also read the included file: DISCLAIMER.
- *
- *  SLURM is free software; you can redistribute it and/or modify it under
- *  the terms of the GNU General Public License as published by the Free
- *  Software Foundation; either version 2 of the License, or (at your option)
- *  any later version.
- *
- *  In addition, as a special exception, the copyright holders give permission
- *  to link the code of portions of this program with the OpenSSL library under
- *  certain conditions as described in each individual source file, and
- *  distribute linked combinations including the two. You must obey the GNU
- *  General Public License in all respects for all of the code used other than
- *  OpenSSL. If you modify file(s) with this exception, you may extend this
- *  exception to your version of the file(s), but you are not obligated to do
- *  so. If you do not wish to do so, delete this exception statement from your
- *  version.  If you delete this exception statement from all source files in
- *  the program, then also delete it here.
- *
- *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
- *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
- *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
- *  details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with SLURM; if not, write to the Free Software Foundation, Inc.,
- *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
-\*****************************************************************************/
-
-#ifdef HAVE_CONFIG_H
-#  include "config.h"
-#  if HAVE_STDINT_H
-#    include <stdint.h>
-#  endif
-#  if HAVE_INTTYPES_H
-#    include <inttypes.h>
-#  endif
-#  if WITH_PTHREADS
-#    include <pthread.h>
-#  endif
-#endif
-
-#include <signal.h>
-#include <unistd.h>
-
-#include <slurm/slurm_errno.h>
-
-#include <pwd.h>
-#include <sys/types.h>
-#include "src/common/hostlist.h"
-#include "src/common/list.h"
-#include "src/common/macros.h"
-#include "src/common/node_select.h"
-#include "src/common/uid.h"
-#include "src/common/xstring.h"
-#include "src/slurmctld/proc_req.h"
-#include "src/slurmctld/trigger_mgr.h"
-#include "src/slurmctld/locks.h"
-#include "bluegene.h"
-
-#define _DEBUG 0
-#define RETRY_BOOT_COUNT 3
-
-#ifdef HAVE_BG_FILES
-
-typedef struct {
-	int jobid;
-} kill_job_struct_t;
-
-List kill_job_list = NULL;
-
-static int _block_is_deallocating(bg_record_t *bg_record);
-static void _destroy_kill_struct(void *object);
-
-static int _block_is_deallocating(bg_record_t *bg_record)
-{
-	int jobid = bg_record->job_running;
-	char *user_name = NULL;
-
-	if (bg_record->modifying)
-		return SLURM_SUCCESS;
-
-	user_name = xstrdup(bg_conf->slurm_user_name);
-	if (remove_all_users(bg_record->bg_block_id, NULL)
-	    == REMOVE_USER_ERR) {
-		error("Something happened removing "
-		      "users from block %s",
-		      bg_record->bg_block_id);
-	}
-
-	if (bg_record->target_name && bg_record->user_name) {
-		if (!strcmp(bg_record->target_name, user_name)) {
-			if (strcmp(bg_record->target_name, bg_record->user_name)
-			    || (jobid > NO_JOB_RUNNING)) {
-				kill_job_struct_t *freeit =
-					xmalloc(sizeof(freeit));
-				freeit->jobid = jobid;
-				list_push(kill_job_list, freeit);
-
-				error("Block %s was in a ready state "
-				      "for user %s but is being freed. "
-				      "Job %d was lost.",
-				      bg_record->bg_block_id,
-				      bg_record->user_name,
-				      jobid);
-			} else {
-				debug("Block %s was in a ready state "
-				      "but is being freed. No job running.",
-				      bg_record->bg_block_id);
-			}
-		} else {
-			error("State went to free on a boot "
-			      "for block %s.",
-			      bg_record->bg_block_id);
-		}
-	} else if (bg_record->user_name) {
-		error("Target Name was not set "
-		      "not set for block %s.",
-		      bg_record->bg_block_id);
-		bg_record->target_name = xstrdup(bg_record->user_name);
-	} else {
-		error("Target Name and User Name are "
-		      "not set for block %s.",
-		      bg_record->bg_block_id);
-		bg_record->user_name = xstrdup(user_name);
-		bg_record->target_name = xstrdup(bg_record->user_name);
-	}
-
-	if (remove_from_bg_list(bg_lists->job_running, bg_record)
-	    == SLURM_SUCCESS)
-		num_unused_cpus += bg_record->cpu_cnt;
-	remove_from_bg_list(bg_lists->booted, bg_record);
-
-	xfree(user_name);
-
-	return SLURM_SUCCESS;
-}
-static void _destroy_kill_struct(void *object)
-{
-	kill_job_struct_t *freeit = (kill_job_struct_t *)object;
-
-	if (freeit) {
-		xfree(freeit);
-	}
-}
-
-#endif
-
-
-/*
- * check to see if block is ready to execute.  Meaning
- * User is added to the list of users able to run, and no one
- * else is running on the block.
- *
- * NOTE: This happens in parallel with srun and slurmd spawning
- * the job. A prolog script is expected to defer initiation of
- * the job script until the BG block is available for use.
- */
-extern int block_ready(struct job_record *job_ptr)
-{
-	int rc = 1;
-	char *block_id = NULL;
-	bg_record_t *bg_record = NULL;
-
-	rc = get_select_jobinfo(job_ptr->select_jobinfo->data,
-				SELECT_JOBDATA_BLOCK_ID, &block_id);
-	if (rc == SLURM_SUCCESS) {
-		slurm_mutex_lock(&block_state_mutex);
-		bg_record = find_bg_record_in_list(bg_lists->main, block_id);
-
-		if (bg_record) {
-			if (bg_record->job_running != job_ptr->job_id) {
-				rc = 0;
-			} else if ((bg_record->user_uid == job_ptr->user_id)
-				   && (bg_record->state
-				       == RM_PARTITION_READY)) {
-				/* Clear the state just incase we
-				   missed it somehow.
-				*/
-				job_ptr->job_state &= (~JOB_CONFIGURING);
-				last_job_update = time(NULL);
-
-				rc = 1;
-			} else if (bg_record->user_uid != job_ptr->user_id)
-				rc = 0;
-			else
-				rc = READY_JOB_ERROR;	/* try again */
-		} else {
-			/* This means the block has been removed and
-			   is no longer valid.  This could happen
-			   often during an epilog on a busy system.
-			*/
-			debug2("block_ready: block %s not in bg_lists->main.",
-			       block_id);
-			rc = READY_JOB_FATAL;	/* fatal error */
-		}
-		slurm_mutex_unlock(&block_state_mutex);
-	} else
-		rc = READY_JOB_ERROR;
-	/* info("returning %d for job %u block %s %d %d", */
-	/*      rc, job_ptr->job_id, block_id, */
-	/*      READY_JOB_ERROR, READY_JOB_FATAL); */
-	xfree(block_id);
-	return rc;
-}
-
-/* Pack all relevent information about a block */
-extern void pack_block(bg_record_t *bg_record, Buf buffer,
-		       uint16_t protocol_version)
-{
-	if (protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
-		packstr(bg_record->bg_block_id, buffer);
-#ifdef HAVE_BGL
-		packstr(bg_record->blrtsimage, buffer);
-#endif
-		pack_bit_fmt(bg_record->bitmap, buffer);
-		pack16((uint16_t)bg_record->conn_type, buffer);
-		packstr(bg_record->ionodes, buffer);
-		pack_bit_fmt(bg_record->ionode_bitmap, buffer);
-		pack32((uint32_t)bg_record->job_running, buffer);
-		packstr(bg_record->linuximage, buffer);
-		packstr(bg_record->mloaderimage, buffer);
-		packstr(bg_record->nodes, buffer);
-		pack32((uint32_t)bg_record->node_cnt, buffer);
-#ifdef HAVE_BGL
-		pack16((uint16_t)bg_record->node_use, buffer);
-#endif
-		packstr(bg_record->user_name, buffer);
-		packstr(bg_record->ramdiskimage, buffer);
-		packstr(bg_record->reason, buffer);
-		pack16((uint16_t)bg_record->state, buffer);
-	} else if (protocol_version >= SLURM_2_1_PROTOCOL_VERSION) {
-		packstr(bg_record->bg_block_id, buffer);
-#ifdef HAVE_BGL
-		packstr(bg_record->blrtsimage, buffer);
-#endif
-		pack_bit_fmt(bg_record->bitmap, buffer);
-		pack16((uint16_t)bg_record->conn_type, buffer);
-		packstr(bg_record->ionodes, buffer);
-		pack_bit_fmt(bg_record->ionode_bitmap, buffer);
-		pack32((uint32_t)bg_record->job_running, buffer);
-		packstr(bg_record->linuximage, buffer);
-		packstr(bg_record->mloaderimage, buffer);
-		packstr(bg_record->nodes, buffer);
-		pack32((uint32_t)bg_record->node_cnt, buffer);
-#ifdef HAVE_BGL
-		pack16((uint16_t)bg_record->node_use, buffer);
-#endif
-		packstr(bg_record->user_name, buffer);
-		packstr(bg_record->ramdiskimage, buffer);
-		pack16((uint16_t)bg_record->state, buffer);
-	}
-}
-
-extern int update_block_list()
-{
-	int updated = 0;
-#ifdef HAVE_BG_FILES
-	int rc;
-	rm_partition_t *block_ptr = NULL;
-#ifdef HAVE_BGL
-	rm_partition_mode_t node_use;
-#endif
-	rm_partition_state_t state;
-	char *name = NULL;
-	bg_record_t *bg_record = NULL;
-	kill_job_struct_t *freeit = NULL;
-	ListIterator itr = NULL;
-
-	if (!kill_job_list)
-		kill_job_list = list_create(_destroy_kill_struct);
-
-	if (!bg_lists->main)
-		return updated;
-
-	slurm_mutex_lock(&block_state_mutex);
-	itr = list_iterator_create(bg_lists->main);
-	while ((bg_record = (bg_record_t *) list_next(itr)) != NULL) {
-		if (bg_record->magic != BLOCK_MAGIC) {
-			/* block is gone */
-			list_remove(itr);
-			continue;
-		} else if (!bg_record->bg_block_id)
-			continue;
-
-		name = bg_record->bg_block_id;
-		if ((rc = bridge_get_block_info(name, &block_ptr))
-		    != STATUS_OK) {
-			if (bg_conf->layout_mode == LAYOUT_DYNAMIC) {
-				switch(rc) {
-				case INCONSISTENT_DATA:
-					debug2("got inconsistent data when "
-					       "querying block %s", name);
-					continue;
-					break;
-				case PARTITION_NOT_FOUND:
-					debug("block %s not found, removing "
-					      "from slurm", name);
-					list_remove(itr);
-					destroy_bg_record(bg_record);
-					continue;
-					break;
-				default:
-					break;
-				}
-			}
-
-			/* If the call was busy, just skip this
-			   iteration.  It usually means something like
-			   rm_get_BG was called which can be a very
-			   long call */
-			if (rc == EBUSY) {
-				debug5("lock was busy, aborting");
-				break;
-			}
-
-			error("bridge_get_block_info(%s): %s",
-			      name,
-			      bg_err_str(rc));
-			continue;
-		}
-
-#ifdef HAVE_BGL
-		if ((rc = bridge_get_data(block_ptr, RM_PartitionMode,
-					  &node_use))
-		    != STATUS_OK) {
-			error("bridge_get_data(RM_PartitionMode): %s",
-			      bg_err_str(rc));
-			updated = -1;
-			goto next_block;
-		} else if (bg_record->node_use != node_use) {
-			debug("node_use of Block %s was %d "
-			      "and now is %d",
-			      bg_record->bg_block_id,
-			      bg_record->node_use,
-			      node_use);
-			bg_record->node_use = node_use;
-			updated = 1;
-		}
-#else
-		if ((bg_record->node_cnt < bg_conf->bp_node_cnt)
-		    || (bg_conf->bp_node_cnt == bg_conf->nodecard_node_cnt)) {
-			char *mode = NULL;
-			uint16_t conn_type = SELECT_SMALL;
-			if ((rc = bridge_get_data(block_ptr,
-						  RM_PartitionOptions,
-						  &mode))
-			    != STATUS_OK) {
-				error("bridge_get_data(RM_PartitionOptions): "
-				      "%s", bg_err_str(rc));
-				updated = -1;
-				goto next_block;
-			} else if (mode) {
-				switch(mode[0]) {
-				case 's':
-					conn_type = SELECT_HTC_S;
-					break;
-				case 'd':
-					conn_type = SELECT_HTC_D;
-					break;
-				case 'v':
-					conn_type = SELECT_HTC_V;
-					break;
-				case 'l':
-					conn_type = SELECT_HTC_L;
-					break;
-				default:
-					conn_type = SELECT_SMALL;
-					break;
-				}
-				free(mode);
-			}
-
-			if (bg_record->conn_type != conn_type) {
-				debug("mode of small Block %s was %u "
-				      "and now is %u",
-				      bg_record->bg_block_id,
-				      bg_record->conn_type,
-				      conn_type);
-				bg_record->conn_type = conn_type;
-				updated = 1;
-			}
-		}
-#endif
-		if ((rc = bridge_get_data(block_ptr, RM_PartitionState,
-					  &state))
-		    != STATUS_OK) {
-			error("bridge_get_data(RM_PartitionState): %s",
-			      bg_err_str(rc));
-			updated = -1;
-			goto next_block;
-		} else if (bg_record->job_running != BLOCK_ERROR_STATE
-			   //plugin set error
-			   && bg_record->state != state) {
-			int skipped_dealloc = 0;
-
-			debug("state of Block %s was %d and now is %d",
-			      bg_record->bg_block_id,
-			      bg_record->state,
-			      state);
-			/*
-			  check to make sure block went
-			  through freeing correctly
-			*/
-			if ((bg_record->state != RM_PARTITION_DEALLOCATING
-			     && bg_record->state != RM_PARTITION_ERROR)
-			    && state == RM_PARTITION_FREE)
-				skipped_dealloc = 1;
-			else if ((bg_record->state == RM_PARTITION_READY)
-				 && (state == RM_PARTITION_CONFIGURING)) {
-				/* This means the user did a reboot through
-				   mpirun but we missed the state
-				   change */
-				debug("Block %s skipped rebooting, "
-				      "but it really is.  "
-				      "Setting target_name back to %s",
-				      bg_record->bg_block_id,
-				      bg_record->user_name);
-				xfree(bg_record->target_name);
-				bg_record->target_name =
-					xstrdup(bg_record->user_name);
-			} else if ((bg_record->state
-				    == RM_PARTITION_DEALLOCATING)
-				   && (state == RM_PARTITION_CONFIGURING))
-				/* This is a funky state IBM says
-				   isn't a bug, but all their
-				   documentation says this doesn't
-				   happen, but IBM says oh yeah, you
-				   weren't really suppose to notice
-				   that. So we will just skip this
-				   state and act like this didn't happen. */
-				goto nochange_state;
-
-			bg_record->state = state;
-
-			if (bg_record->state == RM_PARTITION_DEALLOCATING
-			    || skipped_dealloc)
-				_block_is_deallocating(bg_record);
-#ifndef HAVE_BGL
-			else if (bg_record->state == RM_PARTITION_REBOOTING) {
-				/* This means the user did a reboot through
-				   mpirun */
-				debug("Block %s rebooting.  "
-				      "Setting target_name back to %s",
-				      bg_record->bg_block_id,
-				      bg_record->user_name);
-				xfree(bg_record->target_name);
-				bg_record->target_name =
-					xstrdup(bg_record->user_name);
-			}
-#endif
-			else if (bg_record->state == RM_PARTITION_CONFIGURING) {
-				debug("Setting bootflag for %s",
-				      bg_record->bg_block_id);
-				bg_record->boot_state = 1;
-			} else if (bg_record->state == RM_PARTITION_FREE) {
-				if (remove_from_bg_list(bg_lists->job_running,
-							bg_record)
-				    == SLURM_SUCCESS)
-					num_unused_cpus += bg_record->cpu_cnt;
-				remove_from_bg_list(bg_lists->booted,
-						    bg_record);
-			} else if (bg_record->state == RM_PARTITION_ERROR) {
-				if (bg_record->boot_state == 1)
-					error("Block %s in an error "
-					      "state while booting.",
-					      bg_record->bg_block_id);
-				else
-					error("Block %s in an error state.",
-					      bg_record->bg_block_id);
-				remove_from_bg_list(bg_lists->booted,
-						    bg_record);
-				trigger_block_error();
-			} else if (bg_record->state == RM_PARTITION_READY) {
-				if (!block_ptr_exist_in_list(bg_lists->booted,
-							     bg_record))
-					list_push(bg_lists->booted, bg_record);
-			}
-
-			updated = 1;
-		}
-	nochange_state:
-
-		/* check the boot state */
-		debug3("boot state for block %s is %d",
-		       bg_record->bg_block_id,
-		       bg_record->boot_state);
-		if (bg_record->boot_state == 1) {
-			switch(bg_record->state) {
-			case RM_PARTITION_CONFIGURING:
-				debug3("checking to make sure user %s "
-				       "is the user.",
-				       bg_record->target_name);
-
-				if (update_block_user(bg_record, 0) == 1)
-					last_bg_update = time(NULL);
-				if (bg_record->job_ptr) {
-					bg_record->job_ptr->job_state |=
-						JOB_CONFIGURING;
-					last_job_update = time(NULL);
-				}
-				break;
-			case RM_PARTITION_ERROR:
-				/* If we get an error on boot that
-				 * means it is a transparent L3 error
-				 * and should be trying to fix
-				 * itself.  If this is the case we
-				 * just hang out waiting for the state
-				 * to go to free where we will try to
-				 * boot again below.
-				 */
-				break;
-			case RM_PARTITION_FREE:
-				if (bg_record->boot_count < RETRY_BOOT_COUNT) {
-					if ((rc = boot_block(bg_record))
-					    != SLURM_SUCCESS)
-						updated = -1;
-
-					if (bg_record->magic == BLOCK_MAGIC) {
-						debug("boot count for block "
-						      "%s is %d",
-						      bg_record->bg_block_id,
-						      bg_record->boot_count);
-						bg_record->boot_count++;
-					}
-				} else {
-					char *reason = "update_block_list: "
-						"Boot fails ";
-
-					error("Couldn't boot Block %s "
-					      "for user %s",
-					      bg_record->bg_block_id,
-					      bg_record->target_name);
-
-					slurm_mutex_unlock(&block_state_mutex);
-					requeue_and_error(bg_record, reason);
-					slurm_mutex_lock(&block_state_mutex);
-
-					bg_record->boot_state = 0;
-					bg_record->boot_count = 0;
-					if (remove_from_bg_list(
-						    bg_lists->job_running,
-						    bg_record)
-					    == SLURM_SUCCESS) {
-						num_unused_cpus +=
-							bg_record->cpu_cnt;
-					}
-					remove_from_bg_list(
-						bg_lists->booted, bg_record);
-				}
-				break;
-			case RM_PARTITION_READY:
-				debug("block %s is ready.",
-				      bg_record->bg_block_id);
-				if (bg_record->job_ptr) {
-					bg_record->job_ptr->job_state &=
-						(~JOB_CONFIGURING);
-					last_job_update = time(NULL);
-				}
-				/* boot flags are reset here */
-				if (set_block_user(bg_record) == SLURM_ERROR) {
-					freeit = xmalloc(
-						sizeof(kill_job_struct_t));
-					freeit->jobid = bg_record->job_running;
-					list_push(kill_job_list, freeit);
-				}
-				break;
-			case RM_PARTITION_DEALLOCATING:
-				debug2("Block %s is in a deallocating state "
-				       "during a boot.  Doing nothing until "
-				       "free state.",
-				       bg_record->bg_block_id);
-				break;
-#ifndef HAVE_BGL
-			case RM_PARTITION_REBOOTING:
-				debug2("Block %s is rebooting.",
-				       bg_record->bg_block_id);
-				break;
-#endif
-			default:
-				debug("Hey the state of block "
-				      "%s is %d(%s) doing nothing.",
-				      bg_record->bg_block_id,
-				      bg_record->state,
-				      bg_block_state_string(bg_record->state));
-				break;
-			}
-		}
-	next_block:
-		if ((rc = bridge_free_block(block_ptr))
-		    != STATUS_OK) {
-			error("bridge_free_block(): %s",
-			      bg_err_str(rc));
-		}
-	}
-	list_iterator_destroy(itr);
-	slurm_mutex_unlock(&block_state_mutex);
-
-	/* kill all the jobs from unexpectedly freed blocks */
-	while ((freeit = list_pop(kill_job_list))) {
-		debug2("Trying to requeue job %u", freeit->jobid);
-		bg_requeue_job(freeit->jobid, 0);
-		_destroy_kill_struct(freeit);
-	}
-
-#endif
-	return updated;
-}
-
-/* This needs to have block_state_mutex locked before hand. */
-extern int update_block_list_state(List block_list)
-{
-	int updated = 0;
-#ifdef HAVE_BG_FILES
-	int rc;
-	rm_partition_t *block_ptr = NULL;
-	rm_partition_state_t state;
-	char *name = NULL;
-	bg_record_t *bg_record = NULL;
-	ListIterator itr = NULL;
-
-	itr = list_iterator_create(block_list);
-	while ((bg_record = (bg_record_t *) list_next(itr)) != NULL) {
-		if (bg_record->magic != BLOCK_MAGIC) {
-			/* block is gone */
-			list_remove(itr);
-			continue;
-		} else if (!bg_record->bg_block_id)
-			continue;
-
-		name = bg_record->bg_block_id;
-		if ((rc = bridge_get_block_info(name, &block_ptr))
-		    != STATUS_OK) {
-			if (bg_conf->layout_mode == LAYOUT_DYNAMIC) {
-				switch(rc) {
-				case INCONSISTENT_DATA:
-					debug2("got inconsistent data when "
-					       "querying block %s", name);
-					continue;
-					break;
-				case PARTITION_NOT_FOUND:
-					debug("block %s not found, removing "
-					      "from slurm", name);
-					/* Just set to free,
-					   everything will be cleaned
-					   up outside this.
-					*/
-					bg_record->state = RM_PARTITION_FREE;
-					continue;
-					break;
-				default:
-					break;
-				}
-			}
-			/* If the call was busy, just skip this
-			   iteration.  It usually means something like
-			   rm_get_BG was called which can be a very
-			   long call */
-			if (rc == EBUSY) {
-				debug5("lock was busy, aborting");
-				break;
-			}
-
-			error("bridge_get_block_info(%s): %s",
-			      name,
-			      bg_err_str(rc));
-			continue;
-		}
-
-		if ((rc = bridge_get_data(block_ptr, RM_PartitionState,
-					  &state))
-		    != STATUS_OK) {
-			error("bridge_get_data(RM_PartitionState): %s",
-			      bg_err_str(rc));
-			updated = -1;
-			goto next_block;
-		} else if (bg_record->state != state) {
-			debug("freeing state of Block %s was %d and now is %d",
-			      bg_record->bg_block_id,
-			      bg_record->state,
-			      state);
-
-			bg_record->state = state;
-			updated = 1;
-		}
-	next_block:
-		if ((rc = bridge_free_block(block_ptr))
-		    != STATUS_OK) {
-			error("bridge_free_block(): %s",
-			      bg_err_str(rc));
-		}
-	}
-	list_iterator_destroy(itr);
-#endif
-	return updated;
-}
diff --git a/src/plugins/select/bluegene/plugin/block_sys.c b/src/plugins/select/bluegene/plugin/block_sys.c
deleted file mode 100755
index 1a91deab5..000000000
--- a/src/plugins/select/bluegene/plugin/block_sys.c
+++ /dev/null
@@ -1,1281 +0,0 @@
-/*****************************************************************************\
- *  block_sys.c - component used for wiring up the blocks
- *
- *  $Id$
- *****************************************************************************
- *  Copyright (C) 2004 The Regents of the University of California.
- *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
- *  Written by Dan Phung <phung4@llnl.gov> and Danny Auble <da@llnl.gov>
- *
- *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
- *  Please also read the included file: DISCLAIMER.
- *
- *  SLURM is free software; you can redistribute it and/or modify it under
- *  the terms of the GNU General Public License as published by the Free
- *  Software Foundation; either version 2 of the License, or (at your option)
- *  any later version.
- *
- *  In addition, as a special exception, the copyright holders give permission
- *  to link the code of portions of this program with the OpenSSL library under
- *  certain conditions as described in each individual source file, and
- *  distribute linked combinations including the two. You must obey the GNU
- *  General Public License in all respects for all of the code used other than
- *  OpenSSL. If you modify file(s) with this exception, you may extend this
- *  exception to your version of the file(s), but you are not obligated to do
- *  so. If you do not wish to do so, delete this exception statement from your
- *  version.  If you delete this exception statement from all source files in
- *  the program, then also delete it here.
- *
- *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
- *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
- *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
- *  details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with SLURM; if not, write to the Free Software Foundation, Inc.,
- *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
-\*****************************************************************************/
-
-#include "bluegene.h"
-#include "src/common/uid.h"
-#include <fcntl.h>
-
-/** these are used in the dynamic partitioning algorithm */
-
-/* global system = list of free blocks */
-List bg_sys_free = NULL;
-/* global system = list of allocated blocks */
-List bg_sys_allocated = NULL;
-
-/**
- * _get_bp: get the BP at location loc
- *
- * IN - bg: pointer to preinitialized bg pointer
- * IN - bp: pointer to preinitailized rm_element_t that will
- *      hold the BP that we resolve to.
- * IN - loc: location of the desired BP
- * OUT - bp: will point to BP at location loc
- * OUT - rc: error code (0 = success)
- */
-
-static void _pre_allocate(bg_record_t *bg_record);
-static int _post_allocate(bg_record_t *bg_record);
-
-#define MAX_ADD_RETRY 2
-
-#if 0
-/* Vestigial
- * print out a list
- */
-static void _print_list(List list)
-{
-	int* stuff = NULL, i = 0;
-	ListIterator itr;
-
-	if (list == NULL)
-		return;
-
-	debug("trying to get the list iterator");
-	itr = list_iterator_create(list);
-	debug("done");
-
-	debug("printing list");
-	while ((stuff = (int*) list_next(itr))) {
-		debug("stuff %d", stuff);
-		if (stuff == NULL){
-			break;
-		}
-
-		debug("[ %d", stuff[0]);
-		for (i=1; i<SYSTEM_DIMENSIONS; i++){
-			debug(" x %d", stuff[i]);
-		}
-		debug(" ]");
-	}
-	list_iterator_destroy(itr);
-}
-#endif
-
-/**
- * initialize the BG block in the resource manager
- */
-static void _pre_allocate(bg_record_t *bg_record)
-{
-#ifdef HAVE_BG_FILES
-	int rc;
-	int send_psets=bg_conf->numpsets;
-
-#ifdef HAVE_BGL
-	if ((rc = bridge_set_data(bg_record->bg_block, RM_PartitionBlrtsImg,
-				  bg_record->blrtsimage)) != STATUS_OK)
-		error("bridge_set_data(RM_PartitionBlrtsImg): %s",
-		      bg_err_str(rc));
-
-	if ((rc = bridge_set_data(bg_record->bg_block, RM_PartitionLinuxImg,
-				  bg_record->linuximage)) != STATUS_OK)
-		error("bridge_set_data(RM_PartitionLinuxImg): %s",
-		      bg_err_str(rc));
-
-	if ((rc = bridge_set_data(bg_record->bg_block, RM_PartitionRamdiskImg,
-				  bg_record->ramdiskimage)) != STATUS_OK)
-		error("bridge_set_data(RM_PartitionRamdiskImg): %s",
-		      bg_err_str(rc));
-#else
-	struct tm my_tm;
-	struct timeval my_tv;
-
-	if ((rc = bridge_set_data(bg_record->bg_block,
-				  RM_PartitionCnloadImg,
-				  bg_record->linuximage)) != STATUS_OK)
-		error("bridge_set_data(RM_PartitionLinuxCnloadImg): %s",
-		      bg_err_str(rc));
-
-	if ((rc = bridge_set_data(bg_record->bg_block, RM_PartitionIoloadImg,
-				  bg_record->ramdiskimage)) != STATUS_OK)
-		error("bridge_set_data(RM_PartitionIoloadImg): %s",
-		      bg_err_str(rc));
-
-	gettimeofday(&my_tv, NULL);
-	localtime_r(&my_tv.tv_sec, &my_tm);
-	bg_record->bg_block_id = xstrdup_printf(
-		"RMP%2.2d%2.2s%2.2d%2.2d%2.2d%3.3ld",
-		my_tm.tm_mday, mon_abbr(my_tm.tm_mon),
-		my_tm.tm_hour, my_tm.tm_min, my_tm.tm_sec, my_tv.tv_usec/1000);
-	if ((rc = bridge_set_data(bg_record->bg_block, RM_PartitionID,
-				  bg_record->bg_block_id)) != STATUS_OK)
-		error("bridge_set_data(RM_PartitionID): %s", bg_err_str(rc));
-#endif
-	if ((rc = bridge_set_data(bg_record->bg_block, RM_PartitionMloaderImg,
-				  bg_record->mloaderimage)) != STATUS_OK)
-		error("bridge_set_data(RM_PartitionMloaderImg): %s",
-		      bg_err_str(rc));
-
-	if ((rc = bridge_set_data(bg_record->bg_block, RM_PartitionConnection,
-				  &bg_record->conn_type)) != STATUS_OK)
-		error("bridge_set_data(RM_PartitionConnection): %s",
-		      bg_err_str(rc));
-
-	/* rc = bg_conf->bp_node_cnt/bg_record->node_cnt; */
-/* 	if (rc > 1) */
-/* 		send_psets = bg_conf->numpsets/rc; */
-
-	if ((rc = bridge_set_data(bg_record->bg_block, RM_PartitionPsetsPerBP,
-				  &send_psets)) != STATUS_OK)
-		error("bridge_set_data(RM_PartitionPsetsPerBP): %s",
-		      bg_err_str(rc));
-
-	if ((rc = bridge_set_data(bg_record->bg_block, RM_PartitionUserName,
-				  bg_conf->slurm_user_name))
-	    != STATUS_OK)
-		error("bridge_set_data(RM_PartitionUserName): %s",
-		      bg_err_str(rc));
-
-#endif
-}
-
-/**
- * add the block record to the DB
- */
-static int _post_allocate(bg_record_t *bg_record)
-{
-	int rc = SLURM_SUCCESS;
-#ifdef HAVE_BG_FILES
-	int i;
-	pm_partition_id_t block_id;
-	uid_t my_uid;
-
-	/* Add partition record to the DB */
-	debug2("adding block");
-
-	for(i=0;i<MAX_ADD_RETRY; i++) {
-		if ((rc = bridge_add_block(bg_record->bg_block))
-		    != STATUS_OK) {
-			error("bridge_add_block(): %s", bg_err_str(rc));
-			rc = SLURM_ERROR;
-		} else {
-			rc = SLURM_SUCCESS;
-			break;
-		}
-		sleep(3);
-	}
-	if (rc == SLURM_ERROR) {
-		info("going to free it");
-		if ((rc = bridge_free_block(bg_record->bg_block))
-		    != STATUS_OK)
-			error("bridge_free_block(): %s", bg_err_str(rc));
-		fatal("couldn't add last block.");
-	}
-	debug2("done adding");
-
-	/* Get back the new block id */
-	if ((rc = bridge_get_data(bg_record->bg_block, RM_PartitionID,
-				  &block_id))
-	    != STATUS_OK) {
-		error("bridge_get_data(RM_PartitionID): %s", bg_err_str(rc));
-		bg_record->bg_block_id = xstrdup("UNKNOWN");
-	} else {
-		if (!block_id) {
-			error("No Block ID was returned from database");
-			return SLURM_ERROR;
-		}
-		bg_record->bg_block_id = xstrdup(block_id);
-
-		free(block_id);
-
-		xfree(bg_record->target_name);
-
-
-		bg_record->target_name =
-			xstrdup(bg_conf->slurm_user_name);
-
-		xfree(bg_record->user_name);
-		bg_record->user_name =
-			xstrdup(bg_conf->slurm_user_name);
-
-		if (uid_from_string (bg_record->user_name, &my_uid) < 0)
-			error("uid_from_string(%s): %m", bg_record->user_name);
-		else
-			bg_record->user_uid = my_uid;
-	}
-	/* We are done with the block */
-	if ((rc = bridge_free_block(bg_record->bg_block)) != STATUS_OK)
-		error("bridge_free_block(): %s", bg_err_str(rc));
-#else
-	/* We are just looking for a real number here no need for a
-	   base conversion
-	*/
-	static int block_inx = 0;
-	int i=0, temp = 0;
-	if (bg_record->bg_block_id) {
-		while (bg_record->bg_block_id[i]
-		       && (bg_record->bg_block_id[i] > '9'
-			   || bg_record->bg_block_id[i] < '0'))
-			i++;
-		if (bg_record->bg_block_id[i]) {
-			temp = atoi(bg_record->bg_block_id+i)+1;
-			if (temp > block_inx)
-				block_inx = temp;
-			debug4("first new block inx will now be %d", block_inx);
-		}
-	} else {
-		bg_record->bg_block_id = xmalloc(8);
-		snprintf(bg_record->bg_block_id, 8,
-			 "RMP%d", block_inx++);
-	}
-#endif
-
-	return rc;
-}
-
-#ifdef HAVE_BG_FILES
-
-static int _set_ionodes(bg_record_t *bg_record, int io_start, int io_nodes)
-{
-	char bitstring[BITSIZE];
-
-	if (!bg_record)
-		return SLURM_ERROR;
-
-	bg_record->ionode_bitmap = bit_alloc(bg_conf->numpsets);
-	/* Set the correct ionodes being used in this block */
-	bit_nset(bg_record->ionode_bitmap, io_start, io_start+io_nodes);
-	bit_fmt(bitstring, BITSIZE, bg_record->ionode_bitmap);
-	bg_record->ionodes = xstrdup(bitstring);
-	return SLURM_SUCCESS;
-}
-
-
-#ifdef HAVE_BGL
-extern int find_nodecard_num(rm_partition_t *block_ptr, rm_nodecard_t *ncard,
-			     int *nc_id)
-{
-	char *my_card_name = NULL;
-	char *card_name = NULL;
-	rm_bp_id_t bp_id = NULL;
-	int num = 0;
-	int i=0;
-	int rc;
-	rm_nodecard_list_t *ncard_list = NULL;
-	rm_BP_t *curr_bp = NULL;
-	rm_nodecard_t *ncard2;
-
-	xassert(block_ptr);
-	xassert(nc_id);
-
-	if ((rc = bridge_get_data(ncard,
-				  RM_NodeCardID,
-				  &my_card_name))
-	    != STATUS_OK) {
-		error("bridge_get_data(RM_NodeCardID): %s",
-		      bg_err_str(rc));
-	}
-
-	if ((rc = bridge_get_data(block_ptr,
-				  RM_PartitionFirstBP,
-				  &curr_bp))
-	    != STATUS_OK) {
-		error("bridge_get_data(RM_PartitionFirstBP): %s",
-		      bg_err_str(rc));
-	}
-	if ((rc = bridge_get_data(curr_bp, RM_BPID, &bp_id))
-	    != STATUS_OK) {
-		error("bridge_get_data(RM_BPID): %d", rc);
-		return SLURM_ERROR;
-	}
-
-	if ((rc = bridge_get_nodecards(bp_id, &ncard_list))
-	    != STATUS_OK) {
-		error("bridge_get_nodecards(%s): %d",
-		      bp_id, rc);
-		free(bp_id);
-		return SLURM_ERROR;
-	}
-	free(bp_id);
-	if ((rc = bridge_get_data(ncard_list, RM_NodeCardListSize, &num))
-	    != STATUS_OK) {
-		error("bridge_get_data(RM_NodeCardListSize): %s",
-		      bg_err_str(rc));
-		return SLURM_ERROR;
-	}
-
-	for(i=0; i<num; i++) {
-		if (i) {
-			if ((rc =
-			     bridge_get_data(ncard_list,
-					     RM_NodeCardListNext,
-					     &ncard2)) != STATUS_OK) {
-				error("bridge_get_data"
-				      "(RM_NodeCardListNext): %s",
-				      bg_err_str(rc));
-				rc = SLURM_ERROR;
-				goto cleanup;
-			}
-		} else {
-			if ((rc = bridge_get_data(ncard_list,
-						  RM_NodeCardListFirst,
-						  &ncard2)) != STATUS_OK) {
-				error("bridge_get_data"
-				      "(RM_NodeCardListFirst: %s",
-				      bg_err_str(rc));
-				rc = SLURM_ERROR;
-				goto cleanup;
-			}
-		}
-		if ((rc = bridge_get_data(ncard2,
-					  RM_NodeCardID,
-					  &card_name)) != STATUS_OK) {
-			error("bridge_get_data(RM_NodeCardID: %s",
-			      bg_err_str(rc));
-			rc = SLURM_ERROR;
-			goto cleanup;
-		}
-		if (strcmp(my_card_name, card_name)) {
-			free(card_name);
-			continue;
-		}
-		free(card_name);
-		(*nc_id) = i;
-		break;
-	}
-cleanup:
-	free(my_card_name);
-	return SLURM_SUCCESS;
-}
-#endif
-#endif
-
-extern int configure_block(bg_record_t *bg_record)
-{
-	/* new block to be added */
-#ifdef HAVE_BG_FILES
-	bridge_new_block(&bg_record->bg_block);
-#endif
-	_pre_allocate(bg_record);
-
-	if (bg_record->cpu_cnt < bg_conf->cpus_per_bp)
-		configure_small_block(bg_record);
-	else
-		configure_block_switches(bg_record);
-
-	_post_allocate(bg_record);
-	return 1;
-}
-
-#ifdef HAVE_BG_FILES
-/*
- * Download from MMCS the initial BG block information
- */
-int read_bg_blocks(List curr_block_list)
-{
-	int rc = SLURM_SUCCESS;
-
-	int bp_cnt, i, nc_cnt, io_cnt;
-	rm_element_t *bp_ptr = NULL;
-	rm_bp_id_t bpid;
-	rm_partition_t *block_ptr = NULL;
-	char node_name_tmp[255], *user_name = NULL;
-	bg_record_t *bg_record = NULL;
-	uid_t my_uid;
-
-	uint16_t *coord = NULL;
-	int block_number, block_count;
-	char *tmp_char = NULL;
-
-	rm_partition_list_t *block_list = NULL;
-	rm_partition_state_flag_t state = PARTITION_ALL_FLAG;
-	rm_nodecard_t *ncard = NULL;
-	int nc_id, io_start;
-
-	bool small = false;
-	hostlist_t hostlist;		/* expanded form of hosts */
-
-	set_bp_map();
-
-	if (bg_recover) {
-		if ((rc = bridge_get_blocks(state, &block_list))
-		    != STATUS_OK) {
-			error("2 rm_get_blocks(): %s", bg_err_str(rc));
-			return SLURM_ERROR;
-		}
-	} else {
-		if ((rc = bridge_get_blocks_info(state, &block_list))
-		    != STATUS_OK) {
-			error("2 rm_get_blocks_info(): %s", bg_err_str(rc));
-			return SLURM_ERROR;
-		}
-	}
-
-	if ((rc = bridge_get_data(block_list, RM_PartListSize, &block_count))
-	    != STATUS_OK) {
-		error("bridge_get_data(RM_PartListSize): %s", bg_err_str(rc));
-		block_count = 0;
-	}
-
-	info("querying the system for existing blocks");
-	for(block_number=0; block_number<block_count; block_number++) {
-		if (block_number) {
-			if ((rc = bridge_get_data(block_list,
-						  RM_PartListNextPart,
-						  &block_ptr)) != STATUS_OK) {
-				error("bridge_get_data"
-				      "(RM_PartListNextPart): %s",
-				      bg_err_str(rc));
-				break;
-			}
-		} else {
-			if ((rc = bridge_get_data(block_list,
-						  RM_PartListFirstPart,
-						  &block_ptr)) != STATUS_OK) {
-				error("bridge_get_data"
-				      "(RM_PartListFirstPart): %s",
-				      bg_err_str(rc));
-				break;
-			}
-		}
-
-		if ((rc = bridge_get_data(block_ptr, RM_PartitionID,
-					  &tmp_char))
-		    != STATUS_OK) {
-			error("bridge_get_data(RM_PartitionID): %s",
-			      bg_err_str(rc));
-			continue;
-		}
-
-		if (!tmp_char) {
-			error("No Block ID was returned from database");
-			continue;
-		}
-
-		if (strncmp("RMP", tmp_char, 3)) {
-			free(tmp_char);
-			continue;
-		}
-
-		/* New BG Block record */
-
-		bg_record = xmalloc(sizeof(bg_record_t));
-		bg_record->magic = BLOCK_MAGIC;
-		list_push(curr_block_list, bg_record);
-
-		bg_record->bg_block_id = xstrdup(tmp_char);
-		free(tmp_char);
-
-		bg_record->state = NO_VAL;
-#ifndef HAVE_BGL
-		if ((rc = bridge_get_data(block_ptr,
-					  RM_PartitionSize,
-					  &bp_cnt))
-		    != STATUS_OK) {
-			error("bridge_get_data(RM_PartitionSize): %s",
-			      bg_err_str(rc));
-			continue;
-		}
-
-		if (bp_cnt==0)
-			continue;
-
-		bg_record->node_cnt = bp_cnt;
-		bg_record->cpu_cnt = bg_conf->cpu_ratio * bg_record->node_cnt;
-#endif
-		bg_record->job_running = NO_JOB_RUNNING;
-
-		if ((rc = bridge_get_data(block_ptr,
-					  RM_PartitionBPNum,
-					  &bp_cnt))
-		    != STATUS_OK) {
-			error("bridge_get_data(RM_BPNum): %s",
-			      bg_err_str(rc));
-			continue;
-		}
-
-		if (bp_cnt==0)
-			continue;
-		bg_record->bp_count = bp_cnt;
-
-		debug3("has %d BPs", bg_record->bp_count);
-
-		if ((rc = bridge_get_data(block_ptr, RM_PartitionSwitchNum,
-					  &bg_record->switch_count))
-		    != STATUS_OK) {
-			error("bridge_get_data(RM_PartitionSwitchNum): %s",
-			      bg_err_str(rc));
-			continue;
-		}
-
-		if ((rc = bridge_get_data(block_ptr, RM_PartitionSmall,
-					  &small))
-		    != STATUS_OK) {
-			error("bridge_get_data(RM_PartitionSmall): %s",
-			      bg_err_str(rc));
-			continue;
-		}
-
-		if (small) {
-			if ((rc = bridge_get_data(block_ptr,
-						  RM_PartitionOptions,
-						  &tmp_char))
-			    != STATUS_OK) {
-				error("bridge_get_data(RM_PartitionOptions): "
-				      "%s", bg_err_str(rc));
-				continue;
-			} else if (tmp_char) {
-				switch(tmp_char[0]) {
-				case 's':
-					bg_record->conn_type = SELECT_HTC_S;
-					break;
-				case 'd':
-					bg_record->conn_type = SELECT_HTC_D;
-					break;
-				case 'v':
-					bg_record->conn_type = SELECT_HTC_V;
-					break;
-				case 'l':
-					bg_record->conn_type = SELECT_HTC_L;
-					break;
-				default:
-					bg_record->conn_type = SELECT_SMALL;
-					break;
-				}
-
-				free(tmp_char);
-			} else
-				bg_record->conn_type = SELECT_SMALL;
-
-			if ((rc = bridge_get_data(block_ptr,
-						  RM_PartitionFirstNodeCard,
-						  &ncard))
-			    != STATUS_OK) {
-				error("bridge_get_data("
-				      "RM_PartitionFirstNodeCard): %s",
-				      bg_err_str(rc));
-				continue;
-			}
-
-			if ((rc = bridge_get_data(block_ptr,
-						  RM_PartitionNodeCardNum,
-						  &nc_cnt))
-			    != STATUS_OK) {
-				error("bridge_get_data("
-				      "RM_PartitionNodeCardNum): %s",
-				      bg_err_str(rc));
-				continue;
-			}
-#ifdef HAVE_BGL
-			/* Translate nodecard count to ionode count */
-			if ((io_cnt = nc_cnt * bg_conf->io_ratio))
-				io_cnt--;
-
-			nc_id = 0;
-			if (nc_cnt == 1)
-				find_nodecard_num(block_ptr, ncard, &nc_id);
-
-			bg_record->node_cnt =
-				nc_cnt * bg_conf->nodecard_node_cnt;
-			bg_record->cpu_cnt =
-				bg_conf->cpu_ratio * bg_record->node_cnt;
-
-			if ((rc = bridge_get_data(ncard,
-						  RM_NodeCardQuarter,
-						  &io_start)) != STATUS_OK) {
-				error("bridge_get_data(CardQuarter): %d",rc);
-				continue;
-			}
-			io_start *= bg_conf->quarter_ionode_cnt;
-			io_start += bg_conf->nodecard_ionode_cnt * (nc_id%4);
-#else
-			/* Translate nodecard count to ionode count */
-			if ((io_cnt = nc_cnt * bg_conf->io_ratio))
-				io_cnt--;
-
-			if ((rc = bridge_get_data(ncard,
-						  RM_NodeCardID,
-						  &tmp_char)) != STATUS_OK) {
-				error("bridge_get_data(RM_NodeCardID): %d",rc);
-				continue;
-			}
-
-			if (!tmp_char)
-				continue;
-
-			/* From the first nodecard id we can figure
-			   out where to start from with the alloc of ionodes.
-			*/
-			nc_id = atoi((char*)tmp_char+1);
-			free(tmp_char);
-			io_start = nc_id * bg_conf->io_ratio;
-			if (bg_record->node_cnt < bg_conf->nodecard_node_cnt) {
-				rm_ionode_t *ionode;
-
-				/* figure out the ionode we are using */
-				if ((rc = bridge_get_data(
-					     ncard,
-					     RM_NodeCardFirstIONode,
-					     &ionode)) != STATUS_OK) {
-					error("bridge_get_data("
-					      "RM_NodeCardFirstIONode): %d",
-					      rc);
-					continue;
-				}
-				if ((rc = bridge_get_data(ionode,
-							  RM_IONodeID,
-							  &tmp_char))
-				    != STATUS_OK) {
-					error("bridge_get_data("
-					      "RM_NodeCardIONodeNum): %s",
-					      bg_err_str(rc));
-					rc = SLURM_ERROR;
-					continue;
-				}
-
-				if (!tmp_char)
-					continue;
-				/* just add the ionode num to the
-				 * io_start */
-				io_start += atoi((char*)tmp_char+1);
-				free(tmp_char);
-				/* make sure i is 0 since we are only using
-				 * 1 ionode */
-				io_cnt = 0;
-			}
-#endif
-			if (_set_ionodes(bg_record, io_start, io_cnt)
-			    == SLURM_ERROR)
-				error("couldn't create ionode_bitmap "
-				      "for ionodes %d to %d",
-				      io_start, io_start+io_cnt);
-			debug3("%s uses ionodes %s",
-			       bg_record->bg_block_id,
-			       bg_record->ionodes);
-		} else {
-#ifdef HAVE_BGL
-			bg_record->cpu_cnt = bg_conf->cpus_per_bp
-				* bg_record->bp_count;
-			bg_record->node_cnt =  bg_conf->bp_node_cnt
-				* bg_record->bp_count;
-#endif
-			if ((rc = bridge_get_data(block_ptr,
-						  RM_PartitionConnection,
-						  &bg_record->conn_type))
-			    != STATUS_OK) {
-				error("bridge_get_data"
-				      "(RM_PartitionConnection): %s",
-				      bg_err_str(rc));
-				continue;
-			}
-			/* Set the bitmap blank here if it is a full
-			   node we don't want anything set we also
-			   don't want the bg_record->ionodes set.
-			*/
-			bg_record->ionode_bitmap = bit_alloc(bg_conf->numpsets);
-		}
-
-		bg_record->bg_block_list = get_and_set_block_wiring(
-			bg_record->bg_block_id, block_ptr);
-		if (!bg_record->bg_block_list)
-			fatal("couldn't get the wiring info for block %s",
-			      bg_record->bg_block_id);
-
-		hostlist = hostlist_create(NULL);
-
-		for (i=0; i<bp_cnt; i++) {
-			if (i) {
-				if ((rc = bridge_get_data(block_ptr,
-							  RM_PartitionNextBP,
-							  &bp_ptr))
-				    != STATUS_OK) {
-					error("bridge_get_data(RM_NextBP): %s",
-					      bg_err_str(rc));
-					rc = SLURM_ERROR;
-					break;
-				}
-			} else {
-				if ((rc = bridge_get_data(block_ptr,
-							  RM_PartitionFirstBP,
-							  &bp_ptr))
-				    != STATUS_OK) {
-					error("bridge_get_data"
-					      "(RM_FirstBP): %s",
-					      bg_err_str(rc));
-					rc = SLURM_ERROR;
-					break;
-				}
-			}
-			if ((rc = bridge_get_data(bp_ptr, RM_BPID, &bpid))
-			    != STATUS_OK) {
-				error("bridge_get_data(RM_BPID): %s",
-				      bg_err_str(rc));
-				rc = SLURM_ERROR;
-				break;
-			}
-
-			if (!bpid) {
-				error("No BP ID was returned from database");
-				continue;
-			}
-
-			coord = find_bp_loc(bpid);
-
-			if (!coord) {
-				fatal("Could not find coordinates for "
-				      "BP ID %s", (char *) bpid);
-			}
-			free(bpid);
-
-
-			snprintf(node_name_tmp,
-				 sizeof(node_name_tmp),
-				 "%s%c%c%c",
-				 bg_conf->slurm_node_prefix,
-				 alpha_num[coord[X]], alpha_num[coord[Y]],
-				 alpha_num[coord[Z]]);
-
-
-			hostlist_push(hostlist, node_name_tmp);
-		}
-		bg_record->nodes = hostlist_ranged_string_xmalloc(hostlist);
-		hostlist_destroy(hostlist);
-		debug3("got nodes of %s", bg_record->nodes);
-		// need to get the 000x000 range for nodes
-		// also need to get coords
-
-#ifdef HAVE_BGL
-		if ((rc = bridge_get_data(block_ptr, RM_PartitionMode,
-					  &bg_record->node_use))
-		    != STATUS_OK) {
-			error("bridge_get_data(RM_PartitionMode): %s",
-			      bg_err_str(rc));
-		}
-#endif
-		if ((rc = bridge_get_data(block_ptr, RM_PartitionState,
-					  &bg_record->state)) != STATUS_OK) {
-			error("bridge_get_data(RM_PartitionState): %s",
-			      bg_err_str(rc));
-			continue;
-		} else if (bg_record->state == RM_PARTITION_CONFIGURING)
-			bg_record->boot_state = 1;
-
-		debug3("Block %s is in state %d",
-		       bg_record->bg_block_id,
-		       bg_record->state);
-
-		process_nodes(bg_record, false);
-
-		/* We can stop processing information now since we
-		   don't need to rest of the information to decide if
-		   this is the correct block. */
-		if (bg_conf->layout_mode == LAYOUT_DYNAMIC) {
-			bg_record_t *tmp_record = xmalloc(sizeof(bg_record_t));
-			copy_bg_record(bg_record, tmp_record);
-			list_push(bg_lists->main, tmp_record);
-		}
-
-		if ((rc = bridge_get_data(block_ptr, RM_PartitionUsersNum,
-					  &bp_cnt)) != STATUS_OK) {
-			error("bridge_get_data(RM_PartitionUsersNum): %s",
-			      bg_err_str(rc));
-			continue;
-		} else {
-			if (bp_cnt==0) {
-
-				bg_record->user_name =
-					xstrdup(bg_conf->slurm_user_name);
-				bg_record->target_name =
-					xstrdup(bg_conf->slurm_user_name);
-
-			} else {
-				user_name = NULL;
-				if ((rc = bridge_get_data(
-					     block_ptr,
-					     RM_PartitionFirstUser,
-					     &user_name))
-				    != STATUS_OK) {
-					error("bridge_get_data"
-					      "(RM_PartitionFirstUser): %s",
-					      bg_err_str(rc));
-					continue;
-				}
-				if (!user_name) {
-					error("No user name was "
-					      "returned from database");
-					continue;
-				}
-				bg_record->user_name = xstrdup(user_name);
-
-				if (!bg_record->boot_state)
-					bg_record->target_name =
-						xstrdup(bg_conf->
-							slurm_user_name);
-				else
-					bg_record->target_name =
-						xstrdup(user_name);
-
-				free(user_name);
-
-			}
-			if (uid_from_string (bg_record->user_name, &my_uid)<0){
-				error("uid_from_string(%s): %m",
-				      bg_record->user_name);
-			} else {
-				bg_record->user_uid = my_uid;
-			}
-		}
-
-#ifdef HAVE_BGL
-		/* get the images of the block */
-		if ((rc = bridge_get_data(block_ptr,
-					  RM_PartitionBlrtsImg,
-					  &user_name))
-		    != STATUS_OK) {
-			error("bridge_get_data(RM_PartitionBlrtsImg): %s",
-			      bg_err_str(rc));
-			continue;
-		}
-		if (!user_name) {
-			error("No BlrtsImg was returned from database");
-			continue;
-		}
-		bg_record->blrtsimage = xstrdup(user_name);
-
-		if ((rc = bridge_get_data(block_ptr,
-					  RM_PartitionLinuxImg,
-					  &user_name))
-		    != STATUS_OK) {
-			error("bridge_get_data(RM_PartitionLinuxImg): %s",
-			      bg_err_str(rc));
-			continue;
-		}
-		if (!user_name) {
-			error("No LinuxImg was returned from database");
-			continue;
-		}
-		bg_record->linuximage = xstrdup(user_name);
-
-		if ((rc = bridge_get_data(block_ptr,
-					  RM_PartitionRamdiskImg,
-					  &user_name))
-		    != STATUS_OK) {
-			error("bridge_get_data(RM_PartitionRamdiskImg): %s",
-			      bg_err_str(rc));
-			continue;
-		}
-		if (!user_name) {
-			error("No RamdiskImg was returned from database");
-			continue;
-		}
-		bg_record->ramdiskimage = xstrdup(user_name);
-
-#else
-		if ((rc = bridge_get_data(block_ptr,
-					  RM_PartitionCnloadImg,
-					  &user_name))
-		    != STATUS_OK) {
-			error("bridge_get_data(RM_PartitionCnloadImg): %s",
-			      bg_err_str(rc));
-			continue;
-		}
-		if (!user_name) {
-			error("No CnloadImg was returned from database");
-			continue;
-		}
-		bg_record->linuximage = xstrdup(user_name);
-
-		if ((rc = bridge_get_data(block_ptr,
-					  RM_PartitionIoloadImg,
-					  &user_name))
-		    != STATUS_OK) {
-			error("bridge_get_data(RM_PartitionIoloadImg): %s",
-			      bg_err_str(rc));
-			continue;
-		}
-		if (!user_name) {
-			error("No IoloadImg was returned from database");
-			continue;
-		}
-		bg_record->ramdiskimage = xstrdup(user_name);
-
-#endif
-		if ((rc = bridge_get_data(block_ptr,
-					  RM_PartitionMloaderImg,
-					  &user_name))
-		    != STATUS_OK) {
-			error("bridge_get_data(RM_PartitionMloaderImg): %s",
-			      bg_err_str(rc));
-			continue;
-		}
-		if (!user_name) {
-			error("No MloaderImg was returned from database");
-			continue;
-		}
-		bg_record->mloaderimage = xstrdup(user_name);
-	}
-	bridge_free_block_list(block_list);
-
-	return rc;
-}
-
-#endif
-
-extern int load_state_file(List curr_block_list, char *dir_name)
-{
-	int state_fd, i, j=0;
-	char *state_file = NULL;
-	Buf buffer = NULL;
-	char *data = NULL;
-	int data_size = 0;
-	block_info_msg_t *block_ptr = NULL;
-	bg_record_t *bg_record = NULL;
-	block_info_t *block_info = NULL;
-	bitstr_t *node_bitmap = NULL, *ionode_bitmap = NULL;
-	uint16_t geo[SYSTEM_DIMENSIONS];
-	char temp[256];
-	List results = NULL;
-	int data_allocated, data_read = 0;
-	char *ver_str = NULL;
-	uint32_t ver_str_len;
-	int blocks = 0;
-	uid_t my_uid;
-	int ionodes = 0;
-	char *name = NULL;
-	struct part_record *part_ptr = NULL;
-	char *non_usable_nodes = NULL;
-	bitstr_t *bitmap = NULL;
-	ListIterator itr = NULL;
-	uint16_t protocol_version = (uint16_t)NO_VAL;
-
-	if (!dir_name) {
-		debug2("Starting bluegene with clean slate");
-		return SLURM_SUCCESS;
-	}
-
-	xassert(curr_block_list);
-
-	state_file = xstrdup(dir_name);
-	xstrcat(state_file, "/block_state");
-	state_fd = open(state_file, O_RDONLY);
-	if (state_fd < 0) {
-		error("No block state file (%s) to recover", state_file);
-		xfree(state_file);
-		return SLURM_SUCCESS;
-	} else {
-		data_allocated = BUF_SIZE;
-		data = xmalloc(data_allocated);
-		while (1) {
-			data_read = read(state_fd, &data[data_size],
-					 BUF_SIZE);
-			if (data_read < 0) {
-				if (errno == EINTR)
-					continue;
-				else {
-					error("Read error on %s: %m",
-					      state_file);
-					break;
-				}
-			} else if (data_read == 0)	/* eof */
-				break;
-			data_size      += data_read;
-			data_allocated += data_read;
-			xrealloc(data, data_allocated);
-		}
-		close(state_fd);
-	}
-	xfree(state_file);
-
-	buffer = create_buf(data, data_size);
-	safe_unpackstr_xmalloc(&ver_str, &ver_str_len, buffer);
-	debug3("Version string in block_state header is %s", ver_str);
-	if (ver_str) {
-		if (!strcmp(ver_str, BLOCK_STATE_VERSION)) {
-			protocol_version = SLURM_PROTOCOL_VERSION;
-		} else if (!strcmp(ver_str, BLOCK_2_1_STATE_VERSION)) {
-			protocol_version = SLURM_2_1_PROTOCOL_VERSION;
-		}
-	}
-
-	if (protocol_version == (uint16_t)NO_VAL) {
-		error("***********************************************");
-		error("Can not recover block state, "
-		      "data version incompatible");
-		error("***********************************************");
-		xfree(ver_str);
-		free_buf(buffer);
-		return EFAULT;
-	}
-	xfree(ver_str);
-	if (slurm_unpack_block_info_msg(&block_ptr, buffer, protocol_version)
-	    == SLURM_ERROR) {
-		error("select_p_state_restore: problem unpacking block_info");
-		goto unpack_error;
-	}
-
-#ifdef HAVE_BG_FILES
-	for (i=0; i<block_ptr->record_count; i++) {
-		block_info = &(block_ptr->block_array[i]);
-
-		/* we only care about the states we need here
-		 * everthing else should have been set up already */
-		if (block_info->state == RM_PARTITION_ERROR) {
-			slurm_mutex_lock(&block_state_mutex);
-			if ((bg_record = find_bg_record_in_list(
-				     curr_block_list,
-				     block_info->bg_block_id)))
-				/* put_block_in_error_state should be
-				   called after the bg_lists->main has been
-				   made.  We can't call it here since
-				   this record isn't the record kept
-				   around in bg_lists->main.
-				*/
-				bg_record->state = block_info->state;
-			slurm_mutex_unlock(&block_state_mutex);
-		}
-	}
-
-	slurm_free_block_info_msg(block_ptr);
-	free_buf(buffer);
-	return SLURM_SUCCESS;
-#endif
-
-	slurm_mutex_lock(&block_state_mutex);
-	reset_ba_system(true);
-
-	/* Locks are already in place to protect part_list here */
-	bitmap = bit_alloc(node_record_count);
-	itr = list_iterator_create(part_list);
-	while ((part_ptr = list_next(itr))) {
-		/* we only want to use bps that are in partitions */
-		if (!part_ptr->node_bitmap) {
-			debug4("Partition %s doesn't have any nodes in it.",
-			       part_ptr->name);
-			continue;
-		}
-		bit_or(bitmap, part_ptr->node_bitmap);
-	}
-	list_iterator_destroy(itr);
-
-	bit_not(bitmap);
-	if (bit_ffs(bitmap) != -1) {
-		fatal("We don't have any nodes in any partitions.  "
-		      "Can't create blocks.  "
-		      "Please check your slurm.conf.");
-	}
-
-	non_usable_nodes = bitmap2node_name(bitmap);
-	FREE_NULL_BITMAP(bitmap);
-
-	node_bitmap = bit_alloc(node_record_count);
-	ionode_bitmap = bit_alloc(bg_conf->numpsets);
-	for (i=0; i<block_ptr->record_count; i++) {
-		block_info = &(block_ptr->block_array[i]);
-
-		bit_nclear(node_bitmap, 0, bit_size(node_bitmap) - 1);
-		bit_nclear(ionode_bitmap, 0, bit_size(ionode_bitmap) - 1);
-
-		j = 0;
-		while (block_info->bp_inx[j] >= 0) {
-			if (block_info->bp_inx[j+1]
-			    >= node_record_count) {
-				fatal("Job state recovered incompatible with "
-				      "bluegene.conf. bp=%u state=%d",
-				      node_record_count,
-				      block_info->bp_inx[j+1]);
-			}
-			bit_nset(node_bitmap,
-				 block_info->bp_inx[j],
-				 block_info->bp_inx[j+1]);
-			j += 2;
-		}
-
-		j = 0;
-		while (block_info->ionode_inx[j] >= 0) {
-			if (block_info->ionode_inx[j+1]
-			    >= bg_conf->numpsets) {
-				fatal("Job state recovered incompatible with "
-				      "bluegene.conf. ionodes=%u state=%d",
-				      bg_conf->numpsets,
-				      block_info->ionode_inx[j+1]);
-			}
-			bit_nset(ionode_bitmap,
-				 block_info->ionode_inx[j],
-				 block_info->ionode_inx[j+1]);
-			j += 2;
-		}
-
-		bg_record = xmalloc(sizeof(bg_record_t));
-		bg_record->magic = BLOCK_MAGIC;
-		bg_record->bg_block_id =
-			xstrdup(block_info->bg_block_id);
-		bg_record->nodes =
-			xstrdup(block_info->nodes);
-		bg_record->ionodes =
-			xstrdup(block_info->ionodes);
-		bg_record->ionode_bitmap = bit_copy(ionode_bitmap);
-		/* put_block_in_error_state should be
-		   called after the bg_lists->main has been
-		   made.  We can't call it here since
-		   this record isn't the record kept
-		   around in bg_lists->main.
-		*/
-		bg_record->state = block_info->state;
-		bg_record->job_running = NO_JOB_RUNNING;
-
-		bg_record->bp_count = bit_set_count(node_bitmap);
-		bg_record->node_cnt = block_info->node_cnt;
-		if (bg_conf->bp_node_cnt > bg_record->node_cnt) {
-			ionodes = bg_conf->bp_node_cnt
-				/ bg_record->node_cnt;
-			bg_record->cpu_cnt = bg_conf->cpus_per_bp / ionodes;
-		} else {
-			bg_record->cpu_cnt = bg_conf->cpus_per_bp
-				* bg_record->bp_count;
-		}
-#ifdef HAVE_BGL
-		bg_record->node_use = block_info->node_use;
-#endif
-		bg_record->conn_type = block_info->conn_type;
-
-		process_nodes(bg_record, true);
-
-		bg_record->target_name = xstrdup(bg_conf->slurm_user_name);
-		bg_record->user_name = xstrdup(bg_conf->slurm_user_name);
-
-		if (uid_from_string (bg_record->user_name, &my_uid) < 0) {
-			error("uid_from_strin(%s): %m",
-			      bg_record->user_name);
-		} else {
-			bg_record->user_uid = my_uid;
-		}
-
-#ifdef HAVE_BGL
-		bg_record->blrtsimage =
-			xstrdup(block_info->blrtsimage);
-#endif
-		bg_record->linuximage =
-			xstrdup(block_info->linuximage);
-		bg_record->mloaderimage =
-			xstrdup(block_info->mloaderimage);
-		bg_record->ramdiskimage =
-			xstrdup(block_info->ramdiskimage);
-
-		for(j=0; j<SYSTEM_DIMENSIONS; j++)
-			geo[j] = bg_record->geo[j];
-
-		if ((bg_conf->layout_mode == LAYOUT_OVERLAP)
-		    || bg_record->full_block) {
-			reset_ba_system(false);
-		}
-
-		removable_set_bps(non_usable_nodes);
-		/* we want the bps that aren't
-		 * in this record to mark them as used
-		 */
-		if (set_all_bps_except(bg_record->nodes)
-		    != SLURM_SUCCESS)
-			fatal("something happened in "
-			      "the load of %s.  "
-			      "Did you use smap to "
-			      "make the "
-			      "bluegene.conf file?",
-			      bg_record->bg_block_id);
-		results = list_create(NULL);
-		name = set_bg_block(results,
-				    bg_record->start,
-				    geo,
-				    bg_record->conn_type);
-		reset_all_removed_bps();
-
-		if (!name) {
-			error("I was unable to "
-			      "make the "
-			      "requested block.");
-			list_destroy(results);
-			destroy_bg_record(bg_record);
-			continue;
-		}
-
-
-		snprintf(temp, sizeof(temp), "%s%s",
-			 bg_conf->slurm_node_prefix,
-			 name);
-
-		xfree(name);
-		if (strcmp(temp, bg_record->nodes)) {
-			fatal("bad wiring in preserved state "
-			      "(found %s, but allocated %s) "
-			      "YOU MUST COLDSTART",
-			      bg_record->nodes, temp);
-		}
-		if (bg_record->bg_block_list)
-			list_destroy(bg_record->bg_block_list);
-		bg_record->bg_block_list =
-			list_create(destroy_ba_node);
-		copy_node_path(results, &bg_record->bg_block_list);
-		list_destroy(results);
-
-		configure_block(bg_record);
-		blocks++;
-		list_push(curr_block_list, bg_record);
-		if (bg_conf->layout_mode == LAYOUT_DYNAMIC) {
-			bg_record_t *tmp_record = xmalloc(sizeof(bg_record_t));
-			copy_bg_record(bg_record, tmp_record);
-			list_push(bg_lists->main, tmp_record);
-		}
-	}
-
-	xfree(non_usable_nodes);
-	FREE_NULL_BITMAP(ionode_bitmap);
-	FREE_NULL_BITMAP(node_bitmap);
-
-	sort_bg_record_inc_size(curr_block_list);
-	slurm_mutex_unlock(&block_state_mutex);
-
-	info("Recovered %d blocks", blocks);
-	slurm_free_block_info_msg(block_ptr);
-	free_buf(buffer);
-
-	return SLURM_SUCCESS;
-
-unpack_error:
-	error("Incomplete block data checkpoint file");
-	free_buf(buffer);
-	return SLURM_FAILURE;
-}
diff --git a/src/plugins/select/bluegene/plugin/bluegene.c b/src/plugins/select/bluegene/plugin/bluegene.c
deleted file mode 100644
index 7dbd8100a..000000000
--- a/src/plugins/select/bluegene/plugin/bluegene.c
+++ /dev/null
@@ -1,1719 +0,0 @@
-/*****************************************************************************\
- *  bluegene.c - blue gene node configuration processing module.
- *
- *  $Id$
- *****************************************************************************
- *  Copyright (C) 2004 The Regents of the University of California.
- *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
- *  Written by Danny Auble <auble1@llnl.gov> et. al.
- *
- *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
- *  Please also read the included file: DISCLAIMER.
- *
- *  SLURM is free software; you can redistribute it and/or modify it under
- *  the terms of the GNU General Public License as published by the Free
- *  Software Foundation; either version 2 of the License, or (at your option)
- *  any later version.
- *
- *  In addition, as a special exception, the copyright holders give permission
- *  to link the code of portions of this program with the OpenSSL library under
- *  certain conditions as described in each individual source file, and
- *  distribute linked combinations including the two. You must obey the GNU
- *  General Public License in all respects for all of the code used other than
- *  OpenSSL. If you modify file(s) with this exception, you may extend this
- *  exception to your version of the file(s), but you are not obligated to do
- *  so. If you do not wish to do so, delete this exception statement from your
- *  version.  If you delete this exception statement from all source files in
- *  the program, then also delete it here.
- *
- *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
- *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
- *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
- *  details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with SLURM; if not, write to the Free Software Foundation, Inc.,
- *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
-\*****************************************************************************/
-
-#include "bluegene.h"
-#include "defined_block.h"
-#include "src/slurmctld/locks.h"
-
-#define MMCS_POLL_TIME 30	/* seconds between poll of MMCS for
-				 * down switches and nodes */
-#define BG_POLL_TIME 1	        /* seconds between poll of state
-				 * change in bg blocks */
-#define MAX_FREE_RETRIES           200 /* max number of
-					* FREE_SLEEP_INTERVALS to wait
-					* before putting a
-					* deallocating block into
-					* error state.
-					*/
-#define FREE_SLEEP_INTERVAL        3 /* When freeing a block wait this
-				      * long before looking at state
-				      * again.
-				      */
-
-#define _DEBUG 0
-
-typedef struct {
-	List track_list;
-	uint32_t job_id;
-	bool destroy;
-} bg_free_block_list_t;
-
-/* Global variables */
-
-bg_config_t *bg_conf = NULL;
-bg_lists_t *bg_lists = NULL;
-bool agent_fini = false;
-time_t last_bg_update;
-pthread_mutex_t block_state_mutex = PTHREAD_MUTEX_INITIALIZER;
-int blocks_are_created = 0;
-int num_unused_cpus = 0;
-
-static void _destroy_bg_config(bg_config_t *bg_conf);
-static void _destroy_bg_lists(bg_lists_t *bg_lists);
-
-static void _set_bg_lists();
-static int  _validate_config_nodes(List curr_block_list,
-				   List found_block_list, char *dir);
-static int _delete_old_blocks(List curr_block_list,
-			      List found_block_list);
-static int _post_block_free(bg_record_t *bg_record, bool restore);
-static void *_track_freeing_blocks(void *args);
-static char *_get_bg_conf(void);
-static int  _reopen_bridge_log(void);
-static void _destroy_bitmap(void *object);
-
-
-/* Initialize all plugin variables */
-extern int init_bg(void)
-{
-	_set_bg_lists();
-
-	if (!bg_conf)
-		bg_conf = xmalloc(sizeof(bg_config_t));
-
-	xfree(bg_conf->slurm_user_name);
-	xfree(bg_conf->slurm_node_prefix);
-	slurm_conf_lock();
-	xassert(slurmctld_conf.slurm_user_name);
-	xassert(slurmctld_conf.node_prefix);
-	bg_conf->slurm_user_name = xstrdup(slurmctld_conf.slurm_user_name);
-	bg_conf->slurm_node_prefix = xstrdup(slurmctld_conf.node_prefix);
-	bg_conf->slurm_debug_flags = slurmctld_conf.debug_flags;
-	slurm_conf_unlock();
-
-#ifdef HAVE_BGL
-	if (bg_conf->blrts_list)
-		list_destroy(bg_conf->blrts_list);
-	bg_conf->blrts_list = list_create(destroy_image);
-#endif
-	if (bg_conf->linux_list)
-		list_destroy(bg_conf->linux_list);
-	bg_conf->linux_list = list_create(destroy_image);
-	if (bg_conf->mloader_list)
-		list_destroy(bg_conf->mloader_list);
-	bg_conf->mloader_list = list_create(destroy_image);
-	if (bg_conf->ramdisk_list)
-		list_destroy(bg_conf->ramdisk_list);
-	bg_conf->ramdisk_list = list_create(destroy_image);
-
-	ba_init(NULL, 1);
-
-	verbose("BlueGene plugin loaded successfully");
-
-	return SLURM_SUCCESS;
-}
-
-/* Purge all plugin variables */
-extern void fini_bg(void)
-{
-	if (!agent_fini) {
-		error("The agent hasn't been finied yet!");
-		agent_fini = true;
-	}
-
-	_destroy_bg_config(bg_conf);
-	_destroy_bg_lists(bg_lists);
-
-	ba_fini();
-}
-
-/*
- * block_state_mutex should be locked before calling this function
- */
-extern bool blocks_overlap(bg_record_t *rec_a, bg_record_t *rec_b)
-{
-	if ((rec_a->bp_count > 1) && (rec_b->bp_count > 1)) {
-		/* Test for conflicting passthroughs */
-		reset_ba_system(false);
-		check_and_set_node_list(rec_a->bg_block_list);
-		if (check_and_set_node_list(rec_b->bg_block_list)
-		    == SLURM_ERROR)
-			return true;
-	}
-
-	if (rec_a->bitmap && rec_b->bitmap
-	    && !bit_overlap(rec_a->bitmap, rec_b->bitmap))
-		return false;
-
-	if ((rec_a->node_cnt >= bg_conf->bp_node_cnt)
-	    || (rec_b->node_cnt >= bg_conf->bp_node_cnt))
-		return true;
-
-	if (rec_a->ionode_bitmap && rec_b->ionode_bitmap
-	    && !bit_overlap(rec_a->ionode_bitmap, rec_b->ionode_bitmap))
-		return false;
-
-	return true;
-}
-
-/* block_state_mutex must be unlocked before calling this. */
-extern void bg_requeue_job(uint32_t job_id, bool wait_for_start)
-{
-	int rc;
-	slurmctld_lock_t job_write_lock = {
-		NO_LOCK, WRITE_LOCK, WRITE_LOCK, NO_LOCK };
-
-	/* Wait for the slurmd to begin the batch script, slurm_fail_job()
-	   is a no-op if issued prior to the script initiation do
-	   clean up just incase the fail job isn't ran. */
-	if (wait_for_start)
-		sleep(2);
-
-	lock_slurmctld(job_write_lock);
-	if ((rc = job_requeue(0, job_id, -1, (uint16_t)NO_VAL))) {
-		error("Couldn't requeue job %u, failing it: %s",
-		      job_id, slurm_strerror(rc));
-		job_fail(job_id);
-	}
-	unlock_slurmctld(job_write_lock);
-}
-
-extern int remove_all_users(char *bg_block_id, char *user_name)
-{
-	int returnc = REMOVE_USER_NONE;
-#ifdef HAVE_BG_FILES
-	char *user;
-	rm_partition_t *block_ptr = NULL;
-	int rc, i, user_count;
-
-	/* We can't use bridge_get_block_info here because users are
-	   filled in there.  This function is very slow but necessary
-	   here to get the correct block count and the users. */
-	if ((rc = bridge_get_block(bg_block_id, &block_ptr)) != STATUS_OK) {
-		if (rc == INCONSISTENT_DATA
-		    && bg_conf->layout_mode == LAYOUT_DYNAMIC)
-			return REMOVE_USER_FOUND;
-
-		error("bridge_get_block(%s): %s",
-		      bg_block_id,
-		      bg_err_str(rc));
-		return REMOVE_USER_ERR;
-	}
-
-	if ((rc = bridge_get_data(block_ptr, RM_PartitionUsersNum,
-				  &user_count))
-	    != STATUS_OK) {
-		error("bridge_get_data(RM_PartitionUsersNum): %s",
-		      bg_err_str(rc));
-		returnc = REMOVE_USER_ERR;
-		user_count = 0;
-	} else
-		if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE)
-			info("got %d users for %s", user_count, bg_block_id);
-	for(i=0; i<user_count; i++) {
-		if (i) {
-			if ((rc = bridge_get_data(block_ptr,
-						  RM_PartitionNextUser,
-						  &user))
-			    != STATUS_OK) {
-				error("bridge_get_data"
-				      "(RM_PartitionNextUser): %s",
-				      bg_err_str(rc));
-				returnc = REMOVE_USER_ERR;
-				break;
-			}
-		} else {
-			if ((rc = bridge_get_data(block_ptr,
-						  RM_PartitionFirstUser,
-						  &user))
-			    != STATUS_OK) {
-				error("bridge_get_data"
-				      "(RM_PartitionFirstUser): %s",
-				      bg_err_str(rc));
-				returnc = REMOVE_USER_ERR;
-				break;
-			}
-		}
-		if (!user) {
-			error("No user was returned from database");
-			continue;
-		}
-		if (!strcmp(user, bg_conf->slurm_user_name)) {
-			free(user);
-			continue;
-		}
-
-		if (user_name) {
-			if (!strcmp(user, user_name)) {
-				returnc = REMOVE_USER_FOUND;
-				free(user);
-				continue;
-			}
-		}
-
-		info("Removing user %s from Block %s", user, bg_block_id);
-		if ((rc = bridge_remove_block_user(bg_block_id, user))
-		    != STATUS_OK) {
-			debug("user %s isn't on block %s",
-			      user,
-			      bg_block_id);
-		}
-		free(user);
-	}
-	if ((rc = bridge_free_block(block_ptr)) != STATUS_OK) {
-		error("bridge_free_block(): %s", bg_err_str(rc));
-	}
-#endif
-	return returnc;
-}
-
-/* if SLURM_ERROR you will need to fail the job with
-   slurm_fail_job(bg_record->job_running);
-*/
-
-extern int set_block_user(bg_record_t *bg_record)
-{
-	int rc = 0;
-	if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE)
-		info("resetting the boot state flag and "
-		     "counter for block %s.",
-		     bg_record->bg_block_id);
-	bg_record->boot_state = 0;
-	bg_record->boot_count = 0;
-
-	if ((rc = update_block_user(bg_record, 1)) == 1) {
-		last_bg_update = time(NULL);
-		rc = SLURM_SUCCESS;
-	} else if (rc == -1) {
-		error("Unable to add user name to block %s. "
-		      "Cancelling job.",
-		      bg_record->bg_block_id);
-		rc = SLURM_ERROR;
-	}
-	xfree(bg_record->target_name);
-	bg_record->target_name = xstrdup(bg_conf->slurm_user_name);
-
-	return rc;
-}
-
-/**
- * sort the partitions by increasing size
- */
-extern void sort_bg_record_inc_size(List records){
-	if (records == NULL)
-		return;
-	list_sort(records, (ListCmpF) bg_record_cmpf_inc);
-	last_bg_update = time(NULL);
-}
-
-/*
- * block_agent - thread periodically updates status of
- * bluegene blocks.
- *
- */
-extern void *block_agent(void *args)
-{
-	static time_t last_bg_test;
-	int rc;
-	time_t now = time(NULL);
-
-	last_bg_test = now - BG_POLL_TIME;
-	while (!agent_fini) {
-		if (difftime(now, last_bg_test) >= BG_POLL_TIME) {
-			if (agent_fini)		/* don't bother */
-				break;	/* quit now */
-			if (blocks_are_created) {
-				last_bg_test = now;
-				if ((rc = update_block_list()) == 1)
-					last_bg_update = now;
-				else if (rc == -1)
-					error("Error with update_block_list");
-			}
-		}
-
-		sleep(1);
-		now = time(NULL);
-	}
-	return NULL;
-}
-
-/*
- * state_agent - thread periodically updates status of
- * bluegene nodes.
- *
- */
-extern void *state_agent(void *args)
-{
-	static time_t last_mmcs_test;
-	time_t now = time(NULL);
-
-	last_mmcs_test = now - MMCS_POLL_TIME;
-	while (!agent_fini) {
-		if (difftime(now, last_mmcs_test) >= MMCS_POLL_TIME) {
-			if (agent_fini)		/* don't bother */
-				break; 	/* quit now */
-			if (blocks_are_created) {
-				/* can run for a while so set the
-				 * time after the call so there is
-				 * always MMCS_POLL_TIME between
-				 * calls */
-				test_mmcs_failures();
-				last_mmcs_test = time(NULL);
-			}
-		}
-
-		sleep(1);
-		now = time(NULL);
-	}
-	return NULL;
-}
-
-/* must set the protecting mutex if any before this function is called */
-
-extern int remove_from_bg_list(List my_bg_list, bg_record_t *bg_record)
-{
-	bg_record_t *found_record = NULL;
-	ListIterator itr;
-	int rc = SLURM_ERROR;
-
-	if (!bg_record)
-		return rc;
-
-	//slurm_mutex_lock(&block_state_mutex);
-	itr = list_iterator_create(my_bg_list);
-	while ((found_record = list_next(itr))) {
-		if (found_record)
-			if (bg_record == found_record) {
-				list_remove(itr);
-				rc = SLURM_SUCCESS;
-				break;
-			}
-	}
-	list_iterator_destroy(itr);
-	//slurm_mutex_unlock(&block_state_mutex);
-
-	return rc;
-}
-
-/* This is here to remove from the orignal list when dealing with
- * copies like above all locks need to be set.  This function does not
- * free anything you must free it when you are done */
-extern bg_record_t *find_and_remove_org_from_bg_list(List my_list,
-						     bg_record_t *bg_record)
-{
-	ListIterator itr = list_iterator_create(my_list);
-	bg_record_t *found_record = NULL;
-
-	while ((found_record = (bg_record_t *) list_next(itr)) != NULL) {
-		/* check for full node bitmap compare */
-		if (bit_equal(bg_record->bitmap, found_record->bitmap)
-		    && bit_equal(bg_record->ionode_bitmap,
-				 found_record->ionode_bitmap)) {
-			if (!strcmp(bg_record->bg_block_id,
-				    found_record->bg_block_id)) {
-				list_remove(itr);
-				if (bg_conf->slurm_debug_flags
-				    & DEBUG_FLAG_SELECT_TYPE)
-					info("got the block");
-				break;
-			}
-		}
-	}
-	list_iterator_destroy(itr);
-	return found_record;
-}
-
-/* This is here to remove from the orignal list when dealing with
- * copies like above all locks need to be set */
-extern bg_record_t *find_org_in_bg_list(List my_list, bg_record_t *bg_record)
-{
-	ListIterator itr = list_iterator_create(my_list);
-	bg_record_t *found_record = NULL;
-
-	while ((found_record = (bg_record_t *) list_next(itr)) != NULL) {
-		/* check for full node bitmap compare */
-		if (bit_equal(bg_record->bitmap, found_record->bitmap)
-		    && bit_equal(bg_record->ionode_bitmap,
-				 found_record->ionode_bitmap)) {
-
-			if (!strcmp(bg_record->bg_block_id,
-				    found_record->bg_block_id)) {
-				if (bg_conf->slurm_debug_flags
-				    & DEBUG_FLAG_SELECT_TYPE)
-					info("got the block");
-				break;
-			}
-		}
-	}
-	list_iterator_destroy(itr);
-	return found_record;
-}
-
-extern int bg_free_block(bg_record_t *bg_record, bool wait, bool locked)
-{
-	int rc = SLURM_SUCCESS;
-	int count = 0;
-
-	if (!bg_record) {
-		error("bg_free_block: there was no bg_record");
-		return SLURM_ERROR;
-	}
-
-	if (!locked)
-		slurm_mutex_lock(&block_state_mutex);
-
-	while (count < MAX_FREE_RETRIES) {
-		/* block was removed */
-		if (bg_record->magic != BLOCK_MAGIC) {
-			error("block was removed while freeing it here");
-			if (!locked)
-				slurm_mutex_unlock(&block_state_mutex);
-			return SLURM_SUCCESS;
-		}
-		/* Reset these here so we don't try to reboot it
-		   when the state goes to free.
-		*/
-		bg_record->boot_state = 0;
-		bg_record->boot_count = 0;
-		/* Here we don't need to check if the block is still
-		 * in exsistance since this function can't be called on
-		 * the same block twice.  It may
-		 * had already been removed at this point also.
-		 */
-#ifdef HAVE_BG_FILES
-		if (bg_record->state != NO_VAL
-		    && bg_record->state != RM_PARTITION_FREE
-		    && bg_record->state != RM_PARTITION_DEALLOCATING) {
-			if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE)
-				info("bridge_destroy %s",
-				     bg_record->bg_block_id);
-			rc = bridge_destroy_block(bg_record->bg_block_id);
-			if (rc != STATUS_OK) {
-				if (rc == PARTITION_NOT_FOUND) {
-					debug("block %s is not found",
-					      bg_record->bg_block_id);
-					break;
-				} else if (rc == INCOMPATIBLE_STATE) {
-#ifndef HAVE_BGL
-					/* If the state is error and
-					   we get an incompatible
-					   state back here, it means
-					   we set it ourselves so
-					   break out.
-					*/
-					if (bg_record->state
-					    == RM_PARTITION_ERROR)
-						break;
-#endif
-					if (bg_conf->slurm_debug_flags
-					    & DEBUG_FLAG_SELECT_TYPE)
-						info("bridge_destroy_partition"
-						     "(%s): %s State = %d",
-						     bg_record->bg_block_id,
-						     bg_err_str(rc),
-						     bg_record->state);
-				} else {
-					error("bridge_destroy_partition"
-					      "(%s): %s State = %d",
-					      bg_record->bg_block_id,
-					      bg_err_str(rc),
-					      bg_record->state);
-				}
-			}
-		}
-#else
-		/* Fake a free since we are n deallocating
-		   state before this.
-		*/
-		if (bg_record->state == RM_PARTITION_ERROR)
-			break;
-		else if (count >= 3)
-			bg_record->state = RM_PARTITION_FREE;
-		else if (bg_record->state != RM_PARTITION_FREE)
-			bg_record->state = RM_PARTITION_DEALLOCATING;
-#endif
-
-		if (!wait || (bg_record->state == RM_PARTITION_FREE)
-#ifdef HAVE_BGL
-		    ||  (bg_record->state == RM_PARTITION_ERROR)
-#endif
-			) {
-			break;
-		}
-		/* If we were locked outside of this we need to unlock
-		   to not cause deadlock on this mutex until we are
-		   done.
-		*/
-		slurm_mutex_unlock(&block_state_mutex);
-		sleep(FREE_SLEEP_INTERVAL);
-		count++;
-		slurm_mutex_lock(&block_state_mutex);
-	}
-
-	rc = SLURM_SUCCESS;
-	if ((bg_record->state == RM_PARTITION_FREE)
-	    || (bg_record->state == RM_PARTITION_ERROR))
-		remove_from_bg_list(bg_lists->booted, bg_record);
-	else if (count >= MAX_FREE_RETRIES) {
-		/* Something isn't right, go mark this one in an error
-		   state. */
-		update_block_msg_t block_msg;
-		if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE)
-			info("bg_free_block: block %s is not in state "
-			     "free (%s), putting it in error state.",
-			     bg_record->bg_block_id,
-			     bg_block_state_string(bg_record->state));
-		slurm_init_update_block_msg(&block_msg);
-		block_msg.bg_block_id = bg_record->bg_block_id;
-		block_msg.state = RM_PARTITION_ERROR;
-		block_msg.reason = "Block would not deallocate";
-		slurm_mutex_unlock(&block_state_mutex);
-		select_p_update_block(&block_msg);
-		slurm_mutex_lock(&block_state_mutex);
-		rc = SLURM_ERROR;
-	}
-	if (!locked)
-		slurm_mutex_unlock(&block_state_mutex);
-
-	return rc;
-}
-
-/* block_state_mutex should be unlocked before calling this */
-extern int free_block_list(uint32_t job_id, List track_in_list,
-			   bool destroy, bool wait)
-{
-	bg_record_t *bg_record = NULL;
-	int retries;
-	ListIterator itr = NULL;
-	bg_free_block_list_t *bg_free_list;
-	pthread_attr_t attr_agent;
-	pthread_t thread_agent;
-
-	if (!track_in_list || !list_count(track_in_list))
-		return SLURM_SUCCESS;
-
-	bg_free_list = xmalloc(sizeof(bg_free_block_list_t));
-	bg_free_list->track_list = list_create(NULL);
-	bg_free_list->destroy = destroy;
-	bg_free_list->job_id = job_id;
-
-	slurm_mutex_lock(&block_state_mutex);
-	list_transfer(bg_free_list->track_list, track_in_list);
-	itr = list_iterator_create(bg_free_list->track_list);
-	while ((bg_record = list_next(itr))) {
-		if (bg_record->magic != BLOCK_MAGIC) {
-			error("block was already destroyed");
-			continue;
-		}
-
-		bg_record->free_cnt++;
-
-		if (bg_record->job_ptr
-		    && !IS_JOB_FINISHED(bg_record->job_ptr)) {
-			info("We are freeing a block (%s) that has job %u(%u).",
-			     bg_record->bg_block_id,
-			     bg_record->job_ptr->job_id,
-			     bg_record->job_running);
-			/* This is not thread safe if called from
-			   bg_job_place.c anywhere from within
-			   submit_job() */
-			slurm_mutex_unlock(&block_state_mutex);
-			bg_requeue_job(bg_record->job_ptr->job_id, 0);
-			slurm_mutex_lock(&block_state_mutex);
-		}
-		if (remove_from_bg_list(bg_lists->job_running, bg_record)
-		    == SLURM_SUCCESS)
-			num_unused_cpus += bg_record->cpu_cnt;
-
-		bg_free_block(bg_record, 0, 1);
-	}
-	list_iterator_destroy(itr);
-	slurm_mutex_unlock(&block_state_mutex);
-
-	if (wait) {
-		/* Track_freeing_blocks waits until the list is done
-		   and frees the memory of bg_free_list.
-		*/
-		_track_freeing_blocks(bg_free_list);
-		return SLURM_SUCCESS;
-	}
-
-	/* _track_freeing_blocks handles cleanup */
-	slurm_attr_init(&attr_agent);
-	if (pthread_attr_setdetachstate(&attr_agent, PTHREAD_CREATE_DETACHED))
-		error("pthread_attr_setdetachstate error %m");
-	retries = 0;
-	while (pthread_create(&thread_agent, &attr_agent,
-			      _track_freeing_blocks,
-			      bg_free_list)) {
-		error("pthread_create error %m");
-		if (++retries > MAX_PTHREAD_RETRIES)
-			fatal("Can't create "
-			      "pthread");
-		/* sleep and retry */
-		usleep(1000);
-	}
-	slurm_attr_destroy(&attr_agent);
-	return SLURM_SUCCESS;
-}
-
-/*
- * Read and process the bluegene.conf configuration file so to interpret what
- * blocks are static/dynamic, torus/mesh, etc.
- */
-extern int read_bg_conf(void)
-{
-	int i;
-	int count = 0;
-	s_p_hashtbl_t *tbl = NULL;
-	char *layout = NULL;
-	blockreq_t **blockreq_array = NULL;
-	image_t **image_array = NULL;
-	image_t *image = NULL;
-	static time_t last_config_update = (time_t) 0;
-	struct stat config_stat;
-	ListIterator itr = NULL;
-	char* bg_conf_file = NULL;
-
-	if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE)
-		info("Reading the bluegene.conf file");
-
-	/* check if config file has changed */
-	bg_conf_file = _get_bg_conf();
-
-	if (stat(bg_conf_file, &config_stat) < 0)
-		fatal("can't stat bluegene.conf file %s: %m", bg_conf_file);
-	if (last_config_update) {
-		_reopen_bridge_log();
-		if (last_config_update == config_stat.st_mtime) {
-			if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE)
-				info("%s unchanged", bg_conf_file);
-		} else {
-			info("Restart slurmctld for %s changes "
-			     "to take effect",
-			     bg_conf_file);
-		}
-		last_config_update = config_stat.st_mtime;
-		xfree(bg_conf_file);
-		return SLURM_SUCCESS;
-	}
-	last_config_update = config_stat.st_mtime;
-
-	/* initialization */
-	/* bg_conf defined in bg_node_alloc.h */
-	tbl = s_p_hashtbl_create(bg_conf_file_options);
-
-	if (s_p_parse_file(tbl, NULL, bg_conf_file) == SLURM_ERROR)
-		fatal("something wrong with opening/reading bluegene "
-		      "conf file");
-	xfree(bg_conf_file);
-
-#ifdef HAVE_BGL
-	if (s_p_get_array((void ***)&image_array,
-			  &count, "AltBlrtsImage", tbl)) {
-		for (i = 0; i < count; i++) {
-			list_append(bg_conf->blrts_list, image_array[i]);
-			image_array[i] = NULL;
-		}
-	}
-	if (!s_p_get_string(&bg_conf->default_blrtsimage, "BlrtsImage", tbl)) {
-		if (!list_count(bg_conf->blrts_list))
-			fatal("BlrtsImage not configured "
-			      "in bluegene.conf");
-		itr = list_iterator_create(bg_conf->blrts_list);
-		image = list_next(itr);
-		image->def = true;
-		list_iterator_destroy(itr);
-		bg_conf->default_blrtsimage = xstrdup(image->name);
-		info("Warning: using %s as the default BlrtsImage.  "
-		     "If this isn't correct please set BlrtsImage",
-		     bg_conf->default_blrtsimage);
-	} else {
-		if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE)
-			info("default BlrtsImage %s",
-			     bg_conf->default_blrtsimage);
-		image = xmalloc(sizeof(image_t));
-		image->name = xstrdup(bg_conf->default_blrtsimage);
-		image->def = true;
-		image->groups = NULL;
-		/* we want it to be first */
-		list_push(bg_conf->blrts_list, image);
-	}
-
-	if (s_p_get_array((void ***)&image_array,
-			  &count, "AltLinuxImage", tbl)) {
-		for (i = 0; i < count; i++) {
-			list_append(bg_conf->linux_list, image_array[i]);
-			image_array[i] = NULL;
-		}
-	}
-	if (!s_p_get_string(&bg_conf->default_linuximage, "LinuxImage", tbl)) {
-		if (!list_count(bg_conf->linux_list))
-			fatal("LinuxImage not configured "
-			      "in bluegene.conf");
-		itr = list_iterator_create(bg_conf->linux_list);
-		image = list_next(itr);
-		image->def = true;
-		list_iterator_destroy(itr);
-		bg_conf->default_linuximage = xstrdup(image->name);
-		info("Warning: using %s as the default LinuxImage.  "
-		     "If this isn't correct please set LinuxImage",
-		     bg_conf->default_linuximage);
-	} else {
-		if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE)
-			info("default LinuxImage %s",
-			     bg_conf->default_linuximage);
-		image = xmalloc(sizeof(image_t));
-		image->name = xstrdup(bg_conf->default_linuximage);
-		image->def = true;
-		image->groups = NULL;
-		/* we want it to be first */
-		list_push(bg_conf->linux_list, image);
-	}
-
-	if (s_p_get_array((void ***)&image_array,
-			  &count, "AltRamDiskImage", tbl)) {
-		for (i = 0; i < count; i++) {
-			list_append(bg_conf->ramdisk_list, image_array[i]);
-			image_array[i] = NULL;
-		}
-	}
-	if (!s_p_get_string(&bg_conf->default_ramdiskimage,
-			    "RamDiskImage", tbl)) {
-		if (!list_count(bg_conf->ramdisk_list))
-			fatal("RamDiskImage not configured "
-			      "in bluegene.conf");
-		itr = list_iterator_create(bg_conf->ramdisk_list);
-		image = list_next(itr);
-		image->def = true;
-		list_iterator_destroy(itr);
-		bg_conf->default_ramdiskimage = xstrdup(image->name);
-		info("Warning: using %s as the default RamDiskImage.  "
-		     "If this isn't correct please set RamDiskImage",
-		     bg_conf->default_ramdiskimage);
-	} else {
-		if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE)
-			info("default RamDiskImage %s",
-			     bg_conf->default_ramdiskimage);
-		image = xmalloc(sizeof(image_t));
-		image->name = xstrdup(bg_conf->default_ramdiskimage);
-		image->def = true;
-		image->groups = NULL;
-		/* we want it to be first */
-		list_push(bg_conf->ramdisk_list, image);
-	}
-#else
-
-	if (s_p_get_array((void ***)&image_array,
-			  &count, "AltCnloadImage", tbl)) {
-		for (i = 0; i < count; i++) {
-			list_append(bg_conf->linux_list, image_array[i]);
-			image_array[i] = NULL;
-		}
-	}
-	if (!s_p_get_string(&bg_conf->default_linuximage, "CnloadImage", tbl)) {
-		if (!list_count(bg_conf->linux_list))
-			fatal("CnloadImage not configured "
-			      "in bluegene.conf");
-		itr = list_iterator_create(bg_conf->linux_list);
-		image = list_next(itr);
-		image->def = true;
-		list_iterator_destroy(itr);
-		bg_conf->default_linuximage = xstrdup(image->name);
-		info("Warning: using %s as the default CnloadImage.  "
-		     "If this isn't correct please set CnloadImage",
-		     bg_conf->default_linuximage);
-	} else {
-		if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE)
-			info("default CnloadImage %s",
-			     bg_conf->default_linuximage);
-		image = xmalloc(sizeof(image_t));
-		image->name = xstrdup(bg_conf->default_linuximage);
-		image->def = true;
-		image->groups = NULL;
-		/* we want it to be first */
-		list_push(bg_conf->linux_list, image);
-	}
-
-	if (s_p_get_array((void ***)&image_array,
-			  &count, "AltIoloadImage", tbl)) {
-		for (i = 0; i < count; i++) {
-			list_append(bg_conf->ramdisk_list, image_array[i]);
-			image_array[i] = NULL;
-		}
-	}
-	if (!s_p_get_string(&bg_conf->default_ramdiskimage,
-			    "IoloadImage", tbl)) {
-		if (!list_count(bg_conf->ramdisk_list))
-			fatal("IoloadImage not configured "
-			      "in bluegene.conf");
-		itr = list_iterator_create(bg_conf->ramdisk_list);
-		image = list_next(itr);
-		image->def = true;
-		list_iterator_destroy(itr);
-		bg_conf->default_ramdiskimage = xstrdup(image->name);
-		info("Warning: using %s as the default IoloadImage.  "
-		     "If this isn't correct please set IoloadImage",
-		     bg_conf->default_ramdiskimage);
-	} else {
-		if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE)
-			info("default IoloadImage %s",
-			     bg_conf->default_ramdiskimage);
-		image = xmalloc(sizeof(image_t));
-		image->name = xstrdup(bg_conf->default_ramdiskimage);
-		image->def = true;
-		image->groups = NULL;
-		/* we want it to be first */
-		list_push(bg_conf->ramdisk_list, image);
-	}
-
-#endif
-	if (s_p_get_array((void ***)&image_array,
-			  &count, "AltMloaderImage", tbl)) {
-		for (i = 0; i < count; i++) {
-			list_append(bg_conf->mloader_list, image_array[i]);
-			image_array[i] = NULL;
-		}
-	}
-	if (!s_p_get_string(&bg_conf->default_mloaderimage,
-			    "MloaderImage", tbl)) {
-		if (!list_count(bg_conf->mloader_list))
-			fatal("MloaderImage not configured "
-			      "in bluegene.conf");
-		itr = list_iterator_create(bg_conf->mloader_list);
-		image = list_next(itr);
-		image->def = true;
-		list_iterator_destroy(itr);
-		bg_conf->default_mloaderimage = xstrdup(image->name);
-		info("Warning: using %s as the default MloaderImage.  "
-		     "If this isn't correct please set MloaderImage",
-		     bg_conf->default_mloaderimage);
-	} else {
-		if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE)
-			info("default MloaderImage %s",
-			     bg_conf->default_mloaderimage);
-		image = xmalloc(sizeof(image_t));
-		image->name = xstrdup(bg_conf->default_mloaderimage);
-		image->def = true;
-		image->groups = NULL;
-		/* we want it to be first */
-		list_push(bg_conf->mloader_list, image);
-	}
-
-	if (!s_p_get_uint16(
-		    &bg_conf->bp_node_cnt, "BasePartitionNodeCnt", tbl)) {
-		error("BasePartitionNodeCnt not configured in bluegene.conf "
-		      "defaulting to 512 as BasePartitionNodeCnt");
-		bg_conf->bp_node_cnt = 512;
-		bg_conf->quarter_node_cnt = 128;
-	} else {
-		if (bg_conf->bp_node_cnt <= 0)
-			fatal("You should have more than 0 nodes "
-			      "per base partition");
-
-		bg_conf->quarter_node_cnt = bg_conf->bp_node_cnt/4;
-	}
-	/* bg_conf->cpus_per_bp should had already been set from the
-	 * node_init */
-	if (bg_conf->cpus_per_bp < bg_conf->bp_node_cnt) {
-		fatal("For some reason we have only %u cpus per bp, but "
-		      "have %u cnodes per bp.  You need at least the same "
-		      "number of cpus as you have cnodes per bp.  "
-		      "Check the NodeName Procs= "
-		      "definition in the slurm.conf.",
-		      bg_conf->cpus_per_bp, bg_conf->bp_node_cnt);
-	}
-
-	bg_conf->cpu_ratio = bg_conf->cpus_per_bp/bg_conf->bp_node_cnt;
-	if (!bg_conf->cpu_ratio)
-		fatal("We appear to have less than 1 cpu on a cnode.  "
-		      "You specified %u for BasePartitionNodeCnt "
-		      "in the blugene.conf and %u cpus "
-		      "for each node in the slurm.conf",
-		      bg_conf->bp_node_cnt, bg_conf->cpus_per_bp);
-	num_unused_cpus =
-		DIM_SIZE[X] * DIM_SIZE[Y] * DIM_SIZE[Z]
-		* bg_conf->cpus_per_bp;
-
-	if (!s_p_get_uint16(
-		    &bg_conf->nodecard_node_cnt, "NodeCardNodeCnt", tbl)) {
-		error("NodeCardNodeCnt not configured in bluegene.conf "
-		      "defaulting to 32 as NodeCardNodeCnt");
-		bg_conf->nodecard_node_cnt = 32;
-	}
-
-	if (bg_conf->nodecard_node_cnt<=0)
-		fatal("You should have more than 0 nodes per nodecard");
-
-	bg_conf->bp_nodecard_cnt =
-		bg_conf->bp_node_cnt / bg_conf->nodecard_node_cnt;
-
-	if (!s_p_get_uint16(&bg_conf->numpsets, "Numpsets", tbl))
-		fatal("Warning: Numpsets not configured in bluegene.conf");
-
-	if (bg_conf->numpsets) {
-		bitstr_t *tmp_bitmap = NULL;
-		int small_size = 1;
-
-		/* THIS IS A HACK TO MAKE A 1 NODECARD SYSTEM WORK */
-		if (bg_conf->bp_node_cnt == bg_conf->nodecard_node_cnt) {
-			bg_conf->quarter_ionode_cnt = 2;
-			bg_conf->nodecard_ionode_cnt = 2;
-		} else {
-			bg_conf->quarter_ionode_cnt = bg_conf->numpsets/4;
-			bg_conf->nodecard_ionode_cnt =
-				bg_conf->quarter_ionode_cnt/4;
-		}
-
-		/* How many nodecards per ionode */
-		bg_conf->nc_ratio =
-			((double)bg_conf->bp_node_cnt
-			 / (double)bg_conf->nodecard_node_cnt)
-			/ (double)bg_conf->numpsets;
-		/* How many ionodes per nodecard */
-		bg_conf->io_ratio =
-			(double)bg_conf->numpsets /
-			((double)bg_conf->bp_node_cnt
-			 / (double)bg_conf->nodecard_node_cnt);
-		//info("got %f %f", bg_conf->nc_ratio, bg_conf->io_ratio);
-		/* figure out the smallest block we can have on the
-		   system */
-#ifdef HAVE_BGL
-		if (bg_conf->io_ratio >= 1)
-			bg_conf->smallest_block=32;
-		else
-			bg_conf->smallest_block=128;
-#else
-		if (bg_conf->io_ratio >= 2)
-			bg_conf->smallest_block=16;
-		else if (bg_conf->io_ratio == 1)
-			bg_conf->smallest_block=32;
-		else if (bg_conf->io_ratio == .5)
-			bg_conf->smallest_block=64;
-		else if (bg_conf->io_ratio == .25)
-			bg_conf->smallest_block=128;
-		else if (bg_conf->io_ratio == .125)
-			bg_conf->smallest_block=256;
-		else {
-			error("unknown ioratio %f.  Can't figure out "
-			      "smallest block size, setting it to midplane",
-			      bg_conf->io_ratio);
-			bg_conf->smallest_block = 512;
-		}
-#endif
-		if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE)
-			info("Smallest block possible on this system is %u",
-			     bg_conf->smallest_block);
-		/* below we are creating all the possible bitmaps for
-		 * each size of small block
-		 */
-		if ((int)bg_conf->nodecard_ionode_cnt < 1) {
-			bg_conf->nodecard_ionode_cnt = 0;
-		} else {
-			bg_lists->valid_small32 = list_create(_destroy_bitmap);
-			if ((small_size = bg_conf->nodecard_ionode_cnt))
-				small_size--;
-			i = 0;
-			while (i<bg_conf->numpsets) {
-				tmp_bitmap = bit_alloc(bg_conf->numpsets);
-				bit_nset(tmp_bitmap, i, i+small_size);
-				i += small_size+1;
-				list_append(bg_lists->valid_small32,
-					    tmp_bitmap);
-			}
-		}
-		/* If we only have 1 nodecard just jump to the end
-		   since this will never need to happen below.
-		   Pretty much a hack to avoid seg fault;). */
-		if (bg_conf->bp_node_cnt == bg_conf->nodecard_node_cnt)
-			goto no_calc;
-
-		bg_lists->valid_small128 = list_create(_destroy_bitmap);
-		if ((small_size = bg_conf->quarter_ionode_cnt))
-			small_size--;
-		i = 0;
-		while (i<bg_conf->numpsets) {
-			tmp_bitmap = bit_alloc(bg_conf->numpsets);
-			bit_nset(tmp_bitmap, i, i+small_size);
-			i += small_size+1;
-			list_append(bg_lists->valid_small128, tmp_bitmap);
-		}
-
-#ifndef HAVE_BGL
-		bg_lists->valid_small64 = list_create(_destroy_bitmap);
-		if ((small_size = bg_conf->nodecard_ionode_cnt * 2))
-			small_size--;
-		i = 0;
-		while (i<bg_conf->numpsets) {
-			tmp_bitmap = bit_alloc(bg_conf->numpsets);
-			bit_nset(tmp_bitmap, i, i+small_size);
-			i += small_size+1;
-			list_append(bg_lists->valid_small64, tmp_bitmap);
-		}
-
-		bg_lists->valid_small256 = list_create(_destroy_bitmap);
-		if ((small_size = bg_conf->quarter_ionode_cnt * 2))
-			small_size--;
-		i = 0;
-		while (i<bg_conf->numpsets) {
-			tmp_bitmap = bit_alloc(bg_conf->numpsets);
-			bit_nset(tmp_bitmap, i, i+small_size);
-			i += small_size+1;
-			list_append(bg_lists->valid_small256, tmp_bitmap);
-		}
-#endif
-	} else {
-		fatal("your numpsets is 0");
-	}
-
-no_calc:
-
-	if (!s_p_get_uint16(&bg_conf->bridge_api_verb, "BridgeAPIVerbose", tbl))
-		info("Warning: BridgeAPIVerbose not configured "
-		     "in bluegene.conf");
-	if (!s_p_get_string(&bg_conf->bridge_api_file,
-			    "BridgeAPILogFile", tbl))
-		info("BridgeAPILogFile not configured in bluegene.conf");
-	else
-		_reopen_bridge_log();
-
-	if (s_p_get_string(&layout, "DenyPassthrough", tbl)) {
-		if (strstr(layout, "X"))
-			ba_deny_pass |= PASS_DENY_X;
-		if (strstr(layout, "Y"))
-			ba_deny_pass |= PASS_DENY_Y;
-		if (strstr(layout, "Z"))
-			ba_deny_pass |= PASS_DENY_Z;
-		if (!strcasecmp(layout, "ALL"))
-			ba_deny_pass |= PASS_DENY_ALL;
-		bg_conf->deny_pass = ba_deny_pass;
-		xfree(layout);
-	}
-
-	if (!s_p_get_string(&layout, "LayoutMode", tbl)) {
-		info("Warning: LayoutMode was not specified in bluegene.conf "
-		     "defaulting to STATIC partitioning");
-		bg_conf->layout_mode = LAYOUT_STATIC;
-	} else {
-		if (!strcasecmp(layout,"STATIC"))
-			bg_conf->layout_mode = LAYOUT_STATIC;
-		else if (!strcasecmp(layout,"OVERLAP"))
-			bg_conf->layout_mode = LAYOUT_OVERLAP;
-		else if (!strcasecmp(layout,"DYNAMIC"))
-			bg_conf->layout_mode = LAYOUT_DYNAMIC;
-		else {
-			fatal("I don't understand this LayoutMode = %s",
-			      layout);
-		}
-		xfree(layout);
-	}
-
-	/* add blocks defined in file */
-	if (bg_conf->layout_mode != LAYOUT_DYNAMIC) {
-		if (!s_p_get_array((void ***)&blockreq_array,
-				   &count, "BPs", tbl)) {
-			info("WARNING: no blocks defined in bluegene.conf, "
-			     "only making full system block");
-			create_full_system_block(NULL);
-		}
-
-		for (i = 0; i < count; i++) {
-			add_bg_record(bg_lists->main, NULL,
-				      blockreq_array[i], 0, 0);
-		}
-	}
-	s_p_hashtbl_destroy(tbl);
-
-	return SLURM_SUCCESS;
-}
-
-extern int validate_current_blocks(char *dir)
-{
-	/* found bg blocks already on system */
-	List curr_block_list = NULL;
-	List found_block_list = NULL;
-	static time_t last_config_update = (time_t) 0;
-	ListIterator itr = NULL;
-	bg_record_t *bg_record = NULL;
-
-	/* only run on startup */
-	if (last_config_update)
-		return SLURM_SUCCESS;
-
-	last_config_update = time(NULL);
-	curr_block_list = list_create(destroy_bg_record);
-	found_block_list = list_create(NULL);
-//#if 0
-	/* Check to see if the configs we have are correct */
-	if (_validate_config_nodes(curr_block_list, found_block_list, dir)
-	    == SLURM_ERROR) {
-		_delete_old_blocks(curr_block_list, found_block_list);
-	}
-//#endif
-	/* looking for blocks only I created */
-	if (bg_conf->layout_mode == LAYOUT_DYNAMIC) {
-		init_wires();
-		info("No blocks created until jobs are submitted");
-	} else {
-		if (create_defined_blocks(bg_conf->layout_mode,
-					  found_block_list)
-		    == SLURM_ERROR) {
-			/* error in creating the static blocks, so
-			 * blocks referenced by submitted jobs won't
-			 * correspond to actual slurm blocks.
-			 */
-			fatal("Error, could not create the static blocks");
-			return SLURM_ERROR;
-		}
-	}
-
-	/* ok now since bg_lists->main has been made we now can put blocks in
-	   an error state this needs to be done outside of a lock
-	   it doesn't matter much in the first place though since
-	   no threads are started before this function. */
-	itr = list_iterator_create(bg_lists->main);
-	while ((bg_record = list_next(itr))) {
-		if (bg_record->state == RM_PARTITION_ERROR)
-			put_block_in_error_state(bg_record,
-						 BLOCK_ERROR_STATE, NULL);
-	}
-	list_iterator_destroy(itr);
-
-	list_destroy(curr_block_list);
-	curr_block_list = NULL;
-	list_destroy(found_block_list);
-	found_block_list = NULL;
-
-	slurm_mutex_lock(&block_state_mutex);
-	last_bg_update = time(NULL);
-	sort_bg_record_inc_size(bg_lists->main);
-	slurm_mutex_unlock(&block_state_mutex);
-	if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE)
-		info("Blocks have finished being created.");
-	return SLURM_SUCCESS;
-}
-
-static void _destroy_bg_config(bg_config_t *bg_conf)
-{
-	if (bg_conf) {
-#ifdef HAVE_BGL
-		if (bg_conf->blrts_list) {
-			list_destroy(bg_conf->blrts_list);
-			bg_conf->blrts_list = NULL;
-		}
-		xfree(bg_conf->default_blrtsimage);
-#endif
-		xfree(bg_conf->bridge_api_file);
-		xfree(bg_conf->default_linuximage);
-		xfree(bg_conf->default_mloaderimage);
-		xfree(bg_conf->default_ramdiskimage);
-		if (bg_conf->linux_list) {
-			list_destroy(bg_conf->linux_list);
-			bg_conf->linux_list = NULL;
-		}
-
-		if (bg_conf->mloader_list) {
-			list_destroy(bg_conf->mloader_list);
-			bg_conf->mloader_list = NULL;
-		}
-
-		if (bg_conf->ramdisk_list) {
-			list_destroy(bg_conf->ramdisk_list);
-			bg_conf->ramdisk_list = NULL;
-		}
-		xfree(bg_conf->slurm_user_name);
-		xfree(bg_conf->slurm_node_prefix);
-		xfree(bg_conf);
-	}
-}
-
-static void _destroy_bg_lists(bg_lists_t *bg_lists)
-{
-	if (bg_lists) {
-		if (bg_lists->booted) {
-			list_destroy(bg_lists->booted);
-			bg_lists->booted = NULL;
-		}
-
-		if (bg_lists->job_running) {
-			list_destroy(bg_lists->job_running);
-			bg_lists->job_running = NULL;
-			num_unused_cpus = 0;
-		}
-
-		if (bg_lists->main) {
-			list_destroy(bg_lists->main);
-			bg_lists->main = NULL;
-		}
-
-		if (bg_lists->valid_small32) {
-			list_destroy(bg_lists->valid_small32);
-			bg_lists->valid_small32 = NULL;
-		}
-		if (bg_lists->valid_small64) {
-			list_destroy(bg_lists->valid_small64);
-			bg_lists->valid_small64 = NULL;
-		}
-		if (bg_lists->valid_small128) {
-			list_destroy(bg_lists->valid_small128);
-			bg_lists->valid_small128 = NULL;
-		}
-		if (bg_lists->valid_small256) {
-			list_destroy(bg_lists->valid_small256);
-			bg_lists->valid_small256 = NULL;
-		}
-
-		xfree(bg_lists);
-	}
-}
-
-static void _set_bg_lists()
-{
-	if (!bg_lists)
-		bg_lists = xmalloc(sizeof(bg_lists_t));
-
-	slurm_mutex_lock(&block_state_mutex);
-
-	if (bg_lists->booted)
-		list_destroy(bg_lists->booted);
-	bg_lists->booted = list_create(NULL);
-
-	if (bg_lists->job_running)
-		list_destroy(bg_lists->job_running);
-	bg_lists->job_running = list_create(NULL);
-
-	if (bg_lists->main)
-		list_destroy(bg_lists->main);
-	bg_lists->main = list_create(destroy_bg_record);
-
-	slurm_mutex_unlock(&block_state_mutex);
-
-}
-
-/*
- * _validate_config_nodes - Match slurm configuration information with
- *                          current BG block configuration.
- * IN/OUT curr_block_list -  List of blocks already existing on the system.
- * IN/OUT found_block_list - List of blocks found on the system
- *                              that are listed in the bluegene.conf.
- * NOTE: Both of the lists above should be created with list_create(NULL)
- *       since the bg_lists->main will contain the complete list of pointers
- *       and be destroyed with it.
- *
- * RET - SLURM_SUCCESS if they match, else an error
- * code. Writes bg_block_id into bg_lists->main records.
- */
-
-static int _validate_config_nodes(List curr_block_list,
-				  List found_block_list, char *dir)
-{
-	int rc = SLURM_ERROR;
-	bg_record_t* bg_record = NULL;
-	bg_record_t* init_bg_record = NULL;
-	int full_created = 0;
-	ListIterator itr_conf;
-	ListIterator itr_curr;
-	char tmp_char[256];
-
-	xassert(curr_block_list);
-	xassert(found_block_list);
-
-#ifdef HAVE_BG_FILES
-	/* read current bg block info into curr_block_list This
-	 * happens in the state load before this in emulation mode */
-	if (read_bg_blocks(curr_block_list) == SLURM_ERROR)
-		return SLURM_ERROR;
-	/* since we only care about error states here we don't care
-	   about the return code this must be done after the bg_lists->main
-	   is created */
-	load_state_file(curr_block_list, dir);
-#else
-	/* read in state from last run. */
-	if ((rc = load_state_file(curr_block_list, dir)) != SLURM_SUCCESS)
-		return rc;
-	/* This needs to be reset to SLURM_ERROR or it will never we
-	   that way again ;). */
-	rc = SLURM_ERROR;
-#endif
-	if (!bg_recover)
-		return SLURM_ERROR;
-
-	itr_curr = list_iterator_create(curr_block_list);
-	itr_conf = list_iterator_create(bg_lists->main);
-	while ((bg_record = list_next(itr_conf))) {
-		list_iterator_reset(itr_curr);
-		while ((init_bg_record = list_next(itr_curr))) {
-			if (strcasecmp(bg_record->nodes,
-				       init_bg_record->nodes))
-				continue; /* wrong nodes */
-			if (!bit_equal(bg_record->ionode_bitmap,
-				       init_bg_record->ionode_bitmap))
-				continue;
-#ifdef HAVE_BGL
-			if (bg_record->conn_type != init_bg_record->conn_type)
-				continue; /* wrong conn_type */
-#else
-			if ((bg_record->conn_type != init_bg_record->conn_type)
-			    && ((bg_record->conn_type < SELECT_SMALL)
-				&& (init_bg_record->conn_type < SELECT_SMALL)))
-				continue; /* wrong conn_type */
-#endif
-
-			copy_bg_record(init_bg_record, bg_record);
-			/* remove from the curr list since we just
-			   matched it no reason to keep it around
-			   anymore */
-			list_delete_item(itr_curr);
-			break;
-		}
-
-		if (!bg_record->bg_block_id) {
-			format_node_name(bg_record, tmp_char,
-					 sizeof(tmp_char));
-			info("Block found in bluegene.conf to be "
-			     "created: Nodes:%s",
-			     tmp_char);
-			rc = SLURM_ERROR;
-		} else {
-			if (bg_record->full_block)
-				full_created = 1;
-
-			list_push(found_block_list, bg_record);
-			format_node_name(bg_record, tmp_char,
-					 sizeof(tmp_char));
-			info("Existing: BlockID:%s Nodes:%s Conn:%s",
-			     bg_record->bg_block_id,
-			     tmp_char,
-			     conn_type_string(bg_record->conn_type));
-			if (((bg_record->state == RM_PARTITION_READY)
-			     || (bg_record->state == RM_PARTITION_CONFIGURING))
-			    && !block_ptr_exist_in_list(bg_lists->booted,
-							bg_record))
-				list_push(bg_lists->booted, bg_record);
-		}
-	}
-	if (bg_conf->layout_mode == LAYOUT_DYNAMIC)
-		goto finished;
-
-	if (!full_created) {
-		list_iterator_reset(itr_curr);
-		while ((init_bg_record = list_next(itr_curr))) {
-			if (init_bg_record->full_block) {
-				list_remove(itr_curr);
-				bg_record = init_bg_record;
-				list_append(bg_lists->main, bg_record);
-				list_push(found_block_list, bg_record);
-				format_node_name(bg_record, tmp_char,
-						 sizeof(tmp_char));
-				info("Existing: BlockID:%s Nodes:%s Conn:%s",
-				     bg_record->bg_block_id,
-				     tmp_char,
-				     conn_type_string(bg_record->conn_type));
-				if (((bg_record->state == RM_PARTITION_READY)
-				     || (bg_record->state
-					 == RM_PARTITION_CONFIGURING))
-				    && !block_ptr_exist_in_list(
-					    bg_lists->booted, bg_record))
-					list_push(bg_lists->booted,
-						  bg_record);
-				break;
-			}
-		}
-	}
-
-finished:
-	list_iterator_destroy(itr_conf);
-	list_iterator_destroy(itr_curr);
-	if (!list_count(curr_block_list))
-		rc = SLURM_SUCCESS;
-	return rc;
-}
-
-static int _delete_old_blocks(List curr_block_list, List found_block_list)
-{
-	ListIterator itr_curr, itr_found;
-	bg_record_t *found_record = NULL, *init_record = NULL;
-	List destroy_list = list_create(NULL);
-
-	xassert(curr_block_list);
-	xassert(found_block_list);
-
-	info("removing unspecified blocks");
-	if (!bg_recover) {
-		itr_curr = list_iterator_create(curr_block_list);
-		while ((init_record = list_next(itr_curr))) {
-			list_remove(itr_curr);
-			list_push(destroy_list, init_record);
-		}
-		list_iterator_destroy(itr_curr);
-	} else {
-		itr_curr = list_iterator_create(curr_block_list);
-		while ((init_record = list_next(itr_curr))) {
-			itr_found = list_iterator_create(found_block_list);
-			while ((found_record = list_next(itr_found))) {
-				if (!strcmp(init_record->bg_block_id,
-					    found_record->bg_block_id)) {
-					/* don't delete this one */
-					break;
-				}
-			}
-			list_iterator_destroy(itr_found);
-
-			if (found_record == NULL) {
-				list_remove(itr_curr);
-				list_push(destroy_list, init_record);
-			}
-		}
-		list_iterator_destroy(itr_curr);
-	}
-
-	free_block_list(NO_VAL, destroy_list, 1, 1);
-	list_destroy(destroy_list);
-
-	info("I am done deleting");
-
-	return SLURM_SUCCESS;
-}
-
-/* block_state_mutex should be locked before calling this */
-static int _post_block_free(bg_record_t *bg_record, bool restore)
-{
-#ifdef HAVE_BG_FILES
-	int rc = SLURM_SUCCESS;
-#endif
-	if (bg_record->magic != BLOCK_MAGIC) {
-		error("block already destroyed");
-		return SLURM_ERROR;
-	}
-
-	bg_record->free_cnt--;
-
-	if (bg_record->free_cnt > 0) {
-		if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE)
-			info("%d other are trying to destroy this block %s",
-			     bg_record->free_cnt, bg_record->bg_block_id);
-		return SLURM_SUCCESS;
-	}
-
-	if ((bg_record->state != RM_PARTITION_FREE)
-	    && (bg_record->state != RM_PARTITION_ERROR)){
-		/* Something isn't right, go mark this one in an error
-		   state. */
-		update_block_msg_t block_msg;
-		if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE)
-			info("_post_block_free: block %s is not in state "
-			     "free (%s), putting it in error state.",
-			     bg_record->bg_block_id,
-			     bg_block_state_string(bg_record->state));
-		slurm_init_update_block_msg(&block_msg);
-		block_msg.bg_block_id = bg_record->bg_block_id;
-		block_msg.state = RM_PARTITION_ERROR;
-		block_msg.reason = "Block would not deallocate";
-		slurm_mutex_unlock(&block_state_mutex);
-		select_p_update_block(&block_msg);
-		slurm_mutex_lock(&block_state_mutex);
-		return SLURM_SUCCESS;
-	}
-
-	/* A bit of a sanity check to make sure blocks are being
-	   removed out of all the lists.
-	*/
-	if (blocks_are_created) {
-		remove_from_bg_list(bg_lists->booted, bg_record);
-		if (remove_from_bg_list(bg_lists->job_running, bg_record)
-		    == SLURM_SUCCESS)
-			num_unused_cpus += bg_record->cpu_cnt;
-	}
-
-	if (restore)
-		return SLURM_SUCCESS;
-
-	if (blocks_are_created
-	    && remove_from_bg_list(bg_lists->main, bg_record)
-	    != SLURM_SUCCESS) {
-		/* This should only happen if called from
-		 * bg_job_place.c where the block was never added to
-		 * the list. */
-		debug("_post_block_free: It appears this block %s isn't "
-		      "in the main list anymore.",
-		      bg_record->bg_block_id);
-	}
-
-#ifdef HAVE_BG_FILES
-	if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE)
-		info("_post_block_free: removing %s from database",
-		     bg_record->bg_block_id);
-
-	rc = bridge_remove_block(bg_record->bg_block_id);
-	if (rc != STATUS_OK) {
-		if (rc == PARTITION_NOT_FOUND) {
-			debug("_post_block_free: block %s is not found",
-			      bg_record->bg_block_id);
-		} else {
-			error("_post_block_free: "
-			      "rm_remove_partition(%s): %s",
-			      bg_record->bg_block_id,
-			      bg_err_str(rc));
-		}
-	} else
-		if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE)
-			info("_post_block_free: done %s",
-			     bg_record->bg_block_id);
-#endif
-	destroy_bg_record(bg_record);
-	if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE)
-		info("_post_block_free: destroyed");
-
-	return SLURM_SUCCESS;
-}
-
-
-static void *_track_freeing_blocks(void *args)
-{
-	bg_free_block_list_t *bg_free_list = (bg_free_block_list_t *)args;
-	List track_list = bg_free_list->track_list;
-	bool destroy = bg_free_list->destroy;
-	uint32_t job_id = bg_free_list->job_id;
-	int retry_cnt = 0;
-	int free_cnt = 0, track_cnt = list_count(track_list);
-	ListIterator itr = list_iterator_create(track_list);
-	bg_record_t *bg_record;
-	bool restore = true;
-
-	debug("_track_freeing_blocks: Going to free %d for job %u",
-	      track_cnt, job_id);
-	while (retry_cnt < MAX_FREE_RETRIES) {
-		free_cnt = 0;
-		slurm_mutex_lock(&block_state_mutex);
-		/* just to make sure state is updated */
-		update_block_list_state(track_list);
-		list_iterator_reset(itr);
-		/* just incase this changes from the update function */
-		track_cnt = list_count(track_list);
-		while ((bg_record = list_next(itr))) {
-			if (bg_record->magic != BLOCK_MAGIC) {
-				/* update_block_list_state should
-				   remove this already from the list
-				   so we shouldn't ever have this.
-				*/
-				error("_track_freeing_blocks: block was "
-				      "already destroyed");
-				free_cnt++;
-				continue;
-			}
-#ifndef HAVE_BG_FILES
-			/* Fake a free since we are n deallocating
-			   state before this.
-			*/
-			if ((bg_record->state != RM_PARTITION_ERROR)
-			    && (retry_cnt >= 3))
-				bg_record->state = RM_PARTITION_FREE;
-#endif
-			if ((bg_record->state == RM_PARTITION_FREE)
-			    || (bg_record->state == RM_PARTITION_ERROR))
-				free_cnt++;
-			else if (bg_record->state != RM_PARTITION_DEALLOCATING)
-				bg_free_block(bg_record, 0, 1);
-		}
-		slurm_mutex_unlock(&block_state_mutex);
-		if (free_cnt == track_cnt)
-			break;
-		debug("_track_freeing_blocks: freed %d of %d for job %u",
-		      free_cnt, track_cnt, job_id);
-		sleep(FREE_SLEEP_INTERVAL);
-		retry_cnt++;
-	}
-	debug("_track_freeing_blocks: Freed them all for job %u", job_id);
-
-	if ((bg_conf->layout_mode == LAYOUT_DYNAMIC) || destroy)
-		restore = false;
-
-	/* If there is a block in error state we need to keep all
-	 * these blocks around. */
-	slurm_mutex_lock(&block_state_mutex);
-	list_iterator_reset(itr);
-	while ((bg_record = list_next(itr))) {
-		/* block no longer exists */
-		if (bg_record->magic != BLOCK_MAGIC)
-			continue;
-		if (bg_record->state != RM_PARTITION_FREE) {
-			restore = true;
-			break;
-		}
-	}
-
-	list_iterator_reset(itr);
-	while ((bg_record = list_next(itr)))
-		_post_block_free(bg_record, restore);
-	slurm_mutex_unlock(&block_state_mutex);
-	last_bg_update = time(NULL);
-	list_iterator_destroy(itr);
-	list_destroy(track_list);
-	xfree(bg_free_list);
-	return NULL;
-}
-
-static char *_get_bg_conf(void)
-{
-	char *val = getenv("SLURM_CONF");
-	char *rc = NULL;
-	int i;
-
-	if (!val)
-		return xstrdup(BLUEGENE_CONFIG_FILE);
-
-	/* Replace file name on end of path */
-	i = strlen(val) - strlen("slurm.conf") + strlen("bluegene.conf") + 1;
-	rc = xmalloc(i);
-	strcpy(rc, val);
-	val = strrchr(rc, (int)'/');
-	if (val)	/* absolute path */
-		val++;
-	else		/* not absolute path */
-		val = rc;
-	strcpy(val, "bluegene.conf");
-	return rc;
-}
-
-static int _reopen_bridge_log(void)
-{
-	int rc = SLURM_SUCCESS;
-
-	if (bg_conf->bridge_api_file == NULL)
-		return rc;
-
-#ifdef HAVE_BG_FILES
-	rc = bridge_set_log_params(bg_conf->bridge_api_file,
-				   bg_conf->bridge_api_verb);
-#endif
-	if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE)
-		info("Bridge api file set to %s, verbose level %d",
-		     bg_conf->bridge_api_file, bg_conf->bridge_api_verb);
-
-	return rc;
-}
-
-static void _destroy_bitmap(void *object)
-{
-	bitstr_t *bitstr = (bitstr_t *)object;
-
-	if (bitstr) {
-		FREE_NULL_BITMAP(bitstr);
-	}
-}
diff --git a/src/plugins/select/bluegene/plugin/bluegene.h b/src/plugins/select/bluegene/plugin/bluegene.h
deleted file mode 100644
index 9b21b061b..000000000
--- a/src/plugins/select/bluegene/plugin/bluegene.h
+++ /dev/null
@@ -1,195 +0,0 @@
-/*****************************************************************************\
- *  bluegene.h - header for blue gene configuration processing module.
- *
- *  $Id$
- *****************************************************************************
- *  Copyright (C) 2004 The Regents of the University of California.
- *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
- *  Written by Dan Phung <phung4@llnl.gov> and Danny Auble <da@llnl.gov>
- *
- *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
- *  Please also read the included file: DISCLAIMER.
- *
- *  SLURM is free software; you can redistribute it and/or modify it under
- *  the terms of the GNU General Public License as published by the Free
- *  Software Foundation; either version 2 of the License, or (at your option)
- *  any later version.
- *
- *  In addition, as a special exception, the copyright holders give permission
- *  to link the code of portions of this program with the OpenSSL library under
- *  certain conditions as described in each individual source file, and
- *  distribute linked combinations including the two. You must obey the GNU
- *  General Public License in all respects for all of the code used other than
- *  OpenSSL. If you modify file(s) with this exception, you may extend this
- *  exception to your version of the file(s), but you are not obligated to do
- *  so. If you do not wish to do so, delete this exception statement from your
- *  version.  If you delete this exception statement from all source files in
- *  the program, then also delete it here.
- *
- *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
- *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
- *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
- *  details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with SLURM; if not, write to the Free Software Foundation, Inc.,
- *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
-\*****************************************************************************/
-
-#ifndef _BLUEGENE_H_
-#define _BLUEGENE_H_
-
-#include "bg_record_functions.h"
-
-typedef enum bg_layout_type {
-	LAYOUT_STATIC,  /* no overlaps, except for full system block
-			   blocks never change */
-	LAYOUT_OVERLAP, /* overlaps permitted, must be defined in
-			   bluegene.conf file */
-	LAYOUT_DYNAMIC	/* slurm will make all blocks */
-} bg_layout_t;
-
-typedef struct {
-#ifdef HAVE_BGL
-	List blrts_list;
-#endif
-	uint16_t bp_node_cnt;
-	uint16_t bp_nodecard_cnt;
-	char *bridge_api_file;
-	uint16_t bridge_api_verb;
-	uint32_t slurm_debug_flags;
-#ifdef HAVE_BGL
-	char *default_blrtsimage;
-#endif
-	char *default_linuximage;
-	char *default_mloaderimage;
-	char *default_ramdiskimage;
-	uint16_t deny_pass;
-	double io_ratio;
-	bg_layout_t layout_mode;
-	List linux_list;
-	List mloader_list;
-	double nc_ratio;
-	uint16_t nodecard_node_cnt;
-	uint16_t nodecard_ionode_cnt;
-	uint16_t numpsets;
-	uint16_t cpu_ratio;
-	uint32_t cpus_per_bp;
-	uint16_t quarter_node_cnt;
-	uint16_t quarter_ionode_cnt;
-	List ramdisk_list;
-	char *slurm_user_name;
-	char *slurm_node_prefix;
-	uint32_t smallest_block;
-} bg_config_t;
-
-typedef struct {
-	List booted;         /* blocks that are booted */
-	List job_running;    /* jobs running in these blocks */
-	List main;	    /* List of configured BG blocks */
-	List valid_small32;
-	List valid_small64;
-	List valid_small128;
-	List valid_small256;
-} bg_lists_t;
-
-/* Global variables */
-extern bg_config_t *bg_conf;
-extern bg_lists_t *bg_lists;
-extern ba_system_t *ba_system_ptr;
-extern time_t last_bg_update;
-extern bool agent_fini;
-extern pthread_mutex_t block_state_mutex;
-extern pthread_mutex_t request_list_mutex;
-extern int blocks_are_created;
-extern int num_unused_cpus;
-
-#define MAX_PTHREAD_RETRIES  1
-#define BLOCK_ERROR_STATE    -3
-#define ADMIN_ERROR_STATE    -4
-#define NO_JOB_RUNNING       -1
-#define BUFSIZE 4096
-#define BITSIZE 128
-/* Change BLOCK_STATE_VERSION value when changing the state save
- * format i.e. pack_block() */
-#define BLOCK_STATE_VERSION      "VER004"
-#define BLOCK_2_1_STATE_VERSION  "VER003" /*Slurm 2.1's version*/
-
-#include "bg_block_info.h"
-#include "bg_job_place.h"
-#include "bg_job_run.h"
-#include "state_test.h"
-#include "jobinfo.h"
-#include "nodeinfo.h"
-
-/* bluegene.c */
-/**********************************************/
-
-/* Initialize all plugin variables */
-extern int init_bg(void);
-
-/* Purge all plugin variables */
-extern void fini_bg(void);
-
-extern bool blocks_overlap(bg_record_t *rec_a, bg_record_t *rec_b);
-
-extern void bg_requeue_job(uint32_t job_id, bool wait_for_start);
-
-/* remove all users from a block but what is in user_name */
-/* Note return codes */
-#define REMOVE_USER_ERR  -1
-#define REMOVE_USER_NONE  0
-#define REMOVE_USER_FOUND 2
-extern int remove_all_users(char *bg_block_id, char *user_name);
-extern int set_block_user(bg_record_t *bg_record);
-
-/* sort a list of bg_records by size (node count) */
-extern void sort_bg_record_inc_size(List records);
-
-/* block_agent - detached thread periodically tests status of bluegene
- * blocks */
-extern void *block_agent(void *args);
-
-/* state_agent - thread periodically tests status of bluegene
- * nodes, nodecards, and switches */
-extern void *state_agent(void *args);
-
-extern int bg_free_block(bg_record_t *bg_record, bool wait, bool locked);
-
-extern int remove_from_bg_list(List my_bg_list, bg_record_t *bg_record);
-extern bg_record_t *find_and_remove_org_from_bg_list(List my_list,
-						     bg_record_t *bg_record);
-extern bg_record_t *find_org_in_bg_list(List my_list, bg_record_t *bg_record);
-extern void *mult_free_block(void *args);
-extern void *mult_destroy_block(void *args);
-extern int free_block_list(uint32_t job_id, List track_list,
-			   bool destroy, bool wait);
-extern int read_bg_conf();
-extern int validate_current_blocks(char *dir);
-
-/* block_sys.c */
-/*****************************************************/
-#ifdef HAVE_BG_FILES
-#ifdef HAVE_BGL
-extern int find_nodecard_num(rm_partition_t *block_ptr, rm_nodecard_t *ncard,
-			     int *nc_id);
-#endif
-#endif
-extern int configure_block(bg_record_t * bg_conf_record);
-extern int read_bg_blocks();
-extern int load_state_file(List curr_block_list, char *dir_name);
-
-/* bg_switch_connections.c */
-/*****************************************************/
-extern int configure_small_block(bg_record_t *bg_record);
-extern int configure_block_switches(bg_record_t * bg_conf_record);
-
-
-/* select_bluegene.c */
-/*****************************************************/
-extern int select_p_update_block(update_block_msg_t *block_desc_ptr);
-
-
-#endif /* _BLUEGENE_H_ */
-
diff --git a/src/plugins/select/bluegene/plugin/select_bluegene.c b/src/plugins/select/bluegene/plugin/select_bluegene.c
deleted file mode 100644
index fb4aadc6a..000000000
--- a/src/plugins/select/bluegene/plugin/select_bluegene.c
+++ /dev/null
@@ -1,1483 +0,0 @@
-/*****************************************************************************\
- *  select_bluegene.c - node selection plugin for Blue Gene system.
- *****************************************************************************
- *  Copyright (C) 2004-2007 The Regents of the University of California.
- *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
- *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
- *  Written by Dan Phung <phung4@llnl.gov> Danny Auble <da@llnl.gov>
- *  CODE-OCEC-09-009. All rights reserved.
- *
- *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
- *  Please also read the included file: DISCLAIMER.
- *
- *  SLURM is free software; you can redistribute it and/or modify it under
- *  the terms of the GNU General Public License as published by the Free
- *  Software Foundation; either version 2 of the License, or (at your option)
- *  any later version.
- *
- *  In addition, as a special exception, the copyright holders give permission
- *  to link the code of portions of this program with the OpenSSL library under
- *  certain conditions as described in each individual source file, and
- *  distribute linked combinations including the two. You must obey the GNU
- *  General Public License in all respects for all of the code used other than
- *  OpenSSL. If you modify file(s) with this exception, you may extend this
- *  exception to your version of the file(s), but you are not obligated to do
- *  so. If you do not wish to do so, delete this exception statement from your
- *  version.  If you delete this exception statement from all source files in
- *  the program, then also delete it here.
- *
- *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
- *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
- *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
- *  details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with SLURM; if not, write to the Free Software Foundation, Inc.,
- *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
-\*****************************************************************************/
-
-#include "src/common/slurm_xlator.h"
-#include "bluegene.h"
-#include "nodeinfo.h"
-#include "jobinfo.h"
-
-//#include "src/common/uid.h"
-#include "src/slurmctld/trigger_mgr.h"
-#include <fcntl.h>
-
-#define HUGE_BUF_SIZE (1024*16)
-#define NOT_FROM_CONTROLLER -2
-/* These are defined here so when we link with something other than
- * the slurmctld we will have these symbols defined.  They will get
- * overwritten when linking with the slurmctld.
- */
-#if defined (__APPLE__)
-slurm_ctl_conf_t slurmctld_conf __attribute__((weak_import));
-struct node_record *node_record_table_ptr  __attribute__((weak_import)) = NULL;
-int bg_recover __attribute__((weak_import)) = NOT_FROM_CONTROLLER;
-List part_list  __attribute__((weak_import)) = NULL;
-int node_record_count __attribute__((weak_import));
-time_t last_node_update __attribute__((weak_import));
-time_t last_job_update __attribute__((weak_import));
-char *alpha_num  __attribute__((weak_import)) =
-	"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
-void *acct_db_conn  __attribute__((weak_import)) = NULL;
-char *slurmctld_cluster_name  __attribute__((weak_import)) = NULL;
-slurmdb_cluster_rec_t *working_cluster_rec  __attribute__((weak_import)) = NULL;
-#else
-slurm_ctl_conf_t slurmctld_conf;
-struct node_record *node_record_table_ptr = NULL;
-int bg_recover = NOT_FROM_CONTROLLER;
-List part_list = NULL;
-int node_record_count;
-time_t last_node_update;
-time_t last_job_update;
-char *alpha_num = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
-void *acct_db_conn = NULL;
-char *slurmctld_cluster_name = NULL;
-slurmdb_cluster_rec_t *working_cluster_rec = NULL;
-#endif
-
-/*
- * These variables are required by the generic plugin interface.  If they
- * are not found in the plugin, the plugin loader will ignore it.
- *
- * plugin_name - a string giving a human-readable description of the
- * plugin.  There is no maximum length, but the symbol must refer to
- * a valid string.
- *
- * plugin_type - a string suggesting the type of the plugin or its
- * applicability to a particular form of data or method of data handling.
- * If the low-level plugin API is used, the contents of this string are
- * unimportant and may be anything.  SLURM uses the higher-level plugin
- * interface which requires this string to be of the form
- *
- *	<application>/<method>
- *
- * where <application> is a description of the intended application of
- * the plugin (e.g., "select" for SLURM node selection) and <method>
- * is a description of how this plugin satisfies that application.  SLURM will
- * only load select plugins if the plugin_type string has a
- * prefix of "select/".
- *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as the node selection API matures.
- */
-const char plugin_name[]       	= "BlueGene node selection plugin";
-const char plugin_type[]       	= "select/bluegene";
-const uint32_t plugin_id	= 100;
-const uint32_t plugin_version	= 200;
-
-/* pthread stuff for updating BG node status */
-#ifdef HAVE_BG_L_P
-static pthread_t block_thread = 0;
-static pthread_t state_thread = 0;
-static pthread_mutex_t thread_flag_mutex = PTHREAD_MUTEX_INITIALIZER;
-
-/** initialize the status pthread */
-static int _init_status_pthread(void);
-
-extern int select_p_alter_node_cnt(enum select_node_cnt type, void *data);
-
-static int _init_status_pthread(void)
-{
-	pthread_attr_t attr;
-
-	pthread_mutex_lock(&thread_flag_mutex);
-	if (block_thread) {
-		debug2("Bluegene threads already running, not starting "
-		       "another");
-		pthread_mutex_unlock(&thread_flag_mutex);
-		return SLURM_ERROR;
-	}
-
-	slurm_attr_init(&attr);
-	/* since we do a join on this later we don't make it detached */
-	if (pthread_create(&block_thread, &attr, block_agent, NULL))
-		error("Failed to create block_agent thread");
-	slurm_attr_init(&attr);
-	/* since we do a join on this later we don't make it detached */
-	if (pthread_create(&state_thread, &attr, state_agent, NULL))
-		error("Failed to create state_agent thread");
-	pthread_mutex_unlock(&thread_flag_mutex);
-	slurm_attr_destroy(&attr);
-
-	return SLURM_SUCCESS;
-}
-
-static List _get_config(void)
-{
-	config_key_pair_t *key_pair;
-	List my_list = list_create(destroy_config_key_pair);
-
-	if (!my_list)
-		fatal("malloc failure on list_create");
-
-	key_pair = xmalloc(sizeof(config_key_pair_t));
-	key_pair->name = xstrdup("BasePartitionNodeCnt");
-	key_pair->value = xstrdup_printf("%u", bg_conf->bp_node_cnt);
-	list_append(my_list, key_pair);
-
-	key_pair = xmalloc(sizeof(config_key_pair_t));
-	key_pair->name = xstrdup("NodeCPUCnt");
-	key_pair->value = xstrdup_printf("%u", bg_conf->cpu_ratio);
-	list_append(my_list, key_pair);
-
-
-#ifdef HAVE_BGL
-	key_pair = xmalloc(sizeof(config_key_pair_t));
-	key_pair->name = xstrdup("BlrtsImage");
-	key_pair->value = xstrdup(bg_conf->default_blrtsimage);
-	list_append(my_list, key_pair);
-
-	key_pair = xmalloc(sizeof(config_key_pair_t));
-	key_pair->name = xstrdup("LinuxImage");
-	key_pair->value = xstrdup(bg_conf->default_linuximage);
-	list_append(my_list, key_pair);
-
-	key_pair = xmalloc(sizeof(config_key_pair_t));
-	key_pair->name = xstrdup("RamDiskImage");
-	key_pair->value = xstrdup(bg_conf->default_ramdiskimage);
-	list_append(my_list, key_pair);
-#else
-	key_pair = xmalloc(sizeof(config_key_pair_t));
-	key_pair->name = xstrdup("CnloadImage");
-	key_pair->value = xstrdup(bg_conf->default_linuximage);
-	list_append(my_list, key_pair);
-
-	key_pair = xmalloc(sizeof(config_key_pair_t));
-	key_pair->name = xstrdup("IoloadImage");
-	key_pair->value = xstrdup(bg_conf->default_ramdiskimage);
-	list_append(my_list, key_pair);
-#endif
-
-	key_pair = xmalloc(sizeof(config_key_pair_t));
-	key_pair->name = xstrdup("BridgeAPILogFile");
-	key_pair->value = xstrdup(bg_conf->bridge_api_file);
-	list_append(my_list, key_pair);
-
-	key_pair = xmalloc(sizeof(config_key_pair_t));
-	key_pair->name = xstrdup("BridgeAPIVerbose");
-	key_pair->value = xstrdup_printf("%u", bg_conf->bridge_api_verb);
-	list_append(my_list, key_pair);
-
-	if (bg_conf->deny_pass) {
-		key_pair = xmalloc(sizeof(config_key_pair_t));
-		key_pair->name = xstrdup("DenyPassThrough");
-		if (bg_conf->deny_pass & PASS_DENY_X)
-			xstrcat(key_pair->value, "X,");
-		if (bg_conf->deny_pass & PASS_DENY_Y)
-			xstrcat(key_pair->value, "Y,");
-		if (bg_conf->deny_pass & PASS_DENY_Z)
-			xstrcat(key_pair->value, "Z,");
-		if (key_pair->value)
-			key_pair->value[strlen(key_pair->value)-1] = '\0';
-		list_append(my_list, key_pair);
-	}
-
-	key_pair = xmalloc(sizeof(config_key_pair_t));
-	key_pair->name = xstrdup("LayoutMode");
-	switch(bg_conf->layout_mode) {
-	case LAYOUT_STATIC:
-		key_pair->value = xstrdup("Static");
-		break;
-	case LAYOUT_OVERLAP:
-		key_pair->value = xstrdup("Overlap");
-		break;
-	case LAYOUT_DYNAMIC:
-		key_pair->value = xstrdup("Dynamic");
-		break;
-	default:
-		key_pair->value = xstrdup("Unknown");
-		break;
-	}
-	list_append(my_list, key_pair);
-
-	key_pair = xmalloc(sizeof(config_key_pair_t));
-	key_pair->name = xstrdup("MloaderImage");
-	key_pair->value = xstrdup(bg_conf->default_mloaderimage);
-	list_append(my_list, key_pair);
-
-	key_pair = xmalloc(sizeof(config_key_pair_t));
-	key_pair->name = xstrdup("NodeCardNodeCnt");
-	key_pair->value = xstrdup_printf("%u", bg_conf->nodecard_node_cnt);
-	list_append(my_list, key_pair);
-
-	key_pair = xmalloc(sizeof(config_key_pair_t));
-	key_pair->name = xstrdup("Numpsets");
-	key_pair->value = xstrdup_printf("%u", bg_conf->numpsets);
-	list_append(my_list, key_pair);
-
-	list_sort(my_list, (ListCmpF) sort_key_pairs);
-
-	return my_list;
-}
-#endif
-
-/*
- * init() is called when the plugin is loaded, before any other functions
- * are called.  Put global initialization here.
- */
-extern int init ( void )
-{
-
-#ifdef HAVE_BG_L_P
-	if (bg_recover != NOT_FROM_CONTROLLER) {
-#if (SYSTEM_DIMENSIONS != 3)
-		fatal("SYSTEM_DIMENSIONS value (%d) invalid for BlueGene",
-		      SYSTEM_DIMENSIONS);
-#endif
-
-#ifdef HAVE_BG_FILES
-#ifdef HAVE_BGL
-	        if (!getenv("CLASSPATH") || !getenv("DB2INSTANCE")
-		    || !getenv("VWSPATH"))
-			fatal("db2profile has not been "
-			      "run to setup DB2 environment");
-
-		if ((SELECT_COPROCESSOR_MODE  != RM_PARTITION_COPROCESSOR_MODE)
-		    || (SELECT_VIRTUAL_NODE_MODE
-			!= RM_PARTITION_VIRTUAL_NODE_MODE))
-			fatal("enum node_use_type out of sync with rm_api.h");
-#endif
-		if ((SELECT_MESH  != RM_MESH)
-		    || (SELECT_TORUS != RM_TORUS)
-		    || (SELECT_NAV   != RM_NAV))
-			fatal("enum conn_type out of sync with rm_api.h");
-#endif
-
-		verbose("%s loading...", plugin_name);
-		/* if this is coming from something other than the controller
-		   we don't want to read the config or anything like that. */
-		if (init_bg() || _init_status_pthread())
-			return SLURM_ERROR;
-	}
-	verbose("%s loaded", plugin_name);
-#else
-	if (bg_recover != NOT_FROM_CONTROLLER)
-		fatal("select/bluegene is incompatible with a "
-		      "non BlueGene system");
-#endif
-	return SLURM_SUCCESS;
-}
-
-extern int fini ( void )
-{
-	int rc = SLURM_SUCCESS;
-
-#ifdef HAVE_BG_L_P
-	agent_fini = true;
-	pthread_mutex_lock(&thread_flag_mutex);
-	if ( block_thread ) {
-		verbose("Bluegene select plugin shutting down");
-		pthread_join(block_thread, NULL);
-		block_thread = 0;
-	}
-	if ( state_thread ) {
-		pthread_join(state_thread, NULL);
-		state_thread = 0;
-	}
-	pthread_mutex_unlock(&thread_flag_mutex);
-	fini_bg();
-#endif
-	return rc;
-}
-
-/*
- * The remainder of this file implements the standard SLURM
- * node selection API.
- */
-
-/* We rely upon DB2 to save and restore BlueGene state */
-extern int select_p_state_save(char *dir_name)
-{
-#ifdef HAVE_BG_L_P
-	ListIterator itr;
-	bg_record_t *bg_record = NULL;
-	int error_code = 0, log_fd;
-	char *old_file, *new_file, *reg_file;
-	uint32_t blocks_packed = 0, tmp_offset, block_offset;
-	Buf buffer = init_buf(BUF_SIZE);
-	DEF_TIMERS;
-
-	debug("bluegene: select_p_state_save");
-	START_TIMER;
-	/* write header: time */
-	packstr(BLOCK_STATE_VERSION, buffer);
-	block_offset = get_buf_offset(buffer);
-	pack32(blocks_packed, buffer);
-	pack_time(time(NULL), buffer);
-
-	/* write block records to buffer */
-	slurm_mutex_lock(&block_state_mutex);
-	itr = list_iterator_create(bg_lists->main);
-	while ((bg_record = list_next(itr))) {
-		if (bg_record->magic != BLOCK_MAGIC)
-			continue;
-		/* on real bluegene systems we only want to keep track of
-		 * the blocks in an error state
-		 */
-#ifdef HAVE_BG_FILES
-		if (bg_record->state != RM_PARTITION_ERROR)
-			continue;
-#endif
-		xassert(bg_record->bg_block_id != NULL);
-
-		pack_block(bg_record, buffer, SLURM_PROTOCOL_VERSION);
-		blocks_packed++;
-	}
-	list_iterator_destroy(itr);
-	slurm_mutex_unlock(&block_state_mutex);
-	tmp_offset = get_buf_offset(buffer);
-	set_buf_offset(buffer, block_offset);
-	pack32(blocks_packed, buffer);
-	set_buf_offset(buffer, tmp_offset);
-	/* Maintain config read lock until we copy state_save_location *\
-	   \* unlock_slurmctld(part_read_lock);          - see below      */
-
-	/* write the buffer to file */
-	slurm_conf_lock();
-	old_file = xstrdup(slurmctld_conf.state_save_location);
-	xstrcat(old_file, "/block_state.old");
-	reg_file = xstrdup(slurmctld_conf.state_save_location);
-	xstrcat(reg_file, "/block_state");
-	new_file = xstrdup(slurmctld_conf.state_save_location);
-	xstrcat(new_file, "/block_state.new");
-	slurm_conf_unlock();
-
-	log_fd = creat(new_file, 0600);
-	if (log_fd < 0) {
-		error("Can't save state, error creating file %s, %m",
-		      new_file);
-		error_code = errno;
-	} else {
-		int pos = 0, nwrite = get_buf_offset(buffer), amount;
-		char *data = (char *)get_buf_data(buffer);
-
-		while (nwrite > 0) {
-			amount = write(log_fd, &data[pos], nwrite);
-			if ((amount < 0) && (errno != EINTR)) {
-				error("Error writing file %s, %m", new_file);
-				error_code = errno;
-				break;
-			}
-			nwrite -= amount;
-			pos    += amount;
-		}
-		fsync(log_fd);
-		close(log_fd);
-	}
-	if (error_code)
-		(void) unlink(new_file);
-	else {			/* file shuffle */
-		(void) unlink(old_file);
-		if (link(reg_file, old_file))
-			debug4("unable to create link for %s -> %s: %m",
-			       reg_file, old_file);
-		(void) unlink(reg_file);
-		if (link(new_file, reg_file))
-			debug4("unable to create link for %s -> %s: %m",
-			       new_file, reg_file);
-		(void) unlink(new_file);
-	}
-	xfree(old_file);
-	xfree(reg_file);
-	xfree(new_file);
-
-	free_buf(buffer);
-	END_TIMER2("select_p_state_save");
-	return SLURM_SUCCESS;
-#else
-	return SLURM_ERROR;
-#endif
-}
-
-extern int select_p_state_restore(char *dir_name)
-{
-#ifdef HAVE_BG_L_P
-	debug("bluegene: select_p_state_restore");
-
-	return validate_current_blocks(dir_name);
-#else
-	return SLURM_ERROR;
-#endif
-}
-
-/* Sync BG blocks to currently active jobs */
-extern int select_p_job_init(List job_list)
-{
-#ifdef HAVE_BG_L_P
-	int rc = sync_jobs(job_list);
-
-	/* after we have synced the blocks then we say they are
-	   created. */
-	blocks_are_created = 1;
-	return rc;
-#else
-	return SLURM_ERROR;
-#endif
-}
-
-/* All initialization is performed by init() */
-extern int select_p_node_init(struct node_record *node_ptr, int node_cnt)
-{
-#ifdef HAVE_BG_L_P
-	if (node_cnt>0 && bg_conf)
-		if (node_ptr->cpus >= bg_conf->bp_node_cnt)
-			bg_conf->cpus_per_bp = node_ptr->cpus;
-
-	return SLURM_SUCCESS;
-#else
-	return SLURM_ERROR;
-#endif
-}
-
-/*
- * Called by slurmctld when a new configuration file is loaded
- * or scontrol is used to change block configuration
- */
-extern int select_p_block_init(List part_list)
-{
-#ifdef HAVE_BG_L_P
-	/* select_p_node_init needs to be called before this to set
-	   this up correctly
-	*/
-	if (read_bg_conf() == SLURM_ERROR) {
-		fatal("Error, could not read the file");
-		return SLURM_ERROR;
-	}
-
-	if (part_list) {
-		struct part_record *part_ptr = NULL;
-		ListIterator itr = list_iterator_create(part_list);
-		while ((part_ptr = list_next(itr))) {
-			part_ptr->max_nodes = part_ptr->max_nodes_orig;
-			part_ptr->min_nodes = part_ptr->min_nodes_orig;
-			select_p_alter_node_cnt(SELECT_SET_BP_CNT,
-						&part_ptr->max_nodes);
-			select_p_alter_node_cnt(SELECT_SET_BP_CNT,
-						&part_ptr->min_nodes);
-		}
-		list_iterator_destroy(itr);
-	}
-
-	return SLURM_SUCCESS;
-#else
-	return SLURM_ERROR;
-#endif
-}
-
-
-/*
- * select_p_job_test - Given a specification of scheduling requirements,
- *	identify the nodes which "best" satify the request. The specified
- *	nodes may be DOWN or BUSY at the time of this test as may be used
- *	to deterime if a job could ever run.
- * IN/OUT job_ptr - pointer to job being scheduled start_time is set
- *	when we can possibly start job.
- * IN/OUT bitmap - usable nodes are set on input, nodes not required to
- *	satisfy the request are cleared, other left set
- * IN min_nodes - minimum count of nodes
- * IN max_nodes - maximum count of nodes (0==don't care)
- * IN req_nodes - requested (or desired) count of nodes
- * IN mode - SELECT_MODE_RUN_NOW: try to schedule job now
- *           SELECT_MODE_TEST_ONLY: test if job can ever run
- *           SELECT_MODE_WILL_RUN: determine when and where job can run
- * IN preemptee_candidates - List of pointers to jobs which can be preempted.
- * IN/OUT preemptee_job_list - Pointer to list of job pointers. These are the
- *		jobs to be preempted to initiate the pending job. Not set
- *		if mode=SELECT_MODE_TEST_ONLY or input pointer is NULL.
- * RET zero on success, EINVAL otherwise
- * NOTE: bitmap must be a superset of req_nodes at the time that
- *	select_p_job_test is called
- */
-extern int select_p_job_test(struct job_record *job_ptr, bitstr_t *bitmap,
-			     uint32_t min_nodes, uint32_t max_nodes,
-			     uint32_t req_nodes, uint16_t mode,
-			     List preemptee_candidates,
-			     List *preemptee_job_list)
-{
-#ifdef HAVE_BG_L_P
-	/* submit_job - is there a block where we have:
-	 * 1) geometry requested
-	 * 2) min/max nodes (BPs) requested
-	 * 3) type: TORUS or MESH or NAV (torus else mesh)
-	 *
-	 * note: we don't have to worry about security at this level
-	 * as the SLURM block logic will handle access rights.
-	 */
-
-	return submit_job(job_ptr, bitmap, min_nodes, max_nodes,
-			  req_nodes, mode, preemptee_candidates,
-			  preemptee_job_list);
-#else
-	return SLURM_ERROR;
-#endif
-}
-
-extern int select_p_job_begin(struct job_record *job_ptr)
-{
-#ifdef HAVE_BG_L_P
-	return start_job(job_ptr);
-#else
-	return SLURM_ERROR;
-#endif
-}
-
-extern int select_p_job_ready(struct job_record *job_ptr)
-{
-#ifdef HAVE_BG_L_P
-	return block_ready(job_ptr);
-#else
-	return SLURM_ERROR;
-#endif
-}
-
-extern int select_p_job_resized(struct job_record *job_ptr,
-				struct node_record *node_ptr)
-{
-	return ESLURM_NOT_SUPPORTED;
-}
-
-extern int select_p_job_fini(struct job_record *job_ptr)
-{
-#ifdef HAVE_BG_L_P
-	return term_job(job_ptr);
-#else
-	return SLURM_ERROR;
-#endif
-}
-
-extern int select_p_job_suspend(struct job_record *job_ptr)
-{
-	return ESLURM_NOT_SUPPORTED;
-}
-
-extern int select_p_job_resume(struct job_record *job_ptr)
-{
-	return ESLURM_NOT_SUPPORTED;
-}
-
-extern int select_p_pack_select_info(time_t last_query_time,
-				     uint16_t show_flags, Buf *buffer_ptr,
-				     uint16_t protocol_version)
-{
-#ifdef HAVE_BG_L_P
-	ListIterator itr;
-	bg_record_t *bg_record = NULL;
-	uint32_t blocks_packed = 0, tmp_offset;
-	Buf buffer;
-
-	/* check to see if data has changed */
-	if (last_query_time >= last_bg_update) {
-		debug2("Node select info hasn't changed since %ld",
-		       last_bg_update);
-		return SLURM_NO_CHANGE_IN_DATA;
-	} else if (blocks_are_created) {
-		*buffer_ptr = NULL;
-		buffer = init_buf(HUGE_BUF_SIZE);
-		pack32(blocks_packed, buffer);
-		pack_time(last_bg_update, buffer);
-
-		if (protocol_version >= SLURM_2_1_PROTOCOL_VERSION) {
-			if (bg_lists->main) {
-				slurm_mutex_lock(&block_state_mutex);
-				itr = list_iterator_create(bg_lists->main);
-				while ((bg_record = list_next(itr))) {
-					if (bg_record->magic != BLOCK_MAGIC)
-						continue;
-					pack_block(bg_record, buffer,
-						   protocol_version);
-					blocks_packed++;
-				}
-				list_iterator_destroy(itr);
-				slurm_mutex_unlock(&block_state_mutex);
-			} else {
-				error("select_p_pack_select_info: "
-				      "no bg_lists->main");
-				return SLURM_ERROR;
-			}
-		}
-		tmp_offset = get_buf_offset(buffer);
-		set_buf_offset(buffer, 0);
-		pack32(blocks_packed, buffer);
-		set_buf_offset(buffer, tmp_offset);
-
-		*buffer_ptr = buffer;
-	} else {
-		error("select_p_pack_node_info: bg_lists->main not ready yet");
-		return SLURM_ERROR;
-	}
-
-	return SLURM_SUCCESS;
-#else
-	return SLURM_ERROR;
-#endif
-}
-
-extern int select_p_select_nodeinfo_pack(select_nodeinfo_t *nodeinfo,
-					 Buf buffer,
-					 uint16_t protocol_version)
-{
-	return select_nodeinfo_pack(nodeinfo, buffer, protocol_version);
-}
-
-extern int select_p_select_nodeinfo_unpack(select_nodeinfo_t **nodeinfo,
-					   Buf buffer,
-					   uint16_t protocol_version)
-{
-	return select_nodeinfo_unpack(nodeinfo, buffer, protocol_version);
-}
-
-extern select_nodeinfo_t *select_p_select_nodeinfo_alloc(uint32_t size)
-{
-	return select_nodeinfo_alloc(size);
-}
-
-extern int select_p_select_nodeinfo_free(select_nodeinfo_t *nodeinfo)
-{
-	return select_nodeinfo_free(nodeinfo);
-}
-
-extern int select_p_select_nodeinfo_set_all(time_t last_query_time)
-{
-	return select_nodeinfo_set_all(last_query_time);
-}
-
-extern int select_p_select_nodeinfo_set(struct job_record *job_ptr)
-{
-	return SLURM_SUCCESS;
-}
-
-extern int select_p_select_nodeinfo_get(select_nodeinfo_t *nodeinfo,
-					enum select_nodedata_type dinfo,
-					enum node_states state,
-					void *data)
-{
-	return select_nodeinfo_get(nodeinfo, dinfo, state, data);
-}
-
-select_jobinfo_t *select_p_select_jobinfo_alloc(void)
-{
-	return alloc_select_jobinfo();
-}
-
-extern int select_p_select_jobinfo_set(select_jobinfo_t *jobinfo,
-				       enum select_jobdata_type data_type,
-				       void *data)
-{
-	return set_select_jobinfo(jobinfo, data_type, data);
-}
-
-extern int select_p_select_jobinfo_get(select_jobinfo_t *jobinfo,
-				       enum select_jobdata_type data_type,
-				       void *data)
-{
-	return get_select_jobinfo(jobinfo, data_type, data);
-}
-
-extern select_jobinfo_t *select_p_select_jobinfo_copy(select_jobinfo_t *jobinfo)
-{
-	return copy_select_jobinfo(jobinfo);
-}
-
-extern int select_p_select_jobinfo_free(select_jobinfo_t *jobinfo)
-{
-	return free_select_jobinfo(jobinfo);
-}
-
-extern int  select_p_select_jobinfo_pack(select_jobinfo_t *jobinfo, Buf buffer,
-					 uint16_t protocol_version)
-{
-	return pack_select_jobinfo(jobinfo, buffer, protocol_version);
-}
-
-extern int  select_p_select_jobinfo_unpack(select_jobinfo_t **jobinfo,
-					   Buf buffer,
-					   uint16_t protocol_version)
-{
-	return unpack_select_jobinfo(jobinfo, buffer, protocol_version);
-}
-
-extern char *select_p_select_jobinfo_sprint(select_jobinfo_t *jobinfo,
-					    char *buf, size_t size, int mode)
-{
-	return sprint_select_jobinfo(jobinfo, buf, size, mode);
-}
-
-extern char *select_p_select_jobinfo_xstrdup(select_jobinfo_t *jobinfo,
-					     int mode)
-{
-	return xstrdup_select_jobinfo(jobinfo, mode);
-}
-
-extern int select_p_update_block(update_block_msg_t *block_desc_ptr)
-{
-#ifdef HAVE_BG_L_P
-	int rc = SLURM_SUCCESS;
-	bg_record_t *bg_record = NULL;
-	char reason[200];
-
-	if (!block_desc_ptr->bg_block_id) {
-		error("update_block: No name specified");
-		return ESLURM_INVALID_BLOCK_NAME;
-	}
-
-	slurm_mutex_lock(&block_state_mutex);
-	bg_record = find_bg_record_in_list(bg_lists->main,
-					   block_desc_ptr->bg_block_id);
-	if (!bg_record) {
-		slurm_mutex_unlock(&block_state_mutex);
-		return ESLURM_INVALID_BLOCK_NAME;
-	}
-
-	if (block_desc_ptr->reason)
-		snprintf(reason, sizeof(reason), "%s", block_desc_ptr->reason);
-	else if (block_desc_ptr->state == RM_PARTITION_CONFIGURING)
-		snprintf(reason, sizeof(reason),
-			 "update_block: "
-			 "Admin recreated %s.", bg_record->bg_block_id);
-	else if (block_desc_ptr->state == RM_PARTITION_NAV) {
-		if (bg_record->conn_type < SELECT_SMALL)
-			snprintf(reason, sizeof(reason),
-				 "update_block: "
-				 "Admin removed block %s",
-				 bg_record->bg_block_id);
-		else
-			snprintf(reason, sizeof(reason),
-				 "update_block: "
-				 "Removed all blocks on midplane %s",
-				 bg_record->nodes);
-
-	} else
-		snprintf(reason, sizeof(reason),
-			 "update_block: "
-			 "Admin set block %s state to %s",
-			 bg_record->bg_block_id,
-			 bg_block_state_string(block_desc_ptr->state));
-
-	/* First fail any job running on this block */
-	if (bg_record->job_running > NO_JOB_RUNNING) {
-		slurm_mutex_unlock(&block_state_mutex);
-		bg_requeue_job(bg_record->job_running, 0);
-		slurm_mutex_lock(&block_state_mutex);
-		if (!block_ptr_exist_in_list(bg_lists->main, bg_record)) {
-			slurm_mutex_unlock(&block_state_mutex);
-			error("while trying to put block in "
-			      "error state it disappeared");
-			return SLURM_ERROR;
-		}
-		/* need to set the job_ptr to NULL
-		   here or we will get error message
-		   about us trying to free this block
-		   with a job in it.
-		*/
-		bg_record->job_ptr = NULL;
-	}
-
-	if (block_desc_ptr->state == RM_PARTITION_ERROR) {
-		bg_record_t *found_record = NULL;
-		ListIterator itr;
-		List delete_list = list_create(NULL);
-		/* This loop shouldn't do much in regular Dynamic mode
-		   since there shouldn't be overlapped blocks.  But if
-		   there is a trouble block that isn't going away and
-		   we need to mark it in an error state there could be
-		   blocks overlapped where we need to requeue the jobs.
-		*/
-		itr = list_iterator_create(bg_lists->main);
-		while ((found_record = list_next(itr))) {
-			if (bg_record == found_record)
-				continue;
-
-			if (!blocks_overlap(bg_record, found_record)) {
-				debug2("block %s isn't part of errored %s",
-				       found_record->bg_block_id,
-				       bg_record->bg_block_id);
-				continue;
-			}
-			if (found_record->job_running > NO_JOB_RUNNING) {
-				if (found_record->job_ptr
-				    && IS_JOB_CONFIGURING(
-					    found_record->job_ptr))
-					info("Pending job %u on block %s "
-					     "will try to be requeued "
-					     "because overlapping block %s "
-					     "is in an error state.",
-					     found_record->job_running,
-					     found_record->bg_block_id,
-					     bg_record->bg_block_id);
-				else
-					info("Failing job %u on block %s "
-					     "because overlapping block %s "
-					     "is in an error state.",
-					     found_record->job_running,
-					     found_record->bg_block_id,
-					     bg_record->bg_block_id);
-
-				/* This job will be requeued in the
-				   free_block_list code below, just
-				   make note of it here.
-				*/
-			} else {
-				debug2("block %s is part of errored %s "
-				       "but no running job",
-				       found_record->bg_block_id,
-				       bg_record->bg_block_id);
-			}
-			list_push(delete_list, found_record);
-		}
-		list_iterator_destroy(itr);
-		slurm_mutex_unlock(&block_state_mutex);
-		free_block_list(NO_VAL, delete_list, 0, 0);
-		list_destroy(delete_list);
-		put_block_in_error_state(bg_record, BLOCK_ERROR_STATE, reason);
-	} else if (block_desc_ptr->state == RM_PARTITION_FREE) {
-		bg_free_block(bg_record, 0, 1);
-		resume_block(bg_record);
-		slurm_mutex_unlock(&block_state_mutex);
-	} else if (block_desc_ptr->state == RM_PARTITION_DEALLOCATING) {
-		/* This can't be RM_PARTITION_READY since the enum
-		   changed from BGL to BGP and if we are running cross
-		   cluster it just doesn't work.
-		*/
-		resume_block(bg_record);
-		slurm_mutex_unlock(&block_state_mutex);
-	} else if (bg_conf->layout_mode == LAYOUT_DYNAMIC
-		   && (block_desc_ptr->state == RM_PARTITION_NAV)) {
-		/* This means remove the block from the system.  If
-		   the block is a small block we need to remove all the
-		   blocks on that midplane.
-		*/
-		bg_record_t *found_record = NULL;
-		ListIterator itr;
-		List delete_list = list_create(NULL);
-
-		list_push(delete_list, bg_record);
-		/* only do the while loop if we are dealing with a
-		   small block */
-		if (bg_record->conn_type < SELECT_SMALL)
-			goto large_block;
-
-		itr = list_iterator_create(bg_lists->main);
-		while ((found_record = list_next(itr))) {
-			if (bg_record == found_record)
-				continue;
-
-			if (!bit_equal(bg_record->bitmap,
-				       found_record->bitmap)) {
-				debug2("block %s isn't part of to be freed %s",
-				       found_record->bg_block_id,
-				       bg_record->bg_block_id);
-				continue;
-			}
-			if (found_record->job_running > NO_JOB_RUNNING) {
-				if (found_record->job_ptr
-				    && IS_JOB_CONFIGURING(
-					    found_record->job_ptr))
-					info("Pending job %u on block %s "
-					     "will try to be requeued "
-					     "because overlapping block %s "
-					     "is in an error state.",
-					     found_record->job_running,
-					     found_record->bg_block_id,
-					     bg_record->bg_block_id);
-				else
-					info("Failing job %u on block %s "
-					     "because overlapping block %s "
-					     "is in an error state.",
-					     found_record->job_running,
-					     found_record->bg_block_id,
-					     bg_record->bg_block_id);
-				/* This job will be requeued in the
-				   free_block_list code below, just
-				   make note of it here.
-				*/
-			} else {
-				debug2("block %s is part of to be freed %s "
-				       "but no running job",
-				       found_record->bg_block_id,
-				       bg_record->bg_block_id);
-			}
-			list_push(delete_list, found_record);
-		}
-		list_iterator_destroy(itr);
-
-	large_block:
-		/* make sure if we are removing a block to put it back
-		   to a normal state in accounting first */
-		itr = list_iterator_create(delete_list);
-		while ((found_record = list_next(itr))) {
-			if (found_record->state == RM_PARTITION_ERROR)
-				resume_block(found_record);
-		}
-		list_iterator_destroy(itr);
-
-		slurm_mutex_unlock(&block_state_mutex);
-		free_block_list(NO_VAL, delete_list, 0, 0);
-		list_destroy(delete_list);
-	} else if (block_desc_ptr->state == RM_PARTITION_CONFIGURING) {
-		/* This means recreate the block, remove it and then
-		   recreate it.
-		*/
-
-		/* make sure if we are removing a block to put it back
-		   to a normal state in accounting first */
-		if (bg_record->state == RM_PARTITION_ERROR)
-			resume_block(bg_record);
-
-		term_jobs_on_block(bg_record->bg_block_id);
-		if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE)
-			info("select_p_update_block: "
-			     "freeing the block %s.", bg_record->bg_block_id);
-		bg_free_block(bg_record, 1, 1);
-		if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE)
-			info("select_p_update_block: done");
-
-		/* Now remove it from the main list since we are
-		   looking for a state change and it won't be caught
-		   unless it is in the main list until now.
-		*/
-		remove_from_bg_list(bg_lists->main, bg_record);
-
-#ifdef HAVE_BG_FILES
-		if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE)
-			info("select_p_update_block: "
-			     "removing %s from database",
-			     bg_record->bg_block_id);
-
-		rc = bridge_remove_block(bg_record->bg_block_id);
-		if (rc != STATUS_OK) {
-			if (rc == PARTITION_NOT_FOUND) {
-				debug("select_p_update_block: "
-				      "block %s is not found",
-				      bg_record->bg_block_id);
-			} else {
-				error("select_p_update_block: "
-				      "rm_remove_partition(%s): %s",
-				      bg_record->bg_block_id,
-				      bg_err_str(rc));
-			}
-		} else
-			if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE)
-				info("select_p_update_block: done %s",
-				     (char *)bg_record->bg_block_id);
-#endif
-		xfree(bg_record->bg_block_id);
-		if (configure_block(bg_record) == SLURM_ERROR) {
-			destroy_bg_record(bg_record);
-			error("select_p_update_block: "
-			      "unable to configure block in api");
-		} else {
-			print_bg_record(bg_record);
-			list_append(bg_lists->main, bg_record);
-			sort_bg_record_inc_size(bg_lists->main);
-		}
-
-		slurm_mutex_unlock(&block_state_mutex);
-	} else {
-		slurm_mutex_unlock(&block_state_mutex);
-		error("state is ? %s",
-		      bg_block_state_string(block_desc_ptr->state));
-		return ESLURM_INVALID_NODE_STATE;
-	}
-
-	/* info("%s", reason); */
-	last_bg_update = time(NULL);
-
-	return rc;
-#else
-	return SLURM_ERROR;
-#endif
-}
-
-extern int select_p_update_sub_node (update_block_msg_t *block_desc_ptr)
-{
-#ifdef HAVE_BG_L_P
-	int rc = SLURM_SUCCESS;
-	int i = 0, j = 0;
-	char coord[SYSTEM_DIMENSIONS+1], *node_name = NULL;
-	char ionodes[128];
-	int set = 0;
-	double nc_pos = 0, last_pos = -1;
-	bitstr_t *ionode_bitmap = NULL;
-	char *name = NULL;
-
-	if (bg_conf->layout_mode != LAYOUT_DYNAMIC) {
-		info("You can't use this call unless you are on a Dynamically "
-		     "allocated system.  Please use update BlockName instead");
-		rc = ESLURM_INVALID_BLOCK_LAYOUT;
-		goto end_it;
-	}
-
-	memset(coord, 0, sizeof(coord));
-	memset(ionodes, 0, 128);
-	if (!block_desc_ptr->nodes) {
-		error("update_sub_node: No name specified");
-		rc = ESLURM_INVALID_BLOCK_NAME;
-		goto end_it;
-	}
-	name = block_desc_ptr->nodes;
-
-	while (name[j] != '\0') {
-		if (name[j] == '[') {
-			if (set<1) {
-				rc = SLURM_ERROR;
-				goto end_it;
-			}
-			i = j++;
-			if ((name[j] < '0'
-			     || name[j] > 'Z'
-			     || (name[j] > '9'
-				 && name[j] < 'A'))) {
-				error("update_sub_node: sub block is empty");
-				rc = SLURM_ERROR;
-				goto end_it;
-			}
-			while (name[i] != '\0') {
-				if (name[i] == ']')
-					break;
-				i++;
-			}
-			if (name[i] != ']') {
-				error("update_sub_node: "
-				      "No close (']') on sub block");
-				rc = SLURM_ERROR;
-				goto end_it;
-			}
-
-			strncpy(ionodes, name+j, i-j);
-			set++;
-			break;
-		} else if ((name[j] >= '0'
-			    && name[j] <= '9')
-			   || (name[j] >= 'A'
-			       && name[j] <= 'Z')) {
-			if (set) {
-				rc = SLURM_ERROR;
-				goto end_it;
-			}
-			/* make sure we are asking for a correct name */
-			for(i = 0; i < SYSTEM_DIMENSIONS; i++) {
-				if ((name[j+i] >= '0'
-				     && name[j+i] <= '9')
-				    || (name[j+i] >= 'A'
-					&& name[j+i] <= 'Z'))
-					continue;
-
-				error("update_sub_node: "
-				      "misformatted name given %s",
-				      name);
-				rc = SLURM_ERROR;
-				goto end_it;
-			}
-
-			strncpy(coord, name+j,
-				SYSTEM_DIMENSIONS);
-			j += SYSTEM_DIMENSIONS-1;
-			set++;
-		}
-		j++;
-	}
-
-	if (set != 2) {
-		error("update_sub_node: "
-		      "I didn't get the base partition and the sub part.");
-		rc = SLURM_ERROR;
-		goto end_it;
-	}
-	ionode_bitmap = bit_alloc(bg_conf->numpsets);
-	bit_unfmt(ionode_bitmap, ionodes);
-	if (bit_ffs(ionode_bitmap) == -1) {
-		error("update_sub_node: Invalid ionode '%s' given.", ionodes);
-		rc = SLURM_ERROR;
-		FREE_NULL_BITMAP(ionode_bitmap);
-		goto end_it;
-	}
-	node_name = xstrdup_printf("%s%s", bg_conf->slurm_node_prefix, coord);
-	/* find out how many nodecards to get for each ionode */
-	if (block_desc_ptr->state == RM_PARTITION_ERROR) {
-		info("Admin setting %s[%s] in an error state",
-		     node_name, ionodes);
-		for(i = 0; i<bg_conf->numpsets; i++) {
-			if (bit_test(ionode_bitmap, i)) {
-				if ((int)nc_pos != (int)last_pos) {
-					/* find first bit in nc */
-					int start_io =
-						(int)nc_pos * bg_conf->io_ratio;
-					down_nodecard(node_name, start_io, 0);
-					last_pos = nc_pos;
-				}
-			}
-			nc_pos += bg_conf->nc_ratio;
-		}
-	} else if (block_desc_ptr->state == RM_PARTITION_FREE) {
-		info("Admin setting %s[%s] in an free state",
-		     node_name, ionodes);
-		up_nodecard(node_name, ionode_bitmap);
-	} else {
-		error("update_sub_node: Unknown state %s",
-		      bg_block_state_string(block_desc_ptr->state));
-		rc = ESLURM_INVALID_BLOCK_STATE;
-	}
-
-	FREE_NULL_BITMAP(ionode_bitmap);
-	xfree(node_name);
-
-	last_bg_update = time(NULL);
-end_it:
-	return rc;
-#else
-	return SLURM_ERROR;
-#endif
-}
-
-extern int select_p_get_info_from_plugin (enum select_plugindata_info dinfo,
-					  struct job_record *job_ptr,
-					  void *data)
-{
-#ifdef HAVE_BG_L_P
-	uint16_t *tmp16 = (uint16_t *) data;
-	uint32_t *tmp32 = (uint32_t *) data;
-	List *tmp_list = (List *) data;
-	int rc = SLURM_SUCCESS;
-
-	switch(dinfo) {
-	case SELECT_CR_PLUGIN:
-		*tmp32 = 0;
-		break;
-	case SELECT_STATIC_PART:
-		if (bg_conf->layout_mode == LAYOUT_STATIC)
-			*tmp16 = 1;
-		else
-			*tmp16 = 0;
-		break;
-
-	case SELECT_CONFIG_INFO:
-		*tmp_list = _get_config();
-		break;
-	default:
-		error("select_p_get_info_from_plugin info %d invalid",
-		      dinfo);
-		rc = SLURM_ERROR;
-		break;
-	}
-
-	return rc;
-#else
-	return SLURM_ERROR;
-#endif
-}
-
-extern int select_p_update_node_config (int index)
-{
-	return SLURM_SUCCESS;
-}
-
-extern int select_p_update_node_state (int index, uint16_t state)
-{
-#ifdef HAVE_BG_L_P
-	int x, y, z;
-	for (y = DIM_SIZE[Y] - 1; y >= 0; y--) {
-		for (z = 0; z < DIM_SIZE[Z]; z++) {
-			for (x = 0; x < DIM_SIZE[X]; x++) {
-				if (ba_system_ptr->grid[x][y][z].index
-				    == index) {
-					ba_update_node_state(
-						&ba_system_ptr->grid[x][y][z],
-						state);
-					return SLURM_SUCCESS;
-				}
-			}
-		}
-	}
-#endif
-	return SLURM_ERROR;
-}
-
-extern int select_p_alter_node_cnt(enum select_node_cnt type, void *data)
-{
-#ifdef HAVE_BG_L_P
-	job_desc_msg_t *job_desc = (job_desc_msg_t *)data;
-	uint16_t *cpus = (uint16_t *)data;
-	uint32_t *nodes = (uint32_t *)data, tmp = 0;
-	int i;
-	uint16_t req_geometry[SYSTEM_DIMENSIONS];
-
-	if (!bg_conf->bp_node_cnt) {
-		fatal("select_p_alter_node_cnt: This can't be called "
-		      "before init");
-	}
-
-	switch (type) {
-	case SELECT_GET_NODE_SCALING:
-		if ((*nodes) != INFINITE)
-			(*nodes) = bg_conf->bp_node_cnt;
-		break;
-	case SELECT_GET_NODE_CPU_CNT:
-		if ((*cpus) != (uint16_t)INFINITE)
-			(*cpus) = bg_conf->cpu_ratio;
-		break;
-	case SELECT_GET_BP_CPU_CNT:
-		if ((*nodes) != INFINITE)
-			(*nodes) = bg_conf->cpus_per_bp;
-		break;
-	case SELECT_SET_BP_CNT:
-		if (((*nodes) == INFINITE) || ((*nodes) == NO_VAL))
-			tmp = (*nodes);
-		else if ((*nodes) > bg_conf->bp_node_cnt) {
-			tmp = (*nodes);
-			tmp /= bg_conf->bp_node_cnt;
-			if (tmp < 1)
-				tmp = 1;
-		} else
-			tmp = 1;
-		(*nodes) = tmp;
-		break;
-	case SELECT_APPLY_NODE_MIN_OFFSET:
-		if ((*nodes) == 1) {
-			/* Job will actually get more than one c-node,
-			 * but we can't be sure exactly how much so we
-			 * don't scale up this value. */
-			break;
-		}
-		(*nodes) *= bg_conf->bp_node_cnt;
-		break;
-	case SELECT_APPLY_NODE_MAX_OFFSET:
-		if ((*nodes) != INFINITE)
-			(*nodes) *= bg_conf->bp_node_cnt;
-		break;
-	case SELECT_SET_NODE_CNT:
-		get_select_jobinfo(job_desc->select_jobinfo->data,
-				   SELECT_JOBDATA_ALTERED, &tmp);
-		if (tmp == 1) {
-			return SLURM_SUCCESS;
-		}
-		tmp = 1;
-		set_select_jobinfo(job_desc->select_jobinfo->data,
-				   SELECT_JOBDATA_ALTERED, &tmp);
-
-		if (job_desc->min_nodes == (uint32_t) NO_VAL)
-			return SLURM_SUCCESS;
-
-		get_select_jobinfo(job_desc->select_jobinfo->data,
-				   SELECT_JOBDATA_GEOMETRY, &req_geometry);
-
-		if (req_geometry[0] != 0
-		    && req_geometry[0] != (uint16_t)NO_VAL) {
-			job_desc->min_nodes = 1;
-			for (i=0; i<SYSTEM_DIMENSIONS; i++)
-				job_desc->min_nodes *=
-					(uint16_t)req_geometry[i];
-			job_desc->min_nodes *= bg_conf->bp_node_cnt;
-			job_desc->max_nodes = job_desc->min_nodes;
-		}
-
-		/* make sure if the user only specified min_cpus to
-		   set min_nodes correctly
-		*/
-		if ((job_desc->min_cpus != NO_VAL)
-		    && (job_desc->min_cpus > job_desc->min_nodes))
-			job_desc->min_nodes =
-				job_desc->min_cpus / bg_conf->cpu_ratio;
-
-		/* initialize min_cpus to the min_nodes */
-		job_desc->min_cpus = job_desc->min_nodes * bg_conf->cpu_ratio;
-
-		if ((job_desc->max_nodes == (uint32_t) NO_VAL)
-		    || (job_desc->max_nodes < job_desc->min_nodes))
-			job_desc->max_nodes = job_desc->min_nodes;
-
-		/* See if min_nodes is greater than one base partition */
-		if (job_desc->min_nodes > bg_conf->bp_node_cnt) {
-			/*
-			 * if it is make sure it is a factor of
-			 * bg_conf->bp_node_cnt, if it isn't make it
-			 * that way
-			 */
-			tmp = job_desc->min_nodes % bg_conf->bp_node_cnt;
-			if (tmp > 0)
-				job_desc->min_nodes +=
-					(bg_conf->bp_node_cnt-tmp);
-		}
-		tmp = job_desc->min_nodes / bg_conf->bp_node_cnt;
-
-		/* this means it is greater or equal to one bp */
-		if (tmp > 0) {
-			set_select_jobinfo(job_desc->select_jobinfo->data,
-					   SELECT_JOBDATA_NODE_CNT,
-					   &job_desc->min_nodes);
-			job_desc->min_nodes = tmp;
-			job_desc->min_cpus = bg_conf->cpus_per_bp * tmp;
-		} else {
-#ifdef HAVE_BGL
-			if (job_desc->min_nodes <= bg_conf->nodecard_node_cnt
-			    && bg_conf->nodecard_ionode_cnt)
-				job_desc->min_nodes =
-					bg_conf->nodecard_node_cnt;
-			else if (job_desc->min_nodes
-				 <= bg_conf->quarter_node_cnt)
-				job_desc->min_nodes =
-					bg_conf->quarter_node_cnt;
-			else
-				job_desc->min_nodes =
-					bg_conf->bp_node_cnt;
-
-			set_select_jobinfo(job_desc->select_jobinfo->data,
-					   SELECT_JOBDATA_NODE_CNT,
-					   &job_desc->min_nodes);
-
-			tmp = bg_conf->bp_node_cnt/job_desc->min_nodes;
-
-			job_desc->min_cpus = bg_conf->cpus_per_bp/tmp;
-			job_desc->min_nodes = 1;
-#else
-			i = bg_conf->smallest_block;
-			while (i <= bg_conf->bp_node_cnt) {
-				if (job_desc->min_nodes <= i) {
-					job_desc->min_nodes = i;
-					break;
-				}
-				i *= 2;
-			}
-
-			set_select_jobinfo(job_desc->select_jobinfo->data,
-					   SELECT_JOBDATA_NODE_CNT,
-					   &job_desc->min_nodes);
-
-			job_desc->min_cpus = job_desc->min_nodes
-				* bg_conf->cpu_ratio;
-			job_desc->min_nodes = 1;
-#endif
-		}
-
-		if (job_desc->max_nodes > bg_conf->bp_node_cnt) {
-			tmp = job_desc->max_nodes % bg_conf->bp_node_cnt;
-			if (tmp > 0)
-				job_desc->max_nodes +=
-					(bg_conf->bp_node_cnt-tmp);
-		}
-		tmp = job_desc->max_nodes / bg_conf->bp_node_cnt;
-
-		if (tmp > 0) {
-			job_desc->max_nodes = tmp;
-			job_desc->max_cpus =
-				job_desc->max_nodes * bg_conf->cpus_per_bp;
-			tmp = NO_VAL;
-		} else {
-#ifdef HAVE_BGL
-			if (job_desc->max_nodes <= bg_conf->nodecard_node_cnt
-			    && bg_conf->nodecard_ionode_cnt)
-				job_desc->max_nodes =
-					bg_conf->nodecard_node_cnt;
-			else if (job_desc->max_nodes
-				 <= bg_conf->quarter_node_cnt)
-				job_desc->max_nodes =
-					bg_conf->quarter_node_cnt;
-			else
-				job_desc->max_nodes =
-					bg_conf->bp_node_cnt;
-
-			tmp = bg_conf->bp_node_cnt/job_desc->max_nodes;
-			job_desc->max_cpus = bg_conf->cpus_per_bp/tmp;
-			job_desc->max_nodes = 1;
-#else
-			i = bg_conf->smallest_block;
-			while (i <= bg_conf->bp_node_cnt) {
-				if (job_desc->max_nodes <= i) {
-					job_desc->max_nodes = i;
-					break;
-				}
-				i *= 2;
-			}
-			job_desc->max_cpus =
-				job_desc->max_nodes * bg_conf->cpu_ratio;
-
-			job_desc->max_nodes = 1;
-#endif
-		}
-		tmp = NO_VAL;
-
-		break;
-	default:
-		error("unknown option %d for alter_node_cnt", type);
-	}
-
-	return SLURM_SUCCESS;
-#else
-	return SLURM_ERROR;
-#endif
-}
-
-extern int select_p_reconfigure(void)
-{
-#ifdef HAVE_BG_L_P
-	slurm_conf_lock();
-	if (!slurmctld_conf.slurm_user_name
-	    || strcmp(bg_conf->slurm_user_name, slurmctld_conf.slurm_user_name))
-		error("The slurm user has changed from '%s' to '%s'.  "
-		      "If this is really what you "
-		      "want you will need to restart slurm for this "
-		      "change to be enforced in the bluegene plugin.",
-		      bg_conf->slurm_user_name, slurmctld_conf.slurm_user_name);
-	if (!slurmctld_conf.node_prefix
-	    || strcmp(bg_conf->slurm_node_prefix, slurmctld_conf.node_prefix))
-		error("Node Prefix has changed from '%s' to '%s'.  "
-		      "If this is really what you "
-		      "want you will need to restart slurm for this "
-		      "change to be enforced in the bluegene plugin.",
-		      bg_conf->slurm_node_prefix, slurmctld_conf.node_prefix);
-	bg_conf->slurm_debug_flags = slurmctld_conf.debug_flags;
-	set_ba_debug_flags(bg_conf->slurm_debug_flags);
-	slurm_conf_unlock();
-
-	return SLURM_SUCCESS;
-#else
-	return SLURM_ERROR;
-#endif
-}
-
diff --git a/src/plugins/select/bluegene/runjob_plugin.cc b/src/plugins/select/bluegene/runjob_plugin.cc
new file mode 100644
index 000000000..49c23ca3b
--- /dev/null
+++ b/src/plugins/select/bluegene/runjob_plugin.cc
@@ -0,0 +1,319 @@
+/*****************************************************************************\
+ *  runjob_plugin.cc - This plug is used to convey to runjob the
+ *                     desires of slurm based on the allocation that
+ *                     has surrounded it.  If runjob was ran outside
+ *                     of SLURM this plugin will terminate the job at
+ *                     that moment.
+ *
+ *****************************************************************************
+ *  Copyright (C) 2011 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Danny Auble <auble1@llnl.gov> et. al.
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+extern "C" {
+#ifdef HAVE_CONFIG_H
+#  include "config.h"
+#endif
+#include "src/common/xmalloc.h"
+#include <slurm/slurm.h>
+
+}
+
+#ifdef HAVE_BG_FILES
+
+#include <bgsched/runjob/Plugin.h>
+#include <bgsched/Dimension.h>
+//#include "ProcessTree.h"
+
+#include <boost/thread/mutex.hpp>
+#include <boost/foreach.hpp>
+
+#include <iosfwd>
+
+using namespace bgsched;
+
+class Plugin : public bgsched::runjob::Plugin
+{
+public:
+	Plugin();
+
+	~Plugin();
+
+	void execute(
+		bgsched::runjob::Verify& data
+		);
+
+	void execute(
+		const bgsched::runjob::Started& data
+		);
+
+	void execute(
+		const bgsched::runjob::Terminated& data
+		);
+
+private:
+	boost::mutex _mutex;
+};
+
+Plugin::Plugin() :
+	bgsched::runjob::Plugin(),
+	_mutex()
+{
+	assert(HIGHEST_DIMENSIONS >= Dimension::NodeDims);
+
+	std::cout << "Slurm runjob plugin loaded" << std::endl;
+}
+
+Plugin::~Plugin()
+{
+	std::cout << "Slurm runjob plugin finished" << std::endl;
+}
+
+void Plugin::execute(bgsched::runjob::Verify& verify)
+{
+	boost::lock_guard<boost::mutex> lock( _mutex );
+	unsigned geo[Dimension::NodeDims];
+	unsigned start_coords[Dimension::NodeDims];
+	int found = 0;
+	int looking_for = 2;
+	int block_cnode_cnt = 0;
+	int step_cnode_cnt = 0;
+	bool sub_block_job = 0;
+	job_step_info_response_msg_t * step_resp = NULL;
+	job_step_info_t *step_ptr = NULL;
+	uint32_t job_id = NO_VAL, step_id = NO_VAL;
+	char *bg_block_id = NULL;
+
+	geo[0] = NO_VAL;
+	start_coords[0] = NO_VAL;
+
+	/* Get the job/step id's from the environment and then go
+	 * verify with the slurmctld where this step should be running.
+	 */
+	BOOST_FOREACH(const bgsched::runjob::Environment& env_var,
+		      verify.envs()) {
+		if (env_var.getKey() == "SLURM_JOB_ID") {
+			job_id = atoi(env_var.getValue().c_str());
+			found++;
+		} else if (env_var.getKey() == "SLURM_STEP_ID") {
+			step_id = atoi(env_var.getValue().c_str());
+			found++;
+		}
+
+		if (found == looking_for)
+			break;
+	}
+
+	if (found != looking_for)
+		goto deny_job;
+
+	if (slurm_get_job_steps((time_t) 0, job_id, step_id,
+				&step_resp, SHOW_ALL)) {
+		slurm_perror((char *)"slurm_get_job_steps error");
+		goto deny_job;
+	}
+
+	if (!step_resp->job_step_count) {
+		std::cerr << "No steps match this id "
+			  << job_id << "." << step_id << std::endl;
+		goto deny_job;
+	}
+
+	step_ptr = &step_resp->job_steps[0];
+
+	/* A bit of verification to make sure this is the correct user
+	   supposed to be running.
+	*/
+	if (verify.user().uid() != step_ptr->user_id) {
+		std::cerr << "Jobstep " << job_id << "." << step_id
+			  << " should be ran by uid " << step_ptr->user_id
+			  << " but it is trying to be ran by "
+			  << verify.user().uid() << std::endl;
+		goto deny_job;
+	}
+
+	if (slurm_get_select_jobinfo(step_ptr->select_jobinfo,
+				     SELECT_JOBDATA_BLOCK_ID,
+				     &bg_block_id)) {
+		std::cerr << "Can't get the block id!" << std::endl;
+		goto deny_job;
+	}
+	verify.block(bg_block_id);
+	xfree(bg_block_id);
+
+	if (slurm_get_select_jobinfo(step_ptr->select_jobinfo,
+				     SELECT_JOBDATA_BLOCK_NODE_CNT,
+				     &block_cnode_cnt)) {
+		std::cerr << "Can't get the block node count!" << std::endl;
+		goto deny_job;
+	}
+
+	if (slurm_get_select_jobinfo(step_ptr->select_jobinfo,
+				     SELECT_JOBDATA_NODE_CNT,
+				     &step_cnode_cnt)) {
+		std::cerr << "Can't get the step node count!" << std::endl;
+		goto deny_job;
+	}
+
+	if (!step_cnode_cnt || !block_cnode_cnt) {
+		std::cerr << "We didn't get both the step cnode "
+			  << "count and the block cnode cnt! step="
+			  << step_cnode_cnt << " block="
+			  << block_cnode_cnt << std::endl;
+		goto deny_job;
+	} else if (step_cnode_cnt < block_cnode_cnt) {
+		uint16_t dim;
+		uint16_t tmp_uint16[HIGHEST_DIMENSIONS];
+
+		sub_block_job = 1;
+		if (slurm_get_select_jobinfo(step_ptr->select_jobinfo,
+					     SELECT_JOBDATA_GEOMETRY,
+					     &tmp_uint16)) {
+			std::cerr << "Can't figure out the geo "
+				  << "given for sub-block job!" << std::endl;
+			goto deny_job;
+		}
+		/* since geo is an unsigned (who really knows what
+		   that is depending on the arch) we need to convert
+		   our uint16_t to the unsigned array
+		*/
+		for (dim=0; dim<Dimension::NodeDims; dim++)
+			geo[dim] = tmp_uint16[dim];
+
+		/* Since IBM's stuff relies on a relative location we
+		   have stored this information in the conn_type of
+		   the select_jobinfo structure.  If you want the
+		   absolute location use the SELECT_JOBDATA_START_LOC
+		   variable.
+		*/
+		if (slurm_get_select_jobinfo(step_ptr->select_jobinfo,
+					     SELECT_JOBDATA_CONN_TYPE,
+					     &tmp_uint16)) {
+			std::cerr << "Can't figure out the start loc "
+				  << "for sub-block job!" << std::endl;
+			goto deny_job;
+		}
+		for (dim=0; dim<Dimension::NodeDims; dim++)
+			start_coords[dim] = tmp_uint16[dim];
+	}
+
+	if (sub_block_job && start_coords[0] != NO_VAL)
+		verify.corner(bgsched::runjob::Corner(start_coords));
+	else if (sub_block_job) {
+		std::cerr << "No corner given for sub-block job!" << std::endl;
+		goto deny_job;
+	}
+
+	if (sub_block_job && geo[0] != NO_VAL)
+		verify.shape(bgsched::runjob::Shape(geo));
+	else if (sub_block_job) {
+		std::cerr << "No shape given for sub-block job!" << std::endl;
+		goto deny_job;
+	}
+
+	if (verify.block().empty() || (verify.block().length() < 3)) {
+		std::cerr << "YOU ARE OUTSIDE OF SLURM!!!!" << std::endl;
+		goto deny_job;
+	}
+
+	// std::cout << "executable: " << verify.exe() << std::endl;
+	// std::cout << "args      : ";
+	// std::copy(verify.args().begin(), verify.args().end(),
+	// 	  std::ostream_iterator<std::string>(std::cout, " "));
+	// std::cout << std::endl;
+	// std::cout << "envs      : ";
+	// std::copy(verify.envs().begin(), verify.envs().end(),
+	// 	  std::ostream_iterator<std::string>(std::cout, " "));
+	// std::cout << std::endl;
+	// std::cout << "block     : " << verify.block() << std::endl;
+	// if (!verify.corner().location().empty()) {
+	// 	std::cout << "corner:     " <<
+	// 		verify.corner().location() << std::endl;
+	// }
+	// if (!verify.shape().value().empty()) {
+	// 	std::cout << "shape:      " << verify.shape().value()
+	// 		  << std::endl;
+	// }
+
+	// const ProcessTree tree( verify.pid() );
+	// std::cout << tree << std::endl;
+
+	slurm_free_job_step_info_response_msg(step_resp);
+	return;
+
+deny_job:
+	slurm_free_job_step_info_response_msg(step_resp);
+	verify.deny_job(bgsched::runjob::Verify::DenyJob::Yes);
+	return;
+}
+
+void Plugin::execute(const bgsched::runjob::Started& data)
+{
+	boost::lock_guard<boost::mutex> lock( _mutex );
+	// std::cout << "runjob " << data.pid()
+	// 	  << " started with ID " << data.job() << std::endl;
+}
+
+void Plugin::execute(const bgsched::runjob::Terminated& data)
+{
+	boost::lock_guard<boost::mutex> lock( _mutex );
+	// std::cout << "runjob " << data.pid() << " shadowing job "
+	// 	  << data.job() << " finished with status "
+	// 	  << data.status() << std::endl;
+
+	// output failed nodes
+	const bgsched::runjob::Terminated::Nodes& nodes =
+		data.software_error_nodes();
+	if (!nodes.empty()) {
+		/* FIXME: We sould tell the slurmctld about this
+		   instead of just printing it out.
+		*/
+		std::cerr << nodes.size() << " failed nodes" << std::endl;
+		BOOST_FOREACH(const bgsched::runjob::Node& i, nodes) {
+			std::cerr << i.location() << ": "
+				  << i.coordinates() << std::endl;
+		}
+	}
+}
+
+extern "C" bgsched::runjob::Plugin* create()
+{
+	return new Plugin();
+}
+
+extern "C" void destroy(bgsched::runjob::Plugin* p)
+{
+	delete p;
+}
+
+#endif
diff --git a/src/plugins/select/bluegene/select_bluegene.c b/src/plugins/select/bluegene/select_bluegene.c
new file mode 100644
index 000000000..12a70de92
--- /dev/null
+++ b/src/plugins/select/bluegene/select_bluegene.c
@@ -0,0 +1,2766 @@
+/*****************************************************************************\
+ *  select_bluegene.c - node selection plugin for Blue Gene system.
+ *****************************************************************************
+ *  Copyright (C) 2004-2007 The Regents of the University of California.
+ *  Copyright (C) 2008-2011 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Dan Phung <phung4@llnl.gov> Danny Auble <da@llnl.gov>
+ *  CODE-OCEC-09-009. All rights reserved.
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#include "src/common/slurm_xlator.h"
+#include "src/common/uid.h"
+#include "bg_core.h"
+#include "bg_read_config.h"
+#include "bg_defined_block.h"
+
+#ifdef HAVE_BGQ
+# include "ba_bgq/block_allocator.h"
+#else
+# include "ba/block_allocator.h"
+#endif
+
+//#include "src/common/uid.h"
+#include "src/slurmctld/trigger_mgr.h"
+#include <fcntl.h>
+
+#define HUGE_BUF_SIZE (1024*16)
+
+/* These are defined here so when we link with something other than
+ * the slurmctld we will have these symbols defined.  They will get
+ * overwritten when linking with the slurmctld.
+ */
+#if defined (__APPLE__)
+slurm_ctl_conf_t slurmctld_conf __attribute__((weak_import));
+struct node_record *node_record_table_ptr  __attribute__((weak_import)) = NULL;
+int bg_recover __attribute__((weak_import)) = NOT_FROM_CONTROLLER;
+List part_list  __attribute__((weak_import)) = NULL;
+int node_record_count __attribute__((weak_import));
+time_t last_node_update __attribute__((weak_import));
+time_t last_job_update __attribute__((weak_import));
+char *alpha_num  __attribute__((weak_import)) =
+	"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
+void *acct_db_conn  __attribute__((weak_import)) = NULL;
+char *slurmctld_cluster_name  __attribute__((weak_import)) = NULL;
+slurmdb_cluster_rec_t *working_cluster_rec  __attribute__((weak_import)) = NULL;
+#else
+slurm_ctl_conf_t slurmctld_conf;
+struct node_record *node_record_table_ptr = NULL;
+int bg_recover = NOT_FROM_CONTROLLER;
+List part_list = NULL;
+int node_record_count;
+time_t last_node_update;
+time_t last_job_update;
+char *alpha_num = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
+void *acct_db_conn = NULL;
+char *slurmctld_cluster_name = NULL;
+slurmdb_cluster_rec_t *working_cluster_rec = NULL;
+#endif
+
+/*
+ * These variables are required by the generic plugin interface.  If they
+ * are not found in the plugin, the plugin loader will ignore it.
+ *
+ * plugin_name - a string giving a human-readable description of the
+ * plugin.  There is no maximum length, but the symbol must refer to
+ * a valid string.
+ *
+ * plugin_type - a string suggesting the type of the plugin or its
+ * applicability to a particular form of data or method of data handling.
+ * If the low-level plugin API is used, the contents of this string are
+ * unimportant and may be anything.  SLURM uses the higher-level plugin
+ * interface which requires this string to be of the form
+ *
+ *	<application>/<method>
+ *
+ * where <application> is a description of the intended application of
+ * the plugin (e.g., "select" for SLURM node selection) and <method>
+ * is a description of how this plugin satisfies that application.  SLURM will
+ * only load select plugins if the plugin_type string has a
+ * prefix of "select/".
+ *
+ * plugin_version - an unsigned 32-bit integer giving the version number
+ * of the plugin.  If major and minor revisions are desired, the major
+ * version number may be multiplied by a suitable magnitude constant such
+ * as 100 or 1000.  Various SLURM versions will likely require a certain
+ * minimum version for their plugins as the node selection API matures.
+ */
+const char plugin_name[]       	= "BlueGene node selection plugin";
+const char plugin_type[]       	= "select/bluegene";
+const uint32_t plugin_id	= 100;
+const uint32_t plugin_version	= 200;
+
+/* Global variables */
+bg_config_t *bg_conf = NULL;
+bg_lists_t *bg_lists = NULL;
+time_t last_bg_update;
+pthread_mutex_t block_state_mutex = PTHREAD_MUTEX_INITIALIZER;
+int blocks_are_created = 0;
+int num_unused_cpus = 0;
+
+extern int select_p_alter_node_cnt(enum select_node_cnt type, void *data);
+
+static void _destroy_bg_config(bg_config_t *bg_conf)
+{
+	if (bg_conf) {
+		if (bg_conf->blrts_list) {
+			list_destroy(bg_conf->blrts_list);
+			bg_conf->blrts_list = NULL;
+		}
+
+		xfree(bg_conf->bridge_api_file);
+
+		xfree(bg_conf->default_blrtsimage);
+		xfree(bg_conf->default_linuximage);
+		xfree(bg_conf->default_mloaderimage);
+		xfree(bg_conf->default_ramdiskimage);
+
+		if (bg_conf->linux_list) {
+			list_destroy(bg_conf->linux_list);
+			bg_conf->linux_list = NULL;
+		}
+
+		if (bg_conf->mloader_list) {
+			list_destroy(bg_conf->mloader_list);
+			bg_conf->mloader_list = NULL;
+		}
+
+		if (bg_conf->ramdisk_list) {
+			list_destroy(bg_conf->ramdisk_list);
+			bg_conf->ramdisk_list = NULL;
+		}
+
+		xfree(bg_conf->slurm_user_name);
+		xfree(bg_conf->slurm_node_prefix);
+		xfree(bg_conf);
+	}
+}
+
+static void _destroy_bg_lists(bg_lists_t *bg_lists)
+{
+	if (bg_lists) {
+		if (bg_lists->booted) {
+			list_destroy(bg_lists->booted);
+			bg_lists->booted = NULL;
+		}
+
+		if (bg_lists->job_running) {
+			list_destroy(bg_lists->job_running);
+			bg_lists->job_running = NULL;
+			num_unused_cpus = 0;
+		}
+
+		if (bg_lists->main) {
+			list_destroy(bg_lists->main);
+			bg_lists->main = NULL;
+		}
+
+		if (bg_lists->valid_small32) {
+			list_destroy(bg_lists->valid_small32);
+			bg_lists->valid_small32 = NULL;
+		}
+		if (bg_lists->valid_small64) {
+			list_destroy(bg_lists->valid_small64);
+			bg_lists->valid_small64 = NULL;
+		}
+		if (bg_lists->valid_small128) {
+			list_destroy(bg_lists->valid_small128);
+			bg_lists->valid_small128 = NULL;
+		}
+		if (bg_lists->valid_small256) {
+			list_destroy(bg_lists->valid_small256);
+			bg_lists->valid_small256 = NULL;
+		}
+
+		xfree(bg_lists);
+	}
+}
+
+#ifdef HAVE_BG
+/* Queue for destruction the blocks that should no longer exist.
+ *
+ * With bg_recover unset (clean start) every block in curr_block_list
+ * is queued; otherwise only the blocks missing from found_block_list
+ * (i.e. no longer described by bluegene.conf) are queued.  In both
+ * paths the record is first made visible in bg_lists->main so its
+ * state can still be queried, and blocks stuck in an error state are
+ * resumed since they would otherwise never disappear.
+ *
+ * IN curr_block_list  - blocks currently defined on the system
+ * IN found_block_list - blocks that matched the bluegene.conf config
+ * RET SLURM_SUCCESS (always)
+ */
+static int _delete_old_blocks(List curr_block_list, List found_block_list)
+{
+	ListIterator itr_curr, itr_found;
+	bg_record_t *found_record = NULL, *init_record = NULL;
+	List destroy_list = list_create(NULL);
+
+	xassert(curr_block_list);
+	xassert(found_block_list);
+
+	slurm_mutex_lock(&block_state_mutex);
+	if (!bg_recover) {
+		info("removing all current blocks (clean start)");
+		itr_curr = list_iterator_create(curr_block_list);
+		while ((init_record = list_next(itr_curr))) {
+			list_remove(itr_curr);
+
+			init_record->modifying = 0;
+
+			/* The block needs to exist in the main list
+			 * just to make sure we query the state. */
+			if (!(found_record = find_bg_record_in_list(
+				      bg_lists->main,
+				      init_record->bg_block_id)))
+				list_push(bg_lists->main, init_record);
+			else {
+				/* already in the main list: keep that
+				 * copy and drop the duplicate */
+				destroy_bg_record(init_record);
+				init_record = found_record;
+			}
+			/* Make sure this block isn't in an
+			   error state since if it is it won't
+			   disappear. */
+			if (init_record->state & BG_BLOCK_ERROR_FLAG)
+				resume_block(init_record);
+			list_push(destroy_list, init_record);
+		}
+		list_iterator_destroy(itr_curr);
+	} else {
+		info("removing unspecified blocks");
+		itr_curr = list_iterator_create(curr_block_list);
+		while ((init_record = list_next(itr_curr))) {
+			/* linear scan: keep any block that also appears
+			 * in found_block_list (same bg_block_id) */
+			itr_found = list_iterator_create(found_block_list);
+			while ((found_record = list_next(itr_found))) {
+				if (!strcmp(init_record->bg_block_id,
+					    found_record->bg_block_id)) {
+					/* don't delete this one */
+					break;
+				}
+			}
+			list_iterator_destroy(itr_found);
+
+			if (found_record == NULL) {
+				list_remove(itr_curr);
+
+				init_record->modifying = 0;
+
+				/* The block needs to exist in the main list
+				 * just to make sure we query the state. */
+				if (!(found_record = find_bg_record_in_list(
+					      bg_lists->main,
+					      init_record->bg_block_id)))
+					list_push(bg_lists->main, init_record);
+				else {
+					destroy_bg_record(init_record);
+					init_record = found_record;
+				}
+				/* Make sure this block isn't in an
+				   error state since if it is it won't
+				   disappear. */
+				if (init_record->state & BG_BLOCK_ERROR_FLAG)
+					resume_block(init_record);
+
+				/* Since we can't requeue a running
+				   job in the free block function (not
+				   thread safe here) we must do it
+				   now.
+				*/
+				if ((init_record->job_running > NO_JOB_RUNNING)
+				    || init_record->job_ptr) {
+					/* Don't worry about dealing
+					   with this job here.  Trying
+					   to requeue/cancel now will
+					   cause a race condition
+					   locking up the slurmctld.
+					   It will be handled when the
+					   blocks are synced.  This
+					   should only happen if the
+					   bluegene.conf gets changed
+					   and jobs are running on
+					   blocks that don't exist in
+					   the new config (hopefully
+					   rarely).
+					*/
+					init_record->job_running =
+						NO_JOB_RUNNING;
+					init_record->job_ptr = NULL;
+				}
+				list_push(destroy_list, init_record);
+			}
+		}
+		list_iterator_destroy(itr_curr);
+	}
+	slurm_mutex_unlock(&block_state_mutex);
+
+	/* Destroy everything queued above; done outside of
+	 * block_state_mutex to avoid deadlocking the free path. */
+	free_block_list(NO_VAL, destroy_list, 1, 0);
+	list_destroy(destroy_list);
+
+	return SLURM_SUCCESS;
+}
+
+/* (Re)create the block bookkeeping lists inside the global bg_lists,
+ * allocating the container on first use.  booted and job_running hold
+ * shared pointers (list_create(NULL): no destructor), while main owns
+ * its records and frees them with destroy_bg_record().
+ * Takes block_state_mutex internally; caller must not hold it. */
+static void _set_bg_lists()
+{
+	if (!bg_lists)
+		bg_lists = xmalloc(sizeof(bg_lists_t));
+
+	slurm_mutex_lock(&block_state_mutex);
+
+	if (bg_lists->booted)
+		list_destroy(bg_lists->booted);
+	bg_lists->booted = list_create(NULL);
+
+	if (bg_lists->job_running)
+		list_destroy(bg_lists->job_running);
+	bg_lists->job_running = list_create(NULL);
+
+	if (bg_lists->main)
+		list_destroy(bg_lists->main);
+	bg_lists->main = list_create(destroy_bg_record);
+
+	slurm_mutex_unlock(&block_state_mutex);
+
+}
+
+/* Convert an unpacked block_info_t into a freshly allocated
+ * bg_record_t.  Heap strings and lists are MOVED (pointer stolen and
+ * source NULLed) rather than copied; the remaining block_info members
+ * are released with slurm_free_block_info_members() before returning,
+ * so the caller must not use block_info's contents afterwards.
+ * RET new record (caller owns; free with destroy_bg_record()). */
+static bg_record_t *_translate_info_2_record(block_info_t *block_info)
+{
+	uid_t my_uid;
+	bg_record_t *bg_record = NULL;
+	bitstr_t *mp_bitmap = NULL, *ionode_bitmap = NULL, *used_bitmap = NULL;
+
+	mp_bitmap = bit_alloc(node_record_count);
+	used_bitmap = bit_alloc(node_record_count);
+	ionode_bitmap = bit_alloc(bg_conf->ionodes_per_mp);
+
+	/* Saved index ranges that no longer fit the current system
+	 * indicate a bluegene.conf change since the state was saved. */
+	if (block_info->mp_inx
+	    && inx2bitstr(mp_bitmap, block_info->mp_inx) == -1)
+		error("Job state recovered incompatible with "
+		      "bluegene.conf. mp=%u",
+		      node_record_count);
+	if (block_info->mp_used_inx
+	    && inx2bitstr(used_bitmap, block_info->mp_used_inx) == -1)
+		error("Job state recovered incompatible with "
+		      "bluegene.conf. used=%u",
+		      node_record_count);
+	if (block_info->ionode_inx
+	    && inx2bitstr(ionode_bitmap, block_info->ionode_inx) == -1)
+		error("Job state recovered incompatible with "
+		      "bluegene.conf. ionodes=%u",
+		      bg_conf->ionodes_per_mp);
+
+	bg_record = xmalloc(sizeof(bg_record_t));
+	bg_record->magic = BLOCK_MAGIC;
+	/* Move (not copy) the heap fields out of block_info. */
+	bg_record->bg_block_id = block_info->bg_block_id;
+	block_info->bg_block_id = NULL;
+	bg_record->mp_str = block_info->mp_str;
+	block_info->mp_str = NULL;
+	bg_record->ionode_str = block_info->ionode_str;
+	block_info->ionode_str = NULL;
+	bg_record->ionode_bitmap = ionode_bitmap;
+	ionode_bitmap = NULL;
+	bg_record->mp_used_bitmap = used_bitmap;
+	used_bitmap = NULL;
+
+	bg_record->mp_bitmap = mp_bitmap;
+	mp_bitmap = NULL;
+
+	/* put_block_in_error_state should be
+	   called after the bg_lists->main has been
+	   made.  We can't call it here since
+	   this record isn't the record kept
+	   around in bg_lists->main.
+	*/
+	bg_record->state = block_info->state;
+
+	bg_record->job_running = block_info->job_running;
+	if (bg_record->job_running > NO_JOB_RUNNING)
+		bg_record->job_ptr = find_job_record(bg_record->job_running);
+	bg_record->job_list = block_info->job_list;
+	block_info->job_list = NULL;
+
+	bg_record->cnode_cnt = block_info->cnode_cnt;
+	bg_record->mp_count = bit_set_count(bg_record->mp_bitmap);
+
+#ifdef HAVE_BGL
+	bg_record->node_use = block_info->node_use;
+#endif
+	memcpy(bg_record->conn_type, block_info->conn_type,
+	       sizeof(bg_record->conn_type));
+
+	bg_record->target_name = xstrdup(bg_conf->slurm_user_name);
+	bg_record->user_name = xstrdup(bg_conf->slurm_user_name);
+
+	/* Bug fix: the error message previously read "uid_from_strin",
+	 * misnaming the function that actually failed. */
+	if (uid_from_string(bg_record->user_name, &my_uid) < 0) {
+		error("uid_from_string(%s): %m",
+		      bg_record->user_name);
+	} else {
+		bg_record->user_uid = my_uid;
+	}
+
+	bg_record->blrtsimage = block_info->blrtsimage;
+	block_info->blrtsimage = NULL;
+	bg_record->linuximage =	block_info->linuximage;
+	block_info->linuximage = NULL;
+	bg_record->mloaderimage = block_info->mloaderimage;
+	block_info->mloaderimage = NULL;
+	bg_record->ramdiskimage = block_info->ramdiskimage;
+	block_info->ramdiskimage = NULL;
+
+	bg_record->reason = block_info->reason;
+	block_info->reason = NULL;
+
+	slurm_free_block_info_members(block_info);
+	return bg_record;
+}
+
+/* Pack all relevant information about a block.
+ * Field order here is wire format: never reorder fields within an
+ * existing protocol version. */
+/* NOTE: There is a matching pack function in
+ * common/slurm_protocol_pack.c dealing with the block_info_t
+ * structure there.  If anything changes here please update that as well.
+ * The unpack for this is in common/slurm_protocol_pack.c
+ */
+static void _pack_block(bg_record_t *bg_record, Buf buffer,
+			uint16_t protocol_version)
+{
+#ifdef HAVE_BGQ
+	int dim;
+#endif
+	uint32_t count = NO_VAL;
+	block_job_info_t *job;
+	ListIterator itr;
+
+	/* 2.3 format: adds per-dimension conn_type, the job_list, and
+	 * the mp_used string/bitmap. */
+	if (protocol_version >= SLURM_2_3_PROTOCOL_VERSION) {
+		packstr(bg_record->bg_block_id, buffer);
+		packstr(bg_record->blrtsimage, buffer);
+		pack_bit_fmt(bg_record->mp_bitmap, buffer);
+#ifdef HAVE_BGQ
+		pack32(SYSTEM_DIMENSIONS, buffer);
+		for (dim=0; dim<SYSTEM_DIMENSIONS; dim++)
+			pack16(bg_record->conn_type[dim], buffer);
+#else
+		pack32(1, buffer); /* for dimensions of conn_type */
+		pack16(bg_record->conn_type[0], buffer);
+#endif
+		packstr(bg_record->ionode_str, buffer);
+		pack_bit_fmt(bg_record->ionode_bitmap, buffer);
+
+		/* job count first, then one packed record per job */
+		if (bg_record->job_list)
+			count = list_count(bg_record->job_list);
+		pack32(count, buffer);
+		if (count && count != NO_VAL) {
+			itr = list_iterator_create(bg_record->job_list);
+			while ((job = list_next(itr))) {
+				slurm_pack_block_job_info(job, buffer,
+							  protocol_version);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		pack32((uint32_t)bg_record->job_running, buffer);
+		packstr(bg_record->linuximage, buffer);
+		packstr(bg_record->mloaderimage, buffer);
+		packstr(bg_record->mp_str, buffer);
+		packstr(bg_record->mp_used_str, buffer);
+		pack32((uint32_t)bg_record->cnode_cnt, buffer);
+		pack16((uint16_t)bg_record->node_use, buffer);
+		packstr(bg_record->user_name, buffer);
+		packstr(bg_record->ramdiskimage, buffer);
+		packstr(bg_record->reason, buffer);
+		pack16((uint16_t)bg_record->state, buffer);
+		pack_bit_fmt(bg_record->mp_used_bitmap, buffer);
+	} else if (protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
+		/* 2.2 format: single conn_type, no job list */
+		packstr(bg_record->bg_block_id, buffer);
+#ifdef HAVE_BGL
+		packstr(bg_record->blrtsimage, buffer);
+#endif
+		pack_bit_fmt(bg_record->mp_bitmap, buffer);
+		pack16((uint16_t)bg_record->conn_type[0], buffer);
+		packstr(bg_record->ionode_str, buffer);
+		pack_bit_fmt(bg_record->ionode_bitmap, buffer);
+		pack32((uint32_t)bg_record->job_running, buffer);
+		packstr(bg_record->linuximage, buffer);
+		packstr(bg_record->mloaderimage, buffer);
+		packstr(bg_record->mp_str, buffer);
+		pack32((uint32_t)bg_record->cnode_cnt, buffer);
+#ifdef HAVE_BGL
+		pack16((uint16_t)bg_record->node_use, buffer);
+#endif
+		packstr(bg_record->user_name, buffer);
+		packstr(bg_record->ramdiskimage, buffer);
+		packstr(bg_record->reason, buffer);
+		pack16((uint16_t)bg_record->state, buffer);
+	} else if (protocol_version >= SLURM_2_1_PROTOCOL_VERSION) {
+		/* 2.1 format: like 2.2 but without the reason string */
+		packstr(bg_record->bg_block_id, buffer);
+#ifdef HAVE_BGL
+		packstr(bg_record->blrtsimage, buffer);
+#endif
+		pack_bit_fmt(bg_record->mp_bitmap, buffer);
+		pack16((uint16_t)bg_record->conn_type[0], buffer);
+		packstr(bg_record->ionode_str, buffer);
+		pack_bit_fmt(bg_record->ionode_bitmap, buffer);
+		pack32((uint32_t)bg_record->job_running, buffer);
+		packstr(bg_record->linuximage, buffer);
+		packstr(bg_record->mloaderimage, buffer);
+		packstr(bg_record->mp_str, buffer);
+		pack32((uint32_t)bg_record->cnode_cnt, buffer);
+#ifdef HAVE_BGL
+		pack16((uint16_t)bg_record->node_use, buffer);
+#endif
+		packstr(bg_record->user_name, buffer);
+		packstr(bg_record->ramdiskimage, buffer);
+		pack16((uint16_t)bg_record->state, buffer);
+	}
+}
+
+/* Pack all extra information about a block (Only needed for saving state.)
+ * Counterpart of _unpack_block_ext(); keep the field order in sync.
+ * Nothing is packed for pre-2.3 versions: the extended info did not
+ * exist before 2.3. */
+static void _pack_block_ext(bg_record_t *bg_record, Buf buffer,
+			    uint16_t protocol_version)
+{
+	ListIterator itr;
+	ba_mp_t *ba_mp;
+	uint32_t count = NO_VAL;
+	int i;
+
+	xassert(bg_record);
+
+	if (protocol_version >= SLURM_2_3_PROTOCOL_VERSION) {
+		/* midplane count first, then one packed ba_mp each */
+		if (bg_record->ba_mp_list)
+			count = list_count(bg_record->ba_mp_list);
+		pack32(count, buffer);
+		if (count && count != NO_VAL) {
+			itr = list_iterator_create(bg_record->ba_mp_list);
+			while ((ba_mp = list_next(itr)))
+				pack_ba_mp(ba_mp, buffer, protocol_version);
+			list_iterator_destroy(itr);
+
+		}
+		pack32(bg_record->cpu_cnt, buffer);
+		for (i=0; i<SYSTEM_DIMENSIONS; i++) {
+			pack16(bg_record->geo[i], buffer);
+			pack16(bg_record->start[i], buffer);
+		}
+
+		pack16(bg_record->full_block, buffer);
+		pack32(bg_record->switch_count, buffer);
+	} else {
+		/* didn't exist before 2.3 */
+	}
+}
+
+/* UNPack all extra information about a block.
+ * Must mirror the field order written by _pack_block_ext() above.
+ * For pre-2.3 state files (which carried no extended info) cpu_cnt is
+ * reconstructed from the bluegene.conf geometry instead.
+ * RET SLURM_SUCCESS, or SLURM_ERROR on a truncated/corrupt buffer. */
+static int _unpack_block_ext(bg_record_t *bg_record, Buf buffer,
+			     uint16_t protocol_version)
+{
+	ba_mp_t *ba_mp;
+	uint32_t count = NO_VAL;
+	int i;
+	uint16_t temp16;
+
+	xassert(bg_record);
+
+	if (protocol_version >= SLURM_2_3_PROTOCOL_VERSION) {
+		safe_unpack32(&count, buffer);
+		if (count == NO_VAL) {
+			error("_unpack_block_ext: bg_record record has no "
+			      "mp_list");
+			goto unpack_error;
+		}
+		bg_record->ba_mp_list = list_create(destroy_ba_mp);
+		for (i=0; i<count; i++) {
+			if (unpack_ba_mp(&ba_mp, buffer, protocol_version)
+			    == SLURM_ERROR)
+				goto unpack_error;
+			list_append(bg_record->ba_mp_list, ba_mp);
+		}
+		safe_unpack32(&bg_record->cpu_cnt, buffer);
+		for (i=0; i<SYSTEM_DIMENSIONS; i++) {
+			safe_unpack16(&bg_record->geo[i], buffer);
+			safe_unpack16(&bg_record->start[i], buffer);
+		}
+		safe_unpack16(&temp16, buffer);
+		bg_record->full_block = temp16;
+		/* Bug fix: this previously called safe_pack32() here,
+		 * writing into the buffer in the middle of an unpack
+		 * and never restoring switch_count.  Unpack it to
+		 * mirror the pack32() in _pack_block_ext(). */
+		safe_unpack32(&bg_record->switch_count, buffer);
+	} else {
+		/* packing didn't exist before 2.3, so set things up
+		 * to go forward */
+		if (bg_conf->mp_cnode_cnt > bg_record->cnode_cnt) {
+			bg_record->cpu_cnt = bg_conf->cpus_per_mp /
+				(bg_conf->mp_cnode_cnt / bg_record->cnode_cnt);
+		} else {
+			bg_record->cpu_cnt = bg_conf->cpus_per_mp
+				* bg_record->mp_count;
+		}
+		process_nodes(bg_record, true);
+	}
+
+	return SLURM_SUCCESS;
+
+unpack_error:
+	error("Problem unpacking extended block info for %s, "
+	      "removing from list",
+	      bg_record->bg_block_id);
+	return SLURM_ERROR;
+}
+
+/* Load block state from dir_name/block_state (written by
+ * select_p_state_save()) and push each recovered block onto
+ * curr_block_list.
+ *
+ * IN/OUT curr_block_list - list recovered bg_record_t's are pushed to
+ * IN dir_name            - the StateSaveLocation directory
+ * RET SLURM_SUCCESS (a missing state file is NOT an error), EFAULT if
+ *     the state file version is incompatible, or SLURM_FAILURE on a
+ *     truncated/corrupt file.
+ */
+static int _load_state_file(List curr_block_list, char *dir_name)
+{
+	int state_fd, i;
+	char *state_file = NULL;
+	Buf buffer = NULL;
+	char *data = NULL;
+	int data_size = 0;
+	bg_record_t *bg_record = NULL;
+	char temp[256];
+	List results = NULL;
+	int data_allocated, data_read = 0;
+	char *ver_str = NULL;
+	uint32_t ver_str_len;
+	char *name = NULL;
+	struct part_record *part_ptr = NULL;
+	bitstr_t *usable_mp_bitmap = NULL;
+	ListIterator itr = NULL;
+	uint16_t protocol_version = (uint16_t)NO_VAL;
+	uint32_t record_count;
+
+	xassert(curr_block_list);
+	xassert(dir_name);
+
+	state_file = xstrdup(dir_name);
+	xstrcat(state_file, "/block_state");
+	state_fd = open(state_file, O_RDONLY);
+	if (state_fd < 0) {
+		error("No block state file (%s) to recover", state_file);
+		xfree(state_file);
+		return SLURM_SUCCESS;
+	} else {
+		/* slurp the whole file, retrying on EINTR */
+		data_allocated = BUF_SIZE;
+		data = xmalloc(data_allocated);
+		while (1) {
+			data_read = read(state_fd, &data[data_size],
+					 BUF_SIZE);
+			if (data_read < 0) {
+				if (errno == EINTR)
+					continue;
+				else {
+					error("Read error on %s: %m",
+					      state_file);
+					break;
+				}
+			} else if (data_read == 0)	/* eof */
+				break;
+			data_size      += data_read;
+			data_allocated += data_read;
+			xrealloc(data, data_allocated);
+		}
+		close(state_fd);
+	}
+	xfree(state_file);
+
+	buffer = create_buf(data, data_size);
+	safe_unpackstr_xmalloc(&ver_str, &ver_str_len, buffer);
+	debug3("Version string in block_state header is %s", ver_str);
+	if (ver_str) {
+		if (!strcmp(ver_str, BLOCK_STATE_VERSION)) {
+			protocol_version = SLURM_PROTOCOL_VERSION;
+		} else if (!strcmp(ver_str, BLOCK_2_2_STATE_VERSION)) {
+			protocol_version = SLURM_2_2_PROTOCOL_VERSION;
+		} else if (!strcmp(ver_str, BLOCK_2_1_STATE_VERSION)) {
+			protocol_version = SLURM_2_1_PROTOCOL_VERSION;
+		}
+	}
+
+	if (protocol_version == (uint16_t)NO_VAL) {
+		error("***********************************************");
+		error("Can not recover block state, "
+		      "data version incompatible");
+		error("***********************************************");
+		xfree(ver_str);
+		free_buf(buffer);
+		return EFAULT;
+	}
+	xfree(ver_str);
+	safe_unpack32(&record_count, buffer);
+
+	/* In older versions of the code we stored things in a
+	   block_info_msg_t.  This isn't the case anymore so in the
+	   newer code we don't store the timestamp since it isn't
+	   really needed.
+	*/
+	if (protocol_version <= SLURM_2_2_PROTOCOL_VERSION) {
+		time_t last_save;
+		safe_unpack_time(&last_save, buffer);
+	}
+
+	slurm_mutex_lock(&block_state_mutex);
+	reset_ba_system(true);
+
+	/* Locks are already in place to protect part_list here */
+	usable_mp_bitmap = bit_alloc(node_record_count);
+	itr = list_iterator_create(part_list);
+	while ((part_ptr = list_next(itr))) {
+		/* we only want to use mps that are in partitions */
+		if (!part_ptr->node_bitmap) {
+			debug4("Partition %s doesn't have any nodes in it.",
+			       part_ptr->name);
+			continue;
+		}
+		bit_or(usable_mp_bitmap, part_ptr->node_bitmap);
+	}
+	list_iterator_destroy(itr);
+
+	if (bit_ffs(usable_mp_bitmap) == -1) {
+		fatal("We don't have any nodes in any partitions.  "
+		      "Can't create blocks.  "
+		      "Please check your slurm.conf.");
+	}
+
+	for (i=0; i<record_count; i++) {
+		block_info_t block_info;
+
+		if (slurm_unpack_block_info_members(
+			    &block_info, buffer, protocol_version))
+				goto unpack_error;
+
+		if (!(bg_record = _translate_info_2_record(&block_info)))
+			continue;
+
+		if (_unpack_block_ext(bg_record, buffer, protocol_version)
+		    != SLURM_SUCCESS) {
+			goto unpack_error;
+		}
+
+		/* This means the block here wasn't able to be
+		   processed correctly, so don't add.
+		*/
+		if (!bg_record->mp_count) {
+			error("block %s(%s) can't be made in the current "
+			      "system, but was around in the previous one.",
+			      bg_record->bg_block_id, bg_record->mp_str);
+			/* Bug fix: results may be NULL here (it is only
+			 * created in the !ba_mp_list branch below), or a
+			 * stale pointer already destroyed by the
+			 * copy_node_path path on a previous iteration,
+			 * so guard it and reset after destroying. */
+			if (results) {
+				list_destroy(results);
+				results = NULL;
+			}
+			destroy_bg_record(bg_record);
+			continue;
+		}
+
+		if ((bg_conf->layout_mode == LAYOUT_OVERLAP)
+		    || bg_record->full_block)
+			reset_ba_system(false);
+
+		if (bg_record->ba_mp_list) {
+			/* only do this for blocks bigger than 1
+			   midplane */
+			if (bg_record->cpu_cnt >= bg_conf->cpus_per_mp)
+				if (check_and_set_mp_list(bg_record->ba_mp_list)
+				    == SLURM_ERROR)
+					error("something happened in the "
+					      "load of %s, keeping it "
+					      "around though",
+					      bg_record->bg_block_id);
+		} else {
+			ba_set_removable_mps(usable_mp_bitmap, 1);
+			/* we want the mps that aren't
+			 * in this record to mark them as used
+			 */
+			if (ba_set_removable_mps(bg_record->mp_bitmap, 1)
+			    != SLURM_SUCCESS)
+				fatal("1 It doesn't seem we have a bitmap "
+				      "for %s",
+				      bg_record->bg_block_id);
+#ifdef HAVE_BGQ
+			results = list_create(destroy_ba_mp);
+#else
+			results = list_create(NULL);
+#endif
+			/* info("adding back %s %s", bg_record->bg_block_id, */
+			/*      bg_record->mp_str); */
+			name = set_bg_block(results,
+					    bg_record->start,
+					    bg_record->geo,
+					    bg_record->conn_type);
+			ba_reset_all_removed_mps();
+
+			if (!name) {
+				error("I was unable to make the "
+				      "requested block.");
+				list_destroy(results);
+				results = NULL;	/* don't reuse freed list */
+				destroy_bg_record(bg_record);
+				bg_record = NULL;
+				continue;
+			}
+
+
+			snprintf(temp, sizeof(temp), "%s%s",
+				 bg_conf->slurm_node_prefix,
+				 name);
+
+			xfree(name);
+			if (strcmp(temp, bg_record->mp_str)) {
+				fatal("bad wiring in preserved state "
+				      "(found %s, but allocated %s) "
+				      "YOU MUST COLDSTART",
+				      bg_record->mp_str, temp);
+			}
+			if (bg_record->ba_mp_list)
+				list_destroy(bg_record->ba_mp_list);
+#ifdef HAVE_BGQ
+			bg_record->ba_mp_list =	results;
+			results = NULL;
+#else
+			bg_record->ba_mp_list =	list_create(destroy_ba_mp);
+			copy_node_path(results, &bg_record->ba_mp_list);
+			list_destroy(results);
+			results = NULL;	/* don't reuse freed list */
+#endif
+		}
+
+//		bridge_block_create(bg_record);
+		list_push(curr_block_list, bg_record);
+	}
+
+	FREE_NULL_BITMAP(usable_mp_bitmap);
+
+	sort_bg_record_inc_size(curr_block_list);
+	slurm_mutex_unlock(&block_state_mutex);
+
+	info("Recovered %d blocks", list_count(curr_block_list));
+	free_buf(buffer);
+
+	return SLURM_SUCCESS;
+
+unpack_error:
+	FREE_NULL_BITMAP(usable_mp_bitmap);
+	slurm_mutex_unlock(&block_state_mutex);
+	error("Incomplete block data checkpoint file");
+	free_buf(buffer);
+	return SLURM_FAILURE;
+}
+
+
+/*
+ * _validate_config_blocks - Match slurm configuration information with
+ *                           current BG block configuration.
+ * IN/OUT curr_block_list -  List of blocks already existing on the system.
+ * IN/OUT found_block_list - List of blocks found on the system
+ *                              that are listed in the bluegene.conf.
+ * NOTE: Both of the lists above should be created with list_create(NULL)
+ *       since the bg_lists->main will contain the complete list of pointers
+ *       and be destroyed with it.
+ *
+ * RET - SLURM_SUCCESS if no blocks need to be deleted, else an error
+ * code. Writes bg_block_id into bg_lists->main records.
+ */
+
+static int _validate_config_blocks(List curr_block_list,
+				   List found_block_list, char *dir)
+{
+	int rc = SLURM_ERROR;
+	bg_record_t* bg_record = NULL;
+	bg_record_t* init_bg_record = NULL;
+	int full_created = 0;
+	ListIterator itr_conf;
+	ListIterator itr_curr;
+	char tmp_char[256];
+	int dim;
+
+	xassert(curr_block_list);
+	xassert(found_block_list);
+
+	/* read in state from last run. */
+	rc = _load_state_file(curr_block_list, dir);
+
+#ifndef HAVE_BG_FILES
+	/* In emulation the state file is the only source of truth,
+	 * so a load failure is fatal to validation. */
+	if (rc != SLURM_SUCCESS)
+		return rc;
+#endif
+	/* read current bg block info into curr_block_list This
+	 * happens in the state load before this in emulation mode */
+	if (bridge_blocks_load_curr(curr_block_list) == SLURM_ERROR)
+		return SLURM_ERROR;
+
+	if (!bg_recover)
+		return SLURM_ERROR;
+
+#ifdef HAVE_BG_FILES
+	/* Since we just checked all the blocks from state against that
+	   in the database we can now check to see if there were once
+	   blocks that are now gone from the database and remove them
+	   from the list.
+	*/
+	itr_curr = list_iterator_create(curr_block_list);
+	while ((bg_record = list_next(itr_curr))) {
+		if (bg_record->modifying) {
+			bg_record->modifying = 0;
+			continue;
+		}
+		error("Found state for block %s, but that "
+		      "block isn't in the system anymore, removing",
+		      bg_record->bg_block_id);
+		list_delete_item(itr_curr);
+	}
+	list_iterator_destroy(itr_curr);
+#endif
+
+	if (bg_conf->layout_mode == LAYOUT_DYNAMIC) {
+		/* Since we don't read the blocks in a Dynamic system
+		   we can just transfer the list here and return.
+		*/
+		list_transfer(bg_lists->main, curr_block_list);
+
+		itr_conf = list_iterator_create(bg_lists->main);
+		while ((bg_record = list_next(itr_conf))) {
+			format_node_name(bg_record, tmp_char,
+					 sizeof(tmp_char));
+			info("Existing: BlockID:%s Nodes:%s Conn:%s",
+			     bg_record->bg_block_id,
+			     tmp_char,
+			     conn_type_string(bg_record->conn_type[0]));
+			if (bg_record->state & BG_BLOCK_ERROR_FLAG)
+				put_block_in_error_state(bg_record, NULL);
+			else if (((bg_record->state == BG_BLOCK_INITED)
+				  || (bg_record->state == BG_BLOCK_BOOTING))
+				 && !block_ptr_exist_in_list(bg_lists->booted,
+							     bg_record))
+				list_push(bg_lists->booted, bg_record);
+		}
+		return SLURM_SUCCESS;
+	}
+
+	/* Only when we are looking at a non-dynamic system do we need
+	   to go through the following logic to make sure things are insync.
+	*/
+	itr_curr = list_iterator_create(curr_block_list);
+	itr_conf = list_iterator_create(bg_lists->main);
+	while ((bg_record = list_next(itr_conf))) {
+		/* Try to match each configured block against a block
+		 * recovered from state: same midplanes, same ionodes,
+		 * and (for non-small blocks) same conn_type. */
+		list_iterator_reset(itr_curr);
+		while ((init_bg_record = list_next(itr_curr))) {
+			if (!bit_equal(bg_record->mp_bitmap,
+				       init_bg_record->mp_bitmap))
+				continue; /* wrong nodes */
+			if (!bit_equal(bg_record->ionode_bitmap,
+				       init_bg_record->ionode_bitmap))
+				continue;
+			if ((bg_record->conn_type[0] < SELECT_SMALL)
+			    && (init_bg_record->conn_type[0] < SELECT_SMALL)) {
+				for (dim = 0; dim < SYSTEM_DIMENSIONS; dim++) {
+					/* Only look at how far we
+					   have set.  The bg_record
+					   should of been set up
+					   correctly in the
+					   parse_blockreq() function.
+					*/
+					if (bg_record->conn_type[dim] ==
+					    (uint16_t)NO_VAL) {
+						dim = SYSTEM_DIMENSIONS;
+						break;
+					}
+
+					if (bg_record->conn_type[dim] !=
+					    init_bg_record->conn_type[dim])
+						break; /* wrong conn_type */
+				}
+				if (dim < SYSTEM_DIMENSIONS)
+					continue;
+			}
+			copy_bg_record(init_bg_record, bg_record);
+			/* remove from the curr list since we just
+			   matched it no reason to keep it around
+			   anymore */
+			list_delete_item(itr_curr);
+			break;
+		}
+
+		if (!bg_record->bg_block_id) {
+			/* no match: this configured block will have to
+			 * be created from scratch */
+			format_node_name(bg_record, tmp_char,
+					 sizeof(tmp_char));
+			info("Block found in bluegene.conf to be "
+			     "created: Nodes:%s",
+			     tmp_char);
+		} else {
+			if (bg_record->full_block)
+				full_created = 1;
+
+			list_push(found_block_list, bg_record);
+			format_node_name(bg_record, tmp_char,
+					 sizeof(tmp_char));
+			info("Existing: BlockID:%s Nodes:%s Conn:%s",
+			     bg_record->bg_block_id,
+			     tmp_char,
+			     conn_type_string(bg_record->conn_type[0]));
+			if (bg_record->state & BG_BLOCK_ERROR_FLAG)
+				put_block_in_error_state(bg_record, NULL);
+			else if (((bg_record->state == BG_BLOCK_INITED)
+				  || (bg_record->state == BG_BLOCK_BOOTING))
+				 && !block_ptr_exist_in_list(bg_lists->booted,
+							     bg_record))
+				list_push(bg_lists->booted, bg_record);
+		}
+	}
+
+	/* If the config did not yield a full-system block, adopt a
+	 * recovered full-system block from the previous run (if any)
+	 * instead of destroying it. */
+	if (!full_created) {
+		list_iterator_reset(itr_curr);
+		while ((init_bg_record = list_next(itr_curr))) {
+			if (init_bg_record->full_block) {
+				list_remove(itr_curr);
+				bg_record = init_bg_record;
+				list_append(bg_lists->main, bg_record);
+				list_push(found_block_list, bg_record);
+				format_node_name(bg_record, tmp_char,
+						 sizeof(tmp_char));
+				info("Existing: BlockID:%s Nodes:%s Conn:%s",
+				     bg_record->bg_block_id,
+				     tmp_char,
+				     conn_type_string(bg_record->conn_type[0]));
+				if (bg_record->state & BG_BLOCK_ERROR_FLAG)
+					put_block_in_error_state(
+						bg_record, NULL);
+				else if (((bg_record->state
+					     == BG_BLOCK_INITED)
+					    || (bg_record->state
+						== BG_BLOCK_BOOTING))
+				    && !block_ptr_exist_in_list(
+					    bg_lists->booted, bg_record))
+					list_push(bg_lists->booted, bg_record);
+				break;
+			}
+		}
+	}
+
+	list_iterator_destroy(itr_conf);
+	list_iterator_destroy(itr_curr);
+	/* Anything still left in curr_block_list failed to match the
+	 * config; report SLURM_ERROR so the caller knows blocks still
+	 * need to be removed. */
+	if (!list_count(curr_block_list))
+		rc = SLURM_SUCCESS;
+	else
+		rc = SLURM_ERROR;
+	return rc;
+}
+
+/* Append one (name, value) pair to my_list.
+ * Takes ownership of the already xstrdup'd/xstrdup_printf'd value. */
+static void _add_key_pair(List my_list, const char *name, char *value)
+{
+	config_key_pair_t *key_pair = xmalloc(sizeof(config_key_pair_t));
+
+	key_pair->name = xstrdup(name);
+	key_pair->value = value;
+	list_append(my_list, key_pair);
+}
+
+/* Build a sorted List of config_key_pair_t entries describing the
+ * current bluegene configuration.  Caller owns the returned list,
+ * which frees its entries with destroy_config_key_pair(). */
+static List _get_config(void)
+{
+	config_key_pair_t *key_pair;
+	List my_list = list_create(destroy_config_key_pair);
+
+	if (!my_list)
+		fatal("malloc failure on list_create");
+
+#ifndef HAVE_BG_FILES
+	/* no bridge API compiled in: we are running in emulation */
+	_add_key_pair(my_list, "Emulated", xstrdup("yes"));
+#endif
+
+	_add_key_pair(my_list, "MidPlaneNodeCnt",
+		      xstrdup_printf("%u", bg_conf->mp_cnode_cnt));
+
+	_add_key_pair(my_list, "NodeCPUCnt",
+		      xstrdup_printf("%u", bg_conf->cpu_ratio));
+
+#ifdef HAVE_BGL
+	_add_key_pair(my_list, "BlrtsImage",
+		      xstrdup(bg_conf->default_blrtsimage));
+	_add_key_pair(my_list, "LinuxImage",
+		      xstrdup(bg_conf->default_linuximage));
+	_add_key_pair(my_list, "RamDiskImage",
+		      xstrdup(bg_conf->default_ramdiskimage));
+#elif defined HAVE_BGP
+	_add_key_pair(my_list, "CnloadImage",
+		      xstrdup(bg_conf->default_linuximage));
+	_add_key_pair(my_list, "IoloadImage",
+		      xstrdup(bg_conf->default_ramdiskimage));
+#endif
+
+	_add_key_pair(my_list, "BridgeAPILogFile",
+		      xstrdup(bg_conf->bridge_api_file));
+
+	_add_key_pair(my_list, "BridgeAPIVerbose",
+		      xstrdup_printf("%u", bg_conf->bridge_api_verb));
+
+	/* DenyPassThrough is assembled one letter per denied dimension
+	 * (trailing comma stripped), so it keeps explicit handling. */
+	if (bg_conf->deny_pass) {
+		key_pair = xmalloc(sizeof(config_key_pair_t));
+		key_pair->name = xstrdup("DenyPassThrough");
+		if (bg_conf->deny_pass & PASS_DENY_A)
+			xstrcat(key_pair->value, "A,");
+		if (bg_conf->deny_pass & PASS_DENY_X)
+			xstrcat(key_pair->value, "X,");
+		if (bg_conf->deny_pass & PASS_DENY_Y)
+			xstrcat(key_pair->value, "Y,");
+		if (bg_conf->deny_pass & PASS_DENY_Z)
+			xstrcat(key_pair->value, "Z,");
+		if (key_pair->value)
+			key_pair->value[strlen(key_pair->value)-1] = '\0';
+		list_append(my_list, key_pair);
+	}
+
+	switch(bg_conf->layout_mode) {
+	case LAYOUT_STATIC:
+		_add_key_pair(my_list, "LayoutMode", xstrdup("Static"));
+		break;
+	case LAYOUT_OVERLAP:
+		_add_key_pair(my_list, "LayoutMode", xstrdup("Overlap"));
+		break;
+	case LAYOUT_DYNAMIC:
+		_add_key_pair(my_list, "LayoutMode", xstrdup("Dynamic"));
+		break;
+	default:
+		_add_key_pair(my_list, "LayoutMode", xstrdup("Unknown"));
+		break;
+	}
+
+	_add_key_pair(my_list, "MloaderImage",
+		      xstrdup(bg_conf->default_mloaderimage));
+
+	_add_key_pair(my_list, "NodeCardNodeCnt",
+		      xstrdup_printf("%u", bg_conf->nodecard_cnode_cnt));
+
+	_add_key_pair(my_list, "IONodesPerMP",
+		      xstrdup_printf("%u", bg_conf->ionodes_per_mp));
+
+	list_sort(my_list, (ListCmpF) sort_key_pairs);
+
+	return my_list;
+}
+#endif
+
+/*
+ * init() is called when the plugin is loaded, before any other functions
+ * are called.  Put global initialization here.
+ * RET SLURM_SUCCESS, or fatal()s on an unusable build/environment.
+ */
+extern int init(void)
+{
+
+#ifdef HAVE_BG
+	if (!bg_conf) {
+		/* This is needed on all systems where srun wraps the
+		   bluegene calling program (i.e. runjob).
+		*/
+		bg_conf = xmalloc(sizeof(bg_config_t));
+		/* set some defaults for most systems */
+		bg_conf->mp_cnode_cnt = 512;
+		bg_conf->quarter_cnode_cnt = 128;
+		bg_conf->nodecard_cnode_cnt = 32;
+		bg_conf->mp_nodecard_cnt = bg_conf->mp_cnode_cnt
+			/ bg_conf->nodecard_cnode_cnt;
+	}
+	/* Everything below runs only inside the slurmctld; other
+	 * callers (e.g. srun wrappers) skip config and state setup. */
+	if (bg_recover != NOT_FROM_CONTROLLER) {
+#if defined HAVE_BG_L_P && (SYSTEM_DIMENSIONS != 3)
+		fatal("SYSTEM_DIMENSIONS value (%d) invalid for BlueGene",
+		      SYSTEM_DIMENSIONS);
+#elif defined HAVE_BGQ && (SYSTEM_DIMENSIONS != 4)
+		fatal("SYSTEM_DIMENSIONS value (%d) invalid for BGQ",
+		      SYSTEM_DIMENSIONS);
+#endif
+
+#if defined HAVE_BG_FILES && defined HAVE_BG_L_P
+#ifdef HAVE_BGL
+	        if (!getenv("CLASSPATH") || !getenv("DB2INSTANCE")
+		    || !getenv("VWSPATH"))
+			fatal("db2profile has not been "
+			      "run to setup DB2 environment");
+
+		/* sanity-check that our enums still match rm_api.h */
+		if ((SELECT_COPROCESSOR_MODE  != RM_PARTITION_COPROCESSOR_MODE)
+		    || (SELECT_VIRTUAL_NODE_MODE
+			!= RM_PARTITION_VIRTUAL_NODE_MODE))
+			fatal("enum node_use_type out of sync with rm_api.h");
+#endif
+		if ((SELECT_MESH  != RM_MESH)
+		    || (SELECT_TORUS != RM_TORUS)
+		    || (SELECT_NAV   != RM_NAV))
+			fatal("enum conn_type out of sync with rm_api.h");
+#endif
+
+		verbose("%s loading...", plugin_name);
+		/* if this is coming from something other than the controller
+		   we don't want to read the config or anything like that. */
+		_set_bg_lists();
+
+		/* copy the controller's identity/logging settings into
+		 * bg_conf under the slurm.conf lock */
+		xfree(bg_conf->slurm_user_name);
+		xfree(bg_conf->slurm_node_prefix);
+		slurm_conf_lock();
+		xassert(slurmctld_conf.slurm_user_name);
+		xassert(slurmctld_conf.node_prefix);
+		bg_conf->slurm_user_name =
+			xstrdup(slurmctld_conf.slurm_user_name);
+		bg_conf->slurm_node_prefix =
+			xstrdup(slurmctld_conf.node_prefix);
+		bg_conf->slurm_debug_flags = slurmctld_conf.debug_flags;
+		bg_conf->slurm_debug_level = slurmctld_conf.slurmctld_debug;
+		slurm_conf_unlock();
+
+		/* (re)create the image lists; each owns its entries */
+		if (bg_conf->blrts_list)
+			list_destroy(bg_conf->blrts_list);
+		bg_conf->blrts_list = list_create(destroy_image);
+		if (bg_conf->linux_list)
+			list_destroy(bg_conf->linux_list);
+		bg_conf->linux_list = list_create(destroy_image);
+		if (bg_conf->mloader_list)
+			list_destroy(bg_conf->mloader_list);
+		bg_conf->mloader_list = list_create(destroy_image);
+		if (bg_conf->ramdisk_list)
+			list_destroy(bg_conf->ramdisk_list);
+		bg_conf->ramdisk_list = list_create(destroy_image);
+
+		/* bring up the "ba" (block allocation) subsystem;
+		 * released in fini() via ba_fini() */
+		ba_init(NULL, 1);
+
+		verbose("BlueGene plugin loaded successfully");
+	}
+	verbose("%s loaded", plugin_name);
+#else
+	if (bg_recover != NOT_FROM_CONTROLLER)
+		fatal("select/bluegene is incompatible with a "
+		      "non BlueGene system");
+#endif
+	return SLURM_SUCCESS;
+}
+
+/* Plugin unload hook: release every piece of plugin-wide state that
+ * init() and the loaders built up, then shut down the block
+ * allocator.  Always reports success. */
+extern int fini ( void )
+{
+	_destroy_bg_config(bg_conf);
+	_destroy_bg_lists(bg_lists);
+	ba_fini();
+
+	return SLURM_SUCCESS;
+}
+
+/*
+ * The remainder of this file implements the standard SLURM
+ * node selection API.
+ */
+
+/* Save the plugin's block state to a "block_state" file under the
+ * StateSaveLocation (on real hardware DB2/the bridge also retains
+ * the underlying BlueGene block state) */
+/*
+ * Save block state to <StateSaveLocation>/block_state.
+ * Records are serialized into a buffer under block_state_mutex, then the
+ * buffer is written to block_state.new and shuffled into place
+ * (block_state -> block_state.old, block_state.new -> block_state).
+ * RET SLURM_SUCCESS always on BG builds; write errors are logged and the
+ * partial .new file is unlinked, leaving the previous state file intact.
+ */
+extern int select_p_state_save(char *dir_name)
+{
+#ifdef HAVE_BG
+	ListIterator itr;
+	bg_record_t *bg_record = NULL;
+	int error_code = 0, log_fd;
+	char *old_file, *new_file, *reg_file;
+	uint32_t blocks_packed = 0, tmp_offset, block_offset;
+	Buf buffer = init_buf(BUF_SIZE);
+	DEF_TIMERS;
+
+	debug("bluegene: select_p_state_save");
+	START_TIMER;
+	/* write header: version string plus a placeholder block count
+	 * that is back-patched below once the true count is known */
+	packstr(BLOCK_STATE_VERSION, buffer);
+	block_offset = get_buf_offset(buffer);
+	pack32(blocks_packed, buffer);
+
+	/* write block records to buffer */
+	slurm_mutex_lock(&block_state_mutex);
+	itr = list_iterator_create(bg_lists->main);
+	while ((bg_record = list_next(itr))) {
+		/* skip records already marked invalid */
+		if (bg_record->magic != BLOCK_MAGIC)
+			continue;
+
+		xassert(bg_record->bg_block_id != NULL);
+
+		_pack_block(bg_record, buffer, SLURM_PROTOCOL_VERSION);
+		_pack_block_ext(bg_record, buffer, SLURM_PROTOCOL_VERSION);
+		blocks_packed++;
+	}
+	list_iterator_destroy(itr);
+	slurm_mutex_unlock(&block_state_mutex);
+	/* back-patch the real record count into the header */
+	tmp_offset = get_buf_offset(buffer);
+	set_buf_offset(buffer, block_offset);
+	pack32(blocks_packed, buffer);
+	set_buf_offset(buffer, tmp_offset);
+	/* Maintain config read lock until we copy state_save_location *\
+	   \* unlock_slurmctld(part_read_lock);          - see below      */
+
+	/* write the buffer to file */
+	slurm_conf_lock();
+	old_file = xstrdup(slurmctld_conf.state_save_location);
+	xstrcat(old_file, "/block_state.old");
+	reg_file = xstrdup(slurmctld_conf.state_save_location);
+	xstrcat(reg_file, "/block_state");
+	new_file = xstrdup(slurmctld_conf.state_save_location);
+	xstrcat(new_file, "/block_state.new");
+	slurm_conf_unlock();
+
+	log_fd = creat(new_file, 0600);
+	if (log_fd < 0) {
+		error("Can't save state, error creating file %s, %m",
+		      new_file);
+		error_code = errno;
+	} else {
+		int pos = 0, nwrite = get_buf_offset(buffer), amount;
+		char *data = (char *)get_buf_data(buffer);
+
+		while (nwrite > 0) {
+			amount = write(log_fd, &data[pos], nwrite);
+			if (amount < 0) {
+				/* Retry interrupted writes. Previously an
+				 * EINTR fell through to the counter updates
+				 * with amount == -1, growing nwrite and
+				 * moving pos backwards (corrupt output). */
+				if (errno == EINTR)
+					continue;
+				error("Error writing file %s, %m", new_file);
+				error_code = errno;
+				break;
+			}
+			nwrite -= amount;
+			pos    += amount;
+		}
+		fsync(log_fd);
+		close(log_fd);
+	}
+	if (error_code)
+		(void) unlink(new_file);
+	else {			/* file shuffle */
+		(void) unlink(old_file);
+		if (link(reg_file, old_file))
+			debug4("unable to create link for %s -> %s: %m",
+			       reg_file, old_file);
+		(void) unlink(reg_file);
+		if (link(new_file, reg_file))
+			debug4("unable to create link for %s -> %s: %m",
+			       new_file, reg_file);
+		(void) unlink(new_file);
+	}
+	xfree(old_file);
+	xfree(reg_file);
+	xfree(new_file);
+
+	free_buf(buffer);
+	END_TIMER2("select_p_state_save");
+	return SLURM_SUCCESS;
+#else
+	return SLURM_ERROR;
+#endif
+}
+
+/*
+ * Restore block state at slurmctld startup: validate saved blocks
+ * against the current configuration, delete stale ones, and (in
+ * non-dynamic layouts) create the statically defined blocks.
+ * Guarded so that it runs only on the first invocation.
+ */
+extern int select_p_state_restore(char *dir_name)
+{
+#ifdef HAVE_BG
+	debug("bluegene: select_p_state_restore");
+
+	/* found bg blocks already on system */
+	List curr_block_list = NULL;
+	List found_block_list = NULL;
+	static time_t last_config_update = (time_t) 0;
+
+	/* only run on startup */
+	if (last_config_update)
+		return SLURM_SUCCESS;
+
+	last_config_update = time(NULL);
+	curr_block_list = list_create(destroy_bg_record);
+	found_block_list = list_create(NULL);
+//#if 0
+	/* Check to see if the configs we have are correct */
+	if (_validate_config_blocks(curr_block_list, found_block_list, dir_name)
+	    == SLURM_ERROR) {
+		_delete_old_blocks(curr_block_list, found_block_list);
+	}
+//#endif
+	/* looking for blocks only I created */
+	if (bg_conf->layout_mode == LAYOUT_DYNAMIC) {
+		info("No blocks created until jobs are submitted");
+	} else {
+		if (create_defined_blocks(bg_conf->layout_mode,
+					  found_block_list)
+		    == SLURM_ERROR) {
+			/* error in creating the static blocks, so
+			 * blocks referenced by submitted jobs won't
+			 * correspond to actual slurm blocks.
+			 */
+			/* NOTE(review): fatal() terminates the daemon, so
+			 * the return below is unreachable in practice. */
+			fatal("Error, could not create the static blocks");
+			return SLURM_ERROR;
+		}
+	}
+
+	list_destroy(curr_block_list);
+	curr_block_list = NULL;
+	list_destroy(found_block_list);
+	found_block_list = NULL;
+
+	/* publish the freshly built list in size order and bump the
+	 * change timestamp so pollers see the update */
+	slurm_mutex_lock(&block_state_mutex);
+	last_bg_update = time(NULL);
+	sort_bg_record_inc_size(bg_lists->main);
+	slurm_mutex_unlock(&block_state_mutex);
+	if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE)
+		info("Blocks have finished being created.");
+	return SLURM_SUCCESS;
+#else
+	return SLURM_ERROR;
+#endif
+}
+
+/* Sync BG blocks to currently active jobs */
+/* Sync BG blocks to currently active jobs, then mark block creation
+ * as complete so other paths (e.g. pack_select_info) may proceed. */
+extern int select_p_job_init(List job_list)
+{
+#ifdef HAVE_BG
+	int rc = sync_jobs(job_list);
+
+	/* after we have synced the blocks then we say they are
+	   created. */
+	blocks_are_created = 1;
+	return rc;
+#else
+	return SLURM_ERROR;
+#endif
+}
+
+/* Node ranking is not used by this plugin; always false. */
+extern bool select_p_node_ranking(struct node_record *node_ptr, int node_cnt)
+{
+	return false;
+}
+
+/* All initialization is performed by init() */
+extern int select_p_node_init(struct node_record *node_ptr, int node_cnt)
+{
+#ifdef HAVE_BG
+	/* Record the cpu count of a full midplane when the first node
+	 * record is large enough to represent one. */
+	if ((node_cnt > 0) && bg_conf
+	    && (node_ptr->cpus >= bg_conf->mp_cnode_cnt))
+		bg_conf->cpus_per_mp = node_ptr->cpus;
+
+	return SLURM_SUCCESS;
+#else
+	return SLURM_ERROR;
+#endif
+}
+
+/*
+ * Called by slurmctld when a new configuration file is loaded
+ * or scontrol is used to change block configuration
+ */
+extern int select_p_block_init(List part_list)
+{
+#ifdef HAVE_BG
+	/* select_p_node_init needs to be called before this to set
+	   this up correctly
+	*/
+	/* NOTE(review): fatal() exits the daemon, so the return below
+	 * is effectively unreachable. */
+	if (read_bg_conf() == SLURM_ERROR) {
+		fatal("Error, could not read the file");
+		return SLURM_ERROR;
+	}
+
+	if (part_list) {
+		struct part_record *part_ptr = NULL;
+		ListIterator itr = list_iterator_create(part_list);
+		/* Rescale each partition's node limits from c-node
+		 * counts to midplane counts. */
+		while ((part_ptr = list_next(itr))) {
+			part_ptr->max_nodes = part_ptr->max_nodes_orig;
+			part_ptr->min_nodes = part_ptr->min_nodes_orig;
+			select_p_alter_node_cnt(SELECT_SET_MP_CNT,
+						&part_ptr->max_nodes);
+			select_p_alter_node_cnt(SELECT_SET_MP_CNT,
+						&part_ptr->min_nodes);
+		}
+		list_iterator_destroy(itr);
+	}
+	return SLURM_SUCCESS;
+#else
+	return SLURM_ERROR;
+#endif
+}
+
+
+/*
+ * select_p_job_test - Given a specification of scheduling requirements,
+ *	identify the nodes which "best" satisfy the request. The specified
+ *	nodes may be DOWN or BUSY at the time of this test and may be used
+ *	to determine if a job could ever run.
+ * IN/OUT job_ptr - pointer to job being scheduled start_time is set
+ *	when we can possibly start job.
+ * IN/OUT bitmap - usable nodes are set on input, nodes not required to
+ *	satisfy the request are cleared, other left set
+ * IN min_nodes - minimum count of nodes
+ * IN max_nodes - maximum count of nodes (0==don't care)
+ * IN req_nodes - requested (or desired) count of nodes
+ * IN mode - SELECT_MODE_RUN_NOW: try to schedule job now
+ *           SELECT_MODE_TEST_ONLY: test if job can ever run
+ *           SELECT_MODE_WILL_RUN: determine when and where job can run
+ * IN preemptee_candidates - List of pointers to jobs which can be preempted.
+ * IN/OUT preemptee_job_list - Pointer to list of job pointers. These are the
+ *		jobs to be preempted to initiate the pending job. Not set
+ *		if mode=SELECT_MODE_TEST_ONLY or input pointer is NULL.
+ * RET zero on success, EINVAL otherwise
+ * NOTE: bitmap must be a superset of req_nodes at the time that
+ *	select_p_job_test is called
+ */
+extern int select_p_job_test(struct job_record *job_ptr, bitstr_t *bitmap,
+			     uint32_t min_nodes, uint32_t max_nodes,
+			     uint32_t req_nodes, uint16_t mode,
+			     List preemptee_candidates,
+			     List *preemptee_job_list)
+{
+#ifdef HAVE_BG
+	/* submit_job - is there a block where we have:
+	 * 1) geometry requested
+	 * 2) min/max nodes (MPs) requested
+	 * 3) type: TORUS or MESH or NAV (torus else mesh)
+	 *
+	 * note: we don't have to worry about security at this level
+	 * as the SLURM block logic will handle access rights.
+	 */
+
+	/* All placement work is delegated to the plugin's submit_job(). */
+	return submit_job(job_ptr, bitmap, min_nodes, max_nodes,
+			  req_nodes, mode, preemptee_candidates,
+			  preemptee_job_list);
+#else
+	return SLURM_ERROR;
+#endif
+}
+
+/* Begin execution of a job on its assigned block. */
+extern int select_p_job_begin(struct job_record *job_ptr)
+{
+#ifdef HAVE_BG
+	return start_job(job_ptr);
+#else
+	return SLURM_ERROR;
+#endif
+}
+
+/*
+ * Report whether a job's block is booted and owned by the job.
+ * RET 1 if ready, 0 if it belongs to another job/user,
+ *     READY_JOB_ERROR to retry later, READY_JOB_FATAL if the
+ *     block no longer exists.
+ */
+extern int select_p_job_ready(struct job_record *job_ptr)
+{
+#ifdef HAVE_BG
+	int rc = 1;
+	char *block_id = NULL;
+	bg_record_t *bg_record = NULL;
+
+	rc = get_select_jobinfo(job_ptr->select_jobinfo->data,
+				SELECT_JOBDATA_BLOCK_ID, &block_id);
+	if (rc == SLURM_SUCCESS) {
+		slurm_mutex_lock(&block_state_mutex);
+		bg_record = find_bg_record_in_list(bg_lists->main, block_id);
+
+		if (bg_record) {
+			uint32_t job_id = NO_JOB_RUNNING, uid = NO_VAL;
+			/* With a job_list (multiple jobs per block,
+			 * BGQ style) look this job up; otherwise the
+			 * block carries a single running job/uid. */
+			if (bg_record->job_list) {
+				block_job_info_t *job_info;
+				ListIterator itr = list_iterator_create(
+					bg_record->job_list);
+				xassert(itr);
+				while ((job_info = list_next(itr))) {
+					if (job_info->job_id
+					    == job_ptr->job_id) {
+						job_id = job_info->job_id;
+						uid = job_info->user_id;
+						break;
+					}
+				}
+				list_iterator_destroy(itr);
+			} else {
+				uid = bg_record->user_uid;
+				job_id = bg_record->job_running;
+			}
+
+			if (job_id != job_ptr->job_id) {
+				rc = 0;
+			} else if ((uid == job_ptr->user_id)
+				   && (bg_record->state == BG_BLOCK_INITED)) {
+				/* Clear the state just incase we
+				 * missed it somehow. */
+				job_ptr->job_state &= (~JOB_CONFIGURING);
+				last_job_update = time(NULL);
+				rc = 1;
+			} else if (uid != job_ptr->user_id)
+				rc = 0;
+			else
+				rc = READY_JOB_ERROR;	/* try again */
+
+		} else {
+			/* This means the block has been removed and
+			   is no longer valid.  This could happen
+			   often during an epilog on a busy system.
+			*/
+			debug2("block_ready: block %s not in bg_lists->main.",
+			       block_id);
+			rc = READY_JOB_FATAL;	/* fatal error */
+		}
+		slurm_mutex_unlock(&block_state_mutex);
+	} else
+		rc = READY_JOB_ERROR;
+	/* info("returning %d for job %u block %s %d %d", */
+	/*      rc, job_ptr->job_id, block_id, */
+	/*      READY_JOB_ERROR, READY_JOB_FATAL); */
+	xfree(block_id);
+	return rc;
+#else
+	return SLURM_ERROR;
+#endif
+}
+
+/* Job resizing is not supported on BlueGene systems. */
+extern int select_p_job_resized(struct job_record *job_ptr,
+				struct node_record *node_ptr)
+{
+	return ESLURM_NOT_SUPPORTED;
+}
+
+/* Job expansion is never permitted by this plugin. */
+extern bool select_p_job_expand_allow(void)
+{
+	return false;
+}
+
+/* Job expansion is not supported on BlueGene systems. */
+extern int select_p_job_expand(struct job_record *from_job_ptr,
+			       struct job_record *to_job_ptr)
+{
+	return ESLURM_NOT_SUPPORTED;
+}
+
+/* Signal delivery needs no plugin-specific action; report success. */
+extern int select_p_job_signal(struct job_record *job_ptr, int signal)
+{
+	return SLURM_SUCCESS;
+}
+
+/* Job termination: hand off block cleanup to term_job(). */
+extern int select_p_job_fini(struct job_record *job_ptr)
+{
+#ifdef HAVE_BG
+	return term_job(job_ptr);
+#else
+	return SLURM_ERROR;
+#endif
+}
+
+/* Job suspend is not supported on BlueGene systems. */
+extern int select_p_job_suspend(struct job_record *job_ptr, bool indf_susp)
+{
+	return ESLURM_NOT_SUPPORTED;
+}
+
+/* Job resume is not supported on BlueGene systems. */
+extern int select_p_job_resume(struct job_record *job_ptr, bool indf_susp)
+{
+	return ESLURM_NOT_SUPPORTED;
+}
+
+/*
+ * Pick midplanes (and on BGQ possibly sub-block c-nodes) for a new
+ * step within the job's block.
+ * IN job_ptr - job owning the step; must carry a bg_record.
+ * IN/OUT step_jobinfo - filled in with block id, c-node counts and,
+ *	for sub-block steps, relative start coordinates.
+ * IN node_count - nodes requested for the step.
+ * RET bitmap of picked midplanes, or NULL if nothing fits right now.
+ */
+extern bitstr_t *select_p_step_pick_nodes(struct job_record *job_ptr,
+					  select_jobinfo_t *step_jobinfo,
+					  uint32_t node_count)
+{
+	bitstr_t *picked_mps = NULL;
+	bitstr_t *avail_mps = NULL;
+	bg_record_t *bg_record = NULL;
+	char *tmp_char = NULL, *tmp_char2 = NULL;
+	ba_mp_t *ba_mp = NULL;
+	select_jobinfo_t *jobinfo = NULL;
+	xassert(job_ptr);
+
+	slurm_mutex_lock(&block_state_mutex);
+	jobinfo = job_ptr->select_jobinfo->data;
+	bg_record = jobinfo->bg_record;
+
+	if (!bg_record)
+		fatal("This job %u does not have a bg block "
+		      "assigned to it, but for some reason we are "
+		      "trying to start a step on it?",
+		      job_ptr->job_id);
+
+	xassert(bg_record->mp_used_bitmap);
+	xassert(!step_jobinfo->units_used);
+
+	/* NOTE(review): despite the name, avail_mps is a copy of the
+	 * block's *used*-midplane bitmap; set bits mean busy. */
+	if (!(avail_mps = bit_copy(bg_record->mp_used_bitmap)))
+		fatal("bit_copy malloc failure");
+
+	if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK) {
+		tmp_char = bitmap2node_name(avail_mps);
+		info("select_p_step_pick_nodes: looking to run a new "
+		     "step for job %u requesting %u nodes on block %s with "
+		     "used midplanes %s", job_ptr->job_id, node_count,
+		     bg_record->bg_block_id, tmp_char);
+		xfree(tmp_char);
+	}
+
+	xfree(step_jobinfo->bg_block_id);
+	step_jobinfo->bg_block_id = xstrdup(bg_record->bg_block_id);
+	step_jobinfo->block_cnode_cnt = bg_record->cnode_cnt;
+
+	if (((cluster_flags & CLUSTER_FLAG_BGL)
+	     || (cluster_flags & CLUSTER_FLAG_BGP))
+	    || (node_count == bg_record->cnode_cnt)) {
+		/* If we are using the whole block we need to verify
+		   if anything else is used.  If anything else is used
+		   return NULL, else return that we can use the entire
+		   thing.
+		   On BGL/P This is always the default, no matter how
+		   big the step is since you can only run 1 step per block.
+		*/
+		step_jobinfo->dim_cnt = jobinfo->dim_cnt;
+		if (bit_ffs(avail_mps) != -1) {
+			if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK)
+				info("select_p_step_pick_nodes: Looking "
+				     "for the entire block %s for job %u, "
+				     "but some of it is used.",
+				     bg_record->bg_block_id, job_ptr->job_id);
+			goto end_it;
+		}
+		if (!(picked_mps = bit_copy(job_ptr->node_bitmap)))
+			fatal("bit_copy malloc failure");
+
+		if (cluster_flags & CLUSTER_FLAG_BGQ
+		    && (bg_record->mp_count == 1)) {
+			ba_mp = list_peek(bg_record->ba_mp_list);
+			xassert(ba_mp);
+			if (!ba_mp->cnode_bitmap)
+				ba_mp->cnode_bitmap =
+					ba_create_ba_mp_cnode_bitmap(bg_record);
+			/* mark every c-node on the midplane as used by
+			 * this step */
+			step_jobinfo->units_used =
+				bit_copy(ba_mp->cnode_bitmap);
+			bit_not(step_jobinfo->units_used);
+			bit_or(ba_mp->cnode_bitmap, step_jobinfo->units_used);
+		}
+
+		bit_or(bg_record->mp_used_bitmap, picked_mps);
+		step_jobinfo->ionode_str = xstrdup(jobinfo->ionode_str);
+		goto found_it;
+	} else if ((ba_mp = ba_pick_sub_block_cnodes(
+			    bg_record, &node_count,
+			    step_jobinfo))) {
+		int dim;
+		if (!(picked_mps = bit_alloc(bit_size(job_ptr->node_bitmap))))
+			fatal("bit_copy malloc failure");
+		bit_set(bg_record->mp_used_bitmap, ba_mp->index);
+		bit_set(picked_mps, ba_mp->index);
+		for (dim = 0; dim < step_jobinfo->dim_cnt; dim++) {
+			/* The IBM software works off a relative
+			   position in the block instead of the
+			   absolute position used in SLURM.
+			   Since conn_type doesn't mean anything for a
+			   step we can just overload it since it is getting
+			   sent already and we don't need to bloat
+			   anything if we don't have to.
+			   So setting it here we can have both
+			   absolute and relative.
+			*/
+			step_jobinfo->conn_type[dim] =
+				step_jobinfo->start_loc[dim]
+				- jobinfo->start_loc[dim];
+		}
+	}
+
+found_it:
+	if (picked_mps) {
+		if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK) {
+			tmp_char = bitmap2node_name(picked_mps);
+			tmp_char2 = bitmap2node_name(bg_record->mp_used_bitmap);
+			info("select_p_step_pick_nodes: picked %s mps on "
+			     "block %s used is now %s",
+			     tmp_char, bg_record->bg_block_id,
+			     tmp_char2);
+			xfree(tmp_char);
+			xfree(tmp_char2);
+		}
+		step_jobinfo->cnode_cnt = node_count;
+	}
+
+end_it:
+	FREE_NULL_BITMAP(avail_mps);
+
+	slurm_mutex_unlock(&block_state_mutex);
+
+	return picked_mps;
+}
+
+/*
+ * Step completion: release the step's midplanes from its block's
+ * used-midplane bitmap and clear any sub-block c-nodes it held.
+ */
+extern int select_p_step_finish(struct step_record *step_ptr)
+{
+	bg_record_t *bg_record = NULL;
+	select_jobinfo_t *jobinfo = NULL;
+	int rc = SLURM_SUCCESS;
+	char *tmp_char = NULL, *tmp_char2 = NULL;
+
+	xassert(step_ptr);
+
+	slurm_mutex_lock(&block_state_mutex);
+
+	jobinfo = step_ptr->job_ptr->select_jobinfo->data;
+	bg_record = jobinfo->bg_record;
+
+	if (!bg_record)
+		fatal("This step %u.%u does not have a bg block "
+		      "assigned to it, but for some reason we are "
+		      "trying to end the step?",
+		      step_ptr->job_ptr->job_id, step_ptr->step_id);
+	/* Clear this step's midplanes from the block's used bitmap:
+	 * used &= ~step (done by inverting the step bitmap, ANDing,
+	 * and inverting it back so step_node_bitmap is unchanged). */
+	bit_not(step_ptr->step_node_bitmap);
+	bit_and(bg_record->mp_used_bitmap, step_ptr->step_node_bitmap);
+	bit_not(step_ptr->step_node_bitmap);
+
+	if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK) {
+		tmp_char = bitmap2node_name(bg_record->mp_used_bitmap);
+		tmp_char2 = bitmap2node_name(step_ptr->step_node_bitmap);
+		info("select_p_step_finish: cleared %s "
+		     "from job %u, now %s used",
+		     tmp_char2, step_ptr->job_ptr->job_id, tmp_char);
+		xfree(tmp_char);
+		xfree(tmp_char2);
+	}
+	rc = ba_clear_sub_block_cnodes(bg_record, step_ptr);
+
+	slurm_mutex_unlock(&block_state_mutex);
+
+	return rc;
+}
+
+/* The unpack for this is in common/slurm_protocol_pack.c */
+extern int select_p_pack_select_info(time_t last_query_time,
+				     uint16_t show_flags, Buf *buffer_ptr,
+				     uint16_t protocol_version)
+{
+#ifdef HAVE_BG
+	ListIterator itr;
+	bg_record_t *bg_record = NULL;
+	uint32_t blocks_packed = 0, tmp_offset;
+	Buf buffer;
+
+	/* check to see if data has changed */
+	if (last_query_time >= last_bg_update) {
+		debug2("Node select info hasn't changed since %ld",
+		       last_bg_update);
+		return SLURM_NO_CHANGE_IN_DATA;
+	} else if (blocks_are_created) {
+		*buffer_ptr = NULL;
+		buffer = init_buf(HUGE_BUF_SIZE);
+		pack32(blocks_packed, buffer);
+		pack_time(last_bg_update, buffer);
+
+		if (protocol_version >= SLURM_2_1_PROTOCOL_VERSION) {
+			if (bg_lists->main) {
+				slurm_mutex_lock(&block_state_mutex);
+				itr = list_iterator_create(bg_lists->main);
+				while ((bg_record = list_next(itr))) {
+					if (bg_record->magic != BLOCK_MAGIC)
+						continue;
+					_pack_block(bg_record, buffer,
+						    protocol_version);
+					blocks_packed++;
+				}
+				list_iterator_destroy(itr);
+				slurm_mutex_unlock(&block_state_mutex);
+			} else {
+				error("select_p_pack_select_info: "
+				      "no bg_lists->main");
+				return SLURM_ERROR;
+			}
+		}
+		tmp_offset = get_buf_offset(buffer);
+		set_buf_offset(buffer, 0);
+		pack32(blocks_packed, buffer);
+		set_buf_offset(buffer, tmp_offset);
+
+		*buffer_ptr = buffer;
+	} else {
+		error("select_p_pack_select_info: bg_lists->main not created "
+		      "yet");
+		return SLURM_ERROR;
+	}
+
+	return SLURM_SUCCESS;
+#else
+	return SLURM_ERROR;
+#endif
+}
+
+/* Thin wrapper over the plugin's select_nodeinfo_pack(). */
+extern int select_p_select_nodeinfo_pack(select_nodeinfo_t *nodeinfo,
+					 Buf buffer,
+					 uint16_t protocol_version)
+{
+	return select_nodeinfo_pack(nodeinfo, buffer, protocol_version);
+}
+
+/* Thin wrapper over the plugin's select_nodeinfo_unpack(). */
+extern int select_p_select_nodeinfo_unpack(select_nodeinfo_t **nodeinfo,
+					   Buf buffer,
+					   uint16_t protocol_version)
+{
+	return select_nodeinfo_unpack(nodeinfo, buffer, protocol_version);
+}
+
+/* Allocate an empty nodeinfo record (size argument 0). */
+extern select_nodeinfo_t *select_p_select_nodeinfo_alloc(void)
+{
+	return select_nodeinfo_alloc(0);
+}
+
+/* Thin wrapper over the plugin's select_nodeinfo_free(). */
+extern int select_p_select_nodeinfo_free(select_nodeinfo_t *nodeinfo)
+{
+	return select_nodeinfo_free(nodeinfo);
+}
+
+/* Thin wrapper over the plugin's select_nodeinfo_set_all(). */
+extern int select_p_select_nodeinfo_set_all(time_t last_query_time)
+{
+	return select_nodeinfo_set_all(last_query_time);
+}
+
+/* Per-job nodeinfo updates need no plugin action; report success. */
+extern int select_p_select_nodeinfo_set(struct job_record *job_ptr)
+{
+	return SLURM_SUCCESS;
+}
+
+/* Thin wrapper over the plugin's select_nodeinfo_get(). */
+extern int select_p_select_nodeinfo_get(select_nodeinfo_t *nodeinfo,
+					enum select_nodedata_type dinfo,
+					enum node_states state,
+					void *data)
+{
+	return select_nodeinfo_get(nodeinfo, dinfo, state, data);
+}
+
+/* Allocate a new, empty jobinfo record. */
+extern select_jobinfo_t *select_p_select_jobinfo_alloc(void)
+{
+	return alloc_select_jobinfo();
+}
+
+/* Thin wrapper over the plugin's set_select_jobinfo(). */
+extern int select_p_select_jobinfo_set(select_jobinfo_t *jobinfo,
+				       enum select_jobdata_type data_type,
+				       void *data)
+{
+	return set_select_jobinfo(jobinfo, data_type, data);
+}
+
+/* Thin wrapper over the plugin's get_select_jobinfo(). */
+extern int select_p_select_jobinfo_get(select_jobinfo_t *jobinfo,
+				       enum select_jobdata_type data_type,
+				       void *data)
+{
+	return get_select_jobinfo(jobinfo, data_type, data);
+}
+
+/* Thin wrapper over the plugin's copy_select_jobinfo(). */
+extern select_jobinfo_t *select_p_select_jobinfo_copy(select_jobinfo_t *jobinfo)
+{
+	return copy_select_jobinfo(jobinfo);
+}
+
+/* Thin wrapper over the plugin's free_select_jobinfo(). */
+extern int select_p_select_jobinfo_free(select_jobinfo_t *jobinfo)
+{
+	return free_select_jobinfo(jobinfo);
+}
+
+/* Thin wrapper over the plugin's pack_select_jobinfo(). */
+extern int  select_p_select_jobinfo_pack(select_jobinfo_t *jobinfo, Buf buffer,
+					 uint16_t protocol_version)
+{
+	return pack_select_jobinfo(jobinfo, buffer, protocol_version);
+}
+
+/* Thin wrapper over the plugin's unpack_select_jobinfo(). */
+extern int  select_p_select_jobinfo_unpack(select_jobinfo_t **jobinfo,
+					   Buf buffer,
+					   uint16_t protocol_version)
+{
+	return unpack_select_jobinfo(jobinfo, buffer, protocol_version);
+}
+
+/* Thin wrapper over the plugin's sprint_select_jobinfo(). */
+extern char *select_p_select_jobinfo_sprint(select_jobinfo_t *jobinfo,
+					    char *buf, size_t size, int mode)
+{
+	return sprint_select_jobinfo(jobinfo, buf, size, mode);
+}
+
+/* Thin wrapper over the plugin's xstrdup_select_jobinfo(). */
+extern char *select_p_select_jobinfo_xstrdup(select_jobinfo_t *jobinfo,
+					     int mode)
+{
+	return xstrdup_select_jobinfo(jobinfo, mode);
+}
+
+/*
+ * Handle an administrative state change for a block (scontrol update
+ * BlockName=...).  Supported requests: put block in error state, free,
+ * terminate/resume, remove (dynamic layout only), or recreate.
+ * Takes block_state_mutex internally; note that several paths drop and
+ * re-take the lock, so the record is revalidated where needed.
+ * RET SLURM_SUCCESS or an ESLURM_* / SLURM_ERROR code.
+ */
+extern int select_p_update_block(update_block_msg_t *block_desc_ptr)
+{
+#ifdef HAVE_BG
+	int rc = SLURM_SUCCESS;
+	bg_record_t *bg_record = NULL;
+	char reason[200];
+
+	if (!block_desc_ptr->bg_block_id) {
+		error("update_block: No name specified");
+		return ESLURM_INVALID_BLOCK_NAME;
+	}
+
+	slurm_mutex_lock(&block_state_mutex);
+	bg_record = find_bg_record_in_list(bg_lists->main,
+					   block_desc_ptr->bg_block_id);
+	if (!bg_record) {
+		slurm_mutex_unlock(&block_state_mutex);
+		return ESLURM_INVALID_BLOCK_NAME;
+	}
+
+	/* Build the accounting/audit reason string for this action. */
+	if (block_desc_ptr->reason)
+		snprintf(reason, sizeof(reason), "%s", block_desc_ptr->reason);
+	else if (block_desc_ptr->state == BG_BLOCK_BOOTING)
+		snprintf(reason, sizeof(reason),
+			 "update_block: "
+			 "Admin recreated %s.", bg_record->bg_block_id);
+	else if (block_desc_ptr->state == BG_BLOCK_NAV) {
+		if (bg_record->conn_type[0] < SELECT_SMALL)
+			snprintf(reason, sizeof(reason),
+				 "update_block: "
+				 "Admin removed block %s",
+				 bg_record->bg_block_id);
+		else
+			snprintf(reason, sizeof(reason),
+				 "update_block: "
+				 "Removed all blocks on midplane %s",
+				 bg_record->mp_str);
+
+	} else {
+		uint16_t state = bg_record->state;
+
+		if (block_desc_ptr->state == BG_BLOCK_ERROR_FLAG)
+			state |= BG_BLOCK_ERROR_FLAG;
+		else if (state & BG_BLOCK_ERROR_FLAG)
+			state &= (~BG_BLOCK_ERROR_FLAG);
+		else
+			state = block_desc_ptr->state;
+
+		snprintf(reason, sizeof(reason),
+			 "update_block: "
+			 "Admin set block %s state to %s",
+			 bg_record->bg_block_id,
+			 bg_block_state_string(state));
+	}
+
+	/* First fail any job running on this block */
+	if (bg_record->job_running > NO_JOB_RUNNING) {
+		/* bg_requeue_job needs the lock released; revalidate
+		 * the record afterwards since it may have vanished. */
+		slurm_mutex_unlock(&block_state_mutex);
+		bg_requeue_job(bg_record->job_running, 0);
+		slurm_mutex_lock(&block_state_mutex);
+		if (!block_ptr_exist_in_list(bg_lists->main, bg_record)) {
+			slurm_mutex_unlock(&block_state_mutex);
+			error("while trying to put block in "
+			      "error state it disappeared");
+			return SLURM_ERROR;
+		}
+		/* need to set the job_ptr to NULL
+		   here or we will get error message
+		   about us trying to free this block
+		   with a job in it.
+		*/
+		bg_record->job_ptr = NULL;
+	}
+
+	if (block_desc_ptr->state == BG_BLOCK_ERROR_FLAG) {
+		bg_record_t *found_record = NULL;
+		ListIterator itr;
+		List delete_list = list_create(NULL);
+		/* This loop shouldn't do much in regular Dynamic mode
+		   since there shouldn't be overlapped blocks.  But if
+		   there is a trouble block that isn't going away and
+		   we need to mark it in an error state there could be
+		   blocks overlapped where we need to requeue the jobs.
+		*/
+		itr = list_iterator_create(bg_lists->main);
+		while ((found_record = list_next(itr))) {
+			if (bg_record == found_record)
+				continue;
+
+			if (!blocks_overlap(bg_record, found_record)) {
+				debug2("block %s isn't part of errored %s",
+				       found_record->bg_block_id,
+				       bg_record->bg_block_id);
+				continue;
+			}
+			if (found_record->job_running > NO_JOB_RUNNING) {
+				if (found_record->job_ptr
+				    && IS_JOB_CONFIGURING(
+					    found_record->job_ptr))
+					info("Pending job %u on block %s "
+					     "will try to be requeued "
+					     "because overlapping block %s "
+					     "is in an error state.",
+					     found_record->job_running,
+					     found_record->bg_block_id,
+					     bg_record->bg_block_id);
+				else
+					info("Failing job %u on block %s "
+					     "because overlapping block %s "
+					     "is in an error state.",
+					     found_record->job_running,
+					     found_record->bg_block_id,
+					     bg_record->bg_block_id);
+
+				/* This job will be requeued in the
+				   free_block_list code below, just
+				   make note of it here.
+				*/
+			} else {
+				debug2("block %s is part of errored %s "
+				       "but no running job",
+				       found_record->bg_block_id,
+				       bg_record->bg_block_id);
+			}
+			list_push(delete_list, found_record);
+		}
+		list_iterator_destroy(itr);
+		slurm_mutex_unlock(&block_state_mutex);
+		free_block_list(NO_VAL, delete_list, 0, 0);
+		list_destroy(delete_list);
+		put_block_in_error_state(bg_record, reason);
+	} else if (block_desc_ptr->state == BG_BLOCK_FREE) {
+		/* Resume the block first and then free the block */
+		resume_block(bg_record);
+
+		/* Increment free_cnt to make sure we don't lose this
+		 * block since bg_free_block will unlock block_state_mutex.
+		 */
+		bg_record->free_cnt++;
+		bg_free_block(bg_record, 0, 1);
+		bg_record->free_cnt--;
+		slurm_mutex_unlock(&block_state_mutex);
+	} else if (block_desc_ptr->state == BG_BLOCK_TERM) {
+		/* This can't be RM_PARTITION_READY since the enum
+		   changed from BGL to BGP and if we are running cross
+		   cluster it just doesn't work.
+		*/
+		resume_block(bg_record);
+		slurm_mutex_unlock(&block_state_mutex);
+	} else if (bg_conf->layout_mode == LAYOUT_DYNAMIC
+		   && (block_desc_ptr->state == BG_BLOCK_NAV)) {
+		/* This means remove the block from the system.  If
+		   the block is a small block we need to remove all the
+		   blocks on that midplane.
+		*/
+		bg_record_t *found_record = NULL;
+		ListIterator itr;
+		List delete_list = list_create(NULL);
+
+		list_push(delete_list, bg_record);
+		/* only do the while loop if we are dealing with a
+		   small block */
+		if (bg_record->conn_type[0] < SELECT_SMALL)
+			goto large_block;
+
+		/* gather every other block on the same midplane */
+		itr = list_iterator_create(bg_lists->main);
+		while ((found_record = list_next(itr))) {
+			if (bg_record == found_record)
+				continue;
+
+			if (!bit_equal(bg_record->mp_bitmap,
+				       found_record->mp_bitmap)) {
+				debug2("block %s isn't part of to be freed %s",
+				       found_record->bg_block_id,
+				       bg_record->bg_block_id);
+				continue;
+			}
+			if (found_record->job_running > NO_JOB_RUNNING) {
+				if (found_record->job_ptr
+				    && IS_JOB_CONFIGURING(
+					    found_record->job_ptr))
+					info("Pending job %u on block %s "
+					     "will try to be requeued "
+					     "because overlapping block %s "
+					     "is in an error state.",
+					     found_record->job_running,
+					     found_record->bg_block_id,
+					     bg_record->bg_block_id);
+				else
+					info("Failing job %u on block %s "
+					     "because overlapping block %s "
+					     "is in an error state.",
+					     found_record->job_running,
+					     found_record->bg_block_id,
+					     bg_record->bg_block_id);
+				/* This job will be requeued in the
+				   free_block_list code below, just
+				   make note of it here.
+				*/
+			} else {
+				debug2("block %s is part of to be freed %s "
+				       "but no running job",
+				       found_record->bg_block_id,
+				       bg_record->bg_block_id);
+			}
+			list_push(delete_list, found_record);
+		}
+		list_iterator_destroy(itr);
+
+	large_block:
+		/* make sure if we are removing a block to put it back
+		   to a normal state in accounting first */
+		itr = list_iterator_create(delete_list);
+		while ((found_record = list_next(itr))) {
+			if (found_record->state & BG_BLOCK_ERROR_FLAG)
+				resume_block(found_record);
+		}
+		list_iterator_destroy(itr);
+
+		slurm_mutex_unlock(&block_state_mutex);
+		free_block_list(NO_VAL, delete_list, 0, 0);
+		list_destroy(delete_list);
+	} else if (block_desc_ptr->state == BG_BLOCK_BOOTING) {
+		/* This means recreate the block, remove it and then
+		   recreate it.
+		*/
+
+		/* make sure if we are removing a block to put it back
+		   to a normal state in accounting first */
+		if (bg_record->state & BG_BLOCK_ERROR_FLAG)
+			resume_block(bg_record);
+
+		term_jobs_on_block(bg_record->bg_block_id);
+		if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE)
+			info("select_p_update_block: "
+			     "freeing the block %s.", bg_record->bg_block_id);
+		/* Increment free_cnt to make sure we don't lose this
+		 * block since bg_free_block will unlock block_state_mutex.
+		 */
+		bg_record->free_cnt++;
+		bg_free_block(bg_record, 1, 1);
+		bg_record->free_cnt--;
+		if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE)
+			info("select_p_update_block: done");
+
+		/* Now remove it from the main list since we are
+		   looking for a state change and it won't be caught
+		   unless it is in the main list until now.
+		*/
+		remove_from_bg_list(bg_lists->main, bg_record);
+
+#if defined HAVE_BG_FILES
+		if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE)
+			info("select_p_update_block: "
+			     "removing %s from database",
+			     bg_record->bg_block_id);
+
+		rc = bridge_block_remove(bg_record);
+		if (rc != SLURM_SUCCESS) {
+			if (rc == BG_ERROR_BLOCK_NOT_FOUND) {
+				debug("select_p_update_block: "
+				      "block %s is not found",
+				      bg_record->bg_block_id);
+			} else {
+				error("select_p_update_block: "
+				      "rm_remove_partition(%s): %s",
+				      bg_record->bg_block_id,
+				      bg_err_str(rc));
+			}
+		} else
+			if (bg_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE)
+				info("select_p_update_block: done %s",
+				     (char *)bg_record->bg_block_id);
+#endif
+		/* re-create the block with a fresh id */
+		xfree(bg_record->bg_block_id);
+		if (bridge_block_create(bg_record) == SLURM_ERROR) {
+			destroy_bg_record(bg_record);
+			error("select_p_update_block: "
+			      "unable to configure block in api");
+		} else {
+			print_bg_record(bg_record);
+			list_append(bg_lists->main, bg_record);
+			sort_bg_record_inc_size(bg_lists->main);
+		}
+
+		slurm_mutex_unlock(&block_state_mutex);
+	} else {
+		slurm_mutex_unlock(&block_state_mutex);
+		error("state is ? %s",
+		      bg_block_state_string(block_desc_ptr->state));
+		return ESLURM_INVALID_NODE_STATE;
+	}
+
+	/* info("%s", reason); */
+	last_bg_update = time(NULL);
+
+	return rc;
+#else
+	return SLURM_ERROR;
+#endif
+}
+
+extern int select_p_update_sub_node (update_block_msg_t *block_desc_ptr)
+{
+#ifdef HAVE_BG
+	int rc = SLURM_SUCCESS;
+	int i = 0, j = 0;
+	char coord[SYSTEM_DIMENSIONS+1], *node_name = NULL;
+	char ionodes[128];
+	int set = 0;
+	double nc_pos = 0, last_pos = -1;
+	bitstr_t *ionode_bitmap = NULL;
+	char *name = NULL;
+
+	if (bg_conf->layout_mode != LAYOUT_DYNAMIC) {
+		info("You can't use this call unless you are on a Dynamically "
+		     "allocated system.  Please use update BlockName instead");
+		rc = ESLURM_INVALID_BLOCK_LAYOUT;
+		goto end_it;
+	}
+
+	memset(coord, 0, sizeof(coord));
+	memset(ionodes, 0, 128);
+	if (!block_desc_ptr->mp_str) {
+		error("update_sub_node: No name specified");
+		rc = ESLURM_INVALID_BLOCK_NAME;
+		goto end_it;
+	}
+	name = block_desc_ptr->mp_str;
+
+	while (name[j] != '\0') {
+		if (name[j] == '[') {
+			if (set<1) {
+				rc = SLURM_ERROR;
+				goto end_it;
+			}
+			i = j++;
+			if ((name[j] < '0'
+			     || name[j] > 'Z'
+			     || (name[j] > '9'
+				 && name[j] < 'A'))) {
+				error("update_sub_node: sub block is empty");
+				rc = SLURM_ERROR;
+				goto end_it;
+			}
+			while (name[i] != '\0') {
+				if (name[i] == ']')
+					break;
+				i++;
+			}
+			if (name[i] != ']') {
+				error("update_sub_node: "
+				      "No close (']') on sub block");
+				rc = SLURM_ERROR;
+				goto end_it;
+			}
+
+			strncpy(ionodes, name+j, i-j);
+			set++;
+			break;
+		} else if ((name[j] >= '0'
+			    && name[j] <= '9')
+			   || (name[j] >= 'A'
+			       && name[j] <= 'Z')) {
+			if (set) {
+				rc = SLURM_ERROR;
+				goto end_it;
+			}
+			/* make sure we are asking for a correct name */
+			for(i = 0; i < SYSTEM_DIMENSIONS; i++) {
+				if ((name[j+i] >= '0'
+				     && name[j+i] <= '9')
+				    || (name[j+i] >= 'A'
+					&& name[j+i] <= 'Z'))
+					continue;
+
+				error("update_sub_node: "
+				      "misformatted name given %s",
+				      name);
+				rc = SLURM_ERROR;
+				goto end_it;
+			}
+
+			strncpy(coord, name+j,
+				SYSTEM_DIMENSIONS);
+			j += SYSTEM_DIMENSIONS-1;
+			set++;
+		}
+		j++;
+	}
+
+	if (set != 2) {
+		error("update_sub_node: "
+		      "I didn't get the base partition and the sub part.");
+		rc = SLURM_ERROR;
+		goto end_it;
+	}
+	ionode_bitmap = bit_alloc(bg_conf->ionodes_per_mp);
+	bit_unfmt(ionode_bitmap, ionodes);
+	if (bit_ffs(ionode_bitmap) == -1) {
+		error("update_sub_node: Invalid ionode '%s' given.", ionodes);
+		rc = SLURM_ERROR;
+		FREE_NULL_BITMAP(ionode_bitmap);
+		goto end_it;
+	}
+	node_name = xstrdup_printf("%s%s", bg_conf->slurm_node_prefix, coord);
+	/* find out how many nodecards to get for each ionode */
+	if (block_desc_ptr->state == BG_BLOCK_ERROR_FLAG) {
+		info("Admin setting %s[%s] in an error state",
+		     node_name, ionodes);
+		for(i = 0; i<bg_conf->ionodes_per_mp; i++) {
+			if (bit_test(ionode_bitmap, i)) {
+				if ((int)nc_pos != (int)last_pos) {
+					/* find first bit in nc */
+					int start_io =
+						(int)nc_pos * bg_conf->io_ratio;
+					down_nodecard(node_name, start_io, 0);
+					last_pos = nc_pos;
+				}
+			}
+			nc_pos += bg_conf->nc_ratio;
+		}
+	} else if (block_desc_ptr->state == BG_BLOCK_FREE) {
+		info("Admin setting %s[%s] in an free state",
+		     node_name, ionodes);
+		up_nodecard(node_name, ionode_bitmap);
+	} else {
+		error("update_sub_node: Unknown state %s",
+		      bg_block_state_string(block_desc_ptr->state));
+		rc = ESLURM_INVALID_BLOCK_STATE;
+	}
+
+	FREE_NULL_BITMAP(ionode_bitmap);
+	xfree(node_name);
+
+	last_bg_update = time(NULL);
+end_it:
+	return rc;
+#else
+	return SLURM_ERROR;
+#endif
+}
+
+extern int select_p_get_info_from_plugin (enum select_plugindata_info dinfo,
+					  struct job_record *job_ptr,
+					  void *data)
+{
+#ifdef HAVE_BG
+	uint16_t *tmp16 = (uint16_t *) data;
+	uint32_t *tmp32 = (uint32_t *) data;
+	List *tmp_list = (List *) data;
+	int rc = SLURM_SUCCESS;
+
+	switch(dinfo) {
+	case SELECT_CR_PLUGIN:
+		*tmp32 = 0;
+		break;
+	case SELECT_STATIC_PART:
+		if (bg_conf->layout_mode == LAYOUT_STATIC)
+			*tmp16 = 1;
+		else
+			*tmp16 = 0;
+		break;
+
+	case SELECT_CONFIG_INFO:
+		*tmp_list = _get_config();
+		break;
+	default:
+		error("select_p_get_info_from_plugin info %d invalid",
+		      dinfo);
+		rc = SLURM_ERROR;
+		break;
+	}
+
+	return rc;
+#else
+	return SLURM_ERROR;
+#endif
+}
+
+extern int select_p_update_node_config (int index)
+{
+#ifdef HAVE_BG
+	return SLURM_SUCCESS;
+#else
+	return SLURM_ERROR;
+#endif
+}
+
+extern int select_p_update_node_state(struct node_record *node_ptr)
+{
+#ifdef HAVE_BG
+	ba_mp_t *curr_mp;
+
+	xassert(node_ptr);
+
+	if(!(curr_mp = str2ba_mp(node_ptr->name)))
+		return SLURM_ERROR;
+
+	ba_update_mp_state(curr_mp, node_ptr->node_state);
+	return SLURM_SUCCESS;
+#else
+	return SLURM_ERROR;
+#endif
+}
+
+extern int select_p_alter_node_cnt(enum select_node_cnt type, void *data)
+{
+#ifdef HAVE_BG
+	job_desc_msg_t *job_desc = (job_desc_msg_t *)data;
+	uint16_t *cpus = (uint16_t *)data;
+	uint32_t *nodes = (uint32_t *)data, tmp = 0;
+	int i;
+	uint16_t req_geometry[SYSTEM_DIMENSIONS];
+
+	if (!bg_conf->mp_cnode_cnt) {
+		fatal("select_p_alter_node_cnt: This can't be called "
+		      "before init");
+	}
+
+	switch (type) {
+	case SELECT_GET_NODE_SCALING:
+		if ((*nodes) != INFINITE)
+			(*nodes) = bg_conf->mp_cnode_cnt;
+		break;
+	case SELECT_GET_NODE_CPU_CNT:
+		if ((*cpus) != (uint16_t)INFINITE)
+			(*cpus) = bg_conf->cpu_ratio;
+		break;
+	case SELECT_GET_MP_CPU_CNT:
+		if ((*nodes) != INFINITE)
+			(*nodes) = bg_conf->cpus_per_mp;
+		break;
+	case SELECT_SET_MP_CNT:
+		if (((*nodes) == INFINITE) || ((*nodes) == NO_VAL))
+			tmp = (*nodes);
+		else if ((*nodes) > bg_conf->mp_cnode_cnt) {
+			tmp = (*nodes);
+			tmp /= bg_conf->mp_cnode_cnt;
+			if (tmp < 1)
+				tmp = 1;
+		} else
+			tmp = 1;
+		(*nodes) = tmp;
+		break;
+	case SELECT_APPLY_NODE_MIN_OFFSET:
+		if ((*nodes) == 1) {
+			/* Job will actually get more than one c-node,
+			 * but we can't be sure exactly how much so we
+			 * don't scale up this value. */
+			break;
+		}
+		(*nodes) *= bg_conf->mp_cnode_cnt;
+		break;
+	case SELECT_APPLY_NODE_MAX_OFFSET:
+		if ((*nodes) != INFINITE)
+			(*nodes) *= bg_conf->mp_cnode_cnt;
+		break;
+	case SELECT_SET_NODE_CNT:
+		get_select_jobinfo(job_desc->select_jobinfo->data,
+				   SELECT_JOBDATA_ALTERED, &tmp);
+		if (tmp == 1) {
+			return SLURM_SUCCESS;
+		}
+		tmp = 1;
+		set_select_jobinfo(job_desc->select_jobinfo->data,
+				   SELECT_JOBDATA_ALTERED, &tmp);
+
+		if (job_desc->min_nodes == (uint32_t) NO_VAL)
+			return SLURM_SUCCESS;
+
+		get_select_jobinfo(job_desc->select_jobinfo->data,
+				   SELECT_JOBDATA_GEOMETRY, &req_geometry);
+
+		if (req_geometry[0] != 0
+		    && req_geometry[0] != (uint16_t)NO_VAL) {
+			job_desc->min_nodes = 1;
+			for (i=0; i<SYSTEM_DIMENSIONS; i++)
+				job_desc->min_nodes *=
+					(uint16_t)req_geometry[i];
+			job_desc->min_nodes *= bg_conf->mp_cnode_cnt;
+			job_desc->max_nodes = job_desc->min_nodes;
+		}
+
+		/* make sure if the user only specified min_cpus to
+		   set min_nodes correctly
+		*/
+		if ((job_desc->min_cpus != NO_VAL)
+		    && (job_desc->min_cpus > job_desc->min_nodes))
+			job_desc->min_nodes =
+				job_desc->min_cpus / bg_conf->cpu_ratio;
+
+		/* initialize min_cpus to the min_nodes */
+		job_desc->min_cpus = job_desc->min_nodes * bg_conf->cpu_ratio;
+
+		if ((job_desc->max_nodes == (uint32_t) NO_VAL)
+		    || (job_desc->max_nodes < job_desc->min_nodes))
+			job_desc->max_nodes = job_desc->min_nodes;
+
+		/* See if min_nodes is greater than one base partition */
+		if (job_desc->min_nodes > bg_conf->mp_cnode_cnt) {
+			/*
+			 * if it is make sure it is a factor of
+			 * bg_conf->mp_cnode_cnt, if it isn't make it
+			 * that way
+			 */
+			tmp = job_desc->min_nodes % bg_conf->mp_cnode_cnt;
+			if (tmp > 0)
+				job_desc->min_nodes +=
+					(bg_conf->mp_cnode_cnt-tmp);
+		}
+		tmp = job_desc->min_nodes / bg_conf->mp_cnode_cnt;
+
+		/* this means it is greater or equal to one mp */
+		if (tmp > 0) {
+			set_select_jobinfo(job_desc->select_jobinfo->data,
+					   SELECT_JOBDATA_NODE_CNT,
+					   &job_desc->min_nodes);
+			job_desc->min_nodes = tmp;
+			job_desc->min_cpus = bg_conf->cpus_per_mp * tmp;
+		} else {
+#ifdef HAVE_BGL
+			if (job_desc->min_nodes <= bg_conf->nodecard_cnode_cnt
+			    && bg_conf->nodecard_ionode_cnt)
+				job_desc->min_nodes =
+					bg_conf->nodecard_cnode_cnt;
+			else if (job_desc->min_nodes
+				 <= bg_conf->quarter_cnode_cnt)
+				job_desc->min_nodes =
+					bg_conf->quarter_cnode_cnt;
+			else
+				job_desc->min_nodes =
+					bg_conf->mp_cnode_cnt;
+
+			set_select_jobinfo(job_desc->select_jobinfo->data,
+					   SELECT_JOBDATA_NODE_CNT,
+					   &job_desc->min_nodes);
+
+			tmp = bg_conf->mp_cnode_cnt/job_desc->min_nodes;
+
+			job_desc->min_cpus = bg_conf->cpus_per_mp/tmp;
+			job_desc->min_nodes = 1;
+#else
+			i = bg_conf->smallest_block;
+			while (i <= bg_conf->mp_cnode_cnt) {
+				if (job_desc->min_nodes <= i) {
+					job_desc->min_nodes = i;
+					break;
+				}
+				i *= 2;
+			}
+
+			set_select_jobinfo(job_desc->select_jobinfo->data,
+					   SELECT_JOBDATA_NODE_CNT,
+					   &job_desc->min_nodes);
+
+			job_desc->min_cpus = job_desc->min_nodes
+				* bg_conf->cpu_ratio;
+			job_desc->min_nodes = 1;
+#endif
+		}
+
+		if (job_desc->max_nodes > bg_conf->mp_cnode_cnt) {
+			tmp = job_desc->max_nodes % bg_conf->mp_cnode_cnt;
+			if (tmp > 0)
+				job_desc->max_nodes +=
+					(bg_conf->mp_cnode_cnt-tmp);
+		}
+		tmp = job_desc->max_nodes / bg_conf->mp_cnode_cnt;
+
+		if (tmp > 0) {
+			job_desc->max_nodes = tmp;
+			job_desc->max_cpus =
+				job_desc->max_nodes * bg_conf->cpus_per_mp;
+			tmp = NO_VAL;
+		} else {
+#ifdef HAVE_BGL
+			if (job_desc->max_nodes <= bg_conf->nodecard_cnode_cnt
+			    && bg_conf->nodecard_ionode_cnt)
+				job_desc->max_nodes =
+					bg_conf->nodecard_cnode_cnt;
+			else if (job_desc->max_nodes
+				 <= bg_conf->quarter_cnode_cnt)
+				job_desc->max_nodes =
+					bg_conf->quarter_cnode_cnt;
+			else
+				job_desc->max_nodes =
+					bg_conf->mp_cnode_cnt;
+
+			tmp = bg_conf->mp_cnode_cnt/job_desc->max_nodes;
+			job_desc->max_cpus = bg_conf->cpus_per_mp/tmp;
+			job_desc->max_nodes = 1;
+#else
+			i = bg_conf->smallest_block;
+			while (i <= bg_conf->mp_cnode_cnt) {
+				if (job_desc->max_nodes <= i) {
+					job_desc->max_nodes = i;
+					break;
+				}
+				i *= 2;
+			}
+			job_desc->max_cpus =
+				job_desc->max_nodes * bg_conf->cpu_ratio;
+
+			job_desc->max_nodes = 1;
+#endif
+		}
+		tmp = NO_VAL;
+
+		break;
+	default:
+		error("unknown option %d for alter_node_cnt", type);
+	}
+
+	return SLURM_SUCCESS;
+#else
+	return SLURM_ERROR;
+#endif
+}
+
+extern int select_p_reconfigure(void)
+{
+#ifdef HAVE_BG
+	slurm_conf_lock();
+	if (!slurmctld_conf.slurm_user_name
+	    || strcmp(bg_conf->slurm_user_name, slurmctld_conf.slurm_user_name))
+		error("The slurm user has changed from '%s' to '%s'.  "
+		      "If this is really what you "
+		      "want you will need to restart slurm for this "
+		      "change to be enforced in the bluegene plugin.",
+		      bg_conf->slurm_user_name, slurmctld_conf.slurm_user_name);
+	if (!slurmctld_conf.node_prefix
+	    || strcmp(bg_conf->slurm_node_prefix, slurmctld_conf.node_prefix))
+		error("Node Prefix has changed from '%s' to '%s'.  "
+		      "If this is really what you "
+		      "want you will need to restart slurm for this "
+		      "change to be enforced in the bluegene plugin.",
+		      bg_conf->slurm_node_prefix, slurmctld_conf.node_prefix);
+	bg_conf->slurm_debug_flags = slurmctld_conf.debug_flags;
+	bg_conf->slurm_debug_level = slurmctld_conf.slurmctld_debug;
+	set_ba_debug_flags(bg_conf->slurm_debug_flags);
+	slurm_conf_unlock();
+
+	return SLURM_SUCCESS;
+#else
+	return SLURM_ERROR;
+#endif
+}
+
+extern bitstr_t *select_p_resv_test(bitstr_t *avail_bitmap, uint32_t node_cnt)
+{
+	return NULL;
+}
+
+extern void select_p_ba_init(node_info_msg_t *node_info_ptr, bool sanity_check)
+{
+	ba_init(node_info_ptr, sanity_check);
+}
+
+extern void select_p_ba_fini(void)
+{
+	ba_fini();
+}
+
+extern int *select_p_ba_get_dims(void)
+{
+#ifdef HAVE_BG
+	return DIM_SIZE;
+#else
+	return NULL;
+#endif
+}
diff --git a/src/plugins/select/bluegene/sfree/Makefile.am b/src/plugins/select/bluegene/sfree/Makefile.am
new file mode 100644
index 000000000..426db63e1
--- /dev/null
+++ b/src/plugins/select/bluegene/sfree/Makefile.am
@@ -0,0 +1,17 @@
+# Makefile for the bluegene sfree tool
+
+AUTOMAKE_OPTIONS = foreign
+CLEANFILES = core.*
+
+INCLUDES = -I$(top_srcdir)  -I$(top_srcdir)/src/common $(BG_INCLUDES)
+
+sbin_PROGRAMS = sfree
+
+sfree_LDADD = $(top_builddir)/src/api/libslurm.o -ldl
+
+sfree_SOURCES = sfree.c sfree.h opts.c
+sfree_LDFLAGS = -export-dynamic -lm $(CMD_LDFLAGS)
+
+force:
+$(sfree_LDADD) : force
+	@cd `dirname $@` && $(MAKE) `basename $@`
diff --git a/src/plugins/checkpoint/xlch/Makefile.in b/src/plugins/select/bluegene/sfree/Makefile.in
similarity index 80%
rename from src/plugins/checkpoint/xlch/Makefile.in
rename to src/plugins/select/bluegene/sfree/Makefile.in
index 9e4617874..e89a44f0a 100644
--- a/src/plugins/checkpoint/xlch/Makefile.in
+++ b/src/plugins/select/bluegene/sfree/Makefile.in
@@ -15,7 +15,7 @@
 
 @SET_MAKE@
 
-# Makefile for checkpoint/xlch plugin
+# Makefile for the bluegene sfree tool
 
 VPATH = @srcdir@
 pkgdatadir = $(datadir)/@PACKAGE@
@@ -37,7 +37,8 @@ POST_UNINSTALL = :
 build_triplet = @build@
 host_triplet = @host@
 target_triplet = @target@
-subdir = src/plugins/checkpoint/xlch
+sbin_PROGRAMS = sfree$(EXEEXT)
+subdir = src/plugins/select/bluegene/sfree
 DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in
 ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
 am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
@@ -63,6 +64,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +75,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -81,35 +84,14 @@ mkinstalldirs = $(install_sh) -d
 CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h
 CONFIG_CLEAN_FILES =
 CONFIG_CLEAN_VPATH_FILES =
-am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
-am__vpath_adj = case $$p in \
-    $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
-    *) f=$$p;; \
-  esac;
-am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
-am__install_max = 40
-am__nobase_strip_setup = \
-  srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
-am__nobase_strip = \
-  for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
-am__nobase_list = $(am__nobase_strip_setup); \
-  for p in $$list; do echo "$$p $$p"; done | \
-  sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
-  $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
-    if (++n[$$2] == $(am__install_max)) \
-      { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
-    END { for (dir in files) print dir, files[dir] }'
-am__base_list = \
-  sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
-  sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
-am__installdirs = "$(DESTDIR)$(pkglibdir)"
-LTLIBRARIES = $(pkglib_LTLIBRARIES)
-checkpoint_xlch_la_LIBADD =
-am_checkpoint_xlch_la_OBJECTS = checkpoint_xlch.lo
-checkpoint_xlch_la_OBJECTS = $(am_checkpoint_xlch_la_OBJECTS)
-checkpoint_xlch_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
-	$(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
-	$(checkpoint_xlch_la_LDFLAGS) $(LDFLAGS) -o $@
+am__installdirs = "$(DESTDIR)$(sbindir)"
+PROGRAMS = $(sbin_PROGRAMS)
+am_sfree_OBJECTS = sfree.$(OBJEXT) opts.$(OBJEXT)
+sfree_OBJECTS = $(am_sfree_OBJECTS)
+sfree_DEPENDENCIES = $(top_builddir)/src/api/libslurm.o
+sfree_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+	--mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(sfree_LDFLAGS) \
+	$(LDFLAGS) -o $@
 DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm
 depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp
 am__depfiles_maybe = depfiles
@@ -123,8 +105,8 @@ CCLD = $(CC)
 LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
 	--mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \
 	$(LDFLAGS) -o $@
-SOURCES = $(checkpoint_xlch_la_SOURCES)
-DIST_SOURCES = $(checkpoint_xlch_la_SOURCES)
+SOURCES = $(sfree_SOURCES)
+DIST_SOURCES = $(sfree_SOURCES)
 ETAGS = etags
 CTAGS = ctags
 DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
@@ -138,7 +120,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -175,6 +160,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -232,6 +218,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -267,6 +254,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
@@ -320,11 +308,11 @@ top_build_prefix = @top_build_prefix@
 top_builddir = @top_builddir@
 top_srcdir = @top_srcdir@
 AUTOMAKE_OPTIONS = foreign
-PLUGIN_FLAGS = -module -avoid-version --export-dynamic
-INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common
-pkglib_LTLIBRARIES = checkpoint_xlch.la
-checkpoint_xlch_la_SOURCES = checkpoint_xlch.c
-checkpoint_xlch_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
+CLEANFILES = core.*
+INCLUDES = -I$(top_srcdir)  -I$(top_srcdir)/src/common $(BG_INCLUDES)
+sfree_LDADD = $(top_builddir)/src/api/libslurm.o -ldl
+sfree_SOURCES = sfree.c sfree.h opts.c
+sfree_LDFLAGS = -export-dynamic -lm $(CMD_LDFLAGS)
 all: all-am
 
 .SUFFIXES:
@@ -338,9 +326,9 @@ $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am  $(am__confi
 	      exit 1;; \
 	  esac; \
 	done; \
-	echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/plugins/checkpoint/xlch/Makefile'; \
+	echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/plugins/select/bluegene/sfree/Makefile'; \
 	$(am__cd) $(top_srcdir) && \
-	  $(AUTOMAKE) --foreign src/plugins/checkpoint/xlch/Makefile
+	  $(AUTOMAKE) --foreign src/plugins/select/bluegene/sfree/Makefile
 .PRECIOUS: Makefile
 Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
 	@case '$?' in \
@@ -359,39 +347,52 @@ $(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
 $(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
 	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
 $(am__aclocal_m4_deps):
-install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES)
+install-sbinPROGRAMS: $(sbin_PROGRAMS)
 	@$(NORMAL_INSTALL)
-	test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)"
-	@list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \
-	list2=; for p in $$list; do \
-	  if test -f $$p; then \
-	    list2="$$list2 $$p"; \
-	  else :; fi; \
-	done; \
-	test -z "$$list2" || { \
-	  echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pkglibdir)'"; \
-	  $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pkglibdir)"; \
-	}
-
-uninstall-pkglibLTLIBRARIES:
+	test -z "$(sbindir)" || $(MKDIR_P) "$(DESTDIR)$(sbindir)"
+	@list='$(sbin_PROGRAMS)'; test -n "$(sbindir)" || list=; \
+	for p in $$list; do echo "$$p $$p"; done | \
+	sed 's/$(EXEEXT)$$//' | \
+	while read p p1; do if test -f $$p || test -f $$p1; \
+	  then echo "$$p"; echo "$$p"; else :; fi; \
+	done | \
+	sed -e 'p;s,.*/,,;n;h' -e 's|.*|.|' \
+	    -e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \
+	sed 'N;N;N;s,\n, ,g' | \
+	$(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \
+	  { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
+	    if ($$2 == $$4) files[d] = files[d] " " $$1; \
+	    else { print "f", $$3 "/" $$4, $$1; } } \
+	  END { for (d in files) print "f", d, files[d] }' | \
+	while read type dir files; do \
+	    if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
+	    test -z "$$files" || { \
+	    echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files '$(DESTDIR)$(sbindir)$$dir'"; \
+	    $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files "$(DESTDIR)$(sbindir)$$dir" || exit $$?; \
+	    } \
+	; done
+
+uninstall-sbinPROGRAMS:
 	@$(NORMAL_UNINSTALL)
-	@list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \
-	for p in $$list; do \
-	  $(am__strip_dir) \
-	  echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$f'"; \
-	  $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$f"; \
-	done
-
-clean-pkglibLTLIBRARIES:
-	-test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES)
-	@list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \
-	  dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \
-	  test "$$dir" != "$$p" || dir=.; \
-	  echo "rm -f \"$${dir}/so_locations\""; \
-	  rm -f "$${dir}/so_locations"; \
-	done
-checkpoint_xlch.la: $(checkpoint_xlch_la_OBJECTS) $(checkpoint_xlch_la_DEPENDENCIES) 
-	$(checkpoint_xlch_la_LINK) -rpath $(pkglibdir) $(checkpoint_xlch_la_OBJECTS) $(checkpoint_xlch_la_LIBADD) $(LIBS)
+	@list='$(sbin_PROGRAMS)'; test -n "$(sbindir)" || list=; \
+	files=`for p in $$list; do echo "$$p"; done | \
+	  sed -e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \
+	      -e 's/$$/$(EXEEXT)/' `; \
+	test -n "$$list" || exit 0; \
+	echo " ( cd '$(DESTDIR)$(sbindir)' && rm -f" $$files ")"; \
+	cd "$(DESTDIR)$(sbindir)" && rm -f $$files
+
+clean-sbinPROGRAMS:
+	@list='$(sbin_PROGRAMS)'; test -n "$$list" || exit 0; \
+	echo " rm -f" $$list; \
+	rm -f $$list || exit $$?; \
+	test -n "$(EXEEXT)" || exit 0; \
+	list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \
+	echo " rm -f" $$list; \
+	rm -f $$list
+sfree$(EXEEXT): $(sfree_OBJECTS) $(sfree_DEPENDENCIES) 
+	@rm -f sfree$(EXEEXT)
+	$(sfree_LINK) $(sfree_OBJECTS) $(sfree_LDADD) $(LIBS)
 
 mostlyclean-compile:
 	-rm -f *.$(OBJEXT)
@@ -399,7 +400,8 @@ mostlyclean-compile:
 distclean-compile:
 	-rm -f *.tab.c
 
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/checkpoint_xlch.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/opts.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sfree.Po@am__quote@
 
 .c.o:
 @am__fastdepCC_TRUE@	$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
@@ -512,9 +514,9 @@ distdir: $(DISTFILES)
 	done
 check-am: all-am
 check: check-am
-all-am: Makefile $(LTLIBRARIES)
+all-am: Makefile $(PROGRAMS)
 installdirs:
-	for dir in "$(DESTDIR)$(pkglibdir)"; do \
+	for dir in "$(DESTDIR)$(sbindir)"; do \
 	  test -z "$$dir" || $(MKDIR_P) "$$dir"; \
 	done
 install: install-am
@@ -534,6 +536,7 @@ install-strip:
 mostlyclean-generic:
 
 clean-generic:
+	-test -z "$(CLEANFILES)" || rm -f $(CLEANFILES)
 
 distclean-generic:
 	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
@@ -544,7 +547,7 @@ maintainer-clean-generic:
 	@echo "it deletes files that may require special tools to rebuild."
 clean: clean-am
 
-clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \
+clean-am: clean-generic clean-libtool clean-sbinPROGRAMS \
 	mostlyclean-am
 
 distclean: distclean-am
@@ -571,7 +574,7 @@ install-dvi: install-dvi-am
 
 install-dvi-am:
 
-install-exec-am: install-pkglibLTLIBRARIES
+install-exec-am: install-sbinPROGRAMS
 
 install-html: install-html-am
 
@@ -611,28 +614,27 @@ ps: ps-am
 
 ps-am:
 
-uninstall-am: uninstall-pkglibLTLIBRARIES
+uninstall-am: uninstall-sbinPROGRAMS
 
 .MAKE: install-am install-strip
 
 .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \
-	clean-libtool clean-pkglibLTLIBRARIES ctags distclean \
+	clean-libtool clean-sbinPROGRAMS ctags distclean \
 	distclean-compile distclean-generic distclean-libtool \
 	distclean-tags distdir dvi dvi-am html html-am info info-am \
 	install install-am install-data install-data-am install-dvi \
 	install-dvi-am install-exec install-exec-am install-html \
 	install-html-am install-info install-info-am install-man \
-	install-pdf install-pdf-am install-pkglibLTLIBRARIES \
-	install-ps install-ps-am install-strip installcheck \
+	install-pdf install-pdf-am install-ps install-ps-am \
+	install-sbinPROGRAMS install-strip installcheck \
 	installcheck-am installdirs maintainer-clean \
 	maintainer-clean-generic mostlyclean mostlyclean-compile \
 	mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \
-	tags uninstall uninstall-am uninstall-pkglibLTLIBRARIES
+	tags uninstall uninstall-am uninstall-sbinPROGRAMS
 
 
 force:
-
-$(checkpoint_xlch_LDADD) : force
+$(sfree_LDADD) : force
 	@cd `dirname $@` && $(MAKE) `basename $@`
 
 # Tell versions [3.59,3.63) of GNU make to not export all variables.
diff --git a/src/plugins/select/bluegene/plugin/opts.c b/src/plugins/select/bluegene/sfree/opts.c
similarity index 98%
rename from src/plugins/select/bluegene/plugin/opts.c
rename to src/plugins/select/bluegene/sfree/opts.c
index de45b3904..290d37564 100644
--- a/src/plugins/select/bluegene/plugin/opts.c
+++ b/src/plugins/select/bluegene/sfree/opts.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/select/bluegene/plugin/sfree.c b/src/plugins/select/bluegene/sfree/sfree.c
similarity index 96%
rename from src/plugins/select/bluegene/plugin/sfree.c
rename to src/plugins/select/bluegene/sfree/sfree.c
index 87aa795ce..fe16af435 100644
--- a/src/plugins/select/bluegene/plugin/sfree.c
+++ b/src/plugins/select/bluegene/sfree/sfree.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -102,7 +102,7 @@ static int _check_status()
 						    bg_block_id)) {
 						if (block_ptr->
 						    block_array[i].
-						    state == RM_PARTITION_FREE)
+						    state == BG_BLOCK_FREE)
 							list_delete_item(itr);
 						break;
 					}
@@ -156,9 +156,9 @@ int main(int argc, char *argv[])
 	itr = list_iterator_create(block_list);
 	while ((block_name = list_next(itr))) {
 		if (remove_blocks)
-			msg.state = RM_PARTITION_NAV;
+			msg.state = BG_BLOCK_NAV;
 		else
-			msg.state = RM_PARTITION_FREE;
+			msg.state = BG_BLOCK_FREE;
 		msg.bg_block_id = block_name;
 		rc = slurm_update_block(&msg);
 		if (rc != SLURM_SUCCESS)
diff --git a/src/plugins/select/bluegene/plugin/sfree.h b/src/plugins/select/bluegene/sfree/sfree.h
similarity index 89%
rename from src/plugins/select/bluegene/plugin/sfree.h
rename to src/plugins/select/bluegene/sfree/sfree.h
index a6e05ea0f..ed3f0bf07 100644
--- a/src/plugins/select/bluegene/plugin/sfree.h
+++ b/src/plugins/select/bluegene/sfree/sfree.h
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -39,6 +39,10 @@
 #ifndef _SFREE_H
 #define _SFREE_H
 
+#if HAVE_CONFIG_H
+#  include "config.h"
+#endif
+
 #if HAVE_GETOPT_H
 #  include <getopt.h>
 #else
@@ -46,7 +50,14 @@
 #endif
 
 #include <signal.h>
-#include "bluegene.h"
+#include <stdio.h>
+#include <stdlib.h>		/* getenv     */
+
+#include "slurm/slurm.h"
+#include "../bg_enums.h"
+#include "src/common/slurm_protocol_defs.h"
+#include "src/common/log.h"
+#include "src/common/xstring.h"
 
 /* getopt_long options, integers but not characters */
 #define OPT_LONG_HELP	0x100
diff --git a/src/plugins/select/bluegene/plugin/slurm_epilog.c b/src/plugins/select/bluegene/slurm_epilog.c
similarity index 98%
rename from src/plugins/select/bluegene/plugin/slurm_epilog.c
rename to src/plugins/select/bluegene/slurm_epilog.c
index 0fa526175..e297ca47c 100644
--- a/src/plugins/select/bluegene/plugin/slurm_epilog.c
+++ b/src/plugins/select/bluegene/slurm_epilog.c
@@ -11,7 +11,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -51,7 +51,8 @@
 #include <string.h>
 #include <strings.h>
 #include <sys/types.h>
-#include <slurm/slurm.h>
+
+#include "slurm/slurm.h"
 
 #include "src/common/hostlist.h"
 
diff --git a/src/plugins/select/bluegene/plugin/slurm_prolog.c b/src/plugins/select/bluegene/slurm_prolog.c
similarity index 93%
rename from src/plugins/select/bluegene/plugin/slurm_prolog.c
rename to src/plugins/select/bluegene/slurm_prolog.c
index d96a6b943..86e171d14 100644
--- a/src/plugins/select/bluegene/plugin/slurm_prolog.c
+++ b/src/plugins/select/bluegene/slurm_prolog.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -49,11 +49,12 @@
 #include <string.h>
 #include <strings.h>
 #include <sys/types.h>
-#include <slurm/slurm.h>
+
+#include "slurm/slurm.h"
 
 #include "src/common/hostlist.h"
 #include "src/common/node_select.h"
-#include "src/plugins/select/bluegene/plugin/bg_boot_time.h"
+#include "src/plugins/select/bluegene/bg_enums.h"
 
 #define _DEBUG 0
 #define POLL_SLEEP 3			/* retry interval in seconds  */
@@ -61,14 +62,6 @@
 int max_delay = BG_FREE_PREVIOUS_BLOCK + BG_MIN_BLOCK_BOOT;
 int cur_delay = 0;
 
-enum rm_partition_state {RM_PARTITION_FREE,
-			 RM_PARTITION_CONFIGURING,
-			 RM_PARTITION_READY,
-			 RM_PARTITION_BUSY,
-			 RM_PARTITION_DEALLOCATING,
-			 RM_PARTITION_ERROR,
-			 RM_PARTITION_NAV};
-
 static int  _get_job_size(uint32_t job_id);
 static int  _wait_part_ready(uint32_t job_id);
 static int  _partitions_dealloc();
@@ -209,8 +202,7 @@ static int _partitions_dealloc()
 		return -1;
 	}
 	for (i=0; i<new_bg_ptr->record_count; i++) {
-		if (new_bg_ptr->block_array[i].state
-		    == RM_PARTITION_DEALLOCATING) {
+		if (new_bg_ptr->block_array[i].state == BG_BLOCK_TERM) {
 			rc = 1;
 			break;
 		}
diff --git a/src/plugins/select/bluegene/wrap_rm_api.h b/src/plugins/select/bluegene/wrap_rm_api.h
deleted file mode 100644
index cf3481215..000000000
--- a/src/plugins/select/bluegene/wrap_rm_api.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/* We can not include IBM's attach_bgl.h or attach_bg.h file due to problems
- * in compiling it with gcc and missing externals in that file, so we define
- * our own version of the header here and define critical variable. We also
- * "#define ATTACH_BGL_H" and "define ATTACH_BG_H" to avoid having IBM's
- * header files loaded for BGL and BGP systems respectively.*/
-
-#ifndef ATTACH_BGL_H	/* Test for attach_bgl.h on BGL */
-#ifndef ATTACH_BG_H	/* Test for attach_bg.h on BGP */
-#define ATTACH_BGL_H	/* Replacement for attach_bgl.h on BGL */
-#define ATTACH_BG_H	/* Replacement for attach_bg.h on BGP */
-
-#if HAVE_CONFIG_H
-#  include "config.h"
-#endif
-
-#ifdef HAVE_BG_FILES
-
-  /* MPI Debug support */
-  typedef struct {
-    const char * host_name;        /* Something we can pass to inet_addr */
-    const char * executable_name;  /* The name of the image */
-    int    pid;                    /* The pid of the process */
-  } MPIR_PROCDESC;
-
-#include "rm_api.h"
-
-#else
-  typedef char *   pm_partition_id_t;
-  typedef int      rm_connection_type_t;
-  typedef int      rm_partition_mode_t;
-  typedef int      rm_partition_state_t;
-  typedef uint16_t rm_partition_t;
-  typedef char *   rm_BGL_t;
-  typedef char *   rm_BG_t;
-  typedef char *   rm_component_id_t;
-  typedef rm_component_id_t rm_bp_id_t;
-  typedef int      rm_BP_state_t;
-  typedef char *   rm_job_list_t;
-
-  /* these are the typedefs that we will need to have
-   * if we want the states on the Front End Node of a BG system
-   * make certain they match the rm_api.h values on the Service Node */
-  enum rm_partition_state {RM_PARTITION_FREE,
-			   RM_PARTITION_CONFIGURING,
-#ifdef HAVE_BGL
-			   RM_PARTITION_READY,
-			   RM_PARTITION_BUSY,
-#else
-			   RM_PARTITION_REBOOTING,
-			   RM_PARTITION_READY,
-#endif
-			   RM_PARTITION_DEALLOCATING,
-			   RM_PARTITION_ERROR,
-			   RM_PARTITION_NAV};
-  typedef enum status {STATUS_OK  = 0,
-	  	       PARTITION_NOT_FOUND = -1,
-		       JOB_NOT_FOUND = -2,
-		       BP_NOT_FOUND = -3,
-		       SWITCH_NOT_FOUND = -4,
-		       JOB_ALREADY_DEFINED=-5,
-#ifndef HAVE_BGL
-		       PARTITION_ALREADY_DEFINED=-6,
-#endif
-		       CONNECTION_ERROR=-10,
-		       INTERNAL_ERROR = -11,
-		       INVALID_INPUT=-12,
-		       INCOMPATIBLE_STATE=-13,
-		       INCONSISTENT_DATA=-14
-  }status_t;
-
-#endif
-
-/* The below #defines are needed for cross cluster dealings */
-#ifdef HAVE_BGL
-typedef rm_BGL_t my_bluegene_t;
-#define PARTITION_ALREADY_DEFINED -6
-#define RM_PARTITION_REBOOTING 1000
-#else
-typedef rm_BG_t my_bluegene_t;
-#define RM_PARTITION_BUSY 1000
-
-#endif
-
-#endif	/* #ifndef ATTACH_BG_H */
-#endif	/* #ifndef ATTACH_BGL_H */
diff --git a/src/plugins/select/cons_res/Makefile.in b/src/plugins/select/cons_res/Makefile.in
index d42b1d3ec..e4d36874d 100644
--- a/src/plugins/select/cons_res/Makefile.in
+++ b/src/plugins/select/cons_res/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -139,7 +141,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -176,6 +181,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -233,6 +239,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -268,6 +275,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/select/cons_res/dist_tasks.c b/src/plugins/select/cons_res/dist_tasks.c
index 560db4c26..f8b23adc2 100644
--- a/src/plugins/select/cons_res/dist_tasks.c
+++ b/src/plugins/select/cons_res/dist_tasks.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -75,24 +75,22 @@ static int _compute_c_b_task_dist(struct job_record *job_ptr)
 		return SLURM_ERROR;
 	}
 
-
-	if (job_ptr->details->ntasks_per_node == 0)
-		maxtasks = job_res->ncpus;
-	else
-		maxtasks = job_res->ncpus * job_ptr->details->ntasks_per_node;
-
+	maxtasks = job_res->ncpus;
 	avail_cpus = job_res->cpus;
-
 	job_res->cpus = xmalloc(job_res->nhosts * sizeof(uint16_t));
 
 	/* ncpus is already set the number of tasks if overcommit is used */
-	if (!job_ptr->details->overcommit
-	    && (job_ptr->details->cpus_per_task > 1))
-		maxtasks = maxtasks / job_ptr->details->cpus_per_task;
+	if (!job_ptr->details->overcommit &&
+	    (job_ptr->details->cpus_per_task > 1)) {
+		if (job_ptr->details->ntasks_per_node == 0)
+			maxtasks = maxtasks / job_ptr->details->cpus_per_task;
+		else
+			maxtasks = job_ptr->details->ntasks_per_node * job_res->nhosts;
+	}
 
 	/* Safe guard if the user didn't specified a lower number of
 	 * cpus than cpus_per_task or didn't specify the number. */
-	if(!maxtasks) {
+	if (!maxtasks) {
 		error("_compute_c_b_task_dist: request was for 0 tasks, "
 		      "setting to 1");
 		maxtasks = 1;
diff --git a/src/plugins/select/cons_res/dist_tasks.h b/src/plugins/select/cons_res/dist_tasks.h
index 0b085c723..df177ddb5 100644
--- a/src/plugins/select/cons_res/dist_tasks.h
+++ b/src/plugins/select/cons_res/dist_tasks.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/select/cons_res/job_test.c b/src/plugins/select/cons_res/job_test.c
index febfbddbc..2fc1a1b35 100644
--- a/src/plugins/select/cons_res/job_test.c
+++ b/src/plugins/select/cons_res/job_test.c
@@ -62,7 +62,7 @@
  *  from select/linear
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -100,6 +100,7 @@
 #    include <inttypes.h>
 #  endif
 #endif
+#include <time.h>
 
 #include "dist_tasks.h"
 #include "job_test.h"
@@ -151,6 +152,10 @@ uint16_t _allocate_sockets(struct job_record *job_ptr, bitstr_t *core_map,
 		if (mc_ptr->ntasks_per_core) {
 			ntasks_per_core = mc_ptr->ntasks_per_core;
 		}
+		if ((mc_ptr->threads_per_core != (uint16_t) NO_VAL) &&
+		    (mc_ptr->threads_per_core <  ntasks_per_core)) {
+			ntasks_per_core = mc_ptr->threads_per_core;
+		}
 		ntasks_per_socket = mc_ptr->ntasks_per_socket;
 	}
 
@@ -400,6 +405,10 @@ uint16_t _allocate_cores(struct job_record *job_ptr, bitstr_t *core_map,
 		if (mc_ptr->ntasks_per_core) {
 			ntasks_per_core = mc_ptr->ntasks_per_core;
 		}
+		if ((mc_ptr->threads_per_core != (uint16_t) NO_VAL) &&
+		    (mc_ptr->threads_per_core <  ntasks_per_core)) {
+			ntasks_per_core = mc_ptr->threads_per_core;
+		}
 	}
 
 	/* These are the job parameters that we must respect:
@@ -613,6 +622,32 @@ uint16_t _can_job_run_on_node(struct job_record *job_ptr, bitstr_t *core_map,
 	core_start_bit = cr_get_coremap_offset(node_i);
 	core_end_bit   = cr_get_coremap_offset(node_i+1) - 1;
 	node_ptr = select_node_record[node_i].node_ptr;
+
+	if (cr_type & CR_MEMORY) {
+		/* Memory Check: check pn_min_memory to see if:
+		 *          - this node has enough memory (MEM_PER_CPU == 0)
+		 *          - there are enough free_cores (MEM_PER_CPU = 1)
+		 */
+		req_mem   = job_ptr->details->pn_min_memory & ~MEM_PER_CPU;
+		avail_mem = select_node_record[node_i].real_memory;
+		if (!test_only)
+			avail_mem -= node_usage[node_i].alloc_memory;
+		if (job_ptr->details->pn_min_memory & MEM_PER_CPU) {
+			/* memory is per-cpu */
+			while ((cpus > 0) && ((req_mem * cpus) > avail_mem))
+				cpus--;
+			if ((cpus < job_ptr->details->ntasks_per_node) ||
+			    ((job_ptr->details->cpus_per_task > 1) &&
+			     (cpus < job_ptr->details->cpus_per_task)))
+				cpus = 0;
+			/* FIXME: Need to recheck min_cores, etc. here */
+		} else {
+			/* memory is per node */
+			if (req_mem > avail_mem)
+				cpus = 0;
+		}
+	}
+
 	if (node_usage[node_i].gres_list)
 		gres_list = node_usage[node_i].gres_list;
 	else
@@ -622,34 +657,13 @@ uint16_t _can_job_run_on_node(struct job_record *job_ptr, bitstr_t *core_map,
 					 core_map, core_start_bit,
 					 core_end_bit, job_ptr->job_id,
 					 node_ptr->name);
+	if ((gres_cpus < job_ptr->details->ntasks_per_node) ||
+	    ((job_ptr->details->cpus_per_task > 1) &&
+	     (gres_cpus < job_ptr->details->cpus_per_task)))
+		gres_cpus = 0;
 	if (gres_cpus < cpus)
 		cpus = gres_cpus;
 
-	if (!(cr_type & CR_MEMORY))
-		return cpus;
-
-	/* Memory Check: check pn_min_memory to see if:
-	 *          - this node has enough memory (MEM_PER_CPU == 0)
-	 *          - there are enough free_cores (MEM_PER_CPU = 1)
-	 */
-	req_mem   = job_ptr->details->pn_min_memory & ~MEM_PER_CPU;
-	avail_mem = select_node_record[node_i].real_memory;
-	if (!test_only)
-		avail_mem -= node_usage[node_i].alloc_memory;
-	if (job_ptr->details->pn_min_memory & MEM_PER_CPU) {
-		/* memory is per-cpu */
-		while ((cpus > 0) && ((req_mem * cpus) > avail_mem))
-			cpus--;
-		if ((cpus < job_ptr->details->ntasks_per_node) ||
-		    ((job_ptr->details->cpus_per_task > 1) &&
-		     (cpus < job_ptr->details->cpus_per_task)))
-			cpus = 0;
-		/* FIXME: We need to recheck min_cores, gres, etc. here */
-	} else {
-		/* memory is per node */
-		if (req_mem > avail_mem)
-			cpus = 0;
-	}
 	if (cpus == 0)
 		bit_nclear(core_map, core_start_bit, core_end_bit);
 
@@ -1316,6 +1330,7 @@ static int _eval_nodes_topo(struct job_record *job_ptr, bitstr_t *bitmap,
 	int       *switches_cpu_cnt;		/* total CPUs on switch */
 	int       *switches_node_cnt;		/* total nodes on switch */
 	int       *switches_required;		/* set if has required node */
+	int        leaf_switch_count = 0;   /* Count of leaf node switches used */
 
 	bitstr_t  *avail_nodes_bitmap = NULL;	/* nodes on any switch */
 	bitstr_t  *req_nodes_bitmap   = NULL;
@@ -1327,6 +1342,15 @@ static int _eval_nodes_topo(struct job_record *job_ptr, bitstr_t *bitmap,
 	int best_fit_nodes, best_fit_cpus;
 	int best_fit_location = 0, best_fit_sufficient;
 	bool sufficient;
+	long time_waiting = 0;
+
+	if (job_ptr->req_switch) {
+		time_t     time_now;
+		time_now = time(NULL);
+		if (job_ptr->wait4switch_start == 0)
+			job_ptr->wait4switch_start = time_now;
+		time_waiting = time_now - job_ptr->wait4switch_start;
+	}
 
 	rem_cpus = job_ptr->details->min_cpus;
 	if (req_nodes > min_nodes)
@@ -1559,6 +1583,7 @@ static int _eval_nodes_topo(struct job_record *job_ptr, bitstr_t *bitmap,
 	/* Select resources from these leafs on a best-fit basis */
 	/* Use required switches first to minimize the total amount */
 	/* of switches */
+	/* compute best-switch nodes available array */
 	while ((max_nodes > 0) && ((rem_nodes > 0) || (rem_cpus > 0))) {
 		int *cpus_array = NULL, array_len;
 		best_fit_cpus = best_fit_nodes = best_fit_sufficient = 0;
@@ -1603,6 +1628,7 @@ static int _eval_nodes_topo(struct job_record *job_ptr, bitstr_t *bitmap,
 					best_fit_nodes = switches_node_cnt[j];
 					best_fit_location = j;
 					best_fit_sufficient = sufficient;
+					leaf_switch_count++;
 				}
 			}
 		}
@@ -1624,7 +1650,28 @@ static int _eval_nodes_topo(struct job_record *job_ptr, bitstr_t *bitmap,
 				cpus_array[j] = _get_cpu_cnt(job_ptr, i, 
 							     cpu_cnt);
 		}
-		
+
+		if (job_ptr->req_switch > 0) {
+			if (time_waiting > job_ptr->wait4switch) {
+				job_ptr->best_switch = true;
+				debug3("Job=%u Waited %ld sec for switches use=%d",
+					job_ptr->job_id, time_waiting,
+					leaf_switch_count);
+			} else if (leaf_switch_count>job_ptr->req_switch) {
+				/* Allocation is for more than requested number
+				 * of switches */
+				job_ptr->best_switch = false;
+				debug3("Job=%u waited %ld sec for switches=%u "
+					"found=%d wait %u",
+					job_ptr->job_id, time_waiting,
+					job_ptr->req_switch,
+					leaf_switch_count,
+					job_ptr->wait4switch);
+			} else {
+				job_ptr->best_switch = true;
+			}
+		}
+
 		/* accumulate resources from this leaf on a best-fit basis */
 		while ((max_nodes > 0) && ((rem_nodes > 0) || (rem_cpus > 0))) {
 			
@@ -1987,6 +2034,12 @@ extern int cr_job_test(struct job_record *job_ptr, bitstr_t *bitmap,
 		if (select_debug_flags & DEBUG_FLAG_CPU_BIND)
 			info("cons_res: cr_job_test: test 0 pass: test_only");
 		return SLURM_SUCCESS;
+	} else if (!job_ptr->best_switch) {
+		FREE_NULL_BITMAP(orig_map);
+		FREE_NULL_BITMAP(free_cores);
+		FREE_NULL_BITMAP(avail_cores);
+		xfree(cpu_count);
+		return SLURM_ERROR;
 	}
 	if (cr_type == CR_MEMORY) {
 		/* CR_MEMORY does not care about existing CPU allocations,
diff --git a/src/plugins/select/cons_res/job_test.h b/src/plugins/select/cons_res/job_test.h
index a3ba1390c..38a28fe6b 100644
--- a/src/plugins/select/cons_res/job_test.h
+++ b/src/plugins/select/cons_res/job_test.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -43,8 +43,9 @@
 #include <fcntl.h>
 #include <stdio.h>
 #include <stdlib.h>
-#include <slurm/slurm.h>
-#include <slurm/slurm_errno.h>
+
+#include "slurm/slurm.h"
+#include "slurm/slurm_errno.h"
 
 #include "src/common/list.h"
 #include "src/common/log.h"
diff --git a/src/plugins/select/cons_res/select_cons_res.c b/src/plugins/select/cons_res/select_cons_res.c
index 8967c78a0..02413b03d 100644
--- a/src/plugins/select/cons_res/select_cons_res.c
+++ b/src/plugins/select/cons_res/select_cons_res.c
@@ -62,7 +62,7 @@
  *  from select/linear
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -168,7 +168,7 @@ bitstr_t *idle_node_bitmap;
 const char plugin_name[] = "Consumable Resources (CR) Node Selection plugin";
 const char plugin_type[] = "select/cons_res";
 const uint32_t plugin_id      = 101;
-const uint32_t plugin_version = 91;
+const uint32_t plugin_version = 100;
 const uint32_t pstate_version = 7;	/* version control on saved state */
 
 uint16_t cr_type = CR_CPU; /* cr_type is overwritten in init() */
@@ -192,16 +192,24 @@ struct select_nodeinfo {
 	uint16_t alloc_cpus;
 };
 
-extern select_nodeinfo_t *select_p_select_nodeinfo_alloc(uint32_t size);
+extern select_nodeinfo_t *select_p_select_nodeinfo_alloc(void);
 extern int select_p_select_nodeinfo_free(select_nodeinfo_t *nodeinfo);
 
 /* Procedure Declarations */
+static int _add_job_to_res(struct job_record *job_ptr, int action);
+static int _job_expand(struct job_record *from_job_ptr,
+		       struct job_record *to_job_ptr);
 static int _rm_job_from_one_node(struct job_record *job_ptr,
 				 struct node_record *node_ptr);
+static int _rm_job_from_res(struct part_res_record *part_record_ptr,
+			    struct node_use_record *node_usage,
+			    struct job_record *job_ptr, int action);
 static int _run_now(struct job_record *job_ptr, bitstr_t *bitmap,
 		    uint32_t min_nodes, uint32_t max_nodes,
 		    uint32_t req_nodes, uint16_t job_node_req,
 		    List preemptee_candidates, List *preemptee_job_list);
+static int _sort_usable_nodes_dec(struct job_record *job_a,
+				  struct job_record *job_b);
 static int _test_only(struct job_record *job_ptr, bitstr_t *bitmap,
 		      uint32_t min_nodes, uint32_t max_nodes,
  		      uint32_t req_nodes, uint16_t job_node_req);
@@ -597,7 +605,8 @@ extern void cr_sort_part_rows(struct part_res_record *p_ptr)
  *
  * IN/OUT: p_ptr   - the partition that has jobs to be optimized
  */
-static void _build_row_bitmaps(struct part_res_record *p_ptr)
+static void _build_row_bitmaps(struct part_res_record *p_ptr,
+			       struct job_record *job_ptr)
 {
 	uint32_t i, j, num_jobs, size;
 	int x, *jstart;
@@ -614,21 +623,13 @@ static void _build_row_bitmaps(struct part_res_record *p_ptr)
 				size = bit_size(this_row->row_bitmap);
 				bit_nclear(this_row->row_bitmap, 0, size-1);
 			}
-			return;
-		}
-
-		/* rebuild the row bitmap */
-		num_jobs = this_row->num_jobs;
-		tmpjobs = xmalloc(num_jobs * sizeof(struct job_resources *));
-		for (i = 0; i < num_jobs; i++) {
-			tmpjobs[i] = this_row->job_list[i];
-			this_row->job_list[i] = NULL;
-		}
-		this_row->num_jobs = 0; /* this resets the row_bitmap */
-		for (i = 0; i < num_jobs; i++) {
-			_add_job_to_row(tmpjobs[i], this_row);
+		} else {
+			xassert(job_ptr);
+			xassert(job_ptr->job_resrcs);
+			remove_job_from_cores(job_ptr->job_resrcs,
+					      &this_row->row_bitmap,
+					      cr_node_num_cores);
 		}
-		xfree(tmpjobs);
 		return;
 	}
 
@@ -924,8 +925,228 @@ static int _add_job_to_res(struct job_record *job_ptr, int action)
 	return SLURM_SUCCESS;
 }
 
+static job_resources_t *_create_job_resources(int node_cnt)
+{
+	job_resources_t *job_resrcs_ptr;
+
+	job_resrcs_ptr = create_job_resources();
+	job_resrcs_ptr->cpu_array_reps = xmalloc(sizeof(uint32_t) * node_cnt);
+	job_resrcs_ptr->cpu_array_value = xmalloc(sizeof(uint16_t) * node_cnt);
+	job_resrcs_ptr->cpus = xmalloc(sizeof(uint16_t) * node_cnt);
+	job_resrcs_ptr->cpus_used = xmalloc(sizeof(uint16_t) * node_cnt);
+	job_resrcs_ptr->memory_allocated = xmalloc(sizeof(uint32_t) * node_cnt);
+	job_resrcs_ptr->memory_used = xmalloc(sizeof(uint32_t) * node_cnt);
+	job_resrcs_ptr->nhosts = node_cnt;
+	return job_resrcs_ptr;
+}
+
+/* Move all resources from one job to another */
+static int _job_expand(struct job_record *from_job_ptr,
+		       struct job_record *to_job_ptr)
+{
+	job_resources_t *from_job_resrcs_ptr, *to_job_resrcs_ptr,
+		        *new_job_resrcs_ptr;
+	struct node_record *node_ptr;
+	int first_bit, last_bit, i, node_cnt;
+	bool from_node_used, to_node_used;
+	int from_node_offset, to_node_offset, new_node_offset;
+	bitstr_t *tmp_bitmap, *tmp_bitmap2;
+
+	xassert(from_job_ptr);
+	xassert(to_job_ptr);
+	if (from_job_ptr->job_id == to_job_ptr->job_id) {
+		error("select/cons_res: attempt to merge job %u with self",
+		      from_job_ptr->job_id);
+		return SLURM_ERROR;
+	}
+
+	from_job_resrcs_ptr = from_job_ptr->job_resrcs;
+	if ((from_job_resrcs_ptr == NULL) ||
+	    (from_job_resrcs_ptr->cpus == NULL) ||
+	    (from_job_resrcs_ptr->core_bitmap == NULL) ||
+	    (from_job_resrcs_ptr->node_bitmap == NULL)) {
+		error("select/cons_res: job %u lacks a job_resources struct",
+		      from_job_ptr->job_id);
+		return SLURM_ERROR;
+	}
+	to_job_resrcs_ptr = to_job_ptr->job_resrcs;
+	if ((to_job_resrcs_ptr == NULL) ||
+	    (to_job_resrcs_ptr->cpus == NULL) ||
+	    (to_job_resrcs_ptr->core_bitmap == NULL) ||
+	    (to_job_resrcs_ptr->node_bitmap == NULL)) {
+		error("select/cons_res: job %u lacks a job_resources struct",
+		      to_job_ptr->job_id);
+		return SLURM_ERROR;
+	}
+
+	(void) _rm_job_from_res(select_part_record, select_node_usage,
+				from_job_ptr, 0);
+	(void) _rm_job_from_res(select_part_record, select_node_usage,
+				to_job_ptr, 0);
 
-/* deallocate resources to the given job
+	if (to_job_resrcs_ptr->core_bitmap_used) {
+		i = bit_size(to_job_resrcs_ptr->core_bitmap_used);
+		bit_nclear(to_job_resrcs_ptr->core_bitmap_used, 0, i-1);
+	}
+
+	tmp_bitmap = bit_copy(to_job_resrcs_ptr->node_bitmap);
+	if (!tmp_bitmap)
+		fatal("bit_copy: malloc failure");
+	bit_or(tmp_bitmap, from_job_resrcs_ptr->node_bitmap);
+	tmp_bitmap2 = bit_copy(to_job_ptr->node_bitmap);
+	if (!tmp_bitmap)
+		fatal("bit_copy: malloc failure");
+	bit_or(tmp_bitmap2, from_job_ptr->node_bitmap);
+	bit_and(tmp_bitmap, tmp_bitmap2);
+	bit_free(tmp_bitmap2);
+	node_cnt = bit_set_count(tmp_bitmap);
+	new_job_resrcs_ptr = _create_job_resources(node_cnt);
+	new_job_resrcs_ptr->ncpus = from_job_resrcs_ptr->ncpus +
+				    to_job_resrcs_ptr->ncpus;
+	new_job_resrcs_ptr->node_req = to_job_resrcs_ptr->node_req;
+	new_job_resrcs_ptr->node_bitmap = tmp_bitmap;
+	new_job_resrcs_ptr->nodes = bitmap2node_name(new_job_resrcs_ptr->
+						     node_bitmap);
+	build_job_resources(new_job_resrcs_ptr, node_record_table_ptr,
+			    select_fast_schedule);
+	xfree(to_job_ptr->node_addr);
+	to_job_ptr->node_addr = xmalloc(sizeof(slurm_addr_t) * node_cnt);
+	to_job_ptr->total_cpus = 0;
+
+	first_bit = MIN(bit_ffs(from_job_resrcs_ptr->node_bitmap),
+			bit_ffs(to_job_resrcs_ptr->node_bitmap));
+	last_bit =  MAX(bit_fls(from_job_resrcs_ptr->node_bitmap),
+			bit_fls(to_job_resrcs_ptr->node_bitmap));
+	from_node_offset = to_node_offset = new_node_offset = -1;
+	for (i = first_bit; i <= last_bit; i++) {
+		from_node_used = to_node_used = false;
+		if (bit_test(from_job_resrcs_ptr->node_bitmap, i)) {
+			from_node_used = bit_test(from_job_ptr->node_bitmap,i);
+			from_node_offset++;
+		}
+		if (bit_test(to_job_resrcs_ptr->node_bitmap, i)) {
+			to_node_used = bit_test(to_job_ptr->node_bitmap, i);
+			to_node_offset++;
+		}
+		if (!from_node_used && !to_node_used)
+			continue;
+		new_node_offset++;
+		node_ptr = node_record_table_ptr + i;
+		memcpy(&to_job_ptr->node_addr[new_node_offset],
+                       &node_ptr->slurm_addr, sizeof(slurm_addr_t));
+		if (from_node_used) {
+			/* Merge alloc info from both "from" and "to" jobs,
+			 * leave "from" job with no allocated CPUs or memory */
+			new_job_resrcs_ptr->cpus[new_node_offset] =
+				from_job_resrcs_ptr->cpus[from_node_offset];
+			from_job_resrcs_ptr->cpus[from_node_offset] = 0;
+			/* new_job_resrcs_ptr->cpus_used[new_node_offset] =
+				from_job_resrcs_ptr->
+				cpus_used[from_node_offset]; Should be 0 */
+			new_job_resrcs_ptr->memory_allocated[new_node_offset] =
+				from_job_resrcs_ptr->
+				memory_allocated[from_node_offset];
+			/* new_job_resrcs_ptr->memory_used[new_node_offset] =
+				from_job_resrcs_ptr->
+				memory_used[from_node_offset]; Should be 0 */
+			job_resources_bits_copy(new_job_resrcs_ptr,
+						new_node_offset,
+						from_job_resrcs_ptr,
+						from_node_offset);
+		}
+		if (to_node_used) {
+			/* Merge alloc info from both "from" and "to" jobs */
+
+			/* DO NOT double count the allocated CPUs in partition
+			 * with Shared nodes */
+			new_job_resrcs_ptr->cpus[new_node_offset] +=
+				to_job_resrcs_ptr->cpus[to_node_offset];
+			new_job_resrcs_ptr->cpus_used[new_node_offset] +=
+				to_job_resrcs_ptr->cpus_used[to_node_offset];
+			new_job_resrcs_ptr->memory_allocated[new_node_offset]+=
+				to_job_resrcs_ptr->
+				memory_allocated[to_node_offset];
+			new_job_resrcs_ptr->memory_used[new_node_offset] +=
+				to_job_resrcs_ptr->memory_used[to_node_offset];
+			job_resources_bits_copy(new_job_resrcs_ptr,
+						new_node_offset,
+						to_job_resrcs_ptr,
+						to_node_offset);
+			if (from_node_used) {
+				/* Adust cpu count for shared CPUs */
+				int from_core_cnt, to_core_cnt, new_core_cnt;
+				from_core_cnt = count_job_resources_node(
+							from_job_resrcs_ptr,
+							from_node_offset);
+				to_core_cnt = count_job_resources_node(
+							to_job_resrcs_ptr,
+							to_node_offset);
+				new_core_cnt = count_job_resources_node(
+							new_job_resrcs_ptr,
+							new_node_offset);
+				if ((from_core_cnt + to_core_cnt) !=
+				    new_core_cnt) {
+					new_job_resrcs_ptr->
+						cpus[new_node_offset] *=
+						new_core_cnt;
+					new_job_resrcs_ptr->
+						cpus[new_node_offset] /=
+						(from_core_cnt + to_core_cnt);
+				}
+			}
+		}
+
+		to_job_ptr->total_cpus += new_job_resrcs_ptr->
+					  cpus[new_node_offset];
+	}
+	build_job_resources_cpu_array(new_job_resrcs_ptr);
+	gres_plugin_job_merge(from_job_ptr->gres_list,
+			      from_job_resrcs_ptr->node_bitmap,
+			      to_job_ptr->gres_list,
+			      to_job_resrcs_ptr->node_bitmap);
+
+	/* Now swap data: "new" -> "to" and clear "from" */
+	free_job_resources(&to_job_ptr->job_resrcs);
+	to_job_ptr->job_resrcs = new_job_resrcs_ptr;
+
+	to_job_ptr->cpu_cnt = to_job_ptr->total_cpus;
+	if (to_job_ptr->details) {
+		to_job_ptr->details->min_cpus = to_job_ptr->total_cpus;
+		to_job_ptr->details->max_cpus = to_job_ptr->total_cpus;
+	}
+	from_job_ptr->total_cpus   = 0;
+	from_job_resrcs_ptr->ncpus = 0;
+	if (from_job_ptr->details) {
+		from_job_ptr->details->min_cpus = 0;
+		from_job_ptr->details->max_cpus = 0;
+	}
+
+	from_job_ptr->total_nodes   = 0;
+	from_job_resrcs_ptr->nhosts = 0;
+	from_job_ptr->node_cnt      = 0;
+	if (from_job_ptr->details)
+		from_job_ptr->details->min_nodes = 0;
+	to_job_ptr->total_nodes     = new_job_resrcs_ptr->nhosts;
+	to_job_ptr->node_cnt        = new_job_resrcs_ptr->nhosts;
+
+	bit_or(to_job_ptr->node_bitmap, from_job_ptr->node_bitmap);
+	bit_nclear(from_job_ptr->node_bitmap, 0, (node_record_count - 1));
+	bit_nclear(from_job_resrcs_ptr->node_bitmap, 0,
+		   (node_record_count - 1));
+
+	xfree(to_job_ptr->nodes);
+	to_job_ptr->nodes = xstrdup(new_job_resrcs_ptr->nodes);
+	xfree(from_job_ptr->nodes);
+	from_job_ptr->nodes = xstrdup("");
+	xfree(from_job_resrcs_ptr->nodes);
+	from_job_resrcs_ptr->nodes = xstrdup("");
+
+	(void) _add_job_to_res(to_job_ptr, 0);
+
+	return SLURM_SUCCESS;
+}
+
+/* deallocate resources previously allocated to the given job
  * - subtract 'struct job_resources' resources from 'struct part_res_record'
  * - subtract job's memory requirements from 'struct node_res_record'
  *
@@ -960,7 +1181,10 @@ static int _rm_job_from_res(struct part_res_record *part_record_ptr,
 		_dump_job_res(job);
 
 	first_bit = bit_ffs(job->node_bitmap);
-	last_bit =  bit_fls(job->node_bitmap);
+	if (first_bit == -1)
+		last_bit = -2;
+	else
+		last_bit =  bit_fls(job->node_bitmap);
 	for (i = first_bit, n = -1; i <= last_bit; i++) {
 		if (!bit_test(job->node_bitmap, i))
 			continue;
@@ -984,7 +1208,7 @@ static int _rm_job_from_res(struct part_res_record *part_record_ptr,
 			if (node_usage[i].alloc_memory <
 			    job->memory_allocated[n]) {
 				error("cons_res: node %s memory is "
-				      "underallocated (%u-%u) for job %u",
+				      "under-allocated (%u-%u) for job %u",
 				      node_ptr->name,
 				      node_usage[i].alloc_memory,
 				      job->memory_allocated[n],
@@ -1048,7 +1272,7 @@ static int _rm_job_from_res(struct part_res_record *part_record_ptr,
 
 		if (n) {
 			/* job was found and removed, so refresh the bitmaps */
-			_build_row_bitmaps(p_ptr);
+			_build_row_bitmaps(p_ptr, job_ptr);
 
 			/* Adjust the node_state of all nodes affected by
 			 * the removal of this job. If all cores are now
@@ -1089,7 +1313,8 @@ static int _rm_job_from_one_node(struct job_record *job_ptr,
 	List gres_list;
 
 	if (!job || !job->core_bitmap) {
-		error("job %u has no select data", job_ptr->job_id);
+		error("select/cons_res: job %u has no select data",
+		      job_ptr->job_id);
 		return SLURM_ERROR;
 	}
 
@@ -1184,7 +1409,7 @@ static int _rm_job_from_one_node(struct job_record *job_ptr,
 
 
 	/* job was found and removed from core-bitmap, so refresh CR bitmaps */
-	_build_row_bitmaps(p_ptr);
+	_build_row_bitmaps(p_ptr, job_ptr);
 
 	/* Adjust the node_state of the node removed from this job.
 	 * If all cores are now available, set node_state = NODE_CR_AVAILABLE */
@@ -1272,6 +1497,21 @@ static int _test_only(struct job_record *job_ptr, bitstr_t *bitmap,
 	return rc;
 }
 
+/*
+ * Sort the usable_node element to put jobs in the correct
+ * preemption order.
+ */
+static int _sort_usable_nodes_dec(struct job_record *job_a,
+				  struct job_record *job_b)
+{
+	if (job_a->details->usable_nodes > job_b->details->usable_nodes)
+		return -1;
+	else if (job_a->details->usable_nodes < job_b->details->usable_nodes)
+		return 1;
+
+	return 0;
+}
+
 /* Allocate resources for a job now, if possible */
 static int _run_now(struct job_record *job_ptr, bitstr_t *bitmap,
 		    uint32_t min_nodes, uint32_t max_nodes,
@@ -1279,15 +1519,17 @@ static int _run_now(struct job_record *job_ptr, bitstr_t *bitmap,
 		    List preemptee_candidates, List *preemptee_job_list)
 {
 	int rc;
-	bitstr_t *orig_map;
+	bitstr_t *orig_map = NULL, *save_bitmap;
 	struct job_record *tmp_job_ptr;
 	ListIterator job_iterator, preemptee_iterator;
 	struct part_res_record *future_part;
 	struct node_use_record *future_usage;
 	bool remove_some_jobs = false;
+	uint16_t pass_count = 0;
 	uint16_t mode;
 
-	orig_map = bit_copy(bitmap);
+	save_bitmap = bit_copy(bitmap);
+top:	orig_map = bit_copy(save_bitmap);
 	if (!orig_map)
 		fatal("bit_copy: malloc failure");
 
@@ -1301,16 +1543,18 @@ static int _run_now(struct job_record *job_ptr, bitstr_t *bitmap,
 		future_part = _dup_part_data(select_part_record);
 		if (future_part == NULL) {
 			FREE_NULL_BITMAP(orig_map);
+			FREE_NULL_BITMAP(save_bitmap);
 			return SLURM_ERROR;
 		}
 		future_usage = _dup_node_usage(select_node_usage);
 		if (future_usage == NULL) {
 			_destroy_part_data(future_part);
 			FREE_NULL_BITMAP(orig_map);
+			FREE_NULL_BITMAP(save_bitmap);
 			return SLURM_ERROR;
 		}
 
-		job_iterator = list_iterator_create(job_list);
+		job_iterator = list_iterator_create(preemptee_candidates);
 		if (job_iterator == NULL)
 			fatal ("memory allocation failure");
 		while ((tmp_job_ptr = (struct job_record *)
@@ -1323,20 +1567,37 @@ static int _run_now(struct job_record *job_ptr, bitstr_t *bitmap,
 			    (mode != PREEMPT_MODE_CHECKPOINT) &&
 			    (mode != PREEMPT_MODE_CANCEL))
 				continue;	/* can't remove job */
-			if (_is_preemptable(tmp_job_ptr,
-					    preemptee_candidates)) {
-				/* Remove preemptable job now */
-				_rm_job_from_res(future_part, future_usage,
-						 tmp_job_ptr, 0);
-				bit_or(bitmap, orig_map);
-				rc = cr_job_test(job_ptr, bitmap, min_nodes,
-						 max_nodes, req_nodes,
-						 SELECT_MODE_WILL_RUN,
-						 cr_type, job_node_req,
-						 select_node_cnt,
-						 future_part, future_usage);
-				if (rc == SLURM_SUCCESS)
+			/* Remove preemptable job now */
+			_rm_job_from_res(future_part, future_usage,
+					 tmp_job_ptr, 0);
+			bit_or(bitmap, orig_map);
+			rc = cr_job_test(job_ptr, bitmap, min_nodes,
+					 max_nodes, req_nodes,
+					 SELECT_MODE_WILL_RUN,
+					 cr_type, job_node_req,
+					 select_node_cnt,
+					 future_part, future_usage);
+			tmp_job_ptr->details->usable_nodes =
+				 bit_overlap(bitmap, tmp_job_ptr->node_bitmap);
+			/*
+			 * If successful, set the last job's usable count to a
+			 * large value so that it will be first after sorting.
+			 * Note: usable_count is only used for sorting purposes
+			 */
+			if (rc == SLURM_SUCCESS) {
+				if (pass_count++ ||
+				    (list_count(preemptee_candidates) == 1))
 					break;
+				tmp_job_ptr->details->usable_nodes = 9999;
+				while ((tmp_job_ptr = (struct job_record *)
+					list_next(job_iterator))) {
+					tmp_job_ptr->details->usable_nodes = 0;
+				}
+				list_sort(preemptee_candidates,
+					  (ListCmpF)_sort_usable_nodes_dec);
+				FREE_NULL_BITMAP(orig_map);
+				list_iterator_destroy(job_iterator);
+				goto top;
 			}
 		}
 		list_iterator_destroy(job_iterator);
@@ -1364,6 +1625,8 @@ static int _run_now(struct job_record *job_ptr, bitstr_t *bitmap,
 				if (bit_overlap(bitmap,
 						tmp_job_ptr->node_bitmap) == 0)
 					continue;
+				if (tmp_job_ptr->details->usable_nodes == 0)
+					continue;
 				list_append(*preemptee_job_list,
 					    tmp_job_ptr);
 				remove_some_jobs = true;
@@ -1379,6 +1642,7 @@ static int _run_now(struct job_record *job_ptr, bitstr_t *bitmap,
 		_destroy_node_data(future_usage, NULL);
 	}
 	FREE_NULL_BITMAP(orig_map);
+	FREE_NULL_BITMAP(save_bitmap);
 
 	return rc;
 }
@@ -1535,97 +1799,6 @@ static int _will_run_test(struct job_record *job_ptr, bitstr_t *bitmap,
 	return rc;
 }
 
-/* Helper function for _synchronize_bitmap().  Check
- * if the given node has at least one available CPU */
-static bool _is_node_avail(struct part_res_record *p_ptr, uint32_t node_i)
-{
-	uint32_t i, r, cpu_begin, cpu_end;
-
-	cpu_begin = cr_get_coremap_offset(node_i);
-	cpu_end   = cr_get_coremap_offset(node_i+1);
-
-	if (select_node_usage[node_i].node_state >= NODE_CR_RESERVED)
-		return false;
-
-	if (select_node_usage[node_i].node_state >= NODE_CR_ONE_ROW) {
-		/* An existing job has requested that it's CPUs
-		 * NOT be shared, but any other CPUs on the same
-		 * node can be used by other jobs with the same
-		 * CPU restriction.
-		 * Check whether or not there are free CPUs on this
-		 * node in the given partition.
-		 */
-		if (!p_ptr->row || !p_ptr->row[0].row_bitmap)
-			return true;
-		for (i = cpu_begin; i < cpu_end; i++) {
-			if (!bit_test(p_ptr->row[0].row_bitmap, i))
-				return true;
-		}
-	} else {
-		/* check the core_bitmap in all rows */
-		if (!p_ptr->row)
-			return true;
-		for (r = 0; r < p_ptr->num_rows; r++) {
-			if (!p_ptr->row[r].row_bitmap)
-				return true;
-			for (i = cpu_begin; i < cpu_end; i++) {
-				if (!bit_test(p_ptr->row[r].row_bitmap, i))
-					return true;
-			}
-		}
-	}
-	return false;
-}
-
-
-/* Worker function for select_p_get_info_from_plugin() */
-static int _synchronize_bitmaps(struct job_record *job_ptr,
-				bitstr_t ** partially_idle_bitmap)
-{
-	int size, i;
-	struct part_res_record *p_ptr;
-	size = bit_size(avail_node_bitmap);
-	bitstr_t *bitmap = bit_alloc(size);
-
-	if (bitmap == NULL)
-		return SLURM_ERROR;
-
-	debug3("cons_res: synch_bm: avail %d of %d set, idle %d of %d set",
-	       bit_set_count(avail_node_bitmap), size,
-	       bit_set_count(idle_node_bitmap), size);
-
-	if (!job_ptr)
-		fatal("cons_res: error: don't know what job I'm sync'ing");
-
-	for (p_ptr = select_part_record; p_ptr; p_ptr = p_ptr->next) {
-		if (p_ptr->part_ptr == job_ptr->part_ptr)
-			break;
-	}
-
-	for (i = 0; i < select_node_cnt; i++) {
-		if (!bit_test(avail_node_bitmap, i))
-			continue;
-
-		if (bit_test(idle_node_bitmap, i)) {
-			bit_set(bitmap, i);
-			continue;
-		}
-
-		if (!p_ptr || _is_node_avail(p_ptr, i))
-			bit_set(bitmap, i);
-	}
-	if (p_ptr) {
-		debug3("cons_res: found %d partially idle nodes in part %s",
-		       bit_set_count(bitmap), p_ptr->part_ptr->name);
-	} else {
-		debug3("cons_res: found %d partially idle nodes",
-		       bit_set_count(bitmap));
-	}
-
-	*partially_idle_bitmap = bitmap;
-	return SLURM_SUCCESS;
-}
-
 /*
  * init() is called when the plugin is loaded, before any other functions
  * are called.  Put global initialization here.
@@ -1687,6 +1860,12 @@ extern int select_p_job_init(List job_list)
 	return SLURM_SUCCESS;
 }
 
+/* This plugin does not generate a node ranking. */
+extern bool select_p_node_ranking(struct node_record *node_ptr, int node_cnt)
+{
+	return false;
+}
+
 /* This is Part 1 of a 4-part procedure which can be found in
  * src/slurmctld/read_config.c. The whole story goes like this:
  *
@@ -1704,6 +1883,10 @@ extern int select_p_node_init(struct node_record *node_ptr, int node_cnt)
 	int i, tot_core;
 
 	info("cons_res: select_p_node_init");
+	if ((cr_type & (CR_CPU | CR_SOCKET | CR_CORE)) == 0) {
+		fatal("Invalid SelectTypeParameter: %s",
+		      sched_param_type_string(cr_type));
+	}
 	if (node_ptr == NULL) {
 		error("select_p_node_init: node_ptr == NULL");
 		return SLURM_ERROR;
@@ -1896,6 +2079,30 @@ extern int select_p_job_resized(struct job_record *job_ptr,
 	return SLURM_SUCCESS;
 }
 
+extern bool select_p_job_expand_allow(void)
+{
+	return true;
+}
+
+extern int select_p_job_expand(struct job_record *from_job_ptr,
+			       struct job_record *to_job_ptr)
+{
+	xassert(from_job_ptr);
+	xassert(from_job_ptr->magic == JOB_MAGIC);
+	xassert(to_job_ptr);
+	xassert(to_job_ptr->magic == JOB_MAGIC);
+
+	return _job_expand(from_job_ptr, to_job_ptr);
+}
+
+extern int select_p_job_signal(struct job_record *job_ptr, int signal)
+{
+	xassert(job_ptr);
+	xassert(job_ptr->magic == JOB_MAGIC);
+
+	return SLURM_SUCCESS;
+}
+
 extern int select_p_job_fini(struct job_record *job_ptr)
 {
 	xassert(job_ptr);
@@ -1909,23 +2116,41 @@ extern int select_p_job_fini(struct job_record *job_ptr)
 /* NOTE: This function is not called with gang scheduling because it
  * needs to track how many jobs are running or suspended on each node.
  * This sum is compared with the partition's Shared parameter */
-extern int select_p_job_suspend(struct job_record *job_ptr)
+extern int select_p_job_suspend(struct job_record *job_ptr, bool indf_susp)
 {
 	xassert(job_ptr);
 
+	if (!indf_susp)
+		return SLURM_SUCCESS;
+
 	return _rm_job_from_res(select_part_record, select_node_usage,
 				job_ptr, 2);
 }
 
 /* See NOTE with select_p_job_suspend above */
-extern int select_p_job_resume(struct job_record *job_ptr)
+extern int select_p_job_resume(struct job_record *job_ptr, bool indf_susp)
 {
 	xassert(job_ptr);
 
+	if (!indf_susp)
+		return SLURM_SUCCESS;
+
 	return _add_job_to_res(job_ptr, 2);
 }
 
 
+extern bitstr_t *select_p_step_pick_nodes(struct job_record *job_ptr,
+					  select_jobinfo_t *jobinfo,
+					  uint32_t node_count)
+{
+	return NULL;
+}
+
+extern int select_p_step_finish(struct step_record *step_ptr)
+{
+	return SLURM_SUCCESS;
+}
+
 extern int select_p_pack_select_info(time_t last_query_time,
 				     uint16_t show_flags, Buf *buffer_ptr,
 				     uint16_t protocol_version)
@@ -1949,7 +2174,7 @@ extern int select_p_select_nodeinfo_unpack(select_nodeinfo_t **nodeinfo,
 {
 	select_nodeinfo_t *nodeinfo_ptr = NULL;
 
-	nodeinfo_ptr = select_p_select_nodeinfo_alloc(NO_VAL);
+	nodeinfo_ptr = select_p_select_nodeinfo_alloc();
 	*nodeinfo = nodeinfo_ptr;
 
 	safe_unpack16(&nodeinfo_ptr->alloc_cpus, buffer);
@@ -1964,7 +2189,7 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
-extern select_nodeinfo_t *select_p_select_nodeinfo_alloc(uint32_t size)
+extern select_nodeinfo_t *select_p_select_nodeinfo_alloc(void)
 {
 	select_nodeinfo_t *nodeinfo = xmalloc(sizeof(struct select_nodeinfo));
 
@@ -2193,23 +2418,10 @@ extern int select_p_get_info_from_plugin(enum select_plugindata_info info,
 					 void *data)
 {
 	int rc = SLURM_SUCCESS;
-	bitstr_t **bitmap = (bitstr_t **) data;
 	uint32_t *tmp_32 = (uint32_t *) data;
-	bitstr_t *tmp_bitmap = NULL;
 	List *tmp_list = (List *) data;
 
 	switch (info) {
-	case SELECT_BITMAP:
-		rc = _synchronize_bitmaps(job_ptr, &tmp_bitmap);
-		if (rc != SLURM_SUCCESS) {
-			FREE_NULL_BITMAP(tmp_bitmap);
-			return rc;
-		}
-		*bitmap = tmp_bitmap;	/* Ownership transfer,
-					 * Remember to free bitmap
-					 * using FREE_NULL_BITMAP(bitmap);*/
-		tmp_bitmap = 0;
-		break;
 	case SELECT_CR_PLUGIN:
 		*tmp_32 = 1;
 		break;
@@ -2243,7 +2455,7 @@ extern int select_p_update_node_config (int index)
 	return SLURM_SUCCESS;
 }
 
-extern int select_p_update_node_state (int index, uint16_t state)
+extern int select_p_update_node_state(struct node_record *node_ptr)
 {
 	return SLURM_SUCCESS;
 }
@@ -2288,3 +2500,166 @@ extern int select_p_reconfigure(void)
 
 	return SLURM_SUCCESS;
 }
+
+/*
+ * select_p_resv_test - Identify the nodes which "best" satisfy a reservation
+ *	request. "best" is defined as either single set of consecutive nodes
+ *	satisfying the request and leaving the minimum number of unused nodes
+ *	OR the fewest number of consecutive node sets
+ * IN avail_bitmap - nodes available for the reservation
+ * IN node_cnt - count of required nodes
+ * RET - nodes selected for use by the reservation
+ */
+extern bitstr_t * select_p_resv_test(bitstr_t *avail_bitmap, uint32_t node_cnt)
+{
+	bitstr_t **switches_bitmap;		/* nodes on this switch */
+	int       *switches_cpu_cnt;		/* total CPUs on switch */
+	int       *switches_node_cnt;		/* total nodes on switch */
+	int       *switches_required;		/* set if has required node */
+
+	bitstr_t  *avail_nodes_bitmap = NULL;	/* nodes on any switch */
+	int rem_nodes;			/* remaining resources desired */
+	int i, j;
+	int best_fit_inx, first, last;
+	int best_fit_nodes;
+	int best_fit_location = 0, best_fit_sufficient;
+	bool sufficient;
+
+	xassert(avail_bitmap);
+	if (!switch_record_cnt || !switch_record_table)
+		return bit_pick_cnt(avail_bitmap, node_cnt);
+
+	/* Use topology state information */
+	if (bit_set_count(avail_bitmap) < node_cnt)
+		return avail_nodes_bitmap;
+	rem_nodes = node_cnt;
+
+	/* Construct a set of switch array entries,
+	 * use the same indexes as switch_record_table in slurmctld */
+	switches_bitmap   = xmalloc(sizeof(bitstr_t *) * switch_record_cnt);
+	switches_cpu_cnt  = xmalloc(sizeof(int)        * switch_record_cnt);
+	switches_node_cnt = xmalloc(sizeof(int)        * switch_record_cnt);
+	switches_required = xmalloc(sizeof(int)        * switch_record_cnt);
+	for (i=0; i<switch_record_cnt; i++) {
+		switches_bitmap[i] = bit_copy(switch_record_table[i].
+					      node_bitmap);
+		bit_and(switches_bitmap[i], avail_bitmap);
+		switches_node_cnt[i] = bit_set_count(switches_bitmap[i]);
+	}
+
+#if SELECT_DEBUG
+	/* Don't compile this, it slows things down too much */
+	for (i=0; i<switch_record_cnt; i++) {
+		char *node_names = NULL;
+		if (switches_node_cnt[i])
+			node_names = bitmap2node_name(switches_bitmap[i]);
+		debug("switch=%s nodes=%u:%s required:%u speed=%u",
+		      switch_record_table[i].name,
+		      switches_node_cnt[i], node_names,
+		      switches_required[i],
+		      switch_record_table[i].link_speed);
+		xfree(node_names);
+	}
+#endif
+
+	/* Determine lowest level switch satifying request with best fit */
+	best_fit_inx = -1;
+	for (j=0; j<switch_record_cnt; j++) {
+		if (switches_node_cnt[j] < rem_nodes)
+			continue;
+		if ((best_fit_inx == -1) ||
+		    (switch_record_table[j].level <
+		     switch_record_table[best_fit_inx].level) ||
+		    ((switch_record_table[j].level ==
+		      switch_record_table[best_fit_inx].level) &&
+		     (switches_node_cnt[j] < switches_node_cnt[best_fit_inx])))
+			best_fit_inx = j;
+	}
+	if (best_fit_inx == -1) {
+		debug("select_p_resv_test: could not find resources for "
+		      "reservation");
+		goto fini;
+	}
+
+	/* Identify usable leafs (within higher switch having best fit) */
+	for (j=0; j<switch_record_cnt; j++) {
+		if ((switch_record_table[j].level != 0) ||
+		    (!bit_super_set(switches_bitmap[j],
+				    switches_bitmap[best_fit_inx]))) {
+			switches_node_cnt[j] = 0;
+		}
+	}
+
+	/* Select resources from these leafs on a best-fit basis */
+	avail_nodes_bitmap = bit_alloc(node_record_count);
+	while (rem_nodes > 0) {
+		best_fit_nodes = best_fit_sufficient = 0;
+		for (j=0; j<switch_record_cnt; j++) {
+			if (switches_node_cnt[j] == 0)
+				continue;
+			sufficient = (switches_node_cnt[j] >= rem_nodes);
+			/* If first possibility OR */
+			/* first set large enough for request OR */
+			/* tightest fit (less resource waste) OR */
+			/* nothing yet large enough, but this is biggest */
+			if ((best_fit_nodes == 0) ||
+			    (sufficient && (best_fit_sufficient == 0)) ||
+			    (sufficient &&
+			     (switches_node_cnt[j] < best_fit_nodes)) ||
+			    ((sufficient == 0) &&
+			     (switches_node_cnt[j] > best_fit_nodes))) {
+				best_fit_nodes = switches_node_cnt[j];
+				best_fit_location = j;
+				best_fit_sufficient = sufficient;
+			}
+		}
+		if (best_fit_nodes == 0)
+			break;
+		/* Use select nodes from this leaf */
+		first = bit_ffs(switches_bitmap[best_fit_location]);
+		last  = bit_fls(switches_bitmap[best_fit_location]);
+		for (i=first; ((i<=last) && (first>=0)); i++) {
+			if (!bit_test(switches_bitmap[best_fit_location], i))
+				continue;
+
+			bit_clear(switches_bitmap[best_fit_location], i);
+			switches_node_cnt[best_fit_location]--;
+
+			if (bit_test(avail_nodes_bitmap, i)) {
+				/* node on multiple leaf switches
+				 * and already selected */
+				continue;
+			}
+
+			bit_set(avail_nodes_bitmap, i);
+			if (--rem_nodes <= 0)
+				break;
+		}
+		switches_node_cnt[best_fit_location] = 0;
+	}
+	if (rem_nodes > 0)	/* insufficient resources */
+		FREE_NULL_BITMAP(avail_nodes_bitmap);
+
+fini:	for (i=0; i<switch_record_cnt; i++)
+		FREE_NULL_BITMAP(switches_bitmap[i]);
+	xfree(switches_bitmap);
+	xfree(switches_cpu_cnt);
+	xfree(switches_node_cnt);
+	xfree(switches_required);
+
+	return avail_nodes_bitmap;
+}
+
+extern void select_p_ba_init(node_info_msg_t *node_info_ptr, bool sanity_check)
+{
+	return;
+}
+extern void select_p_ba_fini(void)
+{
+	return;
+}
+
+extern int *select_p_ba_get_dims(void)
+{
+	return NULL;
+}
diff --git a/src/plugins/select/cons_res/select_cons_res.h b/src/plugins/select/cons_res/select_cons_res.h
index 19326e5d4..7efd0ac4c 100644
--- a/src/plugins/select/cons_res/select_cons_res.h
+++ b/src/plugins/select/cons_res/select_cons_res.h
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -44,8 +44,9 @@
 #include <fcntl.h>
 #include <stdio.h>
 #include <stdlib.h>
-#include <slurm/slurm.h>
-#include <slurm/slurm_errno.h>
+
+#include "slurm/slurm.h"
+#include "slurm/slurm_errno.h"
 
 #include "src/common/bitstring.h"
 #include "src/common/gres.h"
diff --git a/src/plugins/select/cray/Makefile.am b/src/plugins/select/cray/Makefile.am
index 89731109b..88ecbf9ee 100644
--- a/src/plugins/select/cray/Makefile.am
+++ b/src/plugins/select/cray/Makefile.am
@@ -2,13 +2,39 @@
 
 AUTOMAKE_OPTIONS = foreign
 
+CPPFLAGS = -DCRAY_CONFIG_FILE=\"$(sysconfdir)/cray.conf\"
+
 PLUGIN_FLAGS = -module -avoid-version --export-dynamic
 
-INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common
+INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common -I.
 
 pkglib_LTLIBRARIES = select_cray.la
 
 # Cray node selection plugin.
-select_cray_la_SOURCES = select_cray.c other_select.c other_select.h
+select_cray_la_SOURCES = 	\
+	select_cray.c		\
+	basil_interface.h	\
+	nodespec.c		\
+	other_select.c 		\
+	other_select.h		\
+	parser_common.h		\
+	cray_config.c
 select_cray_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
 
+if HAVE_CRAY
+select_cray_la_SOURCES += basil_interface.c
+select_cray_la_CFLAGS   = $(MYSQL_CFLAGS)
+
+if HAVE_CRAY_EMULATION
+SUBDIRS = libemulate
+select_cray_la_LIBADD   = libemulate/libalps.la
+else
+SUBDIRS = libalps
+select_cray_la_LIBADD   = libalps/libalps.la
+endif
+
+force:
+$(select_cray_la_LIBADD) : force
+	@cd `dirname $@` && $(MAKE) `basename $@`
+
+endif
diff --git a/src/plugins/select/cray/Makefile.in b/src/plugins/select/cray/Makefile.in
index 2e9bfa2e7..46716d11e 100644
--- a/src/plugins/select/cray/Makefile.in
+++ b/src/plugins/select/cray/Makefile.in
@@ -37,6 +37,7 @@ POST_UNINSTALL = :
 build_triplet = @build@
 host_triplet = @host@
 target_triplet = @target@
+@HAVE_CRAY_TRUE@am__append_1 = basil_interface.c
 subdir = src/plugins/select/cray
 DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in
 ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
@@ -63,6 +64,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +75,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -104,12 +107,20 @@ am__base_list = \
   sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
 am__installdirs = "$(DESTDIR)$(pkglibdir)"
 LTLIBRARIES = $(pkglib_LTLIBRARIES)
-select_cray_la_LIBADD =
-am_select_cray_la_OBJECTS = select_cray.lo other_select.lo
+@HAVE_CRAY_EMULATION_FALSE@@HAVE_CRAY_TRUE@select_cray_la_DEPENDENCIES =  \
+@HAVE_CRAY_EMULATION_FALSE@@HAVE_CRAY_TRUE@	libalps/libalps.la
+@HAVE_CRAY_EMULATION_TRUE@@HAVE_CRAY_TRUE@select_cray_la_DEPENDENCIES = libemulate/libalps.la
+am__select_cray_la_SOURCES_DIST = select_cray.c basil_interface.h \
+	nodespec.c other_select.c other_select.h parser_common.h \
+	cray_config.c basil_interface.c
+@HAVE_CRAY_TRUE@am__objects_1 = select_cray_la-basil_interface.lo
+am_select_cray_la_OBJECTS = select_cray_la-select_cray.lo \
+	select_cray_la-nodespec.lo select_cray_la-other_select.lo \
+	select_cray_la-cray_config.lo $(am__objects_1)
 select_cray_la_OBJECTS = $(am_select_cray_la_OBJECTS)
 select_cray_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
-	$(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
-	$(select_cray_la_LDFLAGS) $(LDFLAGS) -o $@
+	$(LIBTOOLFLAGS) --mode=link $(CCLD) $(select_cray_la_CFLAGS) \
+	$(CFLAGS) $(select_cray_la_LDFLAGS) $(LDFLAGS) -o $@
 DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm
 depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp
 am__depfiles_maybe = depfiles
@@ -124,10 +135,48 @@ LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
 	--mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \
 	$(LDFLAGS) -o $@
 SOURCES = $(select_cray_la_SOURCES)
-DIST_SOURCES = $(select_cray_la_SOURCES)
+DIST_SOURCES = $(am__select_cray_la_SOURCES_DIST)
+RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \
+	html-recursive info-recursive install-data-recursive \
+	install-dvi-recursive install-exec-recursive \
+	install-html-recursive install-info-recursive \
+	install-pdf-recursive install-ps-recursive install-recursive \
+	installcheck-recursive installdirs-recursive pdf-recursive \
+	ps-recursive uninstall-recursive
+RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive	\
+  distclean-recursive maintainer-clean-recursive
+AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \
+	$(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \
+	distdir
 ETAGS = etags
 CTAGS = ctags
+DIST_SUBDIRS = libalps libemulate
 DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+am__relativize = \
+  dir0=`pwd`; \
+  sed_first='s,^\([^/]*\)/.*$$,\1,'; \
+  sed_rest='s,^[^/]*/*,,'; \
+  sed_last='s,^.*/\([^/]*\)$$,\1,'; \
+  sed_butlast='s,/*[^/]*$$,,'; \
+  while test -n "$$dir1"; do \
+    first=`echo "$$dir1" | sed -e "$$sed_first"`; \
+    if test "$$first" != "."; then \
+      if test "$$first" = ".."; then \
+        dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \
+        dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \
+      else \
+        first2=`echo "$$dir2" | sed -e "$$sed_first"`; \
+        if test "$$first2" = "$$first"; then \
+          dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \
+        else \
+          dir2="../$$dir2"; \
+        fi; \
+        dir0="$$dir0"/"$$first"; \
+      fi; \
+    fi; \
+    dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \
+  done; \
+  reldir="$$dir2"
 ACLOCAL = @ACLOCAL@
 AMTAR = @AMTAR@
 AR = @AR@
@@ -138,7 +187,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -149,7 +201,7 @@ CCDEPMODE = @CCDEPMODE@
 CFLAGS = @CFLAGS@
 CMD_LDFLAGS = @CMD_LDFLAGS@
 CPP = @CPP@
-CPPFLAGS = @CPPFLAGS@
+CPPFLAGS = -DCRAY_CONFIG_FILE=\"$(sysconfdir)/cray.conf\"
 CXX = @CXX@
 CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
@@ -175,6 +227,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -232,6 +285,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -267,6 +321,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
@@ -321,13 +376,20 @@ top_builddir = @top_builddir@
 top_srcdir = @top_srcdir@
 AUTOMAKE_OPTIONS = foreign
 PLUGIN_FLAGS = -module -avoid-version --export-dynamic
-INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common
+INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common -I.
 pkglib_LTLIBRARIES = select_cray.la
 
 # Cray node selection plugin.
-select_cray_la_SOURCES = select_cray.c other_select.c other_select.h
+select_cray_la_SOURCES = select_cray.c basil_interface.h nodespec.c \
+	other_select.c other_select.h parser_common.h cray_config.c \
+	$(am__append_1)
 select_cray_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
-all: all-am
+@HAVE_CRAY_TRUE@select_cray_la_CFLAGS = $(MYSQL_CFLAGS)
+@HAVE_CRAY_EMULATION_FALSE@@HAVE_CRAY_TRUE@SUBDIRS = libalps
+@HAVE_CRAY_EMULATION_TRUE@@HAVE_CRAY_TRUE@SUBDIRS = libemulate
+@HAVE_CRAY_EMULATION_FALSE@@HAVE_CRAY_TRUE@select_cray_la_LIBADD = libalps/libalps.la
+@HAVE_CRAY_EMULATION_TRUE@@HAVE_CRAY_TRUE@select_cray_la_LIBADD = libemulate/libalps.la
+all: all-recursive
 
 .SUFFIXES:
 .SUFFIXES: .c .lo .o .obj
@@ -401,8 +463,11 @@ mostlyclean-compile:
 distclean-compile:
 	-rm -f *.tab.c
 
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/other_select.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/select_cray.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/select_cray_la-basil_interface.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/select_cray_la-cray_config.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/select_cray_la-nodespec.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/select_cray_la-other_select.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/select_cray_la-select_cray.Plo@am__quote@
 
 .c.o:
 @am__fastdepCC_TRUE@	$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
@@ -425,12 +490,117 @@ distclean-compile:
 @AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
 @am__fastdepCC_FALSE@	$(LTCOMPILE) -c -o $@ $<
 
+select_cray_la-select_cray.lo: select_cray.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(select_cray_la_CFLAGS) $(CFLAGS) -MT select_cray_la-select_cray.lo -MD -MP -MF $(DEPDIR)/select_cray_la-select_cray.Tpo -c -o select_cray_la-select_cray.lo `test -f 'select_cray.c' || echo '$(srcdir)/'`select_cray.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/select_cray_la-select_cray.Tpo $(DEPDIR)/select_cray_la-select_cray.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='select_cray.c' object='select_cray_la-select_cray.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(select_cray_la_CFLAGS) $(CFLAGS) -c -o select_cray_la-select_cray.lo `test -f 'select_cray.c' || echo '$(srcdir)/'`select_cray.c
+
+select_cray_la-nodespec.lo: nodespec.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(select_cray_la_CFLAGS) $(CFLAGS) -MT select_cray_la-nodespec.lo -MD -MP -MF $(DEPDIR)/select_cray_la-nodespec.Tpo -c -o select_cray_la-nodespec.lo `test -f 'nodespec.c' || echo '$(srcdir)/'`nodespec.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/select_cray_la-nodespec.Tpo $(DEPDIR)/select_cray_la-nodespec.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='nodespec.c' object='select_cray_la-nodespec.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(select_cray_la_CFLAGS) $(CFLAGS) -c -o select_cray_la-nodespec.lo `test -f 'nodespec.c' || echo '$(srcdir)/'`nodespec.c
+
+select_cray_la-other_select.lo: other_select.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(select_cray_la_CFLAGS) $(CFLAGS) -MT select_cray_la-other_select.lo -MD -MP -MF $(DEPDIR)/select_cray_la-other_select.Tpo -c -o select_cray_la-other_select.lo `test -f 'other_select.c' || echo '$(srcdir)/'`other_select.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/select_cray_la-other_select.Tpo $(DEPDIR)/select_cray_la-other_select.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='other_select.c' object='select_cray_la-other_select.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(select_cray_la_CFLAGS) $(CFLAGS) -c -o select_cray_la-other_select.lo `test -f 'other_select.c' || echo '$(srcdir)/'`other_select.c
+
+select_cray_la-cray_config.lo: cray_config.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(select_cray_la_CFLAGS) $(CFLAGS) -MT select_cray_la-cray_config.lo -MD -MP -MF $(DEPDIR)/select_cray_la-cray_config.Tpo -c -o select_cray_la-cray_config.lo `test -f 'cray_config.c' || echo '$(srcdir)/'`cray_config.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/select_cray_la-cray_config.Tpo $(DEPDIR)/select_cray_la-cray_config.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='cray_config.c' object='select_cray_la-cray_config.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(select_cray_la_CFLAGS) $(CFLAGS) -c -o select_cray_la-cray_config.lo `test -f 'cray_config.c' || echo '$(srcdir)/'`cray_config.c
+
+select_cray_la-basil_interface.lo: basil_interface.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(select_cray_la_CFLAGS) $(CFLAGS) -MT select_cray_la-basil_interface.lo -MD -MP -MF $(DEPDIR)/select_cray_la-basil_interface.Tpo -c -o select_cray_la-basil_interface.lo `test -f 'basil_interface.c' || echo '$(srcdir)/'`basil_interface.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/select_cray_la-basil_interface.Tpo $(DEPDIR)/select_cray_la-basil_interface.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='basil_interface.c' object='select_cray_la-basil_interface.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(select_cray_la_CFLAGS) $(CFLAGS) -c -o select_cray_la-basil_interface.lo `test -f 'basil_interface.c' || echo '$(srcdir)/'`basil_interface.c
+
 mostlyclean-libtool:
 	-rm -f *.lo
 
 clean-libtool:
 	-rm -rf .libs _libs
 
+# This directory's subdirectories are mostly independent; you can cd
+# into them and run `make' without going through this Makefile.
+# To change the values of `make' variables: instead of editing Makefiles,
+# (1) if the variable is set in `config.status', edit `config.status'
+#     (which will cause the Makefiles to be regenerated when you run `make');
+# (2) otherwise, pass the desired values on the `make' command line.
+$(RECURSIVE_TARGETS):
+	@fail= failcom='exit 1'; \
+	for f in x $$MAKEFLAGS; do \
+	  case $$f in \
+	    *=* | --[!k]*);; \
+	    *k*) failcom='fail=yes';; \
+	  esac; \
+	done; \
+	dot_seen=no; \
+	target=`echo $@ | sed s/-recursive//`; \
+	list='$(SUBDIRS)'; for subdir in $$list; do \
+	  echo "Making $$target in $$subdir"; \
+	  if test "$$subdir" = "."; then \
+	    dot_seen=yes; \
+	    local_target="$$target-am"; \
+	  else \
+	    local_target="$$target"; \
+	  fi; \
+	  ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
+	  || eval $$failcom; \
+	done; \
+	if test "$$dot_seen" = "no"; then \
+	  $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \
+	fi; test -z "$$fail"
+
+$(RECURSIVE_CLEAN_TARGETS):
+	@fail= failcom='exit 1'; \
+	for f in x $$MAKEFLAGS; do \
+	  case $$f in \
+	    *=* | --[!k]*);; \
+	    *k*) failcom='fail=yes';; \
+	  esac; \
+	done; \
+	dot_seen=no; \
+	case "$@" in \
+	  distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \
+	  *) list='$(SUBDIRS)' ;; \
+	esac; \
+	rev=''; for subdir in $$list; do \
+	  if test "$$subdir" = "."; then :; else \
+	    rev="$$subdir $$rev"; \
+	  fi; \
+	done; \
+	rev="$$rev ."; \
+	target=`echo $@ | sed s/-recursive//`; \
+	for subdir in $$rev; do \
+	  echo "Making $$target in $$subdir"; \
+	  if test "$$subdir" = "."; then \
+	    local_target="$$target-am"; \
+	  else \
+	    local_target="$$target"; \
+	  fi; \
+	  ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
+	  || eval $$failcom; \
+	done && test -z "$$fail"
+tags-recursive:
+	list='$(SUBDIRS)'; for subdir in $$list; do \
+	  test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \
+	done
+ctags-recursive:
+	list='$(SUBDIRS)'; for subdir in $$list; do \
+	  test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \
+	done
+
 ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
 	list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
 	unique=`for i in $$list; do \
@@ -441,10 +611,23 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
 	mkid -fID $$unique
 tags: TAGS
 
-TAGS:  $(HEADERS) $(SOURCES)  $(TAGS_DEPENDENCIES) \
+TAGS: tags-recursive $(HEADERS) $(SOURCES)  $(TAGS_DEPENDENCIES) \
 		$(TAGS_FILES) $(LISP)
 	set x; \
 	here=`pwd`; \
+	if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \
+	  include_option=--etags-include; \
+	  empty_fix=.; \
+	else \
+	  include_option=--include; \
+	  empty_fix=; \
+	fi; \
+	list='$(SUBDIRS)'; for subdir in $$list; do \
+	  if test "$$subdir" = .; then :; else \
+	    test ! -f $$subdir/TAGS || \
+	      set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \
+	  fi; \
+	done; \
 	list='$(SOURCES) $(HEADERS)  $(LISP) $(TAGS_FILES)'; \
 	unique=`for i in $$list; do \
 	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
@@ -463,7 +646,7 @@ TAGS:  $(HEADERS) $(SOURCES)  $(TAGS_DEPENDENCIES) \
 	  fi; \
 	fi
 ctags: CTAGS
-CTAGS:  $(HEADERS) $(SOURCES)  $(TAGS_DEPENDENCIES) \
+CTAGS: ctags-recursive $(HEADERS) $(SOURCES)  $(TAGS_DEPENDENCIES) \
 		$(TAGS_FILES) $(LISP)
 	list='$(SOURCES) $(HEADERS)  $(LISP) $(TAGS_FILES)'; \
 	unique=`for i in $$list; do \
@@ -513,22 +696,51 @@ distdir: $(DISTFILES)
 	    || exit 1; \
 	  fi; \
 	done
+	@list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
+	  if test "$$subdir" = .; then :; else \
+	    test -d "$(distdir)/$$subdir" \
+	    || $(MKDIR_P) "$(distdir)/$$subdir" \
+	    || exit 1; \
+	  fi; \
+	done
+	@list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
+	  if test "$$subdir" = .; then :; else \
+	    dir1=$$subdir; dir2="$(distdir)/$$subdir"; \
+	    $(am__relativize); \
+	    new_distdir=$$reldir; \
+	    dir1=$$subdir; dir2="$(top_distdir)"; \
+	    $(am__relativize); \
+	    new_top_distdir=$$reldir; \
+	    echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \
+	    echo "     am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \
+	    ($(am__cd) $$subdir && \
+	      $(MAKE) $(AM_MAKEFLAGS) \
+	        top_distdir="$$new_top_distdir" \
+	        distdir="$$new_distdir" \
+		am__remove_distdir=: \
+		am__skip_length_check=: \
+		am__skip_mode_fix=: \
+	        distdir) \
+	      || exit 1; \
+	  fi; \
+	done
 check-am: all-am
-check: check-am
+check: check-recursive
 all-am: Makefile $(LTLIBRARIES)
-installdirs:
+installdirs: installdirs-recursive
+installdirs-am:
 	for dir in "$(DESTDIR)$(pkglibdir)"; do \
 	  test -z "$$dir" || $(MKDIR_P) "$$dir"; \
 	done
-install: install-am
-install-exec: install-exec-am
-install-data: install-data-am
-uninstall: uninstall-am
+install: install-recursive
+install-exec: install-exec-recursive
+install-data: install-data-recursive
+uninstall: uninstall-recursive
 
 install-am: all-am
 	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
 
-installcheck: installcheck-am
+installcheck: installcheck-recursive
 install-strip:
 	$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
 	  install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
@@ -545,81 +757,83 @@ distclean-generic:
 maintainer-clean-generic:
 	@echo "This command is intended for maintainers to use"
 	@echo "it deletes files that may require special tools to rebuild."
-clean: clean-am
+clean: clean-recursive
 
 clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \
 	mostlyclean-am
 
-distclean: distclean-am
+distclean: distclean-recursive
 	-rm -rf ./$(DEPDIR)
 	-rm -f Makefile
 distclean-am: clean-am distclean-compile distclean-generic \
 	distclean-tags
 
-dvi: dvi-am
+dvi: dvi-recursive
 
 dvi-am:
 
-html: html-am
+html: html-recursive
 
 html-am:
 
-info: info-am
+info: info-recursive
 
 info-am:
 
 install-data-am:
 
-install-dvi: install-dvi-am
+install-dvi: install-dvi-recursive
 
 install-dvi-am:
 
 install-exec-am: install-pkglibLTLIBRARIES
 
-install-html: install-html-am
+install-html: install-html-recursive
 
 install-html-am:
 
-install-info: install-info-am
+install-info: install-info-recursive
 
 install-info-am:
 
 install-man:
 
-install-pdf: install-pdf-am
+install-pdf: install-pdf-recursive
 
 install-pdf-am:
 
-install-ps: install-ps-am
+install-ps: install-ps-recursive
 
 install-ps-am:
 
 installcheck-am:
 
-maintainer-clean: maintainer-clean-am
+maintainer-clean: maintainer-clean-recursive
 	-rm -rf ./$(DEPDIR)
 	-rm -f Makefile
 maintainer-clean-am: distclean-am maintainer-clean-generic
 
-mostlyclean: mostlyclean-am
+mostlyclean: mostlyclean-recursive
 
 mostlyclean-am: mostlyclean-compile mostlyclean-generic \
 	mostlyclean-libtool
 
-pdf: pdf-am
+pdf: pdf-recursive
 
 pdf-am:
 
-ps: ps-am
+ps: ps-recursive
 
 ps-am:
 
 uninstall-am: uninstall-pkglibLTLIBRARIES
 
-.MAKE: install-am install-strip
+.MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \
+	install-am install-strip tags-recursive
 
-.PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \
-	clean-libtool clean-pkglibLTLIBRARIES ctags distclean \
+.PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \
+	all all-am check check-am clean clean-generic clean-libtool \
+	clean-pkglibLTLIBRARIES ctags ctags-recursive distclean \
 	distclean-compile distclean-generic distclean-libtool \
 	distclean-tags distdir dvi dvi-am html html-am info info-am \
 	install install-am install-data install-data-am install-dvi \
@@ -627,11 +841,16 @@ uninstall-am: uninstall-pkglibLTLIBRARIES
 	install-html-am install-info install-info-am install-man \
 	install-pdf install-pdf-am install-pkglibLTLIBRARIES \
 	install-ps install-ps-am install-strip installcheck \
-	installcheck-am installdirs maintainer-clean \
+	installcheck-am installdirs installdirs-am maintainer-clean \
 	maintainer-clean-generic mostlyclean mostlyclean-compile \
 	mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \
-	tags uninstall uninstall-am uninstall-pkglibLTLIBRARIES
+	tags tags-recursive uninstall uninstall-am \
+	uninstall-pkglibLTLIBRARIES
+
 
+@HAVE_CRAY_TRUE@force:
+@HAVE_CRAY_TRUE@$(select_cray_la_LIBADD) : force
+@HAVE_CRAY_TRUE@	@cd `dirname $@` && $(MAKE) `basename $@`
 
 # Tell versions [3.59,3.63) of GNU make to not export all variables.
 # Otherwise a system limit (for SysV at least) may be exceeded.
diff --git a/src/plugins/select/cray/basil_alps.h b/src/plugins/select/cray/basil_alps.h
new file mode 100644
index 000000000..437a2ff74
--- /dev/null
+++ b/src/plugins/select/cray/basil_alps.h
@@ -0,0 +1,630 @@
+/*
+ * Lower-level BASIL/ALPS XML-RPC library functions.
+ *
+ * Copyright (c) 2009-2011 Centro Svizzero di Calcolo Scientifico (CSCS)
+ * Licensed under the GPLv2.
+ */
+#ifndef __BASIL_ALPS_H__
+#define __BASIL_ALPS_H__
+
+#if HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <stdbool.h>
+
+#include <sys/types.h>
+#include <ctype.h>
+#include <string.h>
+
+#include <sys/wait.h>
+#include <signal.h>
+#include <fcntl.h>
+#include <assert.h>
+
+#ifdef HAVE_CRAY
+#  include <expat.h>
+#  include <mysql.h>
+#endif
+
+#include "src/common/log.h"
+#include "src/common/fd.h"
+#include "src/common/xassert.h"
+#include "src/common/xmalloc.h"
+#include "cray_config.h"
+
+/*
+ * Limits
+ */
+#define TAG_DEPTH_MAX		12	/* maximum XML nesting level */
+#define BASIL_STRING_SHORT	16
+#define BASIL_STRING_MEDIUM	32
+#define BASIL_STRING_LONG	64
+#define BASIL_ERROR_BUFFER_SIZE	256
+
+/*
+ * Basil XML tags
+ */
+enum basil_version {
+	BV_1_0 = 0,	/* Basil 1.0: earliest version and fallback */
+	BV_1_1,		/* Basil 1.1 CLE variant (XT/SeaStar)       */
+	BV_1_2,		/* Basil 1.1 CLE 2.x variant (XT/SeaStar)   */
+	BV_3_1,		/* Basil 1.1 CLE 3.x (XE/Gemini support)    */
+	BV_4_0,		/* Basil 1.2 CLE 4.x unconfirmed simulator version  */
+	BV_4_1,		/* Basil 1.2 CLE 4.x unconfirmed simulator version  */
+	BV_MAX
+};
+
+enum basil_method {
+	BM_none = 0,
+	BM_reserve,	/* RESERVE method          */
+	BM_confirm,	/* CONFIRM method          */
+	BM_release,	/* RELEASE method          */
+	BM_engine,	/* QUERY of type ENGINE    */
+	BM_inventory,	/* QUERY of type INVENTORY */
+	BM_switch,	/* SWITCH method           */
+	BM_MAX,
+	BM_UNKNOWN
+};
+
+/**
+ * basil_element - XML tags appearing in BasilReponse
+ * This is list is *sorted* according to the following Basil versions:
+ * - Basil 1.0  (common denominator)
+ * - Basil 1.1  (earliest 1.1 variant used on XT systems with CLE 2.x)
+ * - Basil 3.1  (later 1.1 variant used on XE systems with CLE 3.x)
+ * Remember to keep this order when making changes to this enum!
+ */
+enum basil_element {
+	BT_MESSAGE = 0,
+	BT_RESPONSE,
+	BT_RESPDATA,
+
+	BT_RESERVED,		/* RESERVE */
+	BT_CONFIRMED,		/* CONFIRM */
+	BT_RELEASED,		/* RELEASE */
+	BT_ENGINE,		/* QUERY - ENGINE    */
+
+	BT_INVENTORY,		/* QUERY - INVENTORY */
+	BT_NODEARRAY,		/* Generic Inventory */
+	BT_NODE,		/* Generic Inventory */
+	BT_PROCARRAY,		/* Generic Inventory */
+	BT_PROCESSOR,		/* Generic Inventory */
+	BT_PROCALLOC,		/* Generic Inventory */
+	BT_MEMARRAY,		/* Generic Inventory */
+	BT_MEMORY,		/* Generic Inventory */
+	BT_MEMALLOC,		/* Generic Inventory */
+	BT_LABELARRAY,		/* Generic Inventory */
+	BT_LABEL,		/* Generic Inventory */
+	BT_RESARRAY,		/* Generic Inventory */
+	BT_RESVN,		/* Generic Inventory */
+#define BT_1_0_MAX		(BT_RESVN + 1)		/* End of Basil 1.0 */
+
+	BT_SEGMARRAY,		/* Basil 1.1 Inventory/Node */
+	BT_SEGMENT,		/* Basil 1.1 Inventory/Node */
+	BT_APPARRAY,		/* Basil 1.1 Inventory/Reservation */
+	BT_APPLICATION,		/* Basil 1.1 Inventory/Reservation */
+	BT_CMDARRAY,		/* Basil 1.1 Inventory/Reservation */
+	BT_COMMAND,		/* Basil 1.1 Inventory/Reservation */
+#define BT_1_1_MAX		(BT_COMMAND + 1)	/* End of Basil 1.1 */
+
+	BT_RESVDNODEARRAY,	/* Basil 3.1 RESERVE Response */
+	BT_RESVDNODE,		/* Basil 3.1 RESERVE Response */
+#define BT_3_1_MAX		(BT_RESVDNODE + 1)	/* End of Basil 3.1 */
+
+	BT_ACCELARRAY,		/* Basil 4.0 Inventory/Node */
+	BT_ACCEL,		/* Basil 4.0 Inventory/Node */
+	BT_ACCELALLOC,		/* Basil 4.0 Inventory/Node */
+	BT_SWITCH,              /* SWITCH */
+	BT_SWITCHRES,   	/* Response for Switch reservation */
+	BT_SWITCHAPP,   	/* Response for Switch application */
+	BT_SWITCHRESARRAY,	/* Response for Switch reservation array */
+	BT_SWITCHAPPARRAY,	/* Response for Switch application array */
+#define BT_4_0_MAX              (BT_ACCELALLOC + 1)	/* End of Basil 4.0 */
+	/* FIXME: the Basil 4.1 interface is not yet fully released */
+#define BT_4_1_MAX              BT_4_0_MAX              /* End of Basil 4.1 */
+	BT_MAX			/* End of Basil tags */
+};
+
+/* Error types */
+enum basil_error {
+	/* (a) up to and excluding BE_MAX, error kind information */
+	BE_NONE = 0,
+	BE_INTERNAL,
+	BE_SYSTEM,
+	BE_PARSER,
+	BE_SYNTAX,
+	BE_BACKEND,
+	BE_UNKNOWN,
+	/* custom errors start here */
+	BE_NO_RESID,
+	BE_MAX,
+	/* (b) bit masks for additional information */
+	BE_ERROR_TYPE_MASK = 0x00FF,
+	BE_TRANSIENT	   = 0x0100
+};
+
+/** Decode negative error code @rc into Basil error */
+static inline enum basil_error decode_basil_error(int rc)
+{
+	int be = -rc & BE_ERROR_TYPE_MASK;
+
+	return rc >= 0 ? BE_NONE : (be < BE_MAX ? be : BE_UNKNOWN);
+}
+
+/** Return true if the absolute value of @rc indicates transient error. */
+static inline bool is_transient_error(int rc)
+{
+	return (rc < 0 ? -rc : rc) & BE_TRANSIENT;
+}
+
+extern const char *basil_strerror(int rc);
+
+/*
+ * INVENTORY/RESERVE data
+ */
+enum basil_node_arch {
+	BNA_NONE = 0,
+	BNA_X2,
+	BNA_XT,
+	BNA_UNKNOWN,
+	BNA_MAX
+};
+
+enum basil_memory_type {
+	BMT_NONE = 0,
+	BMT_OS,
+	BMT_HUGEPAGE,
+	BMT_VIRTUAL,
+	BMT_UNKNOWN,
+	BMT_MAX
+};
+
+enum basil_label_type {
+	BLT_NONE = 0,
+	BLT_HARD,
+	BLT_SOFT,
+	BLT_UNKNOWN,
+	BLT_MAX
+};
+
+enum basil_label_disp {
+	BLD_NONE = 0,
+	BLD_ATTRACT,
+	BLD_REPEL,
+	BLD_UNKNOWN,
+	BLD_MAX
+};
+
+/*
+ * INVENTORY-only data
+ */
+enum basil_node_state {
+	BNS_NONE = 0,
+	BNS_UP,
+	BNS_DOWN,
+	BNS_UNAVAIL,
+	BNS_ROUTE,
+	BNS_SUSPECT,
+	BNS_ADMINDOWN,
+	BNS_UNKNOWN,
+	BNS_MAX
+};
+
+enum basil_node_role {
+	BNR_NONE = 0,
+	BNR_INTER,
+	BNR_BATCH,
+	BNR_UNKNOWN,
+	BNR_MAX
+};
+
+enum basil_proc_type {
+	BPT_NONE = 0,
+	BPT_CRAY_X2,
+	BPT_X86_64,
+	BPT_UNKNOWN,
+	BPT_MAX
+};
+
+enum basil_rsvn_mode {	/* Basil 3.1 */
+	BRM_NONE = 0,
+	BRM_EXCLUSIVE,
+	BRM_SHARE,
+	BRM_UNKNOWN,
+	BRM_MAX
+};
+
+enum basil_gpc_mode {	/* Basil 3.1 */
+	BGM_NONE = 0,
+	BRM_PROCESSOR,
+	BRM_LOCAL,
+	BRM_GLOBAL,
+	BGM_UNKNOWN,
+	BGM_MAX
+};
+
+enum basil_acceltype {	/* Alps 4.x (Basil 1.2) */
+	BA_NONE = 0,
+	BA_GPU,
+	BA_UNKNOWN,
+	BA_MAX
+};
+
+enum basil_accelstate {	/* Alps 4.x (Basil 1.2) */
+	BAS_NONE = 0,
+	BAS_UP,
+	BAS_DOWN,
+	BAS_UNKNOWN,
+	BAS_MAX
+};
+
+/*
+ * Inventory structs
+ */
+struct basil_node_processor {
+	uint32_t		ordinal;
+	uint32_t		clock_mhz;
+	enum basil_proc_type	arch;
+
+	/* With gang scheduling we can have more than 1 rsvn per node,
+	   so this is just here to see if the node itself is allocated
+	   at all.
+	*/
+	uint32_t		rsvn_id;
+
+	struct basil_node_processor *next;
+};
+
+struct basil_mem_alloc {
+	uint32_t		rsvn_id;
+	uint32_t		page_count;
+
+	struct basil_mem_alloc	*next;
+};
+
+struct basil_node_memory {
+	enum basil_memory_type	type;
+	uint32_t		page_size_kb;
+	uint32_t		page_count;
+	struct basil_mem_alloc	*a_head;
+
+	struct basil_node_memory *next;
+};
+
+struct basil_label {
+	enum basil_label_type	type;
+	enum basil_label_disp	disp;
+	char			name[BASIL_STRING_MEDIUM];
+
+	struct basil_label *next;
+};
+
+struct basil_segment {
+	uint8_t	ordinal;
+
+	struct basil_node_processor	*proc_head;
+	struct basil_node_memory	*mem_head;
+	struct basil_label		*lbl_head;
+
+	struct basil_segment *next;
+};
+
+struct basil_accel_alloc {		/* Basil 1.2, Alps 4.x */
+	uint32_t	rsvn_id;	/* reservation_id attribute */
+	/* NB: exclusive use of Accelerator/GPU, i.e. at most 1 allocation */
+};
+
+struct basil_node_accelerator {		/* Basil 1.2, Alps 4.x */
+	uint32_t		  ordinal;	/* must be 0 in Basil 1.2 */
+	enum basil_acceltype	  type;		/* must be BA_GPU in Basil 1.2 */
+	enum basil_accelstate	  state;
+	char 			  family[BASIL_STRING_LONG];
+	uint32_t		  memory_mb;
+	uint32_t		  clock_mhz;
+	struct basil_accel_alloc *allocation;
+
+	struct basil_node_accelerator *next;
+};
+
+struct basil_node {
+	uint32_t node_id;
+	uint32_t router_id;				/* Basil 3.1 */
+	char	 name[BASIL_STRING_SHORT];
+
+	enum basil_node_arch	arch;
+	enum basil_node_role	role;
+	enum basil_node_state	state;
+
+	struct basil_segment		*seg_head;	/* Basil 1.1 */
+	struct basil_node_accelerator	*accel_head;	/* Basil 1.2 */
+
+	struct basil_node *next;
+};
+extern bool node_is_allocated(const struct basil_node *node);
+
+struct basil_rsvn_app_cmd {
+	uint32_t		width,
+				depth,
+				nppn,
+				memory;
+	enum basil_node_arch	arch;
+
+	char			cmd[BASIL_STRING_MEDIUM];
+
+	struct basil_rsvn_app_cmd *next;
+};
+
+struct basil_rsvn_app {
+	uint64_t	apid;
+	uint32_t	user_id;
+	uint32_t	group_id;
+	time_t		timestamp;
+
+	struct basil_rsvn_app_cmd *cmd_head;
+
+	struct basil_rsvn_app *next;
+};
+
+struct basil_rsvn {
+	uint32_t	rsvn_id;
+	time_t		timestamp;			/* Basil 1.1 */
+	char		user_name[BASIL_STRING_MEDIUM];
+	char		account_name[BASIL_STRING_MEDIUM];
+	char		batch_id[BASIL_STRING_LONG];	/* Basil 1.1 */
+
+	enum basil_rsvn_mode	rsvn_mode;		/* Basil 3.1 */
+	enum basil_gpc_mode	gpc_mode;		/* Basil 3.1 */
+
+	struct basil_rsvn_app	*app_head;		/* Basil 1.1 */
+
+	struct basil_rsvn *next;
+};
+
+/*
+ * Inventory parameters (OUT)
+ */
+struct basil_full_inventory {
+	struct basil_node *node_head;
+	struct basil_rsvn *rsvn_head;
+};
+
+/**
+ * struct basil_inventory - basic inventory information
+ * @mpp_host:     Basil 3.1 and above
+ * @timestamp:    Basil 3.1 and above
+ * @is_gemini:    true if XE/Gemini system, false if XT/SeaStar system
+ * @change_count: number of changes since start
+ * @batch_avail:  number of compute nodes available for scheduling
+ * @batch_total:  total number of usable/used compute nodes
+ * @nodes_total:  total number of all compute nodes
+ */
+struct basil_inventory {
+	char		mpp_host[BASIL_STRING_SHORT];
+	time_t		timestamp;
+	bool		is_gemini;
+	uint64_t        change_count;
+	uint32_t	batch_avail,
+			batch_total,
+			nodes_total;
+
+	struct basil_full_inventory *f;
+};
+
+/*
+ * Reservation parameters (IN)
+ */
+struct basil_memory_param {
+	enum basil_memory_type	type;
+	uint32_t		size_mb;
+
+	struct basil_memory_param *next;
+};
+
+struct basil_accel_param {
+	enum basil_acceltype	type;
+	char 			family[BASIL_STRING_LONG];
+	uint32_t		memory_mb;
+
+	struct basil_accel_param *next;
+};
+
+struct basil_rsvn_param {
+	enum basil_node_arch	arch;		/* "architecture", XT or X2, -a  */
+	long			width,		/* required mppwidth > 0,    -n  */
+				/* The following MPP parameters are optional  */
+				depth,		/* depth > 0,         -d  */
+				nppn,		/* nppn > 0,          -N  */
+				npps,		/* PEs per segment,   -S  */
+				nspn;		/* segments per node, -sn */
+
+	char				*nodes;		/* NodeParamArray   */
+	struct basil_label		*labels;	/* LabelParamArray  */
+	struct basil_memory_param	*memory;	/* MemoryParamArray */
+	struct basil_accel_param	*accel;		/* AccelParamArray  */
+
+	struct basil_rsvn_param		*next;
+};
+
+/**
+ * struct basil_reservation  -  reservation parameters and data
+ * @rsvn_id:      assigned by RESERVE method
+ * @pagg_id:      used by CONFIRM method (session ID or CSA PAGG ID)
+ * @claims:	  number of claims outstanding against @rsvn_id (Basil 4.0)
+ * @suspended:	  If the reservation is suspended or not (Basil 4.0)
+ * @rsvd_nodes:   assigned by Basil 3.1 RESERVE method
+ * @user_name:    required by RESERVE method
+ * @account_name: optional Basil 1.0 RESERVE parameter
+ * @batch_id:     required Basil 1.1/3.1 RESERVE parameter
+ * @params:	  parameter contents of the ReserveParamArray
+ */
+struct basil_reservation {
+	/*
+	 * Runtime (IN/OUT) parameters
+	 */
+	uint32_t	rsvn_id;
+	uint64_t	pagg_id;
+	uint32_t        claims;
+	bool            suspended;
+
+	struct nodespec *rsvd_nodes;
+	/*
+	 * Static (IN) parameters
+	 */
+	char		user_name[BASIL_STRING_MEDIUM],
+			account_name[BASIL_STRING_MEDIUM],
+			batch_id[BASIL_STRING_LONG];
+
+	struct basil_rsvn_param *params;
+};
+
+/*
+ * struct basil_parse_data  -  method-dependent data used during parsing
+ *
+ * @version:	which Basil version to use (IN)
+ * @method:	the type of request issued (IN)
+ *
+ * @mdata:	method-dependent data (IN/OUT)
+ * @inv:	containers for (full/counting) INVENTORY (OUT)
+ * @res:	reservation parameters for RESERVE method (IN)
+ * @raw:	typecast of mdata to check if parameters are present
+ *
+ * @msg:	method-dependent string on success, error string on failure (OUT)
+ */
+struct basil_parse_data {
+	enum basil_version	version;
+	enum basil_method	method;
+
+	union {
+		struct basil_inventory	 *inv;
+		struct basil_reservation *res;
+		uint8_t			 *raw;
+	} mdata;
+
+	char msg[BASIL_ERROR_BUFFER_SIZE];
+};
+
+/*
+ * Mapping tables
+ */
+extern const char *bv_names[BV_MAX];
+extern const char *bv_names_long[BV_MAX];
+extern const char *bm_names[BM_MAX];
+extern const char *be_names[BE_MAX];
+
+extern const char *nam_arch[BNA_MAX];
+extern const char *nam_memtype[BMT_MAX];
+extern const char *nam_labeltype[BLT_MAX];
+extern const char *nam_ldisp[BLD_MAX];
+
+extern const char *nam_noderole[BNR_MAX];
+extern const char *nam_nodestate[BNS_MAX];
+extern const char *nam_proc[BPT_MAX];
+extern const char *nam_rsvn_mode[BRM_MAX];
+extern const char *nam_gpc_mode[BGM_MAX];
+
+extern const char *nam_acceltype[BA_MAX];
+extern const char *nam_accelstate[BAS_MAX];
+
+/**
+ * struct nodespec  -  representation of node ranges
+ * @start: start value of the range
+ * @end:   end value of the range (may equal @start)
+ * @next:  next element ns such that ns.start > this.end
+ */
+struct nodespec {
+	uint32_t	start;
+	uint32_t	end;
+
+	struct nodespec *next;
+};
+
+extern int ns_add_node(struct nodespec **head, uint32_t node_id);
+extern char *ns_to_string(const struct nodespec *head);
+extern void free_nodespec(struct nodespec *head);
+
+#ifdef HAVE_CRAY
+/*
+ *	Routines to interact with SDB database (uses prepared statements)
+ */
+/** Connect to the XTAdmin table on the SDB */
+extern MYSQL *cray_connect_sdb(void);
+
+/** Initialize and prepare statement */
+extern MYSQL_STMT *prepare_stmt(MYSQL *handle, const char *query,
+				MYSQL_BIND bind_parm[], unsigned long nparams,
+				MYSQL_BIND bind_cols[], unsigned long ncols);
+
+/** Execute and return the number of rows. */
+extern int exec_stmt(MYSQL_STMT *stmt, const char *query,
+		     MYSQL_BIND bind_col[], unsigned long ncols);
+
+/**
+ * Fetch the next row of data;
+ */
+int fetch_stmt(MYSQL_STMT *stmt);
+
+/* Free memory associated with data retrieved by fetch_stmt() */
+my_bool free_stmt_result(MYSQL_STMT *stmt);
+
+/* Free memory associated with data generated by prepare_stmt() */
+my_bool stmt_close(MYSQL_STMT *stmt);
+
+/* Free memory associated with data generated by cray_connect_sdb() */
+void cray_close_sdb(MYSQL *handle);
+
+/** Find out interconnect chip: Gemini (XE) or SeaStar (XT) */
+extern int cray_is_gemini_system(MYSQL *handle);
+
+/*
+ * Column positions used by basil_geometry() and fetch_stmt() in
+ * libemulate.
+ */
+enum query_columns {
+	/* integer data */
+	COL_X,		/* X coordinate		*/
+	COL_Y,		/* Y coordinate		*/
+	COL_Z,		/* Z coordinate		*/
+	COL_CAB,	/* cabinet position		*/
+	COL_ROW,	/* row position			*/
+	COL_CAGE,	/* cage number (0..2)		*/
+	COL_SLOT,	/* slot number (0..7)		*/
+	COL_CPU,	/* node number (0..3)		*/
+	COL_CORES,	/* number of cores per node	*/
+	COL_MEMORY,	/* rounded-down memory in MB	*/
+	/* string data */
+	COL_TYPE,	/* {service, compute }		*/
+	COLUMN_COUNT	/* sentinel */
+};
+#endif  /* HAVE_CRAY */
+
+
+/*
+ *	Basil XML-RPC API prototypes
+ */
+extern enum basil_version get_basil_version(void);
+extern int basil_request(struct basil_parse_data *bp);
+
+extern struct basil_inventory *get_full_inventory(enum basil_version version);
+extern void   free_inv(struct basil_inventory *inv);
+
+extern long basil_reserve(const char *user, const char *batch_id,
+			  uint32_t width, uint32_t depth, uint32_t nppn,
+			  uint32_t mem_mb, struct nodespec *ns_head,
+			  struct basil_accel_param *accel_head);
+extern int basil_confirm(uint32_t rsvn_id, int job_id, uint64_t pagg_id);
+extern const struct basil_rsvn *basil_rsvn_by_id(const struct basil_inventory *inv,
+						 uint32_t resvn_id);
+extern uint64_t *basil_get_rsvn_aprun_apids(const struct basil_inventory *inv,
+					    uint32_t rsvn_id);
+extern int basil_release(uint32_t rsvn_id);
+extern int basil_signal_apids(int32_t rsvn_id, int signal,
+			      struct basil_inventory *inv);
+extern int basil_safe_release(int32_t rsvn_id, struct basil_inventory *inv);
+extern int basil_switch(uint32_t rsvn_id, bool suspend);
+
+#endif /* __BASIL_ALPS_H__ */
diff --git a/src/plugins/select/cray/basil_interface.c b/src/plugins/select/cray/basil_interface.c
new file mode 100644
index 000000000..36a2ff77b
--- /dev/null
+++ b/src/plugins/select/cray/basil_interface.c
@@ -0,0 +1,942 @@
+/*
+ * Interface between lower-level ALPS XML-RPC functions and SLURM.
+ *
+ * Copyright (c) 2010-11 Centro Svizzero di Calcolo Scientifico (CSCS)
+ * Licensed under GPLv2.
+ */
+#include "basil_interface.h"
+#include "basil_alps.h"
+#include "src/common/slurm_accounting_storage.h"
+
+#define _DEBUG 0
+
+int dim_size[3] = {0, 0, 0};
+
+
+/*
+ * Following routines are from src/plugins/select/bluegene/plugin/jobinfo.c
+ */
+static int _set_select_jobinfo(select_jobinfo_t *jobinfo,
+			       enum select_jobdata_type data_type, void *data)
+{
+	uint32_t *uint32 = (uint32_t *) data;
+
+	if (jobinfo == NULL) {
+		error("cray/set_select_jobinfo: jobinfo not set");
+		return SLURM_ERROR;
+	}
+	if (jobinfo->magic != JOBINFO_MAGIC) {
+		error("cray/set_select_jobinfo: jobinfo magic bad");
+		return SLURM_ERROR;
+	}
+
+	switch (data_type) {
+	case SELECT_JOBDATA_RESV_ID:
+		jobinfo->reservation_id = *uint32;
+		break;
+	default:
+		error("cray/set_select_jobinfo: data_type %d invalid",
+		      data_type);
+	}
+
+	return SLURM_SUCCESS;
+}
+
+static int _get_select_jobinfo(select_jobinfo_t *jobinfo,
+			       enum select_jobdata_type data_type, void *data)
+{
+	uint64_t *uint64 = (uint64_t *) data;
+	uint32_t *uint32 = (uint32_t *) data;
+
+	if (jobinfo == NULL) {
+		error("cray/get_select_jobinfo: jobinfo not set");
+		return SLURM_ERROR;
+	}
+	if (jobinfo->magic != JOBINFO_MAGIC) {
+		error("cray/get_select_jobinfo: jobinfo magic bad");
+		return SLURM_ERROR;
+	}
+
+	switch (data_type) {
+	case SELECT_JOBDATA_RESV_ID:
+		*uint32 = jobinfo->reservation_id;
+		break;
+	case SELECT_JOBDATA_PAGG_ID:
+		*uint64 = jobinfo->confirm_cookie;
+		break;
+	default:
+		error("cray/get_select_jobinfo: data_type %d invalid",
+		      data_type);
+	}
+
+	return SLURM_SUCCESS;
+}
+
+/** Convert between Cray NID and slurm nodename format */
+static struct node_record *_find_node_by_basil_id(uint32_t node_id)
+{
+	char nid[9];	/* nid%05d\0 */
+
+	snprintf(nid, sizeof(nid), "nid%05u", node_id);
+
+	return find_node_record(nid);
+}
+
+extern int basil_node_ranking(struct node_record *node_array, int node_cnt)
+{
+	enum basil_version version = get_basil_version();
+	struct basil_inventory *inv;
+	struct basil_node *node;
+	int rank_count = 0, i;
+	hostlist_t hl = hostlist_create(NULL);
+	bool bad_node = 0;
+
+	/*
+	 * When obtaining the initial configuration, we can not allow ALPS to
+	 * fail. If there is a problem at this stage it is better to restart
+	 * SLURM completely, after investigating (and/or fixing) the cause.
+	 */
+	inv = get_full_inventory(version);
+	if (inv == NULL)
+		fatal("failed to get BASIL %s ranking", bv_names_long[version]);
+	else if (!inv->batch_total)
+		fatal("system has no usable batch compute nodes");
+	else if (inv->batch_total < node_cnt)
+		info("Warning: ALPS sees only %d/%d slurm.conf nodes, "
+		     "check DownNodes", inv->batch_total, node_cnt);
+
+	debug("BASIL %s RANKING INVENTORY: %d/%d batch nodes",
+	      bv_names_long[version], inv->batch_avail, inv->batch_total);
+
+	/*
+	 * Node ranking is based on a subset of the inventory: only nodes in
+	 * batch allocation mode which are up and not allocated. Assign a
+	 * 'NO_VAL' rank to all other nodes, which will translate as a very
+	 * high value, (unsigned)-2, to put those nodes last in the ranking.
+	 * The rest of the code must ensure that those nodes are never chosen.
+	 */
+	for (i = 0; i < node_cnt; i++)
+		node_array[i].node_rank = NO_VAL;
+
+	for (node = inv->f->node_head; node; node = node->next) {
+		struct node_record *node_ptr;
+		char tmp[50];
+
+		node_ptr = _find_node_by_basil_id(node->node_id);
+		if (node_ptr == NULL) {
+			error("nid%05u (%s node in state %s) not in slurm.conf",
+			      node->node_id, nam_noderole[node->role],
+			      nam_nodestate[node->state]);
+			bad_node = 1;
+		} else
+			node_ptr->node_rank = inv->nodes_total - rank_count++;
+		sprintf(tmp, "nid%05u", node->node_id);
+		hostlist_push(hl, tmp);
+	}
+	free_inv(inv);
+	if (bad_node) {
+		hostlist_sort(hl);
+		char *name = hostlist_ranged_string_xmalloc(hl);
+		info("It appears your slurm.conf nodelist doesn't "
+		     "match the alps system.  Here are the nodes alps knows "
+		     "about\n%s", name);
+	}
+	hostlist_destroy(hl);
+
+	return SLURM_SUCCESS;
+}
+
+/**
+ * basil_inventory - Periodic node-state query via ALPS XML-RPC.
+ * This should be run immediately before each scheduling cycle.
+ * Returns non-SLURM_SUCCESS if
+ * - INVENTORY method failed (error)
+ * - no nodes are available (no point in scheduling)
+ * - orphaned ALPS reservation exists (wait until ALPS resynchronizes)
+ */
+extern int basil_inventory(void)
+{
+	enum basil_version version = get_basil_version();
+	struct basil_inventory *inv;
+	struct basil_node *node;
+	struct basil_rsvn *rsvn;
+	int slurm_alps_mismatch = 0;
+	int rc = SLURM_SUCCESS;
+	time_t now = time(NULL);
+	static time_t slurm_alps_mismatch_time = (time_t) 0;
+	static bool logged_sync_timeout = false;
+
+	inv = get_full_inventory(version);
+	if (inv == NULL) {
+		error("BASIL %s INVENTORY failed", bv_names_long[version]);
+		return SLURM_ERROR;
+	}
+
+	debug("BASIL %s INVENTORY: %d/%d batch nodes available",
+	      bv_names_long[version], inv->batch_avail, inv->batch_total);
+
+	/* Avoid checking for inv->batch_avail here since if we are
+	   gang scheduling returning an error for a full system is
+	   probably the wrong thing to do. (the schedule() function
+	   in the slurmctld will never run ;)).
+	*/
+	if (!inv->f->node_head || !inv->batch_total)
+		rc = ESLURM_REQUESTED_NODE_CONFIG_UNAVAILABLE;
+
+	for (node = inv->f->node_head; node; node = node->next) {
+		int node_inx;
+		struct node_record *node_ptr;
+		char *reason = NULL;
+
+		node_ptr = _find_node_by_basil_id(node->node_id);
+		if (node_ptr == NULL) {
+			error("nid%05u (%s node in state %s) not in slurm.conf",
+			      node->node_id, nam_noderole[node->role],
+			      nam_nodestate[node->state]);
+			continue;
+		}
+		node_inx = node_ptr - node_record_table_ptr;
+
+		if (node_is_allocated(node) && !IS_NODE_ALLOCATED(node_ptr)) {
+			/*
+			 * ALPS still hangs on to the node while SLURM considers
+			 * it already unallocated. Possible causes are partition
+			 * cleanup taking too long (can be 10sec ... minutes),
+			 * and orphaned ALPS reservations (caught below).
+			 *
+			 * The converse case (SLURM hanging on to the node while
+			 * ALPS has already freed it) happens frequently during
+			 * job completion: select_g_job_fini() is called before
+			 * make_node_comp(). Rely on SLURM logic for this case.
+			 */
+			slurm_alps_mismatch++;
+		}
+
+		if (node->state == BNS_DOWN) {
+			reason = "ALPS marked it DOWN";
+		} else if (node->state == BNS_UNAVAIL) {
+			reason = "node is UNAVAILABLE";
+		} else if (node->state == BNS_ROUTE) {
+			reason = "node does ROUTING";
+		} else if (node->state == BNS_SUSPECT) {
+			reason = "entered SUSPECT mode";
+		} else if (node->state == BNS_ADMINDOWN) {
+			reason = "node is ADMINDOWN";
+		} else if (node->state != BNS_UP) {
+			reason = "state not UP";
+		} else if (node->role != BNR_BATCH) {
+			reason = "mode not BATCH";
+		} else if (node->arch != BNA_XT) {
+			reason = "arch not XT/XE";
+		}
+
+		/* Base state entirely derives from ALPS */
+		if (reason) {
+			if (node_ptr->down_time == 0)
+				node_ptr->down_time = now;
+			if (IS_NODE_DOWN(node_ptr)) {
+				/* node still down */
+			} else if (slurmctld_conf.slurmd_timeout &&
+				   ((now - node_ptr->down_time) <
+				    slurmctld_conf.slurmd_timeout)) {
+				node_ptr->node_state |= NODE_STATE_NO_RESPOND;
+				bit_clear(avail_node_bitmap, node_inx);
+			} else {
+				xfree(node_ptr->reason);
+				info("MARKING %s DOWN (%s)",
+				     node_ptr->name, reason);
+				/* set_node_down also kills any running jobs */
+				set_node_down_ptr(node_ptr, reason);
+			}
+		} else if (IS_NODE_DOWN(node_ptr)) {
+			xfree(node_ptr->reason);
+			node_ptr->down_time = 0;
+			info("MARKING %s UP", node_ptr->name);
+
+			/* Reset state, make_node_idle figures out the rest */
+			node_ptr->node_state &= NODE_STATE_FLAGS;
+			node_ptr->node_state &= (~NODE_STATE_NO_RESPOND);
+			node_ptr->node_state |= NODE_STATE_UNKNOWN;
+
+			make_node_idle(node_ptr, NULL);
+			if (!IS_NODE_DRAIN(node_ptr) &&
+			    !IS_NODE_FAIL(node_ptr)) {
+				xfree(node_ptr->reason);
+				node_ptr->reason_time = 0;
+				node_ptr->reason_uid = NO_VAL;
+				clusteracct_storage_g_node_up(
+					acct_db_conn, node_ptr, now);
+			}
+		} else if (IS_NODE_NO_RESPOND(node_ptr)) {
+			node_ptr->node_state &= (~NODE_STATE_NO_RESPOND);
+			if (!IS_NODE_DRAIN(node_ptr) &&
+			    !IS_NODE_FAIL(node_ptr)) {
+				bit_set(avail_node_bitmap, node_inx);
+			}
+		}
+	}
+
+	if (slurm_alps_mismatch)
+		debug("ALPS: %d node(s) still held", slurm_alps_mismatch);
+
+	/*
+	 * Check that each ALPS reservation corresponds to a SLURM job.
+	 * Purge orphaned reservations, which may result from stale or
+	 * messed up system state, or are indicative of ALPS problems
+	 * (stuck in pending cancel calls).
+	 */
+	for (rsvn = inv->f->rsvn_head; rsvn; rsvn = rsvn->next) {
+		ListIterator job_iter = list_iterator_create(job_list);
+		struct job_record *job_ptr;
+		uint32_t resv_id;
+
+		if (job_iter == NULL)
+			fatal("list_iterator_create: malloc failure");
+
+		while ((job_ptr = (struct job_record *)list_next(job_iter))) {
+
+			if (_get_select_jobinfo(job_ptr->select_jobinfo->data,
+						SELECT_JOBDATA_RESV_ID,
+						&resv_id) == SLURM_SUCCESS
+			    && resv_id == rsvn->rsvn_id)
+				break;
+		}
+		list_iterator_destroy(job_iter);
+
+		if (job_ptr == NULL) {
+			error("orphaned ALPS reservation %u, trying to remove",
+			      rsvn->rsvn_id);
+			basil_safe_release(rsvn->rsvn_id, inv);
+			slurm_alps_mismatch = true;
+		}
+	}
+	free_inv(inv);
+
+	if (slurm_alps_mismatch) {
+		/* If SLURM and ALPS state are not in synchronization,
+		 * do not schedule any more jobs until waiting at least
+		 * SyncTimeout seconds. */
+		if (slurm_alps_mismatch_time == 0) {
+			slurm_alps_mismatch_time = now;
+		} else if (cray_conf->sync_timeout == 0) {
+			/* Wait indefinitely */
+		} else if (difftime(now, slurm_alps_mismatch_time) <
+			   cray_conf->sync_timeout) {
+			return ESLURM_REQUESTED_NODE_CONFIG_UNAVAILABLE;
+		} else if (!logged_sync_timeout) {
+			error("Could not synchronize SLURM with ALPS for %u "
+			      "seconds, proceeding with job scheduling",
+			      cray_conf->sync_timeout);
+			logged_sync_timeout = true;
+		}
+	} else {
+		slurm_alps_mismatch_time = 0;
+		logged_sync_timeout = false;
+	}
+	return rc;
+}
+
+/** Base-36 encoding of @coord: 0..9 -> '0'..'9', 10..35 -> 'A'..'Z'.
+ *  No range check is performed: values above 35 map to characters past
+ *  'Z', so callers must pass coordinates < 36.
+ */
+static char _enc_coord(uint8_t coord)
+{
+	return coord + (coord < 10 ? '0' : 'A' - 10);
+}
+
+/**
+ * basil_geometry - Check node attributes, resolve (X,Y,Z) coordinates.
+ *
+ * Checks both SDB database and ALPS inventory for consistency. The inventory
+ * part is identical to basil_inventory(), with the difference of being called
+ * before valid bitmaps exist, from select_g_node_init().
+ * Its dependencies are:
+ * - it needs reset_job_bitmaps() in order to rebuild node_bitmap fields,
+ * - it relies on _sync_nodes_to_jobs() to
+ *   o kill active jobs on nodes now marked DOWN,
+ *   o reset node state to ALLOCATED if it has been marked IDLE here (which is
+ *     an error case, since there is no longer an ALPS reservation for the job,
+ *     this is caught by the subsequent basil_inventory()).
+ */
+extern int basil_geometry(struct node_record *node_ptr_array, int node_cnt)
+{
+	struct node_record *node_ptr, *end = node_ptr_array + node_cnt;
+	enum basil_version version = get_basil_version();
+	struct basil_inventory *inv;
+
+	/* General mySQL */
+	MYSQL		*handle;
+	MYSQL_STMT	*stmt = NULL;
+	/* Input parameters */
+	unsigned int	node_id;
+	/*
+	 * Use a left outer join here since the attributes table may not be
+	 * populated for a given nodeid (e.g. when the node has been disabled
+	 * on the SMW via 'xtcli disable').
+	 * The processor table has more authoritative information, if a nodeid
+	 * is not listed there, it does not exist.
+	 */
+	const char query[] =	"SELECT x_coord, y_coord, z_coord,"
+				"       cab_position, cab_row, cage, slot, cpu,"
+				"	LOG2(coremask+1), availmem, "
+				"       processor_type  "
+				"FROM  processor LEFT JOIN attributes "
+				"ON    processor_id = nodeid "
+				"WHERE processor_id = ? ";
+	const int	PARAM_COUNT = 1;	/* node id */
+	MYSQL_BIND	params[PARAM_COUNT];
+
+	int		x_coord, y_coord, z_coord;
+	int		cab, row, cage, slot, cpu;
+	unsigned int	node_cpus, node_mem;
+	char		proc_type[BASIL_STRING_SHORT];
+	MYSQL_BIND	bind_cols[COLUMN_COUNT];
+	my_bool		is_null[COLUMN_COUNT];
+	my_bool		is_error[COLUMN_COUNT];
+	int		is_gemini, i;
+	time_t		now = time(NULL);
+
+	memset(params, 0, sizeof(params));
+	params[0].buffer_type = MYSQL_TYPE_LONG;
+	params[0].is_unsigned = true;
+	params[0].is_null     = (my_bool *)0;
+	params[0].buffer      = (char *)&node_id;
+
+	/* All result columns are LONG except the processor_type string */
+	memset(bind_cols, 0, sizeof(bind_cols));
+	for (i = 0; i < COLUMN_COUNT; i ++) {
+		bind_cols[i].is_null = &is_null[i];
+		bind_cols[i].error   = &is_error[i];
+
+		if (i == COL_TYPE) {
+			bind_cols[i].buffer_type   = MYSQL_TYPE_STRING;
+			bind_cols[i].buffer_length = sizeof(proc_type);
+			bind_cols[i].buffer	   = proc_type;
+		} else {
+			bind_cols[i].buffer_type   = MYSQL_TYPE_LONG;
+			bind_cols[i].is_unsigned   = (i >= COL_CORES);
+		}
+	}
+	bind_cols[COL_X].buffer	     = (char *)&x_coord;
+	bind_cols[COL_Y].buffer	     = (char *)&y_coord;
+	bind_cols[COL_Z].buffer	     = (char *)&z_coord;
+	bind_cols[COL_CAB].buffer    = (char *)&cab;
+	bind_cols[COL_ROW].buffer    = (char *)&row;
+	bind_cols[COL_CAGE].buffer   = (char *)&cage;
+	bind_cols[COL_SLOT].buffer   = (char *)&slot;
+	bind_cols[COL_CPU].buffer    = (char *)&cpu;
+	bind_cols[COL_CORES].buffer  = (char *)&node_cpus;
+	bind_cols[COL_MEMORY].buffer = (char *)&node_mem;
+
+	inv = get_full_inventory(version);
+	if (inv == NULL)
+		fatal("failed to get initial BASIL inventory");
+
+	info("BASIL %s initial INVENTORY: %d/%d batch nodes available",
+	      bv_names_long[version], inv->batch_avail, inv->batch_total);
+
+	handle = cray_connect_sdb();
+	if (handle == NULL)
+		fatal("can not connect to XTAdmin database on the SDB");
+
+	is_gemini = cray_is_gemini_system(handle);
+	if (is_gemini < 0)
+		fatal("can not determine Cray XT/XE system type");
+
+	stmt = prepare_stmt(handle, query, params, PARAM_COUNT,
+				    bind_cols, COLUMN_COUNT);
+	if (stmt == NULL)
+		fatal("can not prepare statement to resolve Cray coordinates");
+
+	/* NOTE(review): the loop iterates node_record_table_ptr while 'end'
+	 * is derived from the node_ptr_array argument. These are presumably
+	 * the same table -- confirm, since a mismatch would over- or
+	 * under-run the iteration. */
+	for (node_ptr = node_record_table_ptr; node_ptr < end; node_ptr++) {
+		struct basil_node *node;
+		char *reason = NULL;
+
+		if ((node_ptr->name == NULL) ||
+		    (sscanf(node_ptr->name, "nid%05u", &node_id) != 1)) {
+			error("can not read basil_node_id from %s",
+				node_ptr->name);
+			continue;
+		}
+
+		if (exec_stmt(stmt, query, bind_cols, COLUMN_COUNT) < 0)
+			fatal("can not resolve %s coordinates", node_ptr->name);
+
+		if (fetch_stmt(stmt) == 0) {
+#if _DEBUG
+			info("proc_type:%s cpus:%u memory:%u",
+			     proc_type, node_cpus, node_mem);
+			info("row:%u cage:%u slot:%u cpu:%u xyz:%u:%u:%u",
+			     row, cage, slot, cpu, x_coord, y_coord, z_coord);
+#endif
+			if (strcmp(proc_type, "compute") != 0) {
+				/*
+				 * Switching a compute node to be a service node
+				 * can not happen at runtime: requires a reboot.
+				 */
+				fatal("Node '%s' is a %s node. "
+				      "Only compute nodes can appear in slurm.conf.",
+					node_ptr->name, proc_type);
+			} else if (is_null[COL_CORES] || is_null[COL_MEMORY]) {
+				/*
+				 * This can happen if a node has been disabled
+				 * on the SMW (using 'xtcli disable <nid>'). The
+				 * node will still be listed in the 'processor'
+				 * table, but have no 'attributes' entry (NULL
+				 * values for CPUs/memory). Also, the node will
+				 * be invisible to ALPS, which is why we need to
+				 * set it down here already.
+				 */
+				node_cpus = node_mem = 0;
+				reason = "node data unknown - disabled on SMW?";
+			} else if (is_null[COL_X] || is_null[COL_Y]
+						  || is_null[COL_Z]) {
+				/*
+				 * Similar case to the one above, observed when
+				 * a blade has been removed. Node will not
+				 * likely show up in ALPS.
+				 */
+				x_coord = y_coord = z_coord = 0;
+				reason = "unknown coordinates - hardware failure?";
+			} else if (node_cpus < node_ptr->config_ptr->cpus) {
+				/*
+				 * FIXME: Might reconsider this policy.
+				 *
+				 * FastSchedule is ignored here, it requires the
+				 * slurm.conf to be consistent with hardware.
+				 *
+				 * Assumption is that CPU/Memory do not change
+				 * at runtime (Cray has no hot-swappable parts).
+				 *
+				 * Hence checking it in basil_inventory() would
+				 * mean a lot of runtime overhead.
+				 */
+				fatal("slurm.conf: node %s has only Procs=%d",
+					node_ptr->name, node_cpus);
+			} else if (node_mem < node_ptr->config_ptr->real_memory) {
+				fatal("slurm.conf: node %s has RealMemory=%d",
+					node_ptr->name, node_mem);
+			}
+
+		} else if (is_gemini) {
+			fatal("Non-existing Gemini node '%s' in slurm.conf",
+			      node_ptr->name);
+		} else {
+			fatal("Non-existing SeaStar node '%s' in slurm.conf",
+			      node_ptr->name);
+		}
+
+		if (!is_gemini) {
+			/*
+			 * SeaStar: each node has unique coordinates
+			 */
+			if (node_ptr->arch == NULL)
+				node_ptr->arch = xstrdup("XT");
+		} else {
+			/*
+			 * Gemini: each 2 nodes share the same network
+			 * interface (i.e., nodes 0/1 and 2/3 each have
+			 * the same coordinates).
+			 */
+			if (node_ptr->arch == NULL)
+				node_ptr->arch = xstrdup("XE");
+		}
+
+		xfree(node_ptr->node_hostname);
+		xfree(node_ptr->comm_name);
+		/*
+		 * Convention: since we are using SLURM in frontend-mode,
+		 *             we use Node{Addr,HostName} as follows.
+		 *
+		 * NodeAddr:      <X><Y><Z> coordinates in base-36 encoding
+		 *
+		 * NodeHostName:  c#-#c#s#n# using the  NID convention
+		 *                <cabinet>-<row><chassis><slot><node>
+		 * - each cabinet can accommodate 3 chassis (c1..c3)
+		 * - each chassis has 8 slots               (s0..s7)
+		 * - each slot contains 2 or 4 nodes        (n0..n3)
+		 *   o either 2 service nodes (n0/n3)
+		 *   o or 4 compute nodes     (n0..n3)
+		 *   o or 2 gemini chips      (g0/g1 serving n0..n3)
+		 *
+		 * Example: c0-0c1s0n1
+		 *          - c0- = cabinet 0
+		 *          - 0   = row     0
+		 *          - c1  = chassis 1
+		 *          - s0  = slot    0
+		 *          - n1  = node    1
+		 */
+		node_ptr->node_hostname = xstrdup_printf("c%u-%uc%us%un%u", cab,
+							 row, cage, slot, cpu);
+		node_ptr->comm_name = xstrdup_printf("%c%c%c",
+						     _enc_coord(x_coord),
+						     _enc_coord(y_coord),
+						     _enc_coord(z_coord));
+		/* NOTE(review): this records MAX(coord - 1) per dimension,
+		 * which looks off-by-one versus either a 0-based maximum
+		 * coordinate or a dimension size -- confirm against the
+		 * consumers of dim_size. */
+		dim_size[0] = MAX(dim_size[0], (x_coord - 1));
+		dim_size[1] = MAX(dim_size[1], (y_coord - 1));
+		dim_size[2] = MAX(dim_size[2], (z_coord - 1));
+#if _DEBUG
+		info("%s  %s  %s  cpus=%u, mem=%u reason=%s", node_ptr->name,
+		     node_ptr->node_hostname, node_ptr->comm_name,
+		     node_cpus, node_mem, reason);
+#endif
+		/*
+		 * Check the current state reported by ALPS inventory, unless it
+		 * is already evident that the node has some other problem.
+		 */
+		if (reason == NULL) {
+			for (node = inv->f->node_head; node; node = node->next)
+				if (node->node_id == node_id)
+					break;
+			if (node == NULL) {
+				reason = "not visible to ALPS - check hardware";
+			} else if (node->state == BNS_DOWN) {
+				reason = "ALPS marked it DOWN";
+			} else if (node->state == BNS_UNAVAIL) {
+				reason = "node is UNAVAILABLE";
+			} else if (node->state == BNS_ROUTE) {
+				reason = "node does ROUTING";
+			} else if (node->state == BNS_SUSPECT) {
+				reason = "entered SUSPECT mode";
+			} else if (node->state == BNS_ADMINDOWN) {
+				reason = "node is ADMINDOWN";
+			} else if (node->state != BNS_UP) {
+				reason = "state not UP";
+			} else if (node->role != BNR_BATCH) {
+				reason = "mode not BATCH";
+			} else if (node->arch != BNA_XT) {
+				reason = "arch not XT/XE";
+			}
+		}
+
+		/* Base state entirely derives from ALPS
+		 * NOTE: The node bitmaps are not defined when this code is
+		 * initially executed. */
+		node_ptr->node_state &= NODE_STATE_FLAGS;
+		if (reason) {
+			if (node_ptr->down_time == 0)
+				node_ptr->down_time = now;
+			if (IS_NODE_DOWN(node_ptr)) {
+				/* node still down */
+				debug("Initial DOWN node %s - %s",
+					node_ptr->name, node_ptr->reason);
+			} else if (slurmctld_conf.slurmd_timeout &&
+				   ((now - node_ptr->down_time) <
+				    slurmctld_conf.slurmd_timeout)) {
+				node_ptr->node_state |= NODE_STATE_NO_RESPOND;
+			} else {
+				info("Initial DOWN node %s - %s",
+				     node_ptr->name, reason);
+				node_ptr->reason = xstrdup(reason);
+				/* Node state flags preserved above */
+				node_ptr->node_state |= NODE_STATE_DOWN;
+				clusteracct_storage_g_node_down(acct_db_conn,
+								node_ptr,
+								now, NULL,
+								slurm_get_slurm_user_id());
+			}
+		} else {
+			bool node_up_flag = IS_NODE_DOWN(node_ptr) &&
+					    !IS_NODE_DRAIN(node_ptr) &&
+					    !IS_NODE_FAIL(node_ptr);
+			node_ptr->down_time = 0;
+			if (node_is_allocated(node))
+				node_ptr->node_state |= NODE_STATE_ALLOCATED;
+			else
+				node_ptr->node_state |= NODE_STATE_IDLE;
+			node_ptr->node_state &= (~NODE_STATE_NO_RESPOND);
+			xfree(node_ptr->reason);
+			if (node_up_flag) {
+				info("ALPS returned node %s to service",
+				     node_ptr->name);
+				clusteracct_storage_g_node_up(acct_db_conn,
+							      node_ptr, now);
+			}
+		}
+
+		free_stmt_result(stmt);
+	}
+
+	if (stmt_close(stmt))
+		error("error closing statement: %s", mysql_stmt_error(stmt));
+	cray_close_sdb(handle);
+	free_inv(inv);
+
+	return SLURM_SUCCESS;
+}
+
+/**
+ * do_basil_reserve - create a BASIL reservation.
+ * IN job_ptr - pointer to job which has just been allocated resources
+ * RET 0 or error code, job will abort or be requeued on failure
+ */
+extern int do_basil_reserve(struct job_record *job_ptr)
+{
+	struct nodespec *ns_head = NULL;
+	/* NOTE(review): mppwidth is 16-bit while job cpu counts are 32-bit;
+	 * a very wide allocation could overflow the sum below -- confirm
+	 * the upper bound on task counts. */
+	uint16_t mppwidth = 0, mppdepth, mppnppn;
+	/* mppmem must be at least 1 for gang scheduling to work so
+	 * if you are wondering why gang scheduling isn't working you
+	 * should check your slurm.conf for DefMemPerNode */
+	uint32_t mppmem = 0, node_min_mem = 0;
+	uint32_t resv_id;
+	int i, first_bit, last_bit;
+	long rc;
+	char *user, batch_id[16];
+
+	if (!job_ptr->job_resrcs || job_ptr->job_resrcs->nhosts == 0)
+		return SLURM_SUCCESS;
+
+	debug3("job #%u: %u nodes = %s, cpus=%u" , job_ptr->job_id,
+		job_ptr->job_resrcs->nhosts,
+		job_ptr->job_resrcs->nodes,
+		job_ptr->job_resrcs->ncpus
+	);
+
+	if (job_ptr->job_resrcs->node_bitmap == NULL) {
+		error("job %u node_bitmap not set", job_ptr->job_id);
+		return SLURM_SUCCESS;
+	}
+
+	first_bit = bit_ffs(job_ptr->job_resrcs->node_bitmap);
+	last_bit  = bit_fls(job_ptr->job_resrcs->node_bitmap);
+	if (first_bit == -1 || last_bit == -1)
+		return SLURM_SUCCESS;		/* no nodes allocated */
+
+	mppdepth = MAX(1, job_ptr->details->cpus_per_task);
+	mppnppn  = job_ptr->details->ntasks_per_node;
+
+	/* mppmem */
+	if (job_ptr->details->pn_min_memory & MEM_PER_CPU) {
+		/* Only honour --mem-per-cpu if --ntasks has been given */
+		if (job_ptr->details->num_tasks)
+			mppmem = job_ptr->details->pn_min_memory & ~MEM_PER_CPU;
+	} else if (job_ptr->details->pn_min_memory) {
+		node_min_mem = job_ptr->details->pn_min_memory;
+	}
+
+	/* Build the nodespec list and derive the per-PE memory limit */
+	for (i = first_bit; i <= last_bit; i++) {
+		struct node_record *node_ptr = node_record_table_ptr + i;
+		uint32_t basil_node_id;
+
+		if (!bit_test(job_ptr->job_resrcs->node_bitmap, i))
+			continue;
+
+		if (!node_ptr->name || node_ptr->name[0] == '\0')
+			continue;	/* bad node */
+
+		if (sscanf(node_ptr->name, "nid%05u", &basil_node_id) != 1)
+			fatal("can not read basil_node_id from %s",
+			      node_ptr->name);
+
+		if (ns_add_node(&ns_head, basil_node_id) != 0) {
+			error("can not add node %s (nid%05u)", node_ptr->name,
+			      basil_node_id);
+			free_nodespec(ns_head);
+			return SLURM_ERROR;
+		}
+
+		if (node_min_mem) {
+			uint32_t node_cpus, node_mem;
+			int32_t tmp_mppmem;
+
+			if (slurmctld_conf.fast_schedule) {
+				node_cpus = node_ptr->config_ptr->cpus;
+				node_mem  = node_ptr->config_ptr->real_memory;
+			} else {
+				node_cpus = node_ptr->cpus;
+				node_mem  = node_ptr->real_memory;
+			}
+			/*
+			 * ALPS 'Processing Elements per Node' value (aprun -N),
+			 * which in slurm is --ntasks-per-node and 'mppnppn' in
+			 * PBS: if --ntasks is specified, default to the number
+			 * of cores per node (also the default for 'aprun -N').
+			 * On a heterogeneous system the nodes aren't
+			 * always the same so keep track of the lowest
+			 * mppmem and use it as the level for all
+			 * nodes (mppmem is 0 when coming in).
+			 */
+			tmp_mppmem = node_min_mem = MIN(node_mem, node_min_mem);
+			tmp_mppmem /= mppnppn ? mppnppn : node_cpus;
+
+			/* If less than or equal to 0 make sure you
+			   have 1 at least since 0 means give all the
+			   memory to the job.
+			*/
+			if (tmp_mppmem <= 0)
+				tmp_mppmem = 1;
+
+			if (mppmem)
+				mppmem = MIN(mppmem, tmp_mppmem);
+			else
+				mppmem = tmp_mppmem;
+		}
+	}
+
+	/* mppwidth */
+	for (i = 0; i < job_ptr->job_resrcs->nhosts; i++) {
+		uint16_t node_tasks = job_ptr->job_resrcs->cpus[i] / mppdepth;
+
+		if (mppnppn && mppnppn < node_tasks)
+			node_tasks = mppnppn;
+		mppwidth += node_tasks;
+	}
+
+	/* ALPS batch id == SLURM job id, rendered in decimal */
+	snprintf(batch_id, sizeof(batch_id), "%u", job_ptr->job_id);
+	user = uid_to_string(job_ptr->user_id);
+	rc   = basil_reserve(user, batch_id, mppwidth, mppdepth, mppnppn,
+			     mppmem, ns_head, NULL);
+	xfree(user);
+	if (rc <= 0) {
+		/* errno value will be resolved by select_g_job_begin() */
+		errno = is_transient_error(rc) ? EAGAIN : ECONNABORTED;
+		return SLURM_ERROR;
+	}
+
+	resv_id	= rc;
+	if (_set_select_jobinfo(job_ptr->select_jobinfo->data,
+			SELECT_JOBDATA_RESV_ID, &resv_id) != SLURM_SUCCESS) {
+		/*
+		 * This is a fatal error since it means we will not be able to
+		 * confirm the reservation; no step will be able to run in it.
+		 */
+		error("job %u: can not set resId %u", job_ptr->job_id, resv_id);
+		basil_release(resv_id);
+		return SLURM_ERROR;
+	}
+	if (mppmem)
+		job_ptr->details->pn_min_memory = mppmem | MEM_PER_CPU;
+
+	info("ALPS RESERVATION #%u, JobId %u: BASIL -n %d -N %d -d %d -m %d",
+	     resv_id, job_ptr->job_id, mppwidth, mppnppn, mppdepth, mppmem);
+
+	return SLURM_SUCCESS;
+}
+
+/**
+ * do_basil_confirm - confirm an existing BASIL reservation.
+ * This requires the alloc_sid to equal the session ID (getsid()) of the process
+ * executing the aprun/mpirun commands
+ * Returns: SLURM_SUCCESS if ok, READY_JOB_ERROR/FATAL on transient/fatal error.
+ */
+extern int do_basil_confirm(struct job_record *job_ptr)
+{
+	uint32_t resv_id;
+	uint64_t pagg_id;
+
+	if (_get_select_jobinfo(job_ptr->select_jobinfo->data,
+			SELECT_JOBDATA_RESV_ID, &resv_id) != SLURM_SUCCESS) {
+		error("can not read resId for JobId=%u", job_ptr->job_id);
+	} else if (resv_id == 0) {
+		/* On Cray XT/XE, a reservation ID of 0 is always invalid. */
+		error("JobId=%u has invalid (ZERO) resId", job_ptr->job_id);
+	} else if (_get_select_jobinfo(job_ptr->select_jobinfo->data,
+			SELECT_JOBDATA_PAGG_ID, &pagg_id) != SLURM_SUCCESS) {
+		error("can not read pagg ID for JobId=%u", job_ptr->job_id);
+	} else {
+		int rc;
+
+		if (pagg_id == 0) {
+#ifdef HAVE_REAL_CRAY
+			/* This fallback case is for interactive jobs only */
+			error("JobId %u has no pagg ID, falling back to SID",
+				job_ptr->job_id);
+#endif
+			/* No process aggregate ID: use the allocating
+			 * session ID instead (see function header). */
+			pagg_id = job_ptr->alloc_sid;
+		}
+
+		rc = basil_confirm(resv_id, job_ptr->job_id, pagg_id);
+		if (rc == 0) {
+			debug2("confirmed ALPS resId %u for JobId %u, pagg "
+			       "%"PRIu64"", resv_id, job_ptr->job_id, pagg_id);
+			return SLURM_SUCCESS;
+		} else if (rc == -BE_NO_RESID) {
+			/*
+			 * If ALPS can not find the reservation ID we are trying
+			 * to confirm, it may be that the job has already been
+			 * canceled, or that the reservation has timed out after
+			 * waiting for the confirmation.
+			 * It is more likely that this error occurs on a per-job
+			 * basis, hence in this case do not drain frontend node.
+			 */
+			error("JobId %u has invalid ALPS resId %u - job "
+			      "already canceled?", job_ptr->job_id, resv_id);
+			return SLURM_SUCCESS;
+		} else {
+			error("confirming ALPS resId %u of JobId %u FAILED: %s",
+				resv_id, job_ptr->job_id, basil_strerror(rc));
+
+			if (is_transient_error(rc))
+				return READY_JOB_ERROR;
+		}
+	}
+	/* Any fall-through above is treated as a non-transient failure */
+	return READY_JOB_FATAL;
+}
+
+/**
+ * do_basil_signal  -  pass job signal on to any APIDs
+ * IN job_ptr - job to be signalled
+ * IN signal  - signal(7) number
+ * Only signal job if an ALPS reservation exists (non-0 reservation ID).
+ * RET always SLURM_SUCCESS: signal delivery is best-effort, failures are
+ *     only logged.
+ */
+extern int do_basil_signal(struct job_record *job_ptr, int signal)
+{
+	uint32_t resv_id;
+
+	if (_get_select_jobinfo(job_ptr->select_jobinfo->data,
+			SELECT_JOBDATA_RESV_ID, &resv_id) != SLURM_SUCCESS) {
+		error("can not read resId for JobId=%u", job_ptr->job_id);
+	} else if (resv_id != 0) {
+		int rc = basil_signal_apids(resv_id, signal, NULL);
+
+		if (rc)
+			error("could not signal APIDs of resId %u: %s", resv_id,
+				basil_strerror(rc));
+	}
+	return SLURM_SUCCESS;
+}
+
+/**
+ * do_basil_release - release an (unconfirmed) BASIL reservation
+ * IN job_ptr - pointer to job which has just been deallocated resources
+ * RET always SLURM_SUCCESS; errors are only logged (see note below)
+ */
+extern int do_basil_release(struct job_record *job_ptr)
+{
+	uint32_t resv_id;
+
+	if (_get_select_jobinfo(job_ptr->select_jobinfo->data,
+			SELECT_JOBDATA_RESV_ID, &resv_id) != SLURM_SUCCESS) {
+		error("can not read resId for JobId=%u", job_ptr->job_id);
+	} else if (resv_id && basil_release(resv_id) == 0) {
+		/* The resv_id is non-zero only if the job is or was running. */
+		debug("released ALPS resId %u for JobId %u",
+		      resv_id, job_ptr->job_id);
+	}
+	/*
+	 * Error handling: we only print out the errors (basil_release does this
+	 * internally), but do not signal error to select_g_job_fini(). Calling
+	 * contexts of this function (deallocate_nodes, batch_finish) only print
+	 * additional error text: no further action is taken at this stage.
+	 */
+	return SLURM_SUCCESS;
+}
+
+/**
+ * do_basil_switch - suspend/resume BASIL reservation
+ * IN job_ptr - pointer to job to be suspended or resumed
+ * IN suspend - true to suspend the reservation, false to resume it
+ * RET always SLURM_SUCCESS; a failing basil_switch() is not reported to
+ *     the caller (only the success path is logged here)
+ */
+extern int do_basil_switch(struct job_record *job_ptr, bool suspend)
+{
+	uint32_t resv_id;
+
+	if (_get_select_jobinfo(job_ptr->select_jobinfo->data,
+			SELECT_JOBDATA_RESV_ID, &resv_id) != SLURM_SUCCESS) {
+		error("can not read resId for JobId=%u", job_ptr->job_id);
+	} else if (resv_id && basil_switch(resv_id, suspend) == 0) {
+		/* The resv_id is non-zero only if the job is or was running. */
+		debug("%s ALPS resId %u for JobId %u",
+		      suspend ? "Suspended" : "Resumed",
+		      resv_id, job_ptr->job_id);
+	}
+	return SLURM_SUCCESS;
+}
diff --git a/src/plugins/select/cray/basil_interface.h b/src/plugins/select/cray/basil_interface.h
new file mode 100644
index 000000000..f4df5f9d4
--- /dev/null
+++ b/src/plugins/select/cray/basil_interface.h
@@ -0,0 +1,105 @@
+/*
+ * Interface between lower-level ALPS XML-RPC library functions and SLURM.
+ *
+ * Copyright (c) 2010-11 Centro Svizzero di Calcolo Scientifico (CSCS)
+ * Licensed under GPLv2.
+ */
+#ifndef __CRAY_BASIL_INTERFACE_H
+#define __CRAY_BASIL_INTERFACE_H
+
+#if HAVE_CONFIG_H
+# include "config.h"
+#endif	/* HAVE_CONFIG_H */
+
+#include "slurm/slurm.h"
+#include "slurm/slurm_errno.h"
+
+#include "src/common/log.h"
+#include "src/common/uid.h"
+#include "src/common/xstring.h"
+#include "src/common/node_select.h"
+#include "src/slurmctld/slurmctld.h"
+
+extern int dim_size[3];
+
+/**
+ * struct select_jobinfo - data specific to Cray node selection plugin
+ * @magic:		magic number, must equal %JOBINFO_MAGIC
+ * @reservation_id:	ALPS reservation ID, assigned upon creation
+ * @confirm_cookie:	cluster-wide unique container identifier to
+ *			confirm the ALPS reservation. Should best use
+ *			SGI process aggregate IDs since session IDs
+ *			are not unique across multiple nodes.
+ * @other_jobinfo:	hook into attached, "other" node selection plugin.
+ */
+struct select_jobinfo {
+	uint16_t		magic;
+	uint32_t		reservation_id;
+	uint64_t		confirm_cookie;
+	select_jobinfo_t	*other_jobinfo;
+};
+/* Arbitrary sentinel value used to validate select_jobinfo pointers */
+#define JOBINFO_MAGIC		0x8cb3
+
+/**
+ * struct select_nodeinfo - data used for node information
+ * @magic:		magic number, must equal %NODEINFO_MAGIC
+ * @other_nodeinfo:	hook into attached, "other" node selection plugin.
+ */
+struct select_nodeinfo {
+	uint16_t		magic;
+	select_nodeinfo_t	*other_nodeinfo;
+};
+/* Arbitrary sentinel value used to validate select_nodeinfo pointers */
+#define NODEINFO_MAGIC		0x82a3
+
+#ifdef HAVE_CRAY
+/* Real implementations, only available when built for a Cray system */
+extern int basil_node_ranking(struct node_record *node_array, int node_cnt);
+extern int basil_inventory(void);
+extern int basil_geometry(struct node_record *node_ptr_array, int node_cnt);
+extern int do_basil_reserve(struct job_record *job_ptr);
+extern int do_basil_confirm(struct job_record *job_ptr);
+extern int do_basil_signal(struct job_record *job_ptr, int signal);
+extern int do_basil_release(struct job_record *job_ptr);
+extern int do_basil_switch(struct job_record *job_ptr, bool suspend);
+#else	/* !HAVE_CRAY */
+/* No-op stubs returning SLURM_SUCCESS so that callers need no
+ * HAVE_CRAY conditionals of their own on non-Cray builds */
+static inline int basil_node_ranking(struct node_record *ig, int nore)
+{
+	return SLURM_SUCCESS;
+}
+
+static inline int basil_inventory(void)
+{
+	return SLURM_SUCCESS;
+}
+
+static inline int basil_geometry(struct node_record *ig, int nore)
+{
+	return SLURM_SUCCESS;
+}
+
+static inline int do_basil_reserve(struct job_record *job_ptr)
+{
+	return SLURM_SUCCESS;
+}
+
+static inline int do_basil_confirm(struct job_record *job_ptr)
+{
+	return SLURM_SUCCESS;
+}
+
+static inline int do_basil_signal(struct job_record *job_ptr, int signal)
+{
+	return SLURM_SUCCESS;
+}
+
+static inline int do_basil_release(struct job_record *job_ptr)
+{
+	return SLURM_SUCCESS;
+}
+
+static inline int do_basil_switch(struct job_record *job_ptr, bool suspend)
+{
+	return SLURM_SUCCESS;
+}
+
+#endif	/* HAVE_CRAY */
+#endif	/* __CRAY_BASIL_INTERFACE_H */
diff --git a/src/plugins/select/cray/cray_config.c b/src/plugins/select/cray/cray_config.c
new file mode 100644
index 000000000..c11a14ca5
--- /dev/null
+++ b/src/plugins/select/cray/cray_config.c
@@ -0,0 +1,192 @@
+/*****************************************************************************\
+ *  cray_config.c
+ *
+ *****************************************************************************
+ *  Copyright (C) 2011 SchedMD LLC <http://www.schedmd.com>.
+ *  Supported by the Oak Ridge National Laboratory Extreme Scale Systems Center
+ *  Written by Danny Auble <da@schedmd.com>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#include "cray_config.h"
+
+#include "src/common/slurm_xlator.h"	/* Must be first */
+#include "src/common/read_config.h"
+#include "src/common/parse_spec.h"
+#include "src/common/xstring.h"
+#include "src/common/xmalloc.h"
+
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+cray_config_t *cray_conf = NULL;
+
+/* Keywords recognized in cray.conf (parsed via the s_p_* config API).
+ * Keys left unset fall back to the DEFAULT_* values in create_config(). */
+s_p_options_t cray_conf_file_options[] = {
+	{"AlpsDir", S_P_STRING},	/* Vestigial option */
+	{"apbasil", S_P_STRING},
+	{"apkill", S_P_STRING},
+	{"SDBdb", S_P_STRING},
+	{"SDBhost", S_P_STRING},
+	{"SDBpass", S_P_STRING},
+	{"SDBport", S_P_UINT32},
+	{"SDBuser", S_P_STRING},
+	{"SyncTimeout", S_P_UINT32},
+	{NULL}
+};
+
+/* Return the path of cray.conf as an xmalloc'd string the caller must
+ * xfree(). If $SLURM_CONF is unset, the compiled-in CRAY_CONFIG_FILE is
+ * returned; otherwise the base name of $SLURM_CONF is replaced with
+ * "cray.conf". */
+static char *_get_cray_conf(void)
+{
+	char *val = getenv("SLURM_CONF");
+	char *rc = NULL;
+	int i;
+
+	if (!val)
+		return xstrdup(CRAY_CONFIG_FILE);
+
+	/* Replace file name on end of path.
+	 * Allocate room for a full copy of @val plus "cray.conf" and NUL:
+	 * the previous computation assumed the path ended in "slurm.conf"
+	 * and under-allocated by one byte (heap overflow in the strcpy()
+	 * below), or by more if $SLURM_CONF had a shorter base name. */
+	i = strlen(val) + strlen("cray.conf") + 1;
+	rc = xmalloc(i);
+	strcpy(rc, val);
+	val = strrchr(rc, (int)'/');
+	if (val)	/* absolute path */
+		val++;
+	else		/* not absolute path */
+		val = rc;
+	strcpy(val, "cray.conf");
+	return rc;
+}
+
+/* Populate the global cray_conf from cray.conf (or built-in defaults when
+ * the file does not exist). Returns SLURM_ERROR if cray_conf is already
+ * allocated, SLURM_SUCCESS otherwise. Config changes after the first call
+ * only produce a "restart slurmctld" notice. */
+extern int create_config(void)
+{
+	int rc = SLURM_SUCCESS;
+	char* cray_conf_file = NULL;
+	static time_t last_config_update = (time_t) 0;
+	struct stat config_stat;
+	s_p_hashtbl_t *tbl = NULL;
+
+	if (cray_conf)
+		return SLURM_ERROR;
+
+	cray_conf = xmalloc(sizeof(cray_config_t));
+
+	cray_conf_file = _get_cray_conf();
+
+	/* No cray.conf: fall back to compiled-in defaults */
+	if (stat(cray_conf_file, &config_stat) < 0) {
+		cray_conf->apbasil  = xstrdup(DEFAULT_APBASIL);
+		cray_conf->apkill   = xstrdup(DEFAULT_APKILL);
+		cray_conf->sdb_db   = xstrdup(DEFAULT_CRAY_SDB_DB);
+		cray_conf->sdb_host = xstrdup(DEFAULT_CRAY_SDB_HOST);
+		cray_conf->sdb_pass = xstrdup(DEFAULT_CRAY_SDB_PASS);
+		cray_conf->sdb_port = DEFAULT_CRAY_SDB_PORT;
+		cray_conf->sdb_user = xstrdup(DEFAULT_CRAY_SDB_USER);
+		cray_conf->sync_timeout = DEFAULT_CRAY_SYNC_TIMEOUT;
+		xfree(cray_conf_file);
+		goto end_it;
+	}
+	/* NOTE(review): cray_conf->slurm_debug_flags is only assigned at
+	 * end_it below, so on the first pass it is still the freshly
+	 * allocated value here and this message cannot print -- confirm
+	 * whether slurmctld_conf.debug_flags was intended instead. */
+	if (cray_conf->slurm_debug_flags & DEBUG_FLAG_SELECT_TYPE)
+		info("Reading the cray.conf file");
+	
+	if (last_config_update) {
+		if (last_config_update == config_stat.st_mtime) {
+			if (cray_conf->slurm_debug_flags
+			    & DEBUG_FLAG_SELECT_TYPE)
+				info("%s unchanged", cray_conf_file);
+		} else {
+			info("Restart slurmctld for %s changes "
+			     "to take effect",
+			     cray_conf_file);
+		}
+		last_config_update = config_stat.st_mtime;
+		xfree(cray_conf_file);
+		return SLURM_SUCCESS;
+	}
+
+	tbl = s_p_hashtbl_create(cray_conf_file_options);
+
+	if (s_p_parse_file(tbl, NULL, cray_conf_file, false) == SLURM_ERROR)
+		fatal("something wrong with opening/reading cray "
+		      "conf file");
+	xfree(cray_conf_file);
+
+	/* Each keyword falls back to its DEFAULT_* value when unset */
+	if (!s_p_get_string(&cray_conf->apbasil, "apbasil", tbl))
+		cray_conf->apbasil = xstrdup(DEFAULT_APBASIL);
+	if (!s_p_get_string(&cray_conf->apkill, "apkill", tbl))
+		cray_conf->apkill = xstrdup(DEFAULT_APKILL);
+
+	if (!s_p_get_string(&cray_conf->sdb_db, "SDBdb", tbl))
+		cray_conf->sdb_db = xstrdup(DEFAULT_CRAY_SDB_DB);
+	if (!s_p_get_string(&cray_conf->sdb_host, "SDBhost", tbl))
+		cray_conf->sdb_host = xstrdup(DEFAULT_CRAY_SDB_HOST);
+	if (!s_p_get_string(&cray_conf->sdb_pass, "SDBpass", tbl))
+		cray_conf->sdb_pass = xstrdup(DEFAULT_CRAY_SDB_PASS);
+	if (!s_p_get_uint32(&cray_conf->sdb_port, "SDBport", tbl))
+		cray_conf->sdb_port = DEFAULT_CRAY_SDB_PORT;
+	if (!s_p_get_string(&cray_conf->sdb_user, "SDBuser", tbl))
+		cray_conf->sdb_user = xstrdup(DEFAULT_CRAY_SDB_USER);
+	if (!s_p_get_uint32(&cray_conf->sync_timeout, "SyncTimeout", tbl))
+		cray_conf->sync_timeout = DEFAULT_CRAY_SYNC_TIMEOUT;
+
+	s_p_hashtbl_destroy(tbl);
+end_it:
+	cray_conf->slurm_debug_flags = slurmctld_conf.debug_flags;
+
+#if 0
+	info("Cray conf is...");
+	info("\tapbasil=\t%s", cray_conf->apbasil);
+	info("\tapkill=\t\t%s", cray_conf->apkill);
+	info("\tSDBdb=\t\t%s", cray_conf->sdb_db);
+	info("\tSDBhost=\t%s", cray_conf->sdb_host);
+	info("\tSDBpass=\t%s", cray_conf->sdb_pass);
+	info("\tSDBport=\t%u", cray_conf->sdb_port);
+	info("\tSDBuser=\t%s", cray_conf->sdb_user);
+	info("\tSyncTimeout=\t%u", cray_conf->sync_timeout);
+#endif
+	return rc;
+}
+
+/* Release all memory held by the global cray_conf. Safe to call when
+ * cray_conf is NULL (i.e. create_config() was never run).
+ * Always returns SLURM_SUCCESS. */
+extern int destroy_config(void)
+{
+	int rc = SLURM_SUCCESS;
+
+	if (cray_conf) {
+		xfree(cray_conf->apbasil);
+		xfree(cray_conf->apkill);
+		xfree(cray_conf->sdb_db);
+		xfree(cray_conf->sdb_host);
+		xfree(cray_conf->sdb_pass);
+		xfree(cray_conf->sdb_user);
+		xfree(cray_conf);
+	}
+
+	return rc;
+}
diff --git a/src/plugins/select/cray/cray_config.h b/src/plugins/select/cray/cray_config.h
new file mode 100644
index 000000000..bc474cc2d
--- /dev/null
+++ b/src/plugins/select/cray/cray_config.h
@@ -0,0 +1,97 @@
+/*****************************************************************************\
+ *  cray_config.h
+ *
+ *****************************************************************************
+ *  Copyright (C) 2011 SchedMD LLC <http://www.schedmd.com>.
+ *  Supported by the Oak Ridge National Laboratory Extreme Scale Systems Center
+ *  Written by Danny Auble <da@schedmd.com>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifndef _CRAY_CONFIG_H_
+#define _CRAY_CONFIG_H_
+
+#if HAVE_CONFIG_H
+#  include "config.h"
+#endif
+
+#include "slurm/slurm.h"
+
+/* Location of ALPS apbasil executable (supported on XT/XE CNL) */
+#define DEFAULT_APBASIL           "/usr/bin/apbasil"
+/* Location of ALPS apkill executable (supported on XT/XE CNL) */
+#define DEFAULT_APKILL            "/usr/bin/apkill"
+/* database name to use  */
+#define DEFAULT_CRAY_SDB_DB       "XTAdmin"
+/* DNS name of SDB host */
+#define DEFAULT_CRAY_SDB_HOST     "sdb"
+/* If NULL, use value from my.cnf */
+#define DEFAULT_CRAY_SDB_PASS     NULL
+/* If NULL, use value from my.cnf */
+#define DEFAULT_CRAY_SDB_PORT     0
+/* If NULL, use value from my.cnf */
+#define DEFAULT_CRAY_SDB_USER     NULL
+/* Default maximum delay for ALPS and SLURM to synchronize. Do not schedule
+ * jobs while out of sync until this time is reached (seconds) */
+#define DEFAULT_CRAY_SYNC_TIMEOUT 3600
+
+/**
+ * cray_config_t - Parsed representation of cray.conf
+ * @apbasil:	full path to ALPS 'apbasil' executable
+ * @apkill:	full path to ALPS 'apkill' executable
+ * @sdb_host:	DNS name of SDB host
+ * @sdb_db:	SDB database name to use (default XTAdmin)
+ * @sdb_user:	SDB database username
+ * @sdb_pass:	SDB database password
+ * @sdb_port:	port number of SDB host
+ * @slurm_debug_flags: see code for details
+ * @sync_timeout: seconds to wait for ALPS and SLURM to sync without scheduling
+ *                jobs
+ */
+typedef struct {
+	char		*apbasil;
+	char		*apkill;
+
+	char		*sdb_host;
+	char		*sdb_db;
+	char		*sdb_user;
+	char		*sdb_pass;
+	uint32_t	sdb_port;
+	uint32_t	slurm_debug_flags;
+	uint32_t	sync_timeout;
+} cray_config_t;
+
+extern cray_config_t *cray_conf;
+
+extern int create_config(void);
+extern int destroy_config(void);
+
+#endif
diff --git a/src/plugins/select/cray/libalps/Makefile.am b/src/plugins/select/cray/libalps/Makefile.am
new file mode 100644
index 000000000..1f73a513b
--- /dev/null
+++ b/src/plugins/select/cray/libalps/Makefile.am
@@ -0,0 +1,29 @@
+# Makefile for building Cray/Basil XML-RPC low-level interface
+
+AUTOMAKE_OPTIONS = foreign
+
+INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common -I. -I../
+
+noinst_LTLIBRARIES = libalps.la
+
+libalps_la_SOURCES =		\
+	../basil_alps.h		\
+	parser_internal.h	\
+	basil_mysql_routines.c	\
+	parser_common.c		\
+	parser_basil_1.0.c	\
+	parser_basil_1.1.c	\
+	parser_basil_3.1.c	\
+	parser_basil_4.0.c	\
+	basil_request.c		\
+	do_query.c		\
+	do_reserve.c		\
+	do_release.c		\
+	do_confirm.c		\
+	do_switch.c		\
+	memory_handling.c	\
+	popen2.c		\
+	atoul.c
+libalps_la_CFLAGS  = $(MYSQL_CFLAGS)
+libalps_la_LIBADD  = $(MYSQL_LIBS) -lexpat
+libalps_la_LDFLAGS = $(LIB_LDFLAGS) -module --export-dynamic
diff --git a/src/plugins/select/cray/libalps/Makefile.in b/src/plugins/select/cray/libalps/Makefile.in
new file mode 100644
index 000000000..312b6181d
--- /dev/null
+++ b/src/plugins/select/cray/libalps/Makefile.in
@@ -0,0 +1,741 @@
+# Makefile.in generated by automake 1.11.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+# 2003, 2004, 2005, 2006, 2007, 2008, 2009  Free Software Foundation,
+# Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# Makefile for building Cray/Basil XML-RPC low-level interface
+
+VPATH = @srcdir@
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+target_triplet = @target@
+subdir = src/plugins/select/cray/libalps
+DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
+	$(top_srcdir)/auxdir/libtool.m4 \
+	$(top_srcdir)/auxdir/ltoptions.m4 \
+	$(top_srcdir)/auxdir/ltsugar.m4 \
+	$(top_srcdir)/auxdir/ltversion.m4 \
+	$(top_srcdir)/auxdir/lt~obsolete.m4 \
+	$(top_srcdir)/auxdir/slurm.m4 \
+	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
+	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
+	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
+	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
+	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_databases.m4 \
+	$(top_srcdir)/auxdir/x_ac_debug.m4 \
+	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
+	$(top_srcdir)/auxdir/x_ac_federation.m4 \
+	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
+	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
+	$(top_srcdir)/auxdir/x_ac_munge.m4 \
+	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_pam.m4 \
+	$(top_srcdir)/auxdir/x_ac_printf_null.m4 \
+	$(top_srcdir)/auxdir/x_ac_ptrace.m4 \
+	$(top_srcdir)/auxdir/x_ac_readline.m4 \
+	$(top_srcdir)/auxdir/x_ac_setpgrp.m4 \
+	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
+	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
+	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
+	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
+	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+	$(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+LTLIBRARIES = $(noinst_LTLIBRARIES)
+am__DEPENDENCIES_1 =
+libalps_la_DEPENDENCIES = $(am__DEPENDENCIES_1)
+am_libalps_la_OBJECTS = libalps_la-basil_mysql_routines.lo \
+	libalps_la-parser_common.lo libalps_la-parser_basil_1.0.lo \
+	libalps_la-parser_basil_1.1.lo libalps_la-parser_basil_3.1.lo \
+	libalps_la-parser_basil_4.0.lo libalps_la-basil_request.lo \
+	libalps_la-do_query.lo libalps_la-do_reserve.lo \
+	libalps_la-do_release.lo libalps_la-do_confirm.lo \
+	libalps_la-do_switch.lo libalps_la-memory_handling.lo \
+	libalps_la-popen2.lo libalps_la-atoul.lo
+libalps_la_OBJECTS = $(am_libalps_la_OBJECTS)
+libalps_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
+	$(LIBTOOLFLAGS) --mode=link $(CCLD) $(libalps_la_CFLAGS) \
+	$(CFLAGS) $(libalps_la_LDFLAGS) $(LDFLAGS) -o $@
+DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm
+depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp
+am__depfiles_maybe = depfiles
+am__mv = mv -f
+COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
+	$(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+	--mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+	$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+CCLD = $(CC)
+LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+	--mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \
+	$(LDFLAGS) -o $@
+SOURCES = $(libalps_la_SOURCES)
+DIST_SOURCES = $(libalps_la_SOURCES)
+ETAGS = etags
+CTAGS = ctags
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AR = @AR@
+AUTHD_CFLAGS = @AUTHD_CFLAGS@
+AUTHD_LIBS = @AUTHD_LIBS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
+BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
+BLUEGENE_LOADED = @BLUEGENE_LOADED@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CMD_LDFLAGS = @CMD_LDFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DSYMUTIL = @DSYMUTIL@
+DUMPBIN = @DUMPBIN@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+ELAN_LIBS = @ELAN_LIBS@
+EXEEXT = @EXEEXT@
+FEDERATION_LDFLAGS = @FEDERATION_LDFLAGS@
+FGREP = @FGREP@
+GREP = @GREP@
+GTK_CFLAGS = @GTK_CFLAGS@
+GTK_LIBS = @GTK_LIBS@
+HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@
+HAVEPGCONFIG = @HAVEPGCONFIG@
+HAVE_AIX = @HAVE_AIX@
+HAVE_ELAN = @HAVE_ELAN@
+HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
+HAVE_OPENSSL = @HAVE_OPENSSL@
+HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
+HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
+HWLOC_LDFLAGS = @HWLOC_LDFLAGS@
+HWLOC_LIBS = @HWLOC_LIBS@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+LD = @LD@
+LDFLAGS = @LDFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIB_LDFLAGS = @LIB_LDFLAGS@
+LIPO = @LIPO@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MKDIR_P = @MKDIR_P@
+MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@
+MUNGE_LDFLAGS = @MUNGE_LDFLAGS@
+MUNGE_LIBS = @MUNGE_LIBS@
+MYSQL_CFLAGS = @MYSQL_CFLAGS@
+MYSQL_LIBS = @MYSQL_LIBS@
+NCURSES = @NCURSES@
+NM = @NM@
+NMEDIT = @NMEDIT@
+NUMA_LIBS = @NUMA_LIBS@
+OBJDUMP = @OBJDUMP@
+OBJEXT = @OBJEXT@
+OTOOL = @OTOOL@
+OTOOL64 = @OTOOL64@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PAM_DIR = @PAM_DIR@
+PAM_LIBS = @PAM_LIBS@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PGSQL_CFLAGS = @PGSQL_CFLAGS@
+PGSQL_LIBS = @PGSQL_LIBS@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROCTRACKDIR = @PROCTRACKDIR@
+PROJECT = @PROJECT@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+RANLIB = @RANLIB@
+READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
+RELEASE = @RELEASE@
+SED = @SED@
+SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
+SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SLURMCTLD_PORT = @SLURMCTLD_PORT@
+SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
+SLURMDBD_PORT = @SLURMDBD_PORT@
+SLURMD_PORT = @SLURMD_PORT@
+SLURM_API_AGE = @SLURM_API_AGE@
+SLURM_API_CURRENT = @SLURM_API_CURRENT@
+SLURM_API_MAJOR = @SLURM_API_MAJOR@
+SLURM_API_REVISION = @SLURM_API_REVISION@
+SLURM_API_VERSION = @SLURM_API_VERSION@
+SLURM_MAJOR = @SLURM_MAJOR@
+SLURM_MICRO = @SLURM_MICRO@
+SLURM_MINOR = @SLURM_MINOR@
+SLURM_PREFIX = @SLURM_PREFIX@
+SLURM_VERSION_NUMBER = @SLURM_VERSION_NUMBER@
+SLURM_VERSION_STRING = @SLURM_VERSION_STRING@
+SO_LDFLAGS = @SO_LDFLAGS@
+SSL_CPPFLAGS = @SSL_CPPFLAGS@
+SSL_LDFLAGS = @SSL_LDFLAGS@
+SSL_LIBS = @SSL_LIBS@
+STRIP = @STRIP@
+UTIL_LIBS = @UTIL_LIBS@
+VERSION = @VERSION@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+lt_ECHO = @lt_ECHO@
+lua_CFLAGS = @lua_CFLAGS@
+lua_LIBS = @lua_LIBS@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target = @target@
+target_alias = @target_alias@
+target_cpu = @target_cpu@
+target_os = @target_os@
+target_vendor = @target_vendor@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+AUTOMAKE_OPTIONS = foreign
+INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common -I. -I../
+noinst_LTLIBRARIES = libalps.la
+libalps_la_SOURCES = \
+	../basil_alps.h		\
+	parser_internal.h	\
+	basil_mysql_routines.c	\
+	parser_common.c		\
+	parser_basil_1.0.c	\
+	parser_basil_1.1.c	\
+	parser_basil_3.1.c	\
+	parser_basil_4.0.c	\
+	basil_request.c		\
+	do_query.c		\
+	do_reserve.c		\
+	do_release.c		\
+	do_confirm.c		\
+	do_switch.c		\
+	memory_handling.c	\
+	popen2.c		\
+	atoul.c
+
+libalps_la_CFLAGS = $(MYSQL_CFLAGS)
+libalps_la_LIBADD = $(MYSQL_LIBS) -lexpat
+libalps_la_LDFLAGS = $(LIB_LDFLAGS) -module --export-dynamic
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .c .lo .o .obj
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am  $(am__configure_deps)
+	@for dep in $?; do \
+	  case '$(am__configure_deps)' in \
+	    *$$dep*) \
+	      ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+	        && { if test -f $@; then exit 0; else break; fi; }; \
+	      exit 1;; \
+	  esac; \
+	done; \
+	echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/plugins/select/cray/libalps/Makefile'; \
+	$(am__cd) $(top_srcdir) && \
+	  $(AUTOMAKE) --foreign src/plugins/select/cray/libalps/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+	@case '$?' in \
+	  *config.status*) \
+	    cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+	  *) \
+	    echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+	    cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+	esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+clean-noinstLTLIBRARIES:
+	-test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES)
+	@list='$(noinst_LTLIBRARIES)'; for p in $$list; do \
+	  dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \
+	  test "$$dir" != "$$p" || dir=.; \
+	  echo "rm -f \"$${dir}/so_locations\""; \
+	  rm -f "$${dir}/so_locations"; \
+	done
+libalps.la: $(libalps_la_OBJECTS) $(libalps_la_DEPENDENCIES) 
+	$(libalps_la_LINK)  $(libalps_la_OBJECTS) $(libalps_la_LIBADD) $(LIBS)
+
+mostlyclean-compile:
+	-rm -f *.$(OBJEXT)
+
+distclean-compile:
+	-rm -f *.tab.c
+
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libalps_la-atoul.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libalps_la-basil_mysql_routines.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libalps_la-basil_request.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libalps_la-do_confirm.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libalps_la-do_query.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libalps_la-do_release.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libalps_la-do_reserve.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libalps_la-do_switch.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libalps_la-memory_handling.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libalps_la-parser_basil_1.0.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libalps_la-parser_basil_1.1.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libalps_la-parser_basil_3.1.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libalps_la-parser_basil_4.0.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libalps_la-parser_common.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libalps_la-popen2.Plo@am__quote@
+
+.c.o:
+@am__fastdepCC_TRUE@	$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(COMPILE) -c $<
+
+.c.obj:
+@am__fastdepCC_TRUE@	$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(COMPILE) -c `$(CYGPATH_W) '$<'`
+
+.c.lo:
+@am__fastdepCC_TRUE@	$(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LTCOMPILE) -c -o $@ $<
+
+libalps_la-basil_mysql_routines.lo: basil_mysql_routines.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libalps_la_CFLAGS) $(CFLAGS) -MT libalps_la-basil_mysql_routines.lo -MD -MP -MF $(DEPDIR)/libalps_la-basil_mysql_routines.Tpo -c -o libalps_la-basil_mysql_routines.lo `test -f 'basil_mysql_routines.c' || echo '$(srcdir)/'`basil_mysql_routines.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/libalps_la-basil_mysql_routines.Tpo $(DEPDIR)/libalps_la-basil_mysql_routines.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='basil_mysql_routines.c' object='libalps_la-basil_mysql_routines.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libalps_la_CFLAGS) $(CFLAGS) -c -o libalps_la-basil_mysql_routines.lo `test -f 'basil_mysql_routines.c' || echo '$(srcdir)/'`basil_mysql_routines.c
+
+libalps_la-parser_common.lo: parser_common.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libalps_la_CFLAGS) $(CFLAGS) -MT libalps_la-parser_common.lo -MD -MP -MF $(DEPDIR)/libalps_la-parser_common.Tpo -c -o libalps_la-parser_common.lo `test -f 'parser_common.c' || echo '$(srcdir)/'`parser_common.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/libalps_la-parser_common.Tpo $(DEPDIR)/libalps_la-parser_common.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='parser_common.c' object='libalps_la-parser_common.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libalps_la_CFLAGS) $(CFLAGS) -c -o libalps_la-parser_common.lo `test -f 'parser_common.c' || echo '$(srcdir)/'`parser_common.c
+
+libalps_la-parser_basil_1.0.lo: parser_basil_1.0.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libalps_la_CFLAGS) $(CFLAGS) -MT libalps_la-parser_basil_1.0.lo -MD -MP -MF $(DEPDIR)/libalps_la-parser_basil_1.0.Tpo -c -o libalps_la-parser_basil_1.0.lo `test -f 'parser_basil_1.0.c' || echo '$(srcdir)/'`parser_basil_1.0.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/libalps_la-parser_basil_1.0.Tpo $(DEPDIR)/libalps_la-parser_basil_1.0.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='parser_basil_1.0.c' object='libalps_la-parser_basil_1.0.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libalps_la_CFLAGS) $(CFLAGS) -c -o libalps_la-parser_basil_1.0.lo `test -f 'parser_basil_1.0.c' || echo '$(srcdir)/'`parser_basil_1.0.c
+
+libalps_la-parser_basil_1.1.lo: parser_basil_1.1.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libalps_la_CFLAGS) $(CFLAGS) -MT libalps_la-parser_basil_1.1.lo -MD -MP -MF $(DEPDIR)/libalps_la-parser_basil_1.1.Tpo -c -o libalps_la-parser_basil_1.1.lo `test -f 'parser_basil_1.1.c' || echo '$(srcdir)/'`parser_basil_1.1.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/libalps_la-parser_basil_1.1.Tpo $(DEPDIR)/libalps_la-parser_basil_1.1.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='parser_basil_1.1.c' object='libalps_la-parser_basil_1.1.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libalps_la_CFLAGS) $(CFLAGS) -c -o libalps_la-parser_basil_1.1.lo `test -f 'parser_basil_1.1.c' || echo '$(srcdir)/'`parser_basil_1.1.c
+
+libalps_la-parser_basil_3.1.lo: parser_basil_3.1.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libalps_la_CFLAGS) $(CFLAGS) -MT libalps_la-parser_basil_3.1.lo -MD -MP -MF $(DEPDIR)/libalps_la-parser_basil_3.1.Tpo -c -o libalps_la-parser_basil_3.1.lo `test -f 'parser_basil_3.1.c' || echo '$(srcdir)/'`parser_basil_3.1.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/libalps_la-parser_basil_3.1.Tpo $(DEPDIR)/libalps_la-parser_basil_3.1.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='parser_basil_3.1.c' object='libalps_la-parser_basil_3.1.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libalps_la_CFLAGS) $(CFLAGS) -c -o libalps_la-parser_basil_3.1.lo `test -f 'parser_basil_3.1.c' || echo '$(srcdir)/'`parser_basil_3.1.c
+
+libalps_la-parser_basil_4.0.lo: parser_basil_4.0.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libalps_la_CFLAGS) $(CFLAGS) -MT libalps_la-parser_basil_4.0.lo -MD -MP -MF $(DEPDIR)/libalps_la-parser_basil_4.0.Tpo -c -o libalps_la-parser_basil_4.0.lo `test -f 'parser_basil_4.0.c' || echo '$(srcdir)/'`parser_basil_4.0.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/libalps_la-parser_basil_4.0.Tpo $(DEPDIR)/libalps_la-parser_basil_4.0.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='parser_basil_4.0.c' object='libalps_la-parser_basil_4.0.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libalps_la_CFLAGS) $(CFLAGS) -c -o libalps_la-parser_basil_4.0.lo `test -f 'parser_basil_4.0.c' || echo '$(srcdir)/'`parser_basil_4.0.c
+
+libalps_la-basil_request.lo: basil_request.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libalps_la_CFLAGS) $(CFLAGS) -MT libalps_la-basil_request.lo -MD -MP -MF $(DEPDIR)/libalps_la-basil_request.Tpo -c -o libalps_la-basil_request.lo `test -f 'basil_request.c' || echo '$(srcdir)/'`basil_request.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/libalps_la-basil_request.Tpo $(DEPDIR)/libalps_la-basil_request.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='basil_request.c' object='libalps_la-basil_request.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libalps_la_CFLAGS) $(CFLAGS) -c -o libalps_la-basil_request.lo `test -f 'basil_request.c' || echo '$(srcdir)/'`basil_request.c
+
+libalps_la-do_query.lo: do_query.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libalps_la_CFLAGS) $(CFLAGS) -MT libalps_la-do_query.lo -MD -MP -MF $(DEPDIR)/libalps_la-do_query.Tpo -c -o libalps_la-do_query.lo `test -f 'do_query.c' || echo '$(srcdir)/'`do_query.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/libalps_la-do_query.Tpo $(DEPDIR)/libalps_la-do_query.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='do_query.c' object='libalps_la-do_query.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libalps_la_CFLAGS) $(CFLAGS) -c -o libalps_la-do_query.lo `test -f 'do_query.c' || echo '$(srcdir)/'`do_query.c
+
+libalps_la-do_reserve.lo: do_reserve.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libalps_la_CFLAGS) $(CFLAGS) -MT libalps_la-do_reserve.lo -MD -MP -MF $(DEPDIR)/libalps_la-do_reserve.Tpo -c -o libalps_la-do_reserve.lo `test -f 'do_reserve.c' || echo '$(srcdir)/'`do_reserve.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/libalps_la-do_reserve.Tpo $(DEPDIR)/libalps_la-do_reserve.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='do_reserve.c' object='libalps_la-do_reserve.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libalps_la_CFLAGS) $(CFLAGS) -c -o libalps_la-do_reserve.lo `test -f 'do_reserve.c' || echo '$(srcdir)/'`do_reserve.c
+
+libalps_la-do_release.lo: do_release.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libalps_la_CFLAGS) $(CFLAGS) -MT libalps_la-do_release.lo -MD -MP -MF $(DEPDIR)/libalps_la-do_release.Tpo -c -o libalps_la-do_release.lo `test -f 'do_release.c' || echo '$(srcdir)/'`do_release.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/libalps_la-do_release.Tpo $(DEPDIR)/libalps_la-do_release.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='do_release.c' object='libalps_la-do_release.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libalps_la_CFLAGS) $(CFLAGS) -c -o libalps_la-do_release.lo `test -f 'do_release.c' || echo '$(srcdir)/'`do_release.c
+
+libalps_la-do_confirm.lo: do_confirm.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libalps_la_CFLAGS) $(CFLAGS) -MT libalps_la-do_confirm.lo -MD -MP -MF $(DEPDIR)/libalps_la-do_confirm.Tpo -c -o libalps_la-do_confirm.lo `test -f 'do_confirm.c' || echo '$(srcdir)/'`do_confirm.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/libalps_la-do_confirm.Tpo $(DEPDIR)/libalps_la-do_confirm.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='do_confirm.c' object='libalps_la-do_confirm.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libalps_la_CFLAGS) $(CFLAGS) -c -o libalps_la-do_confirm.lo `test -f 'do_confirm.c' || echo '$(srcdir)/'`do_confirm.c
+
+libalps_la-do_switch.lo: do_switch.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libalps_la_CFLAGS) $(CFLAGS) -MT libalps_la-do_switch.lo -MD -MP -MF $(DEPDIR)/libalps_la-do_switch.Tpo -c -o libalps_la-do_switch.lo `test -f 'do_switch.c' || echo '$(srcdir)/'`do_switch.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/libalps_la-do_switch.Tpo $(DEPDIR)/libalps_la-do_switch.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='do_switch.c' object='libalps_la-do_switch.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libalps_la_CFLAGS) $(CFLAGS) -c -o libalps_la-do_switch.lo `test -f 'do_switch.c' || echo '$(srcdir)/'`do_switch.c
+
+libalps_la-memory_handling.lo: memory_handling.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libalps_la_CFLAGS) $(CFLAGS) -MT libalps_la-memory_handling.lo -MD -MP -MF $(DEPDIR)/libalps_la-memory_handling.Tpo -c -o libalps_la-memory_handling.lo `test -f 'memory_handling.c' || echo '$(srcdir)/'`memory_handling.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/libalps_la-memory_handling.Tpo $(DEPDIR)/libalps_la-memory_handling.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='memory_handling.c' object='libalps_la-memory_handling.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libalps_la_CFLAGS) $(CFLAGS) -c -o libalps_la-memory_handling.lo `test -f 'memory_handling.c' || echo '$(srcdir)/'`memory_handling.c
+
+libalps_la-popen2.lo: popen2.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libalps_la_CFLAGS) $(CFLAGS) -MT libalps_la-popen2.lo -MD -MP -MF $(DEPDIR)/libalps_la-popen2.Tpo -c -o libalps_la-popen2.lo `test -f 'popen2.c' || echo '$(srcdir)/'`popen2.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/libalps_la-popen2.Tpo $(DEPDIR)/libalps_la-popen2.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='popen2.c' object='libalps_la-popen2.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libalps_la_CFLAGS) $(CFLAGS) -c -o libalps_la-popen2.lo `test -f 'popen2.c' || echo '$(srcdir)/'`popen2.c
+
+libalps_la-atoul.lo: atoul.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libalps_la_CFLAGS) $(CFLAGS) -MT libalps_la-atoul.lo -MD -MP -MF $(DEPDIR)/libalps_la-atoul.Tpo -c -o libalps_la-atoul.lo `test -f 'atoul.c' || echo '$(srcdir)/'`atoul.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/libalps_la-atoul.Tpo $(DEPDIR)/libalps_la-atoul.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='atoul.c' object='libalps_la-atoul.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libalps_la_CFLAGS) $(CFLAGS) -c -o libalps_la-atoul.lo `test -f 'atoul.c' || echo '$(srcdir)/'`atoul.c
+
+mostlyclean-libtool:
+	-rm -f *.lo
+
+clean-libtool:
+	-rm -rf .libs _libs
+
+ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
+	list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+	unique=`for i in $$list; do \
+	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+	  done | \
+	  $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+	      END { if (nonempty) { for (i in files) print i; }; }'`; \
+	mkid -fID $$unique
+tags: TAGS
+
+TAGS:  $(HEADERS) $(SOURCES)  $(TAGS_DEPENDENCIES) \
+		$(TAGS_FILES) $(LISP)
+	set x; \
+	here=`pwd`; \
+	list='$(SOURCES) $(HEADERS)  $(LISP) $(TAGS_FILES)'; \
+	unique=`for i in $$list; do \
+	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+	  done | \
+	  $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+	      END { if (nonempty) { for (i in files) print i; }; }'`; \
+	shift; \
+	if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+	  test -n "$$unique" || unique=$$empty_fix; \
+	  if test $$# -gt 0; then \
+	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	      "$$@" $$unique; \
+	  else \
+	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	      $$unique; \
+	  fi; \
+	fi
+ctags: CTAGS
+CTAGS:  $(HEADERS) $(SOURCES)  $(TAGS_DEPENDENCIES) \
+		$(TAGS_FILES) $(LISP)
+	list='$(SOURCES) $(HEADERS)  $(LISP) $(TAGS_FILES)'; \
+	unique=`for i in $$list; do \
+	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+	  done | \
+	  $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+	      END { if (nonempty) { for (i in files) print i; }; }'`; \
+	test -z "$(CTAGS_ARGS)$$unique" \
+	  || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+	     $$unique
+
+GTAGS:
+	here=`$(am__cd) $(top_builddir) && pwd` \
+	  && $(am__cd) $(top_srcdir) \
+	  && gtags -i $(GTAGS_ARGS) "$$here"
+
+distclean-tags:
+	-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+	@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	list='$(DISTFILES)'; \
+	  dist_files=`for file in $$list; do echo $$file; done | \
+	  sed -e "s|^$$srcdirstrip/||;t" \
+	      -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+	case $$dist_files in \
+	  */*) $(MKDIR_P) `echo "$$dist_files" | \
+			   sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+			   sort -u` ;; \
+	esac; \
+	for file in $$dist_files; do \
+	  if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+	  if test -d $$d/$$file; then \
+	    dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+	    if test -d "$(distdir)/$$file"; then \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+	      cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+	  else \
+	    test -f "$(distdir)/$$file" \
+	    || cp -p $$d/$$file "$(distdir)/$$file" \
+	    || exit 1; \
+	  fi; \
+	done
+check-am: all-am
+check: check-am
+all-am: Makefile $(LTLIBRARIES)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+	$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	  install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	  `test -z '$(STRIP)' || \
+	    echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+	-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+	@echo "This command is intended for maintainers to use"
+	@echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \
+	mostlyclean-am
+
+distclean: distclean-am
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+distclean-am: clean-am distclean-compile distclean-generic \
+	distclean-tags
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-compile mostlyclean-generic \
+	mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \
+	clean-libtool clean-noinstLTLIBRARIES ctags distclean \
+	distclean-compile distclean-generic distclean-libtool \
+	distclean-tags distdir dvi dvi-am html html-am info info-am \
+	install install-am install-data install-data-am install-dvi \
+	install-dvi-am install-exec install-exec-am install-html \
+	install-html-am install-info install-info-am install-man \
+	install-pdf install-pdf-am install-ps install-ps-am \
+	install-strip installcheck installcheck-am installdirs \
+	maintainer-clean maintainer-clean-generic mostlyclean \
+	mostlyclean-compile mostlyclean-generic mostlyclean-libtool \
+	pdf pdf-am ps ps-am tags uninstall uninstall-am
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/src/plugins/select/cray/libalps/atoul.c b/src/plugins/select/cray/libalps/atoul.c
new file mode 100644
index 000000000..4eb7a09e0
--- /dev/null
+++ b/src/plugins/select/cray/libalps/atoul.c
@@ -0,0 +1,54 @@
+/*
+ * String utilities
+ *
+ * Copyright (c) 2009-2011 Centro Svizzero di Calcolo Scientifico (CSCS)
+ * Licensed under the GPLv2.
+ */
+#define _ISOC99_SOURCE		/* for LLONG_{MIN,MAX} */
+#include <stdlib.h>
+#include <stdint.h>
+#include <limits.h>
+#include <sys/types.h>
+#include <errno.h>
+
+/**
+ * atou64 - Convert string into u64
+ * Returns 1 if ok, < 0 on error.
+ */
+int atou64(const char *str, uint64_t *val)
+{
+	char *endptr;
+
+	errno = 0;				/* strtol() manpage */
+	*val  = strtoull(str, &endptr, 0);
+	if ((errno == ERANGE && *val == ULLONG_MAX) ||
+	    (errno != 0 && *val == 0)	||	/* other error */
+	    endptr == str		||	/* no digits */
+	    *endptr != '\0')			/* junk at end */
+		return -1;
+	return 1;
+}
+
+int atou32(const char *str, uint32_t *val)
+{
+	uint64_t tmp;
+
+	if (atou64(str, &tmp) < 0 || tmp > 0xFFFFffffUL)
+		return -1;
+	*val = tmp;
+	return 1;
+}
+
+/*
+ * POSIX says time_t can be integer or floating type.
+ * On x86_32 it is an u32, on x86_64 it is an u64 type.
+ */
+int atotime_t(const char *str, time_t *val)
+{
+	uint64_t tmp;
+
+	if (atou64(str, &tmp) < 0)
+		return -1;
+	*val = tmp;
+	return 1;
+}
diff --git a/src/plugins/select/cray/libalps/basil_mysql_routines.c b/src/plugins/select/cray/libalps/basil_mysql_routines.c
new file mode 100644
index 000000000..0261426e4
--- /dev/null
+++ b/src/plugins/select/cray/libalps/basil_mysql_routines.c
@@ -0,0 +1,300 @@
+/*
+ * Database interaction routines for Cray XT/XE systems.
+ *
+ * Copyright (c) 2009-2011 Centro Svizzero di Calcolo Scientifico (CSCS)
+ * Licensed under the GPLv2.
+ */
+#include "../basil_alps.h"
+
+/** Read options from the appropriate my.cnf configuration file. */
+static int cray_get_options_from_default_conf(MYSQL *handle)
+{
+	const char **path;
+	/*
+	 * Hardcoded list of paths my.cnf is known to exist on a Cray XT/XE
+	 */
+	const char *default_conf_paths[] = {
+		"/etc/my.cnf",
+		"/etc/opt/cray/MySQL/my.cnf",
+		"/etc/mysql/my.cnf",
+		NULL
+	};
+
+	for (path = default_conf_paths; *path; path++)
+		if (access(*path, R_OK) == 0)
+			break;
+	if (*path == NULL)
+		fatal("no readable 'my.cnf' found");
+	return  mysql_options(handle, MYSQL_READ_DEFAULT_FILE, *path);
+}
+
+/**
+ * cray_connect_sdb - Connect to the XTAdmin database on the SDB host
+ */
+extern MYSQL *cray_connect_sdb(void)
+{
+	MYSQL *handle = mysql_init(NULL);
+
+	if (handle == NULL)
+		return NULL;
+
+	if (cray_get_options_from_default_conf(handle) != 0) {
+		error("can not get options from configuration file (%u) - %s",
+		      mysql_errno(handle), mysql_error(handle));
+		goto connect_failed;
+	}
+
+	if (mysql_real_connect(handle, cray_conf->sdb_host, cray_conf->sdb_user,
+			       cray_conf->sdb_pass, cray_conf->sdb_db,
+			       cray_conf->sdb_port, NULL, 0) == NULL) {
+		error("can not connect to %s.%s (%u) - %s", cray_conf->sdb_host,
+		      cray_conf->sdb_db, mysql_errno(handle),
+		      mysql_error(handle));
+		goto connect_failed;
+	}
+
+	return handle;
+
+connect_failed:
+	mysql_close(handle);
+	return NULL;
+}
+
+/**
+ * cray_is_gemini_system -  Figure out whether SeaStar (XT) or Gemini (XE)
+ * @handle:	connected to sdb.XTAdmin database
+ * Returns
+ * -1 on error
+ *  1 if on a Gemini system
+ *  0 if on a SeaStar system
+ */
+int cray_is_gemini_system(MYSQL *handle)
+{
+	/*
+	 * Rationale:
+	 * - XT SeaStar systems have one SeaStar ASIC per node.
+	 *   There are 4 nodes and 4 SeaStar ASICS on each blade, giving
+	 *   4 distinct (X,Y,Z) coordinates per blade, so that the total
+	 *   node count equals the total count of torus coordinates.
+	 * - XE Gemini systems connect pairs of nodes to a Gemini chip.
+	 *   There are 4 nodes on a blade and 2 Gemini chips. Nodes 0/1
+	 *   are connected to Gemini chip 0, nodes 2/3 are connected to
+	 *   Gemini chip 1. This configuration acts as if the nodes were
+	 *   internally joined in Y dimension; hence there are half as
+	 *   many (X,Y,Z) coordinates than there are nodes in the system.
+	 * - Coordinates may be NULL if a network chip is deactivated.
+	 */
+	const char query[] =
+		"SELECT COUNT(DISTINCT x_coord, y_coord, z_coord) < COUNT(*) "
+		"FROM processor "
+		"WHERE x_coord IS NOT NULL "
+		"AND   y_coord IS NOT NULL "
+		"AND   z_coord IS NOT NULL";
+	MYSQL_BIND	result[1];
+	signed char	answer;
+	my_bool		is_null;
+	my_bool		is_error;
+	MYSQL_STMT	*stmt;
+
+	memset(result, 0, sizeof(result));
+	result[0].buffer_type	= MYSQL_TYPE_TINY;
+	result[0].buffer	= (char *)&answer;
+	result[0].is_null	= &is_null;
+	result[0].error		= &is_error;
+
+	stmt = prepare_stmt(handle, query, NULL, 0, result, 1);
+	if (stmt == NULL)
+		return -1;
+	if (exec_stmt(stmt, query, result, 1) < 0)
+		answer = -1;
+	mysql_stmt_close(stmt);
+	return answer;
+}
+
+/*
+ *	Auxiliary routines for using prepared statements
+ */
+
+/**
+ * validate_stmt_column_count - Validate column count of prepared statement
+ * @stmt:	 prepared statement
+ * @query:	 query text
+ * @expect_cols: expected number of columns
+ * Return true if ok.
+ */
+static bool validate_stmt_column_count(MYSQL_STMT *stmt, const char *query,
+				       unsigned long expect_cols)
+{
+	unsigned long	column_count;
+	MYSQL_RES	*result_metadata = mysql_stmt_result_metadata(stmt);
+
+	/* Fetch result-set meta information */
+	if (!result_metadata) {
+		error("can not obtain statement meta "
+		      "information for \"%s\": %s",
+		      query, mysql_stmt_error(stmt));
+		return false;
+	}
+
+	/* Check total column count of query */
+	column_count = mysql_num_fields(result_metadata);
+	if (column_count != expect_cols) {
+		error("expected %lu columns for \"%s\", but got %lu",
+		      expect_cols, query, column_count);
+		mysql_free_result(result_metadata);
+		return false;
+	}
+
+	/* Free the prepared result metadata */
+	mysql_free_result(result_metadata);
+
+	return true;
+}
+
+/**
+ * prepare_stmt - Initialize and prepare a query statement.
+ * @handle:	connected handle
+ * @query:	query statement string to execute
+ * @bind_parm:  values for unbound variables (parameters) in @query
+ * @nparams:	length of @bind_parms
+ * @bind_col:	typed array to contain the column results
+ *		==> non-NULL 'is_null'/'error' fields are taken to mean
+ *		    that NULL values/errors are not acceptable
+ * @ncols:	number of expected columns (length of @bind_col)
+ * Return prepared statement handle on success, NULL on error.
+ */
+MYSQL_STMT *prepare_stmt(MYSQL *handle, const char *query,
+			 MYSQL_BIND bind_parm[], unsigned long nparams,
+			 MYSQL_BIND bind_col[], unsigned long ncols)
+{
+	MYSQL_STMT	*stmt;
+	unsigned long	param_count;
+
+	if (query == NULL || *query == '\0')
+		return NULL;
+
+	/* Initialize statement (fails only if out of memory). */
+	stmt = mysql_stmt_init(handle);
+	if (stmt == NULL) {
+		error("can not allocate handle for \"%s\"", query);
+		return NULL;
+	}
+
+	if (mysql_stmt_prepare(stmt, query, strlen(query))) {
+		error("can not prepare statement \"%s\": %s",
+		      query, mysql_stmt_error(stmt));
+		goto prepare_failed;
+	}
+
+	/* Verify the parameter count */
+	param_count = mysql_stmt_param_count(stmt);
+	if (param_count != nparams) {
+		error("expected %lu parameters for \"%s\" but got %lu",
+		      nparams, query, param_count);
+		goto prepare_failed;
+	}
+
+	if (!validate_stmt_column_count(stmt, query, ncols))
+		goto prepare_failed;
+
+	if (nparams && mysql_stmt_bind_param(stmt, bind_parm)) {
+		error("can not bind parameter buffers for \"%s\": %s",
+		      query, mysql_stmt_error(stmt));
+		goto prepare_failed;
+	}
+
+	if (mysql_stmt_bind_result(stmt, bind_col)) {
+		error("can not bind output buffers for \"%s\": %s",
+		      query, mysql_stmt_error(stmt));
+		goto prepare_failed;
+	}
+
+	return stmt;
+
+prepare_failed:
+	(void)mysql_stmt_close(stmt);
+	return NULL;
+}
+
+/**
+ * store_stmt_results - Buffer all results of a query on the client
+ * Returns -1 on error, number_of_rows >= 0 if ok.
+ */
+static int store_stmt_results(MYSQL_STMT *stmt, const char *query,
+			      MYSQL_BIND bind_col[], unsigned long ncols)
+{
+	my_ulonglong nrows;
+	int i;
+
+	if (stmt == NULL || ncols == 0)
+		return -1;
+
+	if (mysql_stmt_store_result(stmt)) {
+		error("can not store query result for \"%s\": %s",
+		      query, mysql_stmt_error(stmt));
+		return -1;
+	}
+
+	nrows = mysql_stmt_affected_rows(stmt);
+	if (nrows == (my_ulonglong)-1) {
+		error("query \"%s\" returned an error: %s",
+		      query, mysql_stmt_error(stmt));
+		return -1;
+	}
+
+	while (mysql_stmt_fetch(stmt) == 0)
+		for (i = 0; i < ncols; i++) {
+			if (bind_col[i].error && *bind_col[i].error)  {
+				error("result value in column %d truncated: %s",
+				      i, mysql_stmt_error(stmt));
+				return -1;
+			}
+		}
+
+	/* Seek back to begin of data set */
+	mysql_stmt_data_seek(stmt, 0);
+
+	return nrows;
+}
+
+/**
+ * exec_stmt - Execute, store and validate a prepared statement
+ * @query:	query text
+ * @bind_col:	as in prepare_stmt()
+ * @ncols:	as in prepare_stmt()
+ * Returns -1 on error, number_of_rows >= 0 if ok.
+ */
+int exec_stmt(MYSQL_STMT *stmt, const char *query,
+	      MYSQL_BIND bind_col[], unsigned long ncols)
+{
+	if (mysql_stmt_execute(stmt)) {
+		error("failed to execute \"%s\": %s",
+		      query, mysql_stmt_error(stmt));
+		return -1;
+	}
+	return store_stmt_results(stmt, query, bind_col, ncols);
+}
+
+/**
+ * fetch_stmt - return the next row in the result set.
+ * Returns 1 on error,  0 if ok.
+ */
+int fetch_stmt(MYSQL_STMT *stmt)
+{
+	return mysql_stmt_fetch(stmt);
+}
+
+my_bool free_stmt_result(MYSQL_STMT *stmt)
+{
+	return mysql_stmt_free_result(stmt);
+}
+
+my_bool stmt_close(MYSQL_STMT *stmt)
+{
+	return mysql_stmt_close(stmt);
+}
+
+void cray_close_sdb(MYSQL *handle)
+{
+	mysql_close(handle);
+}
diff --git a/src/plugins/select/cray/libalps/basil_request.c b/src/plugins/select/cray/libalps/basil_request.c
new file mode 100644
index 000000000..13c238d78
--- /dev/null
+++ b/src/plugins/select/cray/libalps/basil_request.c
@@ -0,0 +1,184 @@
+/*
+ * Fork apbasil process as co-process, parse output.
+ *
+ * Copyright (c) 2009-2011 Centro Svizzero di Calcolo Scientifico (CSCS)
+ * Portions Copyright (C) 2011 SchedMD <http://www.schedmd.com>.
+ * Licensed under the GPLv2.
+ */
+#include "parser_internal.h"
+
+
+static void _rsvn_write_reserve_xml(FILE *fp, struct basil_reservation *r)
+{
+	struct basil_rsvn_param *param;
+
+	fprintf(fp, " <ReserveParamArray user_name=\"%s\"", r->user_name);
+	if (*r->batch_id != '\0')
+		fprintf(fp, " batch_id=\"%s\"", r->batch_id);
+	if (*r->account_name != '\0')
+		fprintf(fp, " account_name=\"%s\"", r->account_name);
+	fprintf(fp, ">\n");
+
+	for (param = r->params; param; param = param->next) {
+		fprintf(fp, "  <ReserveParam architecture=\"%s\" "
+			"width=\"%ld\" depth=\"%ld\" nppn=\"%ld\"",
+			nam_arch[param->arch],
+			param->width, param->depth, param->nppn);
+
+		if (param->memory || param->labels ||
+		    param->nodes  || param->accel) {
+			fprintf(fp, ">\n");
+		} else {
+			fprintf(fp, "/>\n");
+			continue;
+		}
+
+		if (param->memory) {
+			struct basil_memory_param  *mem;
+
+			fprintf(fp, "   <MemoryParamArray>\n");
+			for (mem = param->memory; mem; mem = mem->next)
+				fprintf(fp, "    <MemoryParam type=\"%s\""
+					" size_mb=\"%u\"/>\n",
+					nam_memtype[mem->type],
+					mem->size_mb ? : 1);
+			fprintf(fp, "   </MemoryParamArray>\n");
+		}
+
+		if (param->labels) {
+			struct basil_label *label;
+
+			fprintf(fp, "   <LabelParamArray>\n");
+			for (label = param->labels; label; label = label->next)
+				fprintf(fp, "    <LabelParam name=\"%s\""
+					" type=\"%s\" disposition=\"%s\"/>\n",
+					label->name, nam_labeltype[label->type],
+					nam_ldisp[label->disp]);
+
+			fprintf(fp, "   </LabelParamArray>\n");
+		}
+
+		if (param->nodes && *param->nodes) {
+			/*
+			 * The NodeParamArray is declared within ReserveParam.
+			 * If the list is spread out over multiple NodeParam
+			 * elements, an
+			 *   "at least one command's user NID list is short"
+			 * error results. Hence more than 1 NodeParam element
+			 * is probably only meant to be used when suggesting
+			 * alternative node lists to ALPS. This was confirmed
+			 * by repeating an identical same NodeParam 20 times,
+			 * which had the same effect as supplying it once.
+			 * Hence the array expression is actually not needed.
+			 */
+			fprintf(fp, "   <NodeParamArray>\n"
+				    "    <NodeParam>%s</NodeParam>\n"
+				    "   </NodeParamArray>\n", param->nodes);
+		}
+
+		if (param->accel) {
+			struct basil_accel_param *accel;
+
+			fprintf(fp, "   <AccelParamArray>\n");
+			for (accel = param->accel; accel; accel = accel->next) {
+				fprintf(fp, "    <AccelParam type=\"%s\"",
+					nam_acceltype[accel->type]);
+
+				if (accel->memory_mb)
+					fprintf(fp, " memory_mb=\"%u\"",
+						accel->memory_mb);
+				fprintf(fp, "/>\n");
+			}
+			fprintf(fp, "   </AccelParamArray>\n");
+		}
+
+		fprintf(fp, "  </ReserveParam>\n");
+	}
+	fprintf(fp, " </ReserveParamArray>\n"
+		    "</BasilRequest>\n");
+}
+
+/*
+ * basil_request - issue BASIL request and parse response
+ * @bp:	method-dependent parse data to guide the parsing process
+ *
+ * Returns 0 if ok, a negative %basil_error otherwise.
+ */
+int basil_request(struct basil_parse_data *bp)
+{
+	int to_child, from_child;
+	int ec, rc = -BE_UNKNOWN;
+	FILE *apbasil;
+	pid_t pid;
+
+	if (!cray_conf->apbasil) {
+		error("No alps client defined");
+		return 0;
+	}
+	assert(bp->version < BV_MAX);
+	assert(bp->method > BM_none && bp->method < BM_MAX);
+
+	pid = popen2(cray_conf->apbasil, &to_child, &from_child, true);
+	if (pid < 0)
+		fatal("popen2(\"%s\", ...)", cray_conf->apbasil);
+
+	/* write out request */
+	apbasil = fdopen(to_child, "w");
+	if (apbasil == NULL)
+		fatal("fdopen(): %s", strerror(errno));
+	setlinebuf(apbasil);
+
+	fprintf(apbasil, "<?xml version=\"1.0\"?>\n"
+		"<BasilRequest protocol=\"%s\" method=\"%s\" ",
+		bv_names[bp->version], bm_names[bp->method]);
+
+	switch (bp->method) {
+	case BM_engine:
+		fprintf(apbasil, "type=\"ENGINE\"/>");
+		break;
+	case BM_inventory:
+		fprintf(apbasil, "type=\"INVENTORY\"/>");
+		break;
+	case BM_reserve:
+		fprintf(apbasil, ">\n");
+		_rsvn_write_reserve_xml(apbasil, bp->mdata.res);
+		break;
+	case BM_confirm:
+		if (bp->version == BV_1_0 && *bp->mdata.res->batch_id != '\0')
+			fprintf(apbasil, "job_name=\"%s\" ",
+				bp->mdata.res->batch_id);
+		fprintf(apbasil, "reservation_id=\"%u\" %s=\"%llu\"/>\n",
+			bp->mdata.res->rsvn_id,
+			bp->version >= BV_3_1 ? "pagg_id" : "admin_cookie",
+			(unsigned long long)bp->mdata.res->pagg_id);
+		break;
+	case BM_release:
+		fprintf(apbasil, "reservation_id=\"%u\"/>\n",
+			bp->mdata.res->rsvn_id);
+		break;
+	case BM_switch:
+	{
+		char *suspend = bp->mdata.res->suspended ? "OUT" : "IN";
+		fprintf(apbasil, ">\n");
+		fprintf(apbasil, " <ReservationArray>\n");
+		fprintf(apbasil, "  <Reservation reservation_id=\"%u\" "
+			"action=\"%s\"/>\n",
+			bp->mdata.res->rsvn_id, suspend);
+		fprintf(apbasil, " </ReservationArray>\n");
+		fprintf(apbasil, "</BasilRequest>\n");
+	}
+		break;
+	default: /* ignore BM_none, BM_MAX, and BM_UNKNOWN covered above */
+		break;
+	}
+
+	if (fclose(apbasil) < 0)	/* also closes to_child */
+		error("fclose(apbasil): %s", strerror(errno));
+
+	rc = parse_basil(bp, from_child);
+	ec = wait_for_child(pid);
+	if (ec)
+		error("%s child process for BASIL %s method exited with %d",
+		      cray_conf->apbasil, bm_names[bp->method], ec);
+	return rc;
+}
diff --git a/src/plugins/select/cray/libalps/do_confirm.c b/src/plugins/select/cray/libalps/do_confirm.c
new file mode 100644
index 000000000..5e75cefcc
--- /dev/null
+++ b/src/plugins/select/cray/libalps/do_confirm.c
@@ -0,0 +1,46 @@
+/*
+ * Implements the Basil CONFIRM method for partition reservations.
+ *
+ * Copyright (c) 2009-2011 Centro Svizzero di Calcolo Scientifico (CSCS)
+ * Licensed under the GPLv2.
+ */
+#include "../basil_alps.h"
+
+static int rsvn_confirm(struct basil_reservation *res, uint64_t pagg_id)
+{
+	struct basil_parse_data bp = {0};
+
+	bp.method    = BM_confirm;
+	res->pagg_id = pagg_id;
+	bp.mdata.res = res;
+	bp.version   = BV_1_0;
+	/*
+	 * Rule:
+	 * - if *res->batch_id == '\0' we are not using Basil 1.0
+	 * - else we use Basil 1.0 to set the 'job_name'
+	 */
+	if (*res->batch_id == '\0')
+		bp.version = get_basil_version();
+
+	return basil_request(&bp);
+}
+
+/**
+ * basil_confirm - confirm an existing reservation
+ * @rsvn_id:    the reservation id
+ * @job_id:	job ID or -1 (see note below)
+ * @pagg_id:	SID or CSA PAGG ID of the shell process executing the job script
+ * Returns 0 if ok, a negative %basil_error otherwise.
+ *
+ * NOTE: @job_id is only meaningful for confirmation of Basil 1.0 jobs.
+ *       Basil 1.1 jobs can register the batch_id when creating the reservation.
+ */
+int basil_confirm(uint32_t rsvn_id, int job_id, uint64_t pagg_id)
+{
+	struct basil_reservation rsvn = {0};
+
+	rsvn.rsvn_id = rsvn_id;
+	if (job_id >= 0)
+		snprintf(rsvn.batch_id, sizeof(rsvn.batch_id), "%u", job_id);
+	return rsvn_confirm(&rsvn, pagg_id);
+}
diff --git a/src/plugins/select/cray/libalps/do_query.c b/src/plugins/select/cray/libalps/do_query.c
new file mode 100644
index 000000000..7404b0ebb
--- /dev/null
+++ b/src/plugins/select/cray/libalps/do_query.c
@@ -0,0 +1,194 @@
+/*
+ * Access to ALPS QUERY methods
+ *
+ * Copyright (c) 2009-2011 Centro Svizzero di Calcolo Scientifico (CSCS)
+ * Licensed under the GPLv2.
+ */
+#include "../basil_alps.h"
+
+/**
+ * _get_alps_engine  -  run QUERY of type ENGINE
+ * This uses the convention of returning the Engine.version attribute via 'msg'.
+ * Returns pointer to @buf, NULL on error.
+ */
+static const char *_get_alps_engine(char *buf, size_t buflen)
+{
+	struct basil_parse_data bp = {0};
+
+	/* For this query use Basil 1.0 as lowest common denominator */
+	bp.version = BV_1_0;
+	bp.method  = BM_engine;
+
+	if (basil_request(&bp) < 0)
+		return NULL;
+	snprintf(buf, buflen, "%s", bp.msg);
+	return buf;
+}
+
+/** Return true if @seg has at least a processor or a memory allocation. */
+static bool _segment_is_allocated(const struct basil_segment *seg)
+{
+	struct basil_node_processor *proc;
+	struct basil_node_memory *mem;
+
+	for (proc = seg->proc_head; proc; proc = proc->next)
+		if (proc->rsvn_id)
+			return true;
+	for (mem = seg->mem_head; mem; mem = mem->next)
+		if (mem->a_head != NULL)
+			return true;
+	return false;
+}
+
+/**
+ * get_basil_version  -  Detect highest BASIL version supported by ALPS.
+ *
+ * This uses the following correspondence table to find the highest supported
+ * BASIL version. Failing that, it falls back to Basil 1.0 as last resort.
+ *
+ * +------------+---------------+------+----------------+----------------------+
+ * | CLE release| Engine.version| ALPS | Basil Protocol |       Remarks        |
+ * +------------+---------------+------+----------------+----------------------+
+ * |  <= 2.2.48B|         1.1.0 |  1.1 |   1.0, 1.1     | see below            |
+ * |  >= 2.2.67 |         1.2.0 |  1.2 |   1.0, 1.1     | last CLE 2.2 update  |
+ * |     3.0    |         1.3.0 |  3.0 |   1.0, 1.1     | Cray ticket #762417  |
+ * |     3.1    |         3.1.0 |  3.1 |   1.0, 1.1     | Cray ticket #762035  |
+ * |     4.0    |         4.0.0 |  4.0 |   1.0,1.1,1.2  | starts GPU support   |
+ * +------------+---------------+------+----------------+----------------------+
+ *
+ * The 'ALPS' column shows the name of the ALPS engine; the 'Basil Protocol'
+ * column shows the supported versions for the BasilRequest.protocol attribute.
+ *
+ * No CLE 2 versions were released between 2.2.48B and 2.2.67; the Basil 1.2
+ * variant that came with the latter release behaved identically to Basil 1.1.
+ *
+ * Starting from Basil 3.1, there is also a 'basil_support' attribute to query
+ * the supported 'Basil Protocol' list.
+ */
+extern enum basil_version get_basil_version(void)
+{
+	char engine_version[BASIL_STRING_LONG];
+	static enum basil_version bv = BV_MAX;
+
+	if (bv != BV_MAX)
+		return bv;
+
+	if (_get_alps_engine(engine_version, sizeof(engine_version)) == NULL)
+		fatal("can not determine ALPS Engine version");
+	else if (strncmp(engine_version, "4.1.0", 5) == 0)
+		bv = BV_4_1;
+	else if (strncmp(engine_version, "4.0", 3) == 0)
+		bv = BV_4_0;
+	else if (strncmp(engine_version, "3.1.0", 5) == 0)
+		bv = BV_3_1;
+	else if (strncmp(engine_version, "1.3.0", 5) == 0)
+		/*
+		 * Cray Bug#762417 - strictly speaking, we should be
+		 * returning BV_3_0 here. Alps Engine Version 1.3.0
+		 * is reserved for the Cozla release (CLE 3.0), which
+		 * however was only a short time on the market.
+		 */
+		bv = BV_3_1;
+	else if (strncmp(engine_version, "1.2.0", 5) == 0)
+		bv = BV_1_2;
+	else if (strncmp(engine_version, "1.1", 3) == 0)
+		bv = BV_1_1;
+	else
+		fatal("unsupported ALPS Engine version '%s', please edit "
+		      "src/plugins/select/cray/libalps/do_query.c "
+		      "for this version",
+		      engine_version);
+	return bv;
+}
+
+/** Perform a detailed inventory */
+extern struct basil_inventory *get_full_inventory(enum basil_version version)
+{
+	struct basil_parse_data bp = {0};
+
+	bp.version   = version;
+	bp.method    = BM_inventory;
+	bp.mdata.inv = xmalloc(sizeof(*bp.mdata.inv));
+
+	if (bp.mdata.inv) {
+		bp.mdata.inv->f = xmalloc(sizeof(struct basil_full_inventory));
+		if (bp.mdata.inv->f == NULL) {
+			xfree(bp.mdata.inv);
+			bp.mdata.inv = NULL;
+		}
+	}
+
+	if (bp.mdata.inv == NULL)
+		return NULL;
+
+	if (basil_request(&bp) < 0) {
+		free_inv(bp.mdata.inv);
+		return NULL;
+	}
+
+	return bp.mdata.inv;
+}
+
+/*
+ *	Informations extracted from INVENTORY
+ */
+
+/** Return true if @node has at least a processor or a memory allocation. */
+extern bool node_is_allocated(const struct basil_node *node)
+{
+	struct basil_segment *seg;
+
+	for (seg = node->seg_head; seg; seg = seg->next)
+		if (_segment_is_allocated(seg))
+			return true;
+	return false;
+}
+
+/** Search @inv for a particular reservation identified by @rsvn_id */
+extern const struct basil_rsvn *basil_rsvn_by_id(
+	const struct basil_inventory *inv, uint32_t rsvn_id)
+{
+	const struct basil_rsvn *rsvn;
+
+	assert(inv && inv->f);
+	for (rsvn = inv->f->rsvn_head; rsvn; rsvn = rsvn->next)
+		if (rsvn->rsvn_id == rsvn_id)
+			break;
+	return rsvn;
+}
+
+/**
+ * basil_get_rsvn_aprun_apids  -  get list of aprun APIDs for @rsvn_id
+ * Returns 0-terminated array, which caller must free.
+ * WARNING: if the aprun application uses fewer nodes than are reserved by
+ *          @rsvn_id, additional information is required to confirm whether
+ *          that particular node is indeed in use by the given apid.
+ */
+extern uint64_t *basil_get_rsvn_aprun_apids(const struct basil_inventory *inv,
+					    uint32_t rsvn_id)
+{
+	const struct basil_rsvn	*rsvn = basil_rsvn_by_id(inv, rsvn_id);
+	const struct basil_rsvn_app *app;
+	uint64_t *apids = NULL;
+	int n = 1;	/* 0-terminated array */
+
+	if (rsvn == NULL)
+		return NULL;
+
+	for (app = rsvn->app_head; app; app = app->next)
+		/*
+		 * There are two types of basil_rsvn_app applications:
+		 * - the first application has a 'timestamp' of 0, a 'cmd' of
+		 *   "BASIL" - this is used to store the reservation parameters;
+		 * - all other applications have a non-0 timestamp and refer to
+		 *   actual aprun job steps (whose APIDs we are interested in).
+		 */
+		if (app->timestamp) {
+			apids = xrealloc(apids, (n + 1) * sizeof(*apids));
+			if (apids == NULL)
+				fatal("failed to allocate Apid entry");
+			apids[n-1] = app->apid;
+			apids[n++] = 0;
+		}
+	return apids;
+}
diff --git a/src/plugins/select/cray/libalps/do_release.c b/src/plugins/select/cray/libalps/do_release.c
new file mode 100644
index 000000000..9a7c20e87
--- /dev/null
+++ b/src/plugins/select/cray/libalps/do_release.c
@@ -0,0 +1,101 @@
+/*
+ * Implements the Basil RELEASE method for partition reservations.
+ *
+ * Copyright (c) 2009-2011 Centro Svizzero di Calcolo Scientifico (CSCS)
+ * Licensed under the GPLv2.
+ */
+#include "../basil_alps.h"
+
+static int rsvn_release(struct basil_reservation *res)
+{
+	struct basil_parse_data bp = {0};
+
+	bp.method    = BM_release;
+	bp.mdata.res = res;
+	bp.version   = get_basil_version();
+	/* NOTE - for simplicity we could use BV_1_0 here */
+
+	return basil_request(&bp);
+}
+
+/**
+ * basil_release - release an (un)confirmed reservation
+ * @rsvn_id:    the reservation id
+ * Returns 0 if ok, a negative %basil_error otherwise.
+ */
+int basil_release(uint32_t rsvn_id)
+{
+	struct basil_reservation rsvn = {0};
+
+	rsvn.rsvn_id = rsvn_id;
+	return rsvn_release(&rsvn);
+}
+
+/**
+ * basil_signal_apids  -  send a signal to all APIDs of a given ALPS reservation
+ * @rsvn_id:	reservation ID to target
+ * @signal:	signal number
+ * @inv:	recent Basil Inventory, or NULL to generate internally
+ * Returns 0 if ok, a negative %basil_error otherwise.
+ */
+int basil_signal_apids(int32_t rsvn_id, int signal, struct basil_inventory *inv)
+{
+	struct basil_inventory *new_inv = inv;
+	uint64_t *apid, *apids;
+	char cmd[512];
+
+	if (access(cray_conf->apkill, X_OK) < 0) {
+		error("FATAL: can not execute the apkill command '%s'",
+		      cray_conf->apkill);
+		return -BE_SYSTEM;
+	}
+
+	if (inv == NULL)
+		new_inv = get_full_inventory(get_basil_version());
+	if (new_inv == NULL) {
+		error("can not obtain a BASIL inventory to get APID list");
+		return -(BE_INTERNAL | BE_TRANSIENT);
+	}
+
+	apids = basil_get_rsvn_aprun_apids(new_inv, rsvn_id);
+	if (apids) {
+		for (apid = apids; *apid; apid++) {
+			debug2("ALPS resId %u, running apkill -%d %llu",
+				rsvn_id, signal, (unsigned long long)*apid);
+			snprintf(cmd, sizeof(cmd), "%s -%d %llu",
+				 cray_conf->apkill, signal,
+				 (unsigned long long)*apid);
+			if (system(cmd) < 0)
+				error("system(%s) failed", cmd);
+		}
+		xfree(apids);
+	}
+	if (inv == NULL)
+		free_inv(new_inv);
+	return BE_NONE;
+}
+
+/**
+ * basil_safe_release  -  release reservation after signalling job steps
+ * @rsvn_id:	reservation to release
+ * @inv:	recent Basil Inventory, or NULL to generate internally
+ * Returns 0 if ok, a negative %basil_error otherwise.
+ */
+int basil_safe_release(int32_t rsvn_id, struct basil_inventory *inv)
+{
+	int rc = basil_release(rsvn_id);
+	/*
+	 * If there are still any live application IDs (APIDs) associated with
+	 * @rsvn_id, the RELEASE command will be without effect, since ALPS
+	 * holds on to a reservation until all of its application IDs have
+	 * disappeared.
+	 * On normal termination, ALPS should clean up the APIDs by itself. In
+	 * order to clean up orphaned reservations, try to terminate the APIDs
+	 * manually using apkill(1). If this step fails, fall back to releasing
+	 * the reservation normally and hope that ALPS resolves the situation.
+	 * To prevent that any subsequent aprun lines get started while the
+	 * apkill of the current one is still in progress, do the RELEASE first.
+	 */
+	basil_signal_apids(rsvn_id, SIGKILL, inv);
+	return rc;
+}
diff --git a/src/plugins/select/cray/libalps/do_reserve.c b/src/plugins/select/cray/libalps/do_reserve.c
new file mode 100644
index 000000000..2ec1a257d
--- /dev/null
+++ b/src/plugins/select/cray/libalps/do_reserve.c
@@ -0,0 +1,166 @@
+/*
+ * Implements the Basil RESERVE method for creating partitions.
+ *
+ * Copyright (c) 2009-2011 Centro Svizzero di Calcolo Scientifico (CSCS)
+ * Licensed under the GPLv2.
+ */
+#include "memory_handling.h"
+
+/**
+ * _rsvn_add_mem_param  -  Add memory allocation request to reservation.
+ * @rp:     reservation to add to
+ * @mem_mb: memory size in MB requested for @rp
+ */
+static int _rsvn_add_mem_param(struct basil_rsvn_param *rp, uint32_t mem_mb)
+{
+	struct basil_memory_param *mp;
+
+	if (mem_mb == 0)	/* 0 means 'use defaults' */
+		return 0;
+
+	mp = xmalloc(sizeof(*mp));
+	if (mp == NULL)
+		return -1;
+
+	/* As of Basil 1.2/3.1, BMT_OS is still the only supported type. */
+	mp->type    = BMT_OS;
+	mp->size_mb = mem_mb;
+
+	if (rp->memory)
+		mp->next = rp->memory;
+	rp->memory = mp;
+	return 0;
+}
+
+/**
+ * _rsvn_add_params  -  Populate parameters for a RESERVE request
+ * @resv:	the reservation to add to
+ * @width:	mppwidth > 0
+ * @depth:	mppdepth >= 0 (0 meaning 'use defaults')
+ * @nppn:	mppnppn  >= 0 (0 meaning 'use defaults')
+ * @mem_mb:	mppmem   >= 0 (0 meaning 'use defaults', else size in MB)
+ * @mppnodes:	comma-separated nodelist (will be freed if not NULL)
+ * @accel:	accelerator parameters (will be freed if not NULL)
+ */
+static int _rsvn_add_params(struct basil_reservation *resv,
+			    uint32_t width, uint32_t depth, uint32_t nppn,
+			    uint32_t mem_mb, char *mppnodes,
+			    struct basil_accel_param *accel)
+{
+	struct basil_rsvn_param *rp = xmalloc(sizeof(*rp));
+
+	if (rp == NULL)
+		return -1;
+
+	rp->arch  = BNA_XT;	/* "XT" is the only supported architecture */
+	rp->width = width;
+	rp->depth = depth;
+	rp->nppn  = nppn;
+	rp->nodes = mppnodes;
+	rp->accel = accel;
+
+	if (mem_mb && _rsvn_add_mem_param(rp, mem_mb) < 0) {
+		rsvn_free_param(rp);
+		return -1;
+	}
+
+	if (resv->params)
+		rp->next = resv->params;
+	resv->params = rp;
+
+	return 0;
+}
+
+/**
+ * _rsvn_new  -  allocate new reservation with single 'ReserveParam' element
+ * @user:	owner (user_name) of the reservation (mandatory)
+ * @batch_id:	batch job ID associated with reservation or NULL (Basil 1.1 only)
+ *
+ * @width:	mppwidth > 0
+ * @depth:	mppdepth >= 0 (0 meaning 'use default')
+ * @nppn:	mppnppn  >= 0 (0 meaning 'use default')
+ * @mem_mb:	mppmem   >= 0 (0 meaning 'use defaults', else size in MB)
+ * @mppnodes:	comma-separated nodelist (will be freed if not NULL)
+ * @accel:	accelerator parameters or NULL
+ *
+ * The reservation ID is initially 0, since 0 is an invalid reservation ID.
+ */
+static struct basil_reservation *_rsvn_new(const char *user,
+					   const char *batch_id,
+					   uint32_t width, uint32_t depth,
+					   uint32_t nppn, uint32_t mem_mb,
+					   char *mppnodes,
+					   struct basil_accel_param *accel)
+{
+	struct basil_reservation *res;
+
+	assert(user != NULL && *user != '\0');
+
+	if (width <= 0 || depth < 0 || nppn < 0)
+		return NULL;
+
+	res = xmalloc(sizeof(*res));
+	if (res == NULL)
+		return NULL;
+
+	res->rsvn_id = 0;
+	strncpy(res->user_name, user, sizeof(res->user_name));
+
+	if (batch_id && *batch_id)
+		strncpy(res->batch_id, batch_id, sizeof(res->batch_id));
+
+	if (_rsvn_add_params(res, width, depth, nppn,
+			     mem_mb, mppnodes, accel) < 0) {
+		free_rsvn(res);
+		return NULL;
+	}
+
+	return res;
+}
+
+/**
+ * basil_reserve  -  wrapper around _rsvn_new().
+ * @user:       owner of the reservation
+ * @batch_id:   (numeric) job ID
+ * @width:      mppwidth (aprun -n)
+ * @depth:      mppdepth (aprun -d)
+ * @nppn:       mppnppn  (aprun -N)
+ * @mem_mb:     mppmem   (aprun -m)
+ * @ns_head:    list of requested mppnodes (will be freed if not NULL)
+ * @accel_head: optional accelerator parameters
+ * Returns reservation ID > 0 if ok, negative %enum basil_error on error.
+ */
+long basil_reserve(const char *user, const char *batch_id,
+		   uint32_t width, uint32_t depth, uint32_t nppn,
+		   uint32_t mem_mb, struct nodespec *ns_head,
+		   struct basil_accel_param *accel_head)
+{
+	struct basil_reservation *rsvn;
+	struct basil_parse_data bp = {0};
+	/* do not free mppnodes it is stored/freed in the rsvn struct */
+	char *mppnodes = ns_to_string(ns_head);
+	long rc;
+
+	free_nodespec(ns_head);
+	rsvn = _rsvn_new(user, batch_id, width, depth, nppn, mem_mb,
+			 mppnodes, accel_head);
+	if (rsvn == NULL)
+		return -BE_INTERNAL;
+
+	bp.method    = BM_reserve;
+	bp.mdata.res = rsvn;
+	bp.version   = BV_1_0;
+	/*
+	 * Rule:
+	 * - if *res->batch_id is set, we are using Basil 1.1
+	 * - if *res->batch_id == '\0' we have to fall back to Basil 1.0
+	 */
+	if (batch_id && *batch_id)
+		bp.version = get_basil_version();
+
+	rc = basil_request(&bp);
+	if (rc >= 0)
+		rc = rsvn->rsvn_id;
+	free_rsvn(rsvn);
+	return rc;
+}
diff --git a/src/plugins/select/cray/libalps/do_switch.c b/src/plugins/select/cray/libalps/do_switch.c
new file mode 100644
index 000000000..1d8ed89be
--- /dev/null
+++ b/src/plugins/select/cray/libalps/do_switch.c
@@ -0,0 +1,60 @@
+/*****************************************************************************\
+ *  do_switch.c - Handle Switch method for cray systems.
+ *****************************************************************************
+ *  Copyright (C) 2011 SchedMD LLC
+ *  Written by Danny Auble <da@schedmd.com>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#include "../basil_alps.h"
+
+/**
+ * basil_switch - suspend/resume an existing reservation
+ * @rsvn_id:    the reservation id
+ * @suspend:    true to suspend the reservation, false to resume it
+ * Returns 0 if ok, a negative %basil_error otherwise.
+ *
+ */
+int basil_switch(uint32_t rsvn_id, bool suspend)
+{
+	struct basil_reservation rsvn = {0};
+	struct basil_parse_data bp = {0};
+
+	rsvn.rsvn_id = rsvn_id;
+	rsvn.suspended = suspend;
+
+	bp.method    = BM_switch;
+	bp.mdata.res = &rsvn;
+	bp.version   = get_basil_version();
+	/* NOTE - for simplicity we could use BV_1_0 here */
+
+	return basil_request(&bp);
+}
diff --git a/src/plugins/select/cray/libalps/memory_handling.c b/src/plugins/select/cray/libalps/memory_handling.c
new file mode 100644
index 000000000..0a7afdfcd
--- /dev/null
+++ b/src/plugins/select/cray/libalps/memory_handling.c
@@ -0,0 +1,150 @@
+/*
+ * Memory de-allocation
+ *
+ * Copyright (c) 2009-2011 Centro Svizzero di Calcolo Scientifico (CSCS)
+ * Licensed under the GPLv2.
+ */
+#include "memory_handling.h"
+
+static void _free_basil_processor(struct basil_node_processor *p)
+{
+	if (p) {
+		_free_basil_processor(p->next);
+		p->rsvn_id = 0; /* just to be safe */
+		xfree(p);
+	}
+}
+
+static void _free_basil_mem_alloc(struct basil_mem_alloc *m)
+{
+	if (m) {
+		_free_basil_mem_alloc(m->next);
+		xfree(m);
+	}
+}
+
+static void _free_basil_memory(struct basil_node_memory *m)
+{
+	if (m) {
+		_free_basil_memory(m->next);
+		_free_basil_mem_alloc(m->a_head);
+		xfree(m);
+	}
+}
+
+static void _free_basil_label(struct basil_label *l)
+{
+	if (l) {
+		_free_basil_label(l->next);
+		xfree(l);
+	}
+}
+
+static void _free_basil_segment(struct basil_segment *s)
+{
+	if (s) {
+		_free_basil_segment(s->next);
+		_free_basil_processor(s->proc_head);
+		_free_basil_memory(s->mem_head);
+		_free_basil_label(s->lbl_head);
+		xfree(s);
+	}
+}
+
+static void _free_basil_node(struct basil_node *n)
+{
+	if (n) {
+		_free_basil_node(n->next);
+		_free_basil_segment(n->seg_head);
+		xfree(n);
+	}
+}
+
+static void _free_basil_rsvn_cmd(struct basil_rsvn_app_cmd *c)
+{
+	if (c) {
+		_free_basil_rsvn_cmd(c->next);
+		xfree(c);
+	}
+}
+
+static void _free_basil_rsvn_app(struct basil_rsvn_app *a)
+{
+	if (a) {
+		_free_basil_rsvn_app(a->next);
+		_free_basil_rsvn_cmd(a->cmd_head);
+		xfree(a);
+	}
+}
+
+static void _free_basil_rsvn(struct basil_rsvn *r)
+{
+	if (r) {
+		_free_basil_rsvn(r->next);
+		_free_basil_rsvn_app(r->app_head);
+		xfree(r);
+	}
+}
+
+/*
+ * Reservation parameters
+ */
+static void _rsvn_free_param_mem(struct basil_memory_param *m)
+{
+	if (m) {
+		_rsvn_free_param_mem(m->next);
+		xfree(m);
+	}
+}
+
+static void _rsvn_free_param_accel(struct basil_accel_param *a)
+{
+	if (a) {
+		_rsvn_free_param_accel(a->next);
+		xfree(a);
+	}
+}
+
+void free_inv(struct basil_inventory *inv)
+{
+	if (inv) {
+		if (inv->f) {
+			_free_basil_node(inv->f->node_head);
+			_free_basil_rsvn(inv->f->rsvn_head);
+			xfree(inv->f);
+		}
+		xfree(inv);
+	}
+}
+
+/*
+ * Node-specifier lists
+ */
+void free_nodespec(struct nodespec *ns)
+{
+	if (ns) {
+		free_nodespec(ns->next);
+		xfree(ns);
+	}
+}
+
+void rsvn_free_param(struct basil_rsvn_param *p)
+{
+	if (p) {
+		rsvn_free_param(p->next);
+		_rsvn_free_param_mem(p->memory);
+		_rsvn_free_param_accel(p->accel);
+		_free_basil_label(p->labels);
+		xfree(p->nodes);
+		xfree(p);
+	}
+}
+
+void free_rsvn(struct basil_reservation *r)
+{
+	if (r) {
+		rsvn_free_param(r->params);
+		free_nodespec(r->rsvd_nodes);
+		xfree(r);
+	}
+}
diff --git a/src/plugins/select/cray/libalps/memory_handling.h b/src/plugins/select/cray/libalps/memory_handling.h
new file mode 100644
index 000000000..06ac88066
--- /dev/null
+++ b/src/plugins/select/cray/libalps/memory_handling.h
@@ -0,0 +1,46 @@
+/*****************************************************************************\
+ *  memory_handling.h - memory de-allocation for the Cray/ALPS select plugin
+ *****************************************************************************
+ *  Copyright (C) 2011 SchedMD LLC.
+ *  Written by Danny Auble <da@schedmd.com>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+#ifndef __CRAY_MEMORY_HANDLING_H__
+#define __CRAY_MEMORY_HANDLING_H__
+
+#include "../basil_alps.h"
+
+extern void free_nodespec(struct nodespec *head);
+
+extern void rsvn_free_param(struct basil_rsvn_param *p);
+extern void free_rsvn(struct basil_reservation *r);
+
+#endif
diff --git a/src/plugins/select/cray/libalps/parser_basil_1.0.c b/src/plugins/select/cray/libalps/parser_basil_1.0.c
new file mode 100644
index 000000000..3d12038bc
--- /dev/null
+++ b/src/plugins/select/cray/libalps/parser_basil_1.0.c
@@ -0,0 +1,133 @@
+/*
+ * XML tag handlers specific to Basil 1.0.
+ *
+ * Copyright (c) 2009-2011 Centro Svizzero di Calcolo Scientifico (CSCS)
+ * Licensed under the GPLv2.
+ */
+#include "parser_internal.h"
+
+const struct element_handler basil_1_0_elements[] = {
+	[BT_MESSAGE]	= {
+			.tag	= "Message",
+			.depth	= 0xff,	/* unused, can appear at any depth */
+			.uniq	= false,
+			.hnd	= eh_message
+	},
+	[BT_RESPONSE]	= {
+			.tag	= "BasilResponse",
+			.depth	= 0,
+			.uniq	= true,
+			.hnd	= eh_response
+	},
+	[BT_RESPDATA]	= {
+			.tag	= "ResponseData",
+			.depth	= 1,
+			.uniq	= true,
+			.hnd	= eh_resp_data
+	},
+	[BT_RESERVED]	= {
+			.tag	= "Reserved",
+			.depth	= 2,
+			.uniq	= true,
+			.hnd	= eh_reserved
+	},
+	[BT_CONFIRMED]	= {
+			.tag	= "Confirmed",
+			.depth	= 2,
+			.uniq	= true,
+			.hnd	= NULL
+	},
+	[BT_RELEASED]	= {
+			.tag	= "Released",
+			.depth	= 2,
+			.uniq	= true,
+			.hnd	= NULL
+	},
+	[BT_ENGINE]	= {
+			.tag	= "Engine",
+			.depth	= 2,
+			.uniq	= true,
+			.hnd	= eh_engine
+	},
+	[BT_INVENTORY]	= {
+			.tag	= "Inventory",
+			.depth	= 2,
+			.uniq	= true,
+			.hnd	= NULL
+	},
+	[BT_NODEARRAY]	= {
+			.tag	= "NodeArray",
+			.depth	= 3,
+			.uniq	= true,
+			.hnd	= NULL
+	},
+	[BT_NODE]	= {
+			.tag	= "Node",
+			.depth	= 4,
+			.uniq	= false,
+			.hnd	= eh_node
+	},
+	[BT_PROCARRAY]	= {
+			.tag	= "ProcessorArray",
+			.depth	= 5,
+			.uniq	= true,
+			.hnd	= NULL
+	},
+	[BT_PROCESSOR]	= {
+			.tag	= "Processor",
+			.depth	= 6,
+			.uniq	= false,
+			.hnd	= eh_proc
+	},
+	[BT_PROCALLOC]	= {
+			.tag	= "ProcessorAllocation",
+			.depth	= 7,
+			.uniq	= false,
+			.hnd	= eh_proc_alloc
+	},
+	[BT_MEMARRAY]	= {
+			.tag	= "MemoryArray",
+			.depth	= 5,
+			.uniq	= true,
+			.hnd	= NULL
+	},
+	[BT_MEMORY]	= {
+			.tag	= "Memory",
+			.depth	= 6,
+			.uniq	= false,
+			.hnd	= eh_mem
+	},
+	[BT_MEMALLOC]	= {
+			.tag	= "MemoryAllocation",
+			.depth	= 7,
+			.uniq	= false,
+			.hnd	= eh_mem_alloc
+	},
+	[BT_LABELARRAY]	= {
+			.tag	= "LabelArray",
+			.depth	= 5,
+			.uniq	= true,
+			.hnd	= NULL
+	},
+	[BT_LABEL]	= {
+			.tag	= "Label",
+			.depth	= 6,
+			.uniq	= false,
+			.hnd	= eh_label
+	},
+	[BT_RESARRAY]	= {
+			.tag	= "ReservationArray",
+			.depth	= 3,
+			.uniq	= true,
+			.hnd	= NULL
+	},
+	[BT_RESVN]	= {
+			.tag	= "Reservation",
+			.depth	= 4,
+			.uniq	= false,
+			.hnd	= eh_resv
+	},
+	[BT_1_0_MAX]	= {
+			NULL, 0, 0, NULL
+	}
+};
diff --git a/src/plugins/select/cray/libalps/parser_basil_1.1.c b/src/plugins/select/cray/libalps/parser_basil_1.1.c
new file mode 100644
index 000000000..06607bdc9
--- /dev/null
+++ b/src/plugins/select/cray/libalps/parser_basil_1.1.c
@@ -0,0 +1,186 @@
+/*
+ * XML tag handlers specific to Basil 1.1 (as used on XT systems up to CLE 2.x).
+ *
+ * Copyright (c) 2009-2011 Centro Svizzero di Calcolo Scientifico (CSCS)
+ * Licensed under the GPLv2.
+ */
+#include "parser_internal.h"
+
+/** Basil 1.1 'Reservation' element (more attributes than in Basil 1.0) */
+void eh_resv_1_1(struct ud *ud, const XML_Char **attrs)
+{
+	char *attribs[] = { "time_stamp", "batch_id" };
+
+	eh_resv(ud, attrs);
+	extract_attributes(attrs, attribs, ARRAY_SIZE(attribs));
+
+	if (ud->ud_inventory) {
+		struct basil_rsvn *cur = ud->ud_inventory->rsvn_head;
+
+		if (atotime_t(attribs[0], &cur->timestamp) < 0)
+			fatal("illegal timestamp '%s'", attribs[0]);
+		strncpy(cur->batch_id, attribs[1], sizeof(cur->batch_id));
+	}
+}
+
+const struct element_handler basil_1_1_elements[] = {
+	[BT_MESSAGE]	= {
+			.tag	= "Message",
+			.depth	= 0xff,	/* unused, can appear at any depth */
+			.uniq	= false,
+			.hnd	= eh_message
+	},
+	[BT_RESPONSE]	= {
+			.tag	= "BasilResponse",
+			.depth	= 0,
+			.uniq	= true,
+			.hnd	= eh_response
+	},
+	[BT_RESPDATA]	= {
+			.tag	= "ResponseData",
+			.depth	= 1,
+			.uniq	= true,
+			.hnd	= eh_resp_data
+	},
+	[BT_RESERVED]	= {
+			.tag	= "Reserved",
+			.depth	= 2,
+			.uniq	= true,
+			.hnd	= eh_reserved
+	},
+	[BT_CONFIRMED]	= {
+			.tag	= "Confirmed",
+			.depth	= 2,
+			.uniq	= true,
+			.hnd	= NULL
+	},
+	[BT_RELEASED]	= {
+			.tag	= "Released",
+			.depth	= 2,
+			.uniq	= true,
+			.hnd	= NULL
+	},
+	[BT_ENGINE]	= {
+			.tag	= "Engine",
+			.depth	= 2,
+			.uniq	= true,
+			.hnd	= eh_engine
+	},
+	[BT_INVENTORY]	= {
+			.tag	= "Inventory",
+			.depth	= 2,
+			.uniq	= true,
+			.hnd	= NULL
+	},
+	[BT_NODEARRAY]	= {
+			.tag	= "NodeArray",
+			.depth	= 3,
+			.uniq	= true,
+			.hnd	= NULL
+	},
+	[BT_NODE]	= {
+			.tag	= "Node",
+			.depth	= 4,
+			.uniq	= false,
+			.hnd	= eh_node
+	},
+	[BT_SEGMARRAY]	= {
+			.tag	= "SegmentArray",
+			.depth	= 5,
+			.uniq	= true,
+			.hnd	= NULL
+	},
+	[BT_SEGMENT]	= {
+			.tag	= "Segment",
+			.depth	= 6,
+			.uniq	= false,
+			.hnd	= eh_segment
+	},
+	[BT_PROCARRAY]	= {
+			.tag	= "ProcessorArray",
+			.depth	= 7,
+			.uniq	= true,
+			.hnd	= NULL
+	},
+	[BT_PROCESSOR]	= {
+			.tag	= "Processor",
+			.depth	= 8,
+			.uniq	= false,
+			.hnd	= eh_proc
+	},
+	[BT_PROCALLOC]	= {
+			.tag	= "ProcessorAllocation",
+			.depth	= 9,
+			.uniq	= false,
+			.hnd	= eh_proc_alloc
+	},
+	[BT_MEMARRAY]	= {
+			.tag	= "MemoryArray",
+			.depth	= 7,
+			.uniq	= true,
+			.hnd	= NULL
+	},
+	[BT_MEMORY]	= {
+			.tag	= "Memory",
+			.depth	= 8,
+			.uniq	= false,
+			.hnd	= eh_mem
+	},
+	[BT_MEMALLOC]	= {
+			.tag	= "MemoryAllocation",
+			.depth	= 9,
+			.uniq	= false,
+			.hnd	= eh_mem_alloc
+	},
+	[BT_LABELARRAY]	= {
+			.tag	= "LabelArray",
+			.depth	= 7,
+			.uniq	= true,
+			.hnd	= NULL
+	},
+	[BT_LABEL]	= {
+			.tag	= "Label",
+			.depth	= 8,
+			.uniq	= false,
+			.hnd	= eh_label
+	},
+	[BT_RESARRAY]	= {
+			.tag	= "ReservationArray",
+			.depth	= 3,
+			.uniq	= true,
+			.hnd	= NULL
+	},
+	[BT_RESVN]	= {
+			.tag	= "Reservation",
+			.depth	= 4,
+			.uniq	= false,
+			.hnd	= eh_resv_1_1
+	},
+	[BT_APPARRAY]	= {
+			.tag	= "ApplicationArray",
+			.depth	= 5,
+			.uniq	= true,
+			.hnd	= NULL
+	},
+	[BT_APPLICATION]	= {
+			.tag	= "Application",
+			.depth	= 6,
+			.uniq	= false,
+			.hnd	= eh_application
+	},
+	[BT_CMDARRAY]	= {
+			.tag	= "CommandArray",
+			.depth	= 7,
+			.uniq	= true,
+			.hnd	= NULL
+	},
+	[BT_COMMAND]	= {
+			.tag	= "Command",
+			.depth	= 8,
+			.uniq	= false,
+			.hnd	= eh_command
+	},
+	[BT_1_1_MAX]	= {
+			NULL, 0, 0, NULL
+	}
+};
diff --git a/src/plugins/select/cray/libalps/parser_basil_3.1.c b/src/plugins/select/cray/libalps/parser_basil_3.1.c
new file mode 100644
index 000000000..a7d9885d4
--- /dev/null
+++ b/src/plugins/select/cray/libalps/parser_basil_3.1.c
@@ -0,0 +1,294 @@
+/*
+ * XML tag handlers specific to Basil 3.1 (Basil 1.1 variant on XE/Gemini).
+ *
+ * Copyright (c) 2009-2011 Centro Svizzero di Calcolo Scientifico (CSCS)
+ * Licensed under the GPLv2.
+ */
+#include "parser_internal.h"
+
+/** Basil 3.1 and above 'ReservedNode' element */
+void eh_resvd_node(struct ud *ud, const XML_Char **attrs)
+{
+	char *attribs[] = { "node_id" };
+	uint32_t node_id;
+
+	extract_attributes(attrs, attribs, ARRAY_SIZE(attribs));
+
+	if (atou32(attribs[0], &node_id) < 0)
+		fatal("illegal node_id = %s", attribs[0]);
+	if (ns_add_node(&ud->bp->mdata.res->rsvd_nodes, node_id) < 0)
+		fatal("could not add node %u", node_id);
+}
+
+/** Basil 3.1 and above 'Confirmed' element */
+void eh_confirmed(struct ud *ud, const XML_Char **attrs)
+{
+	char *attribs[] = { "reservation_id", "pagg_id" };
+	uint32_t rsvn_id;
+	uint64_t pagg_id;
+
+	extract_attributes(attrs, attribs, ARRAY_SIZE(attribs));
+
+	if (atou32(attribs[0], &rsvn_id) < 0)
+		fatal("illegal rsvn_id = %s", attribs[0]);
+	if (rsvn_id != ud->bp->mdata.res->rsvn_id)
+		fatal("rsvn_id mismatch '%s'", attribs[0]);
+	if (atou64(attribs[1], &pagg_id) < 0)
+		fatal("illegal pagg_id = %s", attribs[1]);
+	if (pagg_id != ud->bp->mdata.res->pagg_id)
+		fatal("pagg_id mismatch '%s'", attribs[1]);
+}
+
+/** Basil 3.1 'Released' element */
+void eh_released_3_1(struct ud *ud, const XML_Char **attrs)
+{
+	char *attribs[] = { "reservation_id" };
+	uint32_t rsvn_id;
+
+	extract_attributes(attrs, attribs, ARRAY_SIZE(attribs));
+
+	if (atou32(attribs[0], &rsvn_id) < 0)
+		fatal("illegal rsvn_id = %s", attribs[0]);
+	if (rsvn_id != ud->bp->mdata.res->rsvn_id)
+		fatal("rsvn_id mismatch '%s'", attribs[0]);
+}
+
+/** Basil 3.1 and above 'Engine' element */
+void eh_engine_3_1(struct ud *ud, const XML_Char **attrs)
+{
+	char *attribs[] = { "basil_support" };
+
+	eh_engine(ud, attrs);
+	extract_attributes(attrs, attribs, ARRAY_SIZE(attribs));
+}
+
+/** Basil 3.1 and above 'Inventory' element */
+void eh_inventory_3_1(struct ud *ud, const XML_Char **attrs)
+{
+	char *attribs[] = { "mpp_host", "timestamp" };
+	struct basil_inventory *inv = ud->bp->mdata.inv;
+
+	extract_attributes(attrs, attribs, ARRAY_SIZE(attribs));
+
+	strncpy(inv->mpp_host, attribs[0], sizeof(inv->mpp_host));
+	if (atotime_t(attribs[1], &inv->timestamp) < 0)
+		fatal("illegal timestamp = %s", attribs[1]);
+}
+
+/** Basil 3.1 and above 'Node' element */
+void eh_node_3_1(struct ud *ud, const XML_Char **attrs)
+{
+	char *attribs[] = { "router_id" };
+	/*
+	 * The 'router_id' element can be used to determine the interconnect:
+	 * - on Gemini systems the 'Node' element has this attribute,
+	 * - on SeaStar systems the 'Node' element does not have this attribute.
+	 */
+	ud->bp->mdata.inv->is_gemini = true;
+
+	eh_node(ud, attrs);
+	extract_attributes(attrs, attribs, ARRAY_SIZE(attribs));
+
+	if (ud->ud_inventory) {
+		struct basil_node *current = ud->ud_inventory->node_head;
+
+		if (atou32(attribs[0], &current->router_id) < 0)
+			fatal("illegal router_id = %s", attribs[0]);
+	}
+}
+
+/** Basil 3.1 and above 'Reservation' element */
+void eh_resv_3_1(struct ud *ud, const XML_Char **attrs)
+{
+	char *attribs[] = { "reservation_mode", "gpc_mode" };
+
+	eh_resv_1_1(ud, attrs);
+	extract_attributes(attrs, attribs, ARRAY_SIZE(attribs));
+
+	if (ud->ud_inventory) {
+		struct basil_rsvn *cur = ud->ud_inventory->rsvn_head;
+
+		for (cur->rsvn_mode = BRM_EXCLUSIVE;
+		     cur->rsvn_mode < BRM_MAX; cur->rsvn_mode++)
+			if (strcmp(attribs[0], nam_rsvn_mode[cur->rsvn_mode]) == 0)
+				break;
+		for (cur->gpc_mode = BGM_NONE;
+		     cur->gpc_mode < BGM_MAX; cur->gpc_mode++)
+			if (strcmp(attribs[1], nam_gpc_mode[cur->gpc_mode]) == 0)
+				break;
+	}
+}
+
+const struct element_handler basil_3_1_elements[] = {
+	[BT_MESSAGE]	= {
+			.tag	= "Message",
+			.depth	= 0xff,	/* unused, can appear at any depth */
+			.uniq	= false,
+			.hnd	= eh_message
+	},
+	[BT_RESPONSE]	= {
+			.tag	= "BasilResponse",
+			.depth	= 0,
+			.uniq	= true,
+			.hnd	= eh_response
+	},
+	[BT_RESPDATA]	= {
+			.tag	= "ResponseData",
+			.depth	= 1,
+			.uniq	= true,
+			.hnd	= eh_resp_data
+	},
+	[BT_RESERVED]	= {
+			.tag	= "Reserved",
+			.depth	= 2,
+			.uniq	= true,
+			.hnd	= eh_reserved
+	},
+	[BT_RESVDNODEARRAY] = {
+			.tag	= "ReservedNodeArray",
+			.depth	= 3,
+			.uniq	= true,
+			.hnd	= NULL
+	},
+	[BT_RESVDNODE] = {
+			.tag	= "ReservedNode",
+			.depth	= 4,
+			.uniq	= false,
+			.hnd	= eh_resvd_node
+	},
+	[BT_CONFIRMED]	= {
+			.tag	= "Confirmed",
+			.depth	= 2,
+			.uniq	= true,
+			.hnd	= eh_confirmed
+	},
+	[BT_RELEASED]	= {
+			.tag	= "Released",
+			.depth	= 2,
+			.uniq	= true,
+			.hnd	= eh_released_3_1
+	},
+	[BT_ENGINE]	= {
+			.tag	= "Engine",
+			.depth	= 2,
+			.uniq	= true,
+			.hnd	= eh_engine_3_1
+	},
+	[BT_INVENTORY]	= {
+			.tag	= "Inventory",
+			.depth	= 2,
+			.uniq	= true,
+			.hnd	= eh_inventory_3_1
+	},
+	[BT_NODEARRAY]	= {
+			.tag	= "NodeArray",
+			.depth	= 3,
+			.uniq	= true,
+			.hnd	= NULL
+	},
+	[BT_NODE]	= {
+			.tag	= "Node",
+			.depth	= 4,
+			.uniq	= false,
+			.hnd	= eh_node_3_1
+	},
+	[BT_SEGMARRAY]	= {
+			.tag	= "SegmentArray",
+			.depth	= 5,
+			.uniq	= true,
+			.hnd	= NULL
+	},
+	[BT_SEGMENT]	= {
+			.tag	= "Segment",
+			.depth	= 6,
+			.uniq	= false,
+			.hnd	= eh_segment
+	},
+	[BT_PROCARRAY]	= {
+			.tag	= "ProcessorArray",
+			.depth	= 7,
+			.uniq	= true,
+			.hnd	= NULL
+	},
+	[BT_PROCESSOR]	= {
+			.tag	= "Processor",
+			.depth	= 8,
+			.uniq	= false,
+			.hnd	= eh_proc
+	},
+	[BT_PROCALLOC]	= {
+			.tag	= "ProcessorAllocation",
+			.depth	= 9,
+			.uniq	= false,
+			.hnd	= eh_proc_alloc
+	},
+	[BT_MEMARRAY]	= {
+			.tag	= "MemoryArray",
+			.depth	= 7,
+			.uniq	= true,
+			.hnd	= NULL
+	},
+	[BT_MEMORY]	= {
+			.tag	= "Memory",
+			.depth	= 8,
+			.uniq	= false,
+			.hnd	= eh_mem
+	},
+	[BT_MEMALLOC]	= {
+			.tag	= "MemoryAllocation",
+			.depth	= 9,
+			.uniq	= false,
+			.hnd	= eh_mem_alloc
+	},
+	[BT_LABELARRAY]	= {
+			.tag	= "LabelArray",
+			.depth	= 7,
+			.uniq	= true,
+			.hnd	= NULL
+	},
+	[BT_LABEL]	= {
+			.tag	= "Label",
+			.depth	= 8,
+			.uniq	= false,
+			.hnd	= eh_label
+	},
+	[BT_RESARRAY]	= {
+			.tag	= "ReservationArray",
+			.depth	= 3,
+			.uniq	= true,
+			.hnd	= NULL
+	},
+	[BT_RESVN]	= {
+			.tag	= "Reservation",
+			.depth	= 4,
+			.uniq	= false,
+			.hnd	= eh_resv_3_1
+	},
+	[BT_APPARRAY]	= {
+			.tag	= "ApplicationArray",
+			.depth	= 5,
+			.uniq	= true,
+			.hnd	= NULL
+	},
+	[BT_APPLICATION]	= {
+			.tag	= "Application",
+			.depth	= 6,
+			.uniq	= false,
+			.hnd	= eh_application
+	},
+	[BT_CMDARRAY]	= {
+			.tag	= "CommandArray",
+			.depth	= 7,
+			.uniq	= true,
+			.hnd	= NULL
+	},
+	[BT_COMMAND]	= {
+			.tag	= "Command",
+			.depth	= 8,
+			.uniq	= false,
+			.hnd	= eh_command
+	},
+	[BT_3_1_MAX]	= {
+			NULL, 0, 0, NULL
+	}
+};
diff --git a/src/plugins/select/cray/libalps/parser_basil_4.0.c b/src/plugins/select/cray/libalps/parser_basil_4.0.c
new file mode 100644
index 000000000..4ef61fd94
--- /dev/null
+++ b/src/plugins/select/cray/libalps/parser_basil_4.0.c
@@ -0,0 +1,362 @@
+/*
+ * XML tag handlers specific to Basil 4.0 (development release)
+ *
+ * Copyright (c) 2009-2011 Centro Svizzero di Calcolo Scientifico (CSCS)
+ * Licensed under the GPLv2.
+ */
+#include "parser_internal.h"
+
+/** Basil 4.0 'Released' element */
+static void eh_released_4_0(struct ud *ud, const XML_Char **attrs)
+{
+	char *attribs[] = { "claims" };
+	/*
+	 * The 'claims' attribute is new in Basil 4.0 and indicates the
+	 * number of claims still outstanding against the reservation.
+	 * If the 'claims' value is 0, the reservation is assured to have
+	 * been removed.
+	 */
+	/* Parse the attributes shared with Basil 3.1 first. */
+	eh_released_3_1(ud, attrs);
+	extract_attributes(attrs, attribs, ARRAY_SIZE(attribs));
+
+	/* Store the outstanding-claims count; abort on malformed input. */
+	if (atou32(attribs[0], &ud->bp->mdata.res->claims) < 0)
+		fatal("illegal claims = %s", attribs[0]);
+}
+
+/** Basil 4.0 'NodeArray' element */
+static void eh_node_array_4_0(struct ud *ud, const XML_Char **attrs)
+{
+	char *attribs[] = { "changecount" };
+	/*
+	 * The 'changecount' attribute is new in Basil 4.0. Quoting Basil 1.2
+	 * documentation:
+	 * "A new attribute to the NodeArray element in both QUERY(INVENTORY)
+	 *  method requests and responses, changecount, is used to associate a
+	 *  single value (the number of changes to the set of data since
+	 *  initialization) with all values found in node data (exempting
+	 *  resource allocation data). In a QUERY(INVENTORY) method response
+	 *  that includes node data, the value of the changecount attribute of
+	 *  the NodeArray element is monotonically increasing, starting at '1'.
+	 *
+	 *  Each time any data contained within the NodeArray element changes
+	 *  (again, exempting resource allocation data like memory allocations,
+	 *  processor allocations, or accelerator allocations), the value of the
+	 *  changecount attribute is incremented. If a node's state transitions
+	 *  from up to down, the value will be incremented. If that same node's
+	 *  state again transitions, this time from down to up, the value will
+	 *  again be incremented, and thus be different from the original value,
+	 *  even though the starting and final data is identical.
+	 *
+	 *  In other words, it is possible for the node data sections of two
+	 *  QUERY(INVENTORY) method responses to be identical except for the
+	 *  value of the changecount attribute in each of the NodeArray elements.
+	 */
+	extract_attributes(attrs, attribs, ARRAY_SIZE(attribs));
+
+	/* Record the inventory generation count; abort on malformed input. */
+	if (atou64(attribs[0], &ud->bp->mdata.inv->change_count) < 0)
+		fatal("illegal change_count = %s", attribs[0]);
+}
+
+/** Basil 4.0 'Accelerator' element */
+void eh_accel(struct ud *ud, const XML_Char **attrs)
+{
+	struct basil_node_accelerator accel = {0};
+	char *attribs[] = { "ordinal", "type", "state", "family",
+			    "memory_mb", "clock_mhz" };
+
+	extract_attributes(attrs, attribs, ARRAY_SIZE(attribs));
+
+	if (atou32(attribs[0], &accel.ordinal) < 0)
+		fatal("illegal ordinal = %s", attribs[0]);
+	if (accel.ordinal != 0)		/* Basil 4.0: only 1 GPU/Node */
+		fatal("Basil 4.0 Accelerator.ordinal > 0 (%u)", accel.ordinal);
+
+	/* Scan the type name table; falls through to BA_MAX if unmatched. */
+	for (accel.type = BA_GPU; accel.type < BA_MAX; accel.type++)
+		if (strcmp(attribs[1], nam_acceltype[accel.type]) == 0)
+			break;
+	if (accel.type != BA_GPU)	/* Basil 4.0: GPU only supported type */
+		fatal("Basil 4.0 Accelerator.type not 'GPU' (%s)", attribs[1]);
+
+	/* Scan the state name table; falls through to BAS_MAX if unmatched. */
+	for (accel.state = BAS_UP; accel.state < BAS_MAX; accel.state++)
+		if (strcmp(attribs[2], nam_accelstate[accel.state]) == 0)
+			break;
+
+	/* NOTE(review): strncpy() leaves accel.family without a NUL
+	 * terminator when the attribute is >= sizeof(accel.family) chars
+	 * long — confirm ALPS family strings always fit. */
+	strncpy(accel.family, attribs[3], sizeof(accel.family));
+
+	if (atou32(attribs[4], &accel.memory_mb) < 0)
+		fatal("illegal Accelerator.memory_mb = %s", attribs[4]);
+
+	if (atou32(attribs[5], &accel.clock_mhz) < 0)
+		fatal("illegal Accelerator.clock_mhz = %s", attribs[5]);
+
+	/* Prepend a copy to the current node's accelerator list. */
+	if (ud->ud_inventory) {
+		struct basil_node_accelerator *new = xmalloc(sizeof(*new));
+
+		*new = accel;
+		xassert(ud->ud_inventory->node_head != NULL);
+		xassert(ud->ud_inventory->node_head->accel_head == NULL);
+
+		/* NOTE(review): the xassert above requires accel_head to be
+		 * NULL, so with asserts enabled this branch is dead code. */
+		if (ud->ud_inventory->node_head->accel_head)
+			new->next = ud->ud_inventory->node_head->accel_head;
+		ud->ud_inventory->node_head->accel_head = new;
+	}
+}
+
+/** Basil 4.0 'AcceleratorAllocation' element */
+void eh_accel_alloc(struct ud *ud, const XML_Char **attrs)
+{
+	char *attribs[] = { "reservation_id" };
+	uint32_t rsvn_id;
+
+	extract_attributes(attrs, attribs, ARRAY_SIZE(attribs));
+
+	if (atou32(attribs[0], &rsvn_id) < 0)
+		fatal("illegal Accelerator reservation_id = %s", attribs[0]);
+
+	/* Attach the allocation to the most recently parsed accelerator
+	 * (the list head prepended by eh_accel). */
+	if (ud->ud_inventory) {
+		struct basil_accel_alloc *new = xmalloc(sizeof(*new));
+
+		new->rsvn_id = rsvn_id;
+
+		xassert(ud->ud_inventory->node_head != NULL);
+		xassert(ud->ud_inventory->node_head->accel_head != NULL);
+		xassert(!ud->ud_inventory->node_head->accel_head->allocation);
+
+		ud->ud_inventory->node_head->accel_head->allocation = new;
+	}
+}
+
+/** Basil 4.0 SWITCH method 'Reservation' element — logs status only. */
+void eh_switch_resv(struct ud *ud, const XML_Char **attrs)
+{
+	char *attribs[] = { "reservation_id", "status" };
+
+	extract_attributes(attrs, attribs, ARRAY_SIZE(attribs));
+
+	debug2("resv id %s switch status is %s", attribs[0], attribs[1]);
+}
+
+/** Basil 4.0 SWITCH method 'Application' element — logs status only. */
+void eh_switch_app(struct ud *ud, const XML_Char **attrs)
+{
+	char *attribs[] = { "application_id", "status" };
+
+	extract_attributes(attrs, attribs, ARRAY_SIZE(attribs));
+
+	debug2("app id %s switch status is %s", attribs[0], attribs[1]);
+}
+
+
+/*
+ * Per-tag dispatch table for Basil 4.0/4.1 responses, indexed by
+ * enum basil_element.  'depth' is the nesting level at which the tag is
+ * expected, 'uniq' enforces at-most-one occurrence per document, and
+ * 'hnd' parses the tag's attributes (NULL for attribute-less containers).
+ */
+const struct element_handler basil_4_0_elements[] = {
+	[BT_MESSAGE]	= {
+			.tag	= "Message",
+			.depth	= 0xff,	/* unused, can appear at any depth */
+			.uniq	= false,
+			.hnd	= eh_message
+	},
+	[BT_RESPONSE]	= {
+			.tag	= "BasilResponse",
+			.depth	= 0,
+			.uniq	= true,
+			.hnd	= eh_response
+	},
+	[BT_RESPDATA]	= {
+			.tag	= "ResponseData",
+			.depth	= 1,
+			.uniq	= true,
+			.hnd	= eh_resp_data
+	},
+	[BT_RESERVED]	= {
+			.tag	= "Reserved",
+			.depth	= 2,
+			.uniq	= true,
+			.hnd	= eh_reserved
+	},
+	[BT_RESVDNODEARRAY] = {
+			.tag	= "ReservedNodeArray",
+			.depth	= 3,
+			.uniq	= true,
+			.hnd	= NULL
+	},
+	[BT_RESVDNODE] = {
+			.tag	= "ReservedNode",
+			.depth	= 4,
+			.uniq	= false,
+			.hnd	= eh_resvd_node
+	},
+	[BT_CONFIRMED]	= {
+			.tag	= "Confirmed",
+			.depth	= 2,
+			.uniq	= true,
+			.hnd	= eh_confirmed
+	},
+	[BT_RELEASED]	= {
+			.tag	= "Released",
+			.depth	= 2,
+			.uniq	= true,
+			.hnd	= eh_released_4_0
+	},
+	[BT_ENGINE]	= {
+			.tag	= "Engine",
+			.depth	= 2,
+			.uniq	= true,
+			.hnd	= eh_engine_3_1
+	},
+	[BT_INVENTORY]	= {
+			.tag	= "Inventory",
+			.depth	= 2,
+			.uniq	= true,
+			.hnd	= eh_inventory_3_1
+	},
+	[BT_NODEARRAY]	= {
+			.tag	= "NodeArray",
+			.depth	= 3,
+			.uniq	= true,
+			.hnd	= eh_node_array_4_0
+	},
+	[BT_NODE]	= {
+			.tag	= "Node",
+			.depth	= 4,
+			.uniq	= false,
+			.hnd	= eh_node_3_1
+	},
+	[BT_SEGMARRAY]	= {
+			.tag	= "SegmentArray",
+			.depth	= 5,
+			.uniq	= true,
+			.hnd	= NULL
+	},
+	[BT_SEGMENT]	= {
+			.tag	= "Segment",
+			.depth	= 6,
+			.uniq	= false,
+			.hnd	= eh_segment
+	},
+	[BT_PROCARRAY]	= {
+			.tag	= "ProcessorArray",
+			.depth	= 7,
+			.uniq	= true,
+			.hnd	= NULL
+	},
+	[BT_PROCESSOR]	= {
+			.tag	= "Processor",
+			.depth	= 8,
+			.uniq	= false,
+			.hnd	= eh_proc
+	},
+	[BT_PROCALLOC]	= {
+			.tag	= "ProcessorAllocation",
+			.depth	= 9,
+			.uniq	= false,
+			.hnd	= eh_proc_alloc
+	},
+	[BT_MEMARRAY]	= {
+			.tag	= "MemoryArray",
+			.depth	= 7,
+			.uniq	= true,
+			.hnd	= NULL
+	},
+	[BT_MEMORY]	= {
+			.tag	= "Memory",
+			.depth	= 8,
+			.uniq	= false,
+			.hnd	= eh_mem
+	},
+	[BT_MEMALLOC]	= {
+			.tag	= "MemoryAllocation",
+			.depth	= 9,
+			.uniq	= false,
+			.hnd	= eh_mem_alloc
+	},
+	[BT_LABELARRAY]	= {
+			.tag	= "LabelArray",
+			.depth	= 7,
+			.uniq	= true,
+			.hnd	= NULL
+	},
+	[BT_LABEL]	= {
+			.tag	= "Label",
+			.depth	= 8,
+			.uniq	= false,
+			.hnd	= eh_label
+	},
+	[BT_ACCELARRAY]	= {
+			.tag	= "AcceleratorArray",
+			.depth	= 5,
+			.uniq	= true,
+			.hnd	= NULL
+	},
+	[BT_ACCEL]	= {
+			.tag	= "Accelerator",
+			.depth	= 6,
+			.uniq	= false,
+			.hnd	= eh_accel
+	},
+	[BT_ACCELALLOC]	= {
+			.tag	= "AcceleratorAllocation",
+			.depth	= 7,
+			.uniq	= false,
+			.hnd	= eh_accel_alloc
+	},
+	[BT_RESARRAY]	= {
+			.tag	= "ReservationArray",
+			.depth	= 3,
+			.uniq	= true,
+			.hnd	= NULL
+	},
+	[BT_RESVN]	= {
+			.tag	= "Reservation",
+			.depth	= 4,
+			.uniq	= false,
+			.hnd	= eh_resv_3_1
+	},
+	[BT_APPARRAY]	= {
+			.tag	= "ApplicationArray",
+			.depth	= 5,
+			.uniq	= true,
+			.hnd	= NULL
+	},
+	[BT_APPLICATION]	= {
+			.tag	= "Application",
+			.depth	= 6,
+			.uniq	= false,
+			.hnd	= eh_application
+	},
+	[BT_CMDARRAY]	= {
+			.tag	= "CommandArray",
+			.depth	= 7,
+			.uniq	= true,
+			.hnd	= NULL
+	},
+	[BT_COMMAND]	= {
+			.tag	= "Command",
+			.depth	= 8,
+			.uniq	= false,
+			.hnd	= eh_command
+	},
+	/* The SWITCH method reuses tag names at different depths; the
+	 * start/end handlers remap them to these BT_SWITCH* entries. */
+	[BT_SWITCHRES]	= {
+			.tag	= "Reservation",
+			.depth	= 3,
+			.uniq	= false,
+			.hnd	= eh_switch_resv
+	},
+	[BT_SWITCHAPP]	= {
+			.tag	= "Application",
+			.depth	= 3,
+			.uniq	= false,
+			.hnd	= eh_switch_app
+	},
+	[BT_SWITCHRESARRAY]	= {
+			.tag	= "ReservationArray",
+			.depth	= 2,
+			.uniq	= true,
+			.hnd	= NULL
+	},
+	[BT_SWITCHAPPARRAY]	= {
+			.tag	= "ApplicationArray",
+			.depth	= 2,
+			.uniq	= true,
+			.hnd	= NULL
+	},
+	[BT_4_0_MAX]	= {	/* NULL sentinel terminates table scans */
+			NULL, 0, 0, NULL
+	}
+};
diff --git a/src/plugins/select/cray/libalps/parser_common.c b/src/plugins/select/cray/libalps/parser_common.c
new file mode 100644
index 000000000..b90c1c705
--- /dev/null
+++ b/src/plugins/select/cray/libalps/parser_common.c
@@ -0,0 +1,716 @@
+/*
+ * Routines and data structures common to all BASIL versions
+ *
+ * Copyright (c) 2009-2011 Centro Svizzero di Calcolo Scientifico (CSCS)
+ * Licensed under the GPLv2.
+ */
+#include "parser_internal.h"
+#include "../parser_common.h"
+
+/* Version/method/error and attribute-value name tables, filled in
+ * elsewhere and indexed by the corresponding enums. */
+const char *bv_names[BV_MAX];
+const char *bv_names_long[BV_MAX];
+const char *bm_names[BM_MAX];
+const char *be_names[BE_MAX];
+
+const char *nam_arch[BNA_MAX];
+const char *nam_memtype[BMT_MAX];
+const char *nam_labeltype[BLT_MAX];
+const char *nam_ldisp[BLD_MAX];
+
+const char *nam_noderole[BNR_MAX];
+const char *nam_nodestate[BNS_MAX];
+const char *nam_proc[BPT_MAX];
+const char *nam_rsvn_mode[BRM_MAX];
+const char *nam_gpc_mode[BGM_MAX];
+
+/* NOTE(review): the two lines below duplicate the tentative definitions
+ * of nam_gpc_mode/nam_rsvn_mode immediately above (legal in C, but
+ * redundant — candidates for removal). */
+const char *nam_gpc_mode[BGM_MAX];
+const char *nam_rsvn_mode[BRM_MAX];
+
+/*
+ *	General-purpose routines
+ */
+/** Decode (negative) error code following a Basil response. */
+const char *basil_strerror(int rc)
+{
+	/* decode_basil_error() presumably maps rc back to a BE_* index —
+	 * defined elsewhere; verify it handles all negative codes. */
+	return be_names_long[decode_basil_error(rc)];
+}
+
+/*
+ * Overwrite @reqc attribute keys supplied in @reqv with corresponding
+ * attribute value from @attr_list.
+ *
+ * @attr_list: expat-style NULL-terminated (key, value) pair array
+ * @reqv:      in: required attribute names; out: their values
+ * @reqc:      number of entries in @reqv
+ * Aborts via fatal() if an attribute is missing or occurs twice.
+ */
+void extract_attributes(const XML_Char **attr_list, char **reqv, int reqc)
+{
+	const XML_Char **attr, *val;
+
+	while (--reqc >= 0) {
+		for (attr = attr_list, val = NULL; *attr; attr += 2)
+			if (strcmp(reqv[reqc], *attr) == 0) {
+				if (val != NULL)
+					fatal("multiple '%s' occurrences",
+					      *attr);
+				val = attr[1];
+			}
+		if (val == NULL)
+			fatal("unspecified '%s' attribute", reqv[reqc]);
+		/* Replace the key with its value (cast drops const). */
+		reqv[reqc] = (XML_Char *)val;
+	}
+}
+
+/*
+ *	XML Handlers
+ */
+
+/** Generic 'Message' element */
+void eh_message(struct ud *ud, const XML_Char **attrs)
+{
+	char *attribs[] = { "severity" };
+
+	extract_attributes(attrs, attribs, ARRAY_SIZE(attribs));
+
+	/* Message appears within ResponseData, which may set ud->error */
+	/* Seed msg with the severity prefix; the cdata handler appends
+	 * the message body afterwards. */
+	if (ud->error == BE_NONE)
+		snprintf(ud->bp->msg, sizeof(ud->bp->msg), "%s: ", attribs[0]);
+}
+
+/** Generic 'BasilResponse' element */
+void eh_response(struct ud *ud, const XML_Char **attrs)
+{
+	char *attribs[] = { "protocol" };
+
+	extract_attributes(attrs, attribs, ARRAY_SIZE(attribs));
+	/*
+	 * When the method call failed (ResponseData with status="FAILURE"),
+	 * it can happen that ALPS sets the 'protocol' to the empty string ("").
+	 */
+	/* Otherwise the protocol must match the version we asked for. */
+	if (*attribs[0] && strcmp(attribs[0], bv_names[ud->bp->version]) != 0)
+		fatal("Version mismatch: expected %s, but got %s",
+		      bv_names[ud->bp->version], attribs[0]);
+}
+
+/** Generic 'ResponseData' element */
+void eh_resp_data(struct ud *ud, const XML_Char **attrs)
+{
+	char *attr_std[] = { "method", "status" };
+
+	extract_attributes(attrs, attr_std, ARRAY_SIZE(attr_std));
+
+	if (strcmp(attr_std[1], "SUCCESS") == 0) {
+		ud->error = BE_NONE;
+		/*
+		 * When the method call failed, ALPS in some cases sets the
+		 * 'method' to "UNDEFINED", hence verify this on success only.
+		 */
+		if (strcmp(attr_std[0], bm_names[ud->bp->method]) != 0)
+			fatal("method mismatch in=%s, out=%s",
+			      bm_names[ud->bp->method], attr_std[0]);
+	} else {
+		char *attr_err[] = { "error_source", "error_class" };
+
+		extract_attributes(attrs, attr_err, ARRAY_SIZE(attr_err));
+
+		/* Map error_source to a BE_* code; falls through to
+		 * BE_UNKNOWN when the name is not recognized. */
+		for (ud->error = BE_INTERNAL;
+		     ud->error < BE_UNKNOWN; ud->error++)
+			if (strcmp(attr_err[0], be_names[ud->error]) == 0)
+				break;
+
+		snprintf(ud->bp->msg, sizeof(ud->bp->msg), "%s ALPS %s error: ",
+			 attr_err[1], be_names[ud->error]);
+
+		/* BE_TRANSIENT is OR-ed in as a flag on top of the code. */
+		if (strcmp(attr_err[1], "TRANSIENT") == 0)
+			ud->error |= BE_TRANSIENT;
+	}
+}
+
+/** Basil 1.0/1.1/3.1 'Reserved' element */
+void eh_reserved(struct ud *ud, const XML_Char **attrs)
+{
+	char *attribs[] = { "reservation_id" };
+	/*
+	 * The Catamount 'admin_cookie' and 'alloc_cookie' attributes
+	 * have been deprecated starting from Basil 1.1.
+	 */
+	extract_attributes(attrs, attribs, ARRAY_SIZE(attribs));
+
+	if (atou32(attribs[0], &ud->bp->mdata.res->rsvn_id) < 0)
+		fatal("illegal reservation_id = %s", attribs[0]);
+
+	/* Reset so a nested ReservedNodeArray may appear once again. */
+	ud->counter[BT_RESVDNODEARRAY] = 0;	/* Basil 3.1 */
+}
+
+/** Basil 1.0/1.1 'Engine' element */
+void eh_engine(struct ud *ud, const XML_Char **attrs)
+{
+	char *attribs[] = { "name", "version" };
+	/*
+	 * Basil 3.1 has an additional attribute 'basil_support' which
+	 * contains a comma-separated list of supported Basil versions.
+	 */
+	extract_attributes(attrs, attribs, ARRAY_SIZE(attribs));
+
+	if (strcmp(attribs[0], "ALPS") != 0)
+		fatal("unknown engine name '%s'", attribs[0]);
+	/* NOTE(review): strncpy() will not NUL-terminate msg if the
+	 * version string is >= sizeof(msg) — confirm inputs fit. */
+	strncpy(ud->bp->msg, attribs[1], sizeof(ud->bp->msg));
+}
+
+/** Basil 1.0/1.1 'Node' element  */
+void eh_node(struct ud *ud, const XML_Char **attrs)
+{
+	struct basil_node node = {0};
+	char *attribs[] = { "node_id", "name", "architecture",
+			    "role", "state" };
+	/*
+	 * Basil 3.1 in addition has a 'router_id' attribute.
+	 */
+	extract_attributes(attrs, attribs, ARRAY_SIZE(attribs));
+
+	if (atou32(attribs[0], &node.node_id) < 0)
+		fatal("illegal node_id = %s", attribs[0]);
+
+	strncpy(node.name, attribs[1], sizeof(node.name));
+
+	/* The three scans below fall through to the *_MAX sentinel when
+	 * the attribute value matches none of the known names. */
+	for (node.arch = BNA_X2; node.arch < BNA_MAX; node.arch++)
+		if (strcmp(attribs[2], nam_arch[node.arch]) == 0)
+			break;
+
+	for (node.role = BNR_INTER; node.role < BNR_MAX; node.role++)
+		if (strcmp(attribs[3], nam_noderole[node.role]) == 0)
+			break;
+
+	for (node.state = BNS_UP; node.state < BNS_MAX; node.state++)
+		if (strcmp(attribs[4], nam_nodestate[node.state]) == 0)
+			break;
+
+	/* Track per-node scratch state used by the BT_NODE end handler. */
+	ud->current_node.available = node.arch == BNA_XT &&
+				     node.role == BNR_BATCH &&
+				     node.state == BNS_UP;
+	ud->current_node.reserved  = false;
+
+	if (ud->ud_inventory) {
+		struct basil_node *new = xmalloc(sizeof(*new));
+
+		*new = node;
+		if (ud->ud_inventory->node_head)
+			new->next = ud->ud_inventory->node_head;
+		ud->ud_inventory->node_head = new;
+	}
+
+	/* Allow the nested arrays to appear again under this node. */
+	ud->counter[BT_SEGMARRAY]  = 0;
+	ud->counter[BT_ACCELARRAY] = 0;
+
+	/* Cover up Basil version differences by faking a segment. */
+	if (ud->bp->version < BV_1_1)
+		eh_segment(ud, NULL);
+}
+
+/** Basil 1.1/3.1 'Segment' element */
+void eh_segment(struct ud *ud, const XML_Char **attrs)
+{
+	uint32_t ordinal = 0;
+	char *attribs[] = { "ordinal" };
+
+	/* attrs == NULL when eh_node() fakes a segment for Basil < 1.1;
+	 * the ordinal then defaults to 0. */
+	if (attrs) {
+		extract_attributes(attrs, attribs, ARRAY_SIZE(attribs));
+		if (atou32(attribs[0], &ordinal) < 0)
+			fatal("illegal segment ordinal = %s", attribs[0]);
+	}
+
+	if (ud->ud_inventory) {
+		struct basil_segment *new = xmalloc(sizeof(*new));
+
+		new->ordinal = ordinal;
+		xassert(ud->ud_inventory->node_head);
+
+		if (ud->ud_inventory->node_head->seg_head)
+			new->next = ud->ud_inventory->node_head->seg_head;
+		ud->ud_inventory->node_head->seg_head = new;
+	}
+
+	/* Allow the nested arrays to appear again under this segment. */
+	ud->counter[BT_PROCARRAY]  = 0;
+	ud->counter[BT_MEMARRAY]   = 0;
+	ud->counter[BT_LABELARRAY] = 0;
+}
+
+/** Generic 'Processor' element */
+void eh_proc(struct ud *ud, const XML_Char **attrs)
+{
+	struct basil_node_processor proc = {0};
+	char *attribs[] = { "ordinal", "architecture", "clock_mhz" };
+
+	extract_attributes(attrs, attribs, ARRAY_SIZE(attribs));
+
+	if (atou32(attribs[0], &proc.ordinal) < 0)
+		fatal("illegal ordinal = %s", attribs[0]);
+
+	/* Falls through to BPT_MAX when the architecture is unknown. */
+	for (proc.arch = BPT_X86_64; proc.arch < BPT_MAX; proc.arch++)
+		if (strcmp(attribs[1], nam_proc[proc.arch]) == 0)
+			break;
+
+	if (atou32(attribs[2], &proc.clock_mhz) < 0)
+		fatal("illegal clock_mhz = %s", attribs[2]);
+
+	/* Prepend a copy to the current segment's processor list. */
+	if (ud->ud_inventory) {
+		struct basil_node_processor *new = xmalloc(sizeof(*new));
+
+		*new = proc;
+		xassert(ud->ud_inventory->node_head);
+		xassert(ud->ud_inventory->node_head->seg_head);
+
+		if (ud->ud_inventory->node_head->seg_head->proc_head)
+			new->next = ud->ud_inventory->node_head->
+				seg_head->proc_head;
+		ud->ud_inventory->node_head->seg_head->proc_head = new;
+	}
+}
+
+/** Generic 'ProcessorAllocation' element */
+void eh_proc_alloc(struct ud *ud, const XML_Char **attrs)
+{
+	uint32_t rsvn_id;
+	char *attribs[] = { "reservation_id" };
+
+	extract_attributes(attrs, attribs, ARRAY_SIZE(attribs));
+
+	if (atou32(attribs[0], &rsvn_id) < 0)
+		fatal("illegal reservation_id = %s", attribs[0]);
+
+	/* A node is "reserved" if it has at least one allocation */
+	ud->current_node.reserved = true;
+
+	/* Tag the most recently parsed processor (list head). */
+	if (ud->ud_inventory) {
+		xassert(ud->ud_inventory->node_head);
+		xassert(ud->ud_inventory->node_head->seg_head);
+		xassert(ud->ud_inventory->node_head->seg_head->proc_head);
+
+		ud->ud_inventory->node_head->seg_head->proc_head->rsvn_id =
+			rsvn_id;
+	}
+}
+
+/** Generic 'Memory' element */
+void eh_mem(struct ud *ud, const XML_Char **attrs)
+{
+	struct basil_node_memory memory = {0};
+	char *attribs[] = { "type", "page_size_kb", "page_count" };
+
+	extract_attributes(attrs, attribs, ARRAY_SIZE(attribs));
+
+	/* Falls through to BMT_MAX when the type name is unknown. */
+	for (memory.type = BMT_OS; memory.type < BMT_MAX; memory.type++)
+		if (strcmp(attribs[0], nam_memtype[memory.type]) == 0)
+			break;
+
+	/* Both quantities must be strictly positive. */
+	if (atou32(attribs[1], &memory.page_size_kb) < 0 ||
+	    memory.page_size_kb < 1)
+		fatal("illegal page_size_kb = %s", attribs[1]);
+
+	if (atou32(attribs[2], &memory.page_count) < 0 ||
+	    memory.page_count < 1)
+		fatal("illegal page_count = %s", attribs[2]);
+
+	/* Prepend a copy to the current segment's memory list. */
+	if (ud->ud_inventory) {
+		struct basil_node_memory *new = xmalloc(sizeof(*new));
+
+		*new = memory;
+		xassert(ud->ud_inventory->node_head);
+		xassert(ud->ud_inventory->node_head->seg_head);
+
+		if (ud->ud_inventory->node_head->seg_head->mem_head)
+			new->next = ud->ud_inventory->node_head->
+				seg_head->mem_head;
+		ud->ud_inventory->node_head->seg_head->mem_head = new;
+	}
+}
+
+/** Generic 'MemoryAllocation' element */
+void eh_mem_alloc(struct ud *ud, const XML_Char **attrs)
+{
+	struct basil_mem_alloc memalloc = {0};
+	char *attribs[] = { "reservation_id", "page_count" };
+
+	extract_attributes(attrs, attribs, ARRAY_SIZE(attribs));
+
+	if (atou32(attribs[0], &memalloc.rsvn_id) < 0)
+		fatal("illegal reservation_id = %s", attribs[0]);
+
+	if (atou32(attribs[1], &memalloc.page_count) < 0)
+		fatal("illegal page_count = %s", attribs[1]);
+
+	/* Any allocation marks the current node as reserved. */
+	ud->current_node.reserved = true;
+
+	/* Prepend to the allocation list of the current memory entry. */
+	if (ud->ud_inventory) {
+		struct basil_mem_alloc *new = xmalloc(sizeof(*new));
+
+		*new = memalloc;
+		xassert(ud->ud_inventory->node_head);
+		xassert(ud->ud_inventory->node_head->seg_head);
+		xassert(ud->ud_inventory->node_head->seg_head->mem_head);
+
+		if (ud->ud_inventory->node_head->seg_head->mem_head->a_head)
+			new->next = ud->ud_inventory->node_head->
+				seg_head->mem_head->a_head;
+		ud->ud_inventory->node_head->seg_head->mem_head->a_head = new;
+	}
+}
+
+/** Generic 'Label' element */
+void eh_label(struct ud *ud, const XML_Char **attrs)
+{
+	struct basil_label label = {0};
+	char *attribs[] = { "name", "type", "disposition" };
+
+	extract_attributes(attrs, attribs, ARRAY_SIZE(attribs));
+
+	strncpy(label.name, attribs[0], sizeof(label.name));
+
+	/* Both scans fall through to the *_MAX sentinel when unmatched. */
+	for (label.type = BLT_HARD; label.type < BLT_MAX; label.type++)
+		if (strcmp(attribs[1], nam_labeltype[label.type]) == 0)
+			break;
+
+	for (label.disp = BLD_ATTRACT; label.disp < BLD_MAX; label.disp++)
+		if (strcmp(attribs[2], nam_ldisp[label.disp]) == 0)
+			break;
+
+	/* Prepend a copy to the current segment's label list. */
+	if (ud->ud_inventory) {
+		struct basil_label *new = xmalloc(sizeof(*new));
+
+		*new = label;
+		xassert(ud->ud_inventory->node_head);
+		xassert(ud->ud_inventory->node_head->seg_head);
+
+		if (ud->ud_inventory->node_head->seg_head->lbl_head)
+			new->next = ud->ud_inventory->node_head->
+				seg_head->lbl_head;
+		ud->ud_inventory->node_head->seg_head->lbl_head = new;
+	}
+}
+
+/** Basil 1.0 'Reservation' element (1.1 and 3.1 have additional attributes). */
+void eh_resv(struct ud *ud, const XML_Char **attrs)
+{
+	uint32_t rsvn_id;
+	char *attribs[] = { "reservation_id", "user_name", "account_name" };
+
+	extract_attributes(attrs, attribs, ARRAY_SIZE(attribs));
+
+	if (atou32(attribs[0], &rsvn_id) < 0)
+		fatal("illegal reservation_id '%s'", attribs[0]);
+
+	/* Prepend a new reservation record to the inventory list. */
+	if (ud->ud_inventory) {
+		struct basil_rsvn *new = xmalloc(sizeof(*new));
+
+		new->rsvn_id = rsvn_id;
+		strncpy(new->user_name, attribs[1], sizeof(new->user_name));
+		strncpy(new->account_name, attribs[2],
+			sizeof(new->account_name));
+
+		if (ud->ud_inventory->rsvn_head)
+			new->next = ud->ud_inventory->rsvn_head;
+		ud->ud_inventory->rsvn_head = new;
+	}
+
+	/* Allow a nested ApplicationArray to appear again. */
+	ud->counter[BT_APPARRAY] = 0; /* Basil 3.1 */
+}
+
+/** Basil 1.1/3.1 'Application' element */
+void eh_application(struct ud *ud, const XML_Char **attrs)
+{
+	char *attribs[] = { "application_id", "user_id", "group_id",
+			    "time_stamp" };
+
+	extract_attributes(attrs, attribs, ARRAY_SIZE(attribs));
+
+	/* Prepend a new application record to the current reservation.
+	 * NOTE(review): new->next is only set when app_head is non-NULL,
+	 * so this relies on xmalloc() returning zero-filled memory —
+	 * presumably SLURM's xmalloc does; confirm. */
+	if (ud->ud_inventory) {
+		struct basil_rsvn_app *new = xmalloc(sizeof(*new));
+
+		if (atou64(attribs[0], &new->apid) < 0)
+			fatal("invalid application_id '%s'", attribs[0]);
+		else if (atou32(attribs[1], &new->user_id) < 0)
+			fatal("invalid user_id '%s'", attribs[1]);
+		else if (atou32(attribs[2], &new->group_id) < 0)
+			fatal("invalid group_id '%s'", attribs[2]);
+		else if (atotime_t(attribs[3], &new->timestamp) < 0)
+			fatal("invalid time_stamp '%s'", attribs[3]);
+
+		xassert(ud->ud_inventory->rsvn_head);
+		if (ud->ud_inventory->rsvn_head->app_head)
+			new->next = ud->ud_inventory->rsvn_head->app_head;
+		ud->ud_inventory->rsvn_head->app_head = new;
+	}
+
+	/* Allow a nested CommandArray to appear again. */
+	ud->counter[BT_CMDARRAY] = 0;
+}
+
+/** Basil 1.1/3.1 'Command' element */
+void eh_command(struct ud *ud, const XML_Char **attrs)
+{
+	char *attribs[] = { "width", "depth", "nppn", "memory",
+			    "architecture", "cmd" };
+
+	extract_attributes(attrs, attribs, ARRAY_SIZE(attribs));
+
+	/* Prepend a new command record to the current application. */
+	if (ud->ud_inventory) {
+		struct basil_rsvn_app_cmd *new = xmalloc(sizeof(*new));
+
+		xassert(ud->ud_inventory->rsvn_head);
+		xassert(ud->ud_inventory->rsvn_head->app_head);
+
+		if (atou32(attribs[0], &new->width) < 0)
+			fatal("invalid width '%s'", attribs[0]);
+		else if (atou32(attribs[1], &new->depth) < 0)
+			fatal("invalid depth '%s'", attribs[1]);
+		else if (atou32(attribs[2], &new->nppn) < 0)
+			fatal("invalid nppn '%s'", attribs[2]);
+		else if (atou32(attribs[3], &new->memory) < 0)
+			fatal("invalid memory '%s'", attribs[3]);
+		/* Falls through to BNA_MAX when the arch is unknown. */
+		for (new->arch = BNA_X2; new->arch < BNA_MAX; new->arch += 1)
+			if (strcmp(attribs[4], nam_arch[new->arch]) == 0)
+				break;
+		strncpy(new->cmd, attribs[5], sizeof(new->cmd));
+
+		if (ud->ud_inventory->rsvn_head->app_head->cmd_head)
+			new->next = ud->ud_inventory->rsvn_head->
+				app_head->cmd_head;
+		ud->ud_inventory->rsvn_head->app_head->cmd_head = new;
+	}
+}
+
+/*
+ *	Top-Level Handlers
+ */
+/* Dispatch-table per protocol version; versions without their own
+ * table alias an older, compatible one. */
+static const struct element_handler *basil_tables[BV_MAX] = {
+	[BV_1_0] = basil_1_0_elements,
+	[BV_1_1] = basil_1_1_elements,
+	[BV_1_2] = basil_1_1_elements,		/* Basil 1.2 behaves like 1.1 */
+	[BV_3_1] = basil_3_1_elements,
+	[BV_4_0] = basil_4_0_elements,
+	[BV_4_1] = basil_4_0_elements		/* Basil 4.1 behaves like 4.0 */
+};
+
+/**
+ * tag_to_method - Look up Basil method by tag.
+ * NOTE: This must be kept in synch with the order in %basil_element!
+ * Relies on the BT_* enum ordering for the case ranges below.
+ */
+static enum basil_method _tag_to_method(const enum basil_element tag)
+{
+	switch (tag) {
+	case BT_MESSAGE ... BT_RESPDATA:	/* generic, no method */
+		return BM_none;
+	case BT_RESVDNODEARRAY ... BT_RESVDNODE:/* RESERVE, Basil >= 3.1 */
+	case BT_RESERVED:			/* RESERVE, Basil >= 1.0 */
+		return BM_reserve;
+	case BT_CONFIRMED:
+		return BM_confirm;
+	case BT_RELEASED:
+		return BM_release;
+	case BT_ENGINE:
+		return BM_engine;
+	case BT_ACCELARRAY ... BT_ACCELALLOC:	/* INVENTORY, Basil >= 4.0 */
+	case BT_SEGMARRAY ... BT_COMMAND:	/* INVENTORY, Basil >= 1.1 */
+	case BT_INVENTORY ... BT_RESVN:		/* INVENTORY, Basil >= 1.0 */
+		return BM_inventory;
+	case BT_SWITCH ... BT_SWITCHAPPARRAY:
+		return BM_switch;
+	default:
+		return BM_UNKNOWN;
+	}
+}
+
+/* expat start-element callback: validate the tag against the version's
+ * dispatch table (depth, uniqueness, method), push it on the tag stack
+ * and invoke its attribute handler. */
+static void _start_handler(void *user_data,
+			   const XML_Char *el, const XML_Char **attrs)
+{
+	struct ud *ud = user_data;
+	const struct element_handler *table = basil_tables[ud->bp->version];
+	enum basil_method method;
+	enum basil_element tag;
+
+	/* Linear scan over the table; stops at the NULL sentinel. */
+	for (tag = BT_MESSAGE; table[tag].tag; tag++) {
+		if (strcmp(table[tag].tag, el) == 0) {
+			/* since BM_inventory is returned for Arrays
+			   if the method is switch we need to "switch"
+			   it up here.
+			*/
+			if (ud->bp->method == BM_switch) {
+				if(!strcmp(table[tag].tag, "ReservationArray"))
+					tag = BT_SWITCHRESARRAY;
+				else if(!strcmp(table[tag].tag, "Reservation"))
+					tag = BT_SWITCHRES;
+				else if(!strcmp(table[tag].tag,
+						"ApplicationArray"))
+					tag = BT_SWITCHAPPARRAY;
+				else if(!strcmp(table[tag].tag,	"Application"))
+					tag = BT_SWITCHAPP;
+			}
+			break;
+		}
+	}
+	if (table[tag].tag == NULL)
+		fatal("Unrecognized XML start tag '%s'", el);
+
+	method = _tag_to_method(tag);
+	if (method == BM_UNKNOWN)
+		fatal("Unsupported XML start tag '%s'", el);
+
+	if (method != BM_none && method != ud->bp->method)
+		fatal("Unexpected '%s' start tag within %u response, "
+		      "expected %u",
+		      el, method, ud->bp->method);
+
+	/* Message may appear anywhere; all other tags are checked for
+	 * expected depth and (optionally) uniqueness. */
+	if (tag != BT_MESSAGE) {
+		if (ud->depth != table[tag].depth)
+			fatal("Tag '%s' appeared at depth %d instead of %d",
+					el, ud->depth, table[tag].depth);
+		if (ud->counter[tag] && table[tag].uniq)
+			fatal("Multiple occurrences of %s in document", el);
+	}
+
+	if (ud->depth == TAG_DEPTH_MAX)
+		fatal("BUG: maximum tag depth reached");
+	ud->stack[ud->depth] = tag;
+	ud->counter[tag]++;
+
+	/* A NULL handler means the tag must be attribute-less, and a
+	 * non-NULL handler requires at least one attribute. */
+	if (table[tag].hnd == NULL && *attrs != NULL)
+		fatal("Unexpected attribute '%s' in %s", *attrs, el);
+	else if (table[tag].hnd != NULL && *attrs == NULL)
+		fatal("Tag %s without expected attributes", el);
+	else if (table[tag].hnd != NULL)
+		table[tag].hnd(ud, attrs);
+	ud->depth++;
+}
+
+/* expat end-element callback: pop the tag stack, verify matching tags,
+ * and perform per-tag bookkeeping (node counters, error reclassing). */
+static void _end_handler(void *user_data, const XML_Char *el)
+{
+	struct ud *ud = user_data;
+	const struct element_handler *table = basil_tables[ud->bp->version];
+	enum basil_element end_tag;
+
+	--ud->depth;
+	for (end_tag = BT_MESSAGE; table[end_tag].tag; end_tag++)
+		if (strcmp(table[end_tag].tag, el) == 0) {
+			/* since BM_inventory is returned for Arrays
+			   if the method is switch we need to "switch"
+			   it up here.
+			*/
+			if (ud->bp->method == BM_switch) {
+				if(!strcmp(table[end_tag].tag,
+					   "ReservationArray"))
+					end_tag = BT_SWITCHRESARRAY;
+				else if(!strcmp(table[end_tag].tag,
+						"Reservation"))
+					end_tag = BT_SWITCHRES;
+				else if(!strcmp(table[end_tag].tag,
+						"ApplicationArray"))
+					end_tag = BT_SWITCHAPPARRAY;
+				else if(!strcmp(table[end_tag].tag,
+						"Application"))
+					end_tag = BT_SWITCHAPP;
+			}
+			break;
+		}
+	if (table[end_tag].tag == NULL) {
+		fatal("Unknown end tag '%s'", el);
+	} else if (end_tag != ud->stack[ud->depth]) {
+		fatal("Non-matching end element '%s'", el);
+	} else if (end_tag == BT_NODE) {
+		/* Closing a Node: roll its scratch flags (set by eh_node /
+		 * the allocation handlers) into the inventory totals. */
+		if (ud->current_node.reserved) {
+			ud->bp->mdata.inv->batch_total++;
+		} else if (ud->current_node.available) {
+			ud->bp->mdata.inv->batch_avail++;
+			ud->bp->mdata.inv->batch_total++;
+		}
+		ud->bp->mdata.inv->nodes_total++;
+	} else if (end_tag == BT_RESPDATA && ud->error) {
+		/*
+		 * Re-classify errors. The error message has been added by the
+		 * cdata handler nested inside the ResponseData tags.
+		 *
+		 * Match substrings that are common to all Basil versions:
+		 * - the ' No entry for resId ' string is returned when calling
+		 *   the RELEASE method multiple times;
+		 * - the ' cannot find resId ' string is returned when trying to
+		 *   confirm a reservation which does not or no longer exist.
+		 */
+		if (strstr(ud->bp->msg, " No entry for resId ") ||
+		    strstr(ud->bp->msg, " cannot find resId "))
+			ud->error = BE_NO_RESID;
+	}
+}
+
+/* expat character-data callback: append Message body text (only when the
+ * innermost open tag is BT_MESSAGE) to bp->msg, trimming leading space. */
+static void _cdata_handler(void *user_data, const XML_Char *s, int len)
+{
+	struct ud *ud = user_data;
+	size_t mrest;
+
+	if (!ud->depth || ud->stack[ud->depth - 1] != BT_MESSAGE)
+		return;
+
+	/* NOTE(review): expat does not NUL-terminate @s; if the chunk is
+	 * entirely whitespace this loop can scan past @len (and drive len
+	 * negative) — bound the loop by len as well. */
+	while (isspace(*s))
+		++s, --len;
+
+	/* Remaining room in msg, keeping space for the trailing NUL.
+	 * NOTE(review): 'len > mrest' compares signed int with size_t. */
+	mrest = sizeof(ud->bp->msg) - (strlen(ud->bp->msg) + 1);
+	if (mrest > 0)
+		strncat(ud->bp->msg, s, len > mrest ? mrest : len);
+}
+
+/*
+ * parse_basil  -  parse the response to a Basil query (version-independent)
+ *
+ * @bp:	information passed in to guide the parsing process
+ * @fd: file descriptor connected to the output of apbasil
+ * Returns 0 if ok, negative %basil_error otherwise.
+ * Side effects: consumes and closes @fd; on error fills bp->msg and
+ * logs it (except for BE_NO_RESID, which callers handle themselves).
+ */
+int parse_basil(struct basil_parse_data *bp, int fd)
+{
+	char xmlbuf[65536];
+	struct ud ud = {0};
+	XML_Parser parser;
+	int len;
+
+	/* Almost all methods require method-specific data in mdata */
+	xassert(bp->method == BM_engine || bp->mdata.raw != NULL);
+	ud.bp = bp;
+
+	parser = XML_ParserCreate("US-ASCII");
+	if (parser == NULL)
+		fatal("can not allocate memory for parser");
+
+	XML_SetUserData(parser, &ud);
+	XML_SetElementHandler(parser, _start_handler, _end_handler);
+	XML_SetCharacterDataHandler(parser, _cdata_handler);
+	do {
+		len = read(fd, xmlbuf, sizeof(xmlbuf));
+		if (len == -1)
+			fatal("read error on stream: len=%d", len);
+		/* len == 0 (EOF) tells expat this is the final chunk. */
+		switch (XML_Parse(parser, xmlbuf, len, len == 0)) {
+		case XML_STATUS_ERROR:
+			/* NOTE(review): when read() filled the buffer
+			 * (len == sizeof(xmlbuf)) this writes one byte
+			 * past the end of xmlbuf — reading at most
+			 * sizeof(xmlbuf) - 1 bytes would avoid it. */
+			xmlbuf[len] = '\0';
+			snprintf(ud.bp->msg, sizeof(ud.bp->msg),
+				 "Basil %s %s response parse error: %s "
+				 "at line %lu: '%s'",
+				 bv_names_long[bp->version],
+				 bm_names[bp->method],
+				 XML_ErrorString(XML_GetErrorCode(parser)),
+				 XML_GetCurrentLineNumber(parser), xmlbuf);
+			/* fall through */
+		case XML_STATUS_SUSPENDED:
+			ud.error = BE_PARSER;
+			/* fall through */
+		case XML_STATUS_OK:
+			break;
+		}
+	} while (len && ud.error == BE_NONE);
+
+	close(fd);
+	XML_ParserFree(parser);
+
+	switch (ud.error) {
+	case BE_NO_RESID:	/* resId no longer exists */
+	case BE_NONE:		/* no error: bp->msg is empty */
+		break;
+	default:
+		error("%s", bp->msg);
+	}
+	return -ud.error;
+}
diff --git a/src/plugins/select/cray/libalps/parser_internal.h b/src/plugins/select/cray/libalps/parser_internal.h
new file mode 100644
index 000000000..b805e1ff9
--- /dev/null
+++ b/src/plugins/select/cray/libalps/parser_internal.h
@@ -0,0 +1,111 @@
+/*
+ * Shared routines to parse XML from different BASIL versions
+ *
+ * Copyright (c) 2009-2011 Centro Svizzero di Calcolo Scientifico (CSCS)
+ * Licensed under the GPLv2.
+ */
+#ifndef __PARSER_INTERNAL_H__
+#define __PARSER_INTERNAL_H__
+
+#include "../basil_alps.h"
+#include <errno.h>
+
+/*
+ * struct ud - user data passed to XML element handlers
+ * @stack:	tag stack
+ * @depth:	tag stack pointer
+ * @counter:	tag counter (enforce tag uniqueness)
+ *
+ * @error:	%basil_error error information
+ * @bp:		combined input/output data
+ */
+struct ud {
+	uint8_t			depth;
+	enum basil_element	stack[TAG_DEPTH_MAX];
+	uint8_t			counter[BT_MAX];
+	uint32_t		error;
+
+	struct	{
+		bool	available;	/* arch=XT && role=BATCH && state=UP */
+		bool	reserved;	/* at least 1 reservation on this node */
+	} current_node;
+
+	struct basil_parse_data		*bp;
+#define ud_inventory			bp->mdata.inv->f
+};
+
+/*
+ * Tag handler lookup
+ *
+ * @tag:	NUL-terminated tag name
+ * @depth:	depth at which this tag is expected (not valid for all tags)
+ * @uniq:	whether @tag should be unique within document
+ * @hnd:	attribute-parsing function
+ */
+struct element_handler {
+	char	*tag;
+	uint8_t	depth;
+	bool	uniq;
+	void	(*hnd)(struct ud *, const XML_Char **);
+};
+
+/* MACROS */
+/* Taken from linux/kernel.h 2.6.33 */
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+
+/* parser_basil_x.y.c */
+extern const struct element_handler basil_1_0_elements[];
+extern const struct element_handler basil_1_1_elements[];
+extern const struct element_handler basil_3_1_elements[];
+extern const struct element_handler basil_4_0_elements[];
+/* atoul.c */
+extern int atou64(const char *str, uint64_t *value);
+extern int atou32(const char *str, uint32_t *value);
+extern int atotime_t(const char *str, time_t *value);
+
+/* popen2.c */
+extern pid_t popen2(const char *path, int *in, int *out, bool no_stderr);
+extern unsigned char wait_for_child(pid_t pid);
+
+/*
+ * parser_common.c
+ */
+extern int parse_basil(struct basil_parse_data *bp, int fd);
+extern void extract_attributes(const XML_Char **attrs, char **reqv, int reqc);
+
+/* Basil 1.0/1.1 common handlers */
+extern void eh_message(struct ud *ud, const XML_Char **attrs);
+extern void eh_response(struct ud *ud, const XML_Char **attrs);
+extern void eh_resp_data(struct ud *ud, const XML_Char **attrs);
+extern void eh_reserved(struct ud *ud, const XML_Char **attrs);
+extern void eh_engine(struct ud *ud, const XML_Char **attrs);
+extern void eh_node(struct ud *ud, const XML_Char **attrs);
+extern void eh_proc(struct ud *ud, const XML_Char **attrs);
+extern void eh_proc_alloc(struct ud *ud, const XML_Char **attrs);
+extern void eh_mem(struct ud *ud, const XML_Char **attrs);
+extern void eh_mem_alloc(struct ud *ud, const XML_Char **attrs);
+extern void eh_label(struct ud *ud, const XML_Char **attrs);
+extern void eh_resv(struct ud *ud, const XML_Char **attrs);
+
+/* Basil 1.1/3.1 common handlers */
+extern void eh_segment(struct ud *ud, const XML_Char **attrs);
+extern void eh_application(struct ud *ud, const XML_Char **attrs);
+extern void eh_command(struct ud *ud, const XML_Char **attrs);
+extern void eh_resv_1_1(struct ud *ud, const XML_Char **attrs);
+
+/* Basil 3.1 and above common handlers */
+extern void eh_resvd_node(struct ud *ud, const XML_Char **attrs);
+extern void eh_confirmed(struct ud *ud, const XML_Char **attrs);
+extern void eh_released_3_1(struct ud *ud, const XML_Char **attrs);
+extern void eh_engine_3_1(struct ud *ud, const XML_Char **attrs);
+extern void eh_inventory_3_1(struct ud *ud, const XML_Char **attrs);
+extern void eh_node_3_1(struct ud *ud, const XML_Char **attrs);
+extern void eh_resv_3_1(struct ud *ud, const XML_Char **attrs);
+
+/* Basil 4.0 and above common handlers */
+extern void eh_accel(struct ud *ud, const XML_Char **attrs);
+extern void eh_accel_alloc(struct ud *ud, const XML_Char **attrs);
+extern void eh_switch_res(struct ud *ud, const XML_Char **attrs);
+extern void eh_switch_app(struct ud *ud, const XML_Char **attrs);
+
+#endif /* __PARSER_INTERNAL_H__ */
diff --git a/src/plugins/select/cray/libalps/popen2.c b/src/plugins/select/cray/libalps/popen2.c
new file mode 100644
index 000000000..c43fc93bb
--- /dev/null
+++ b/src/plugins/select/cray/libalps/popen2.c
@@ -0,0 +1,150 @@
+/*
+ *     Implementation of a coprocess forked as child.
+ *
+ *     +--------------------------------------------+
+ *     |                 PARENT                     |
+ *     |                                            |
+ *     |    in                            out       |
+ *     | child_in[1]                  child_out[0]  |
+ *     +--------------------------------------------+
+ *           |                             ^
+ *           |                             |
+ *           V                             |
+ *     +--------------------------------------------+
+ *     | child_in[0]                  child_out[1]  |
+ *     |     |                             |        |
+ *     | STDIN_FILENO                STDOUT_FILENO  |
+ *     |                                            |
+ *     |                  CHILD                     |
+ *     +--------------------------------------------+
+ *
+ * Copyright (c) 2009-2011 Centro Svizzero di Calcolo Scientifico (CSCS)
+ * Licensed under the GPLv2.
+ */
+#include "../basil_alps.h"
+
+static int dup_devnull(int stream_fd)
+{
+	int fd = open("/dev/null", O_RDWR);
+
+	if (fd == -1 || dup2(fd, stream_fd) < 0)
+		return -1;
+	return close(fd);
+}
+
+/* move @fd out of the way of stdin/stdout/stderr */
+static int fd_no_clobber_stdio(int *fd)
+{
+	if (*fd <= STDERR_FILENO) {
+		int moved_fd = fcntl(*fd, F_DUPFD, STDERR_FILENO + 1);
+
+		if (moved_fd < 0 || close(*fd) < 0)
+			return -1;
+		*fd = moved_fd;
+	}
+	return 0;
+}
+
+/**
+ * popen2  -  Open a bidirectional pipe to a process
+ * @path:	full path of executable to run
+ * @to_child:	return file descriptor for child stdin
+ * @from_child:	return file descriptor for child stdout
+ * @no_stderr:	if true, redirect the child's stderr to /dev/null
+ * Return -1 on error, pid of child process on success.
+ */
+pid_t popen2(const char *path, int *to_child, int *from_child, bool no_stderr)
+{
+	int child_in[2], child_out[2];
+	pid_t pid;
+
+	if (access(path, X_OK) < 0)
+		return -1;
+
+	if (pipe(child_in) < 0 || pipe(child_out) < 0)
+		return -1;
+
+	pid = fork();
+	if (pid < 0)
+		return -1;
+
+	if (pid == 0) {
+		/*
+		 * child
+		 */
+		close(child_in[1]);
+		close(child_out[0]);
+
+		/*
+		 * Rationale: moving the fd above 2 first means dup2() onto a
+		 * standard descriptor creates a fresh copy, so the original
+		 * pipe end can be closed; the copy has FD_CLOEXEC off (Linux).
+		 */
+		if (fd_no_clobber_stdio(&child_in[0]) < 0		||
+		    dup2(child_in[0], STDIN_FILENO) != STDIN_FILENO	||
+		    close(child_in[0]) < 0)
+			_exit(127);
+
+		if (fd_no_clobber_stdio(&child_out[1]) < 0		||
+		    dup2(child_out[1], STDOUT_FILENO) != STDOUT_FILENO	||
+		    close(child_out[1]) < 0)
+			_exit(127);
+
+		if (no_stderr && dup_devnull(STDERR_FILENO) < 0)
+			_exit(127);
+
+		closeall(STDERR_FILENO + 1);
+
+		if (execl(path, path, NULL) < 0)
+			_exit(1);
+	}
+
+	/*
+	 * parent
+	 */
+	close(child_in[0]);
+	close(child_out[1]);
+
+	*to_child   = child_in[1];
+	*from_child = child_out[0];
+
+	return pid;
+}
+
+/*
+ * From git sources: wait for child termination
+ * Return 0 if child exited with 0, a positive value < 256 on error.
+ */
+unsigned char wait_for_child(pid_t pid)
+{
+	int status, failed_errno = 0;
+	unsigned char code = -1;
+	pid_t waiting;
+
+	do {
+		waiting = waitpid(pid, &status, 0);
+	} while (waiting < 0 && errno == EINTR);
+
+	if (waiting < 0) {
+		failed_errno = errno;
+	} else if (WIFSIGNALED(status)) {
+		code = WTERMSIG(status);
+		/*
+		 * This return value is chosen so that code & 0xff
+		 * mimics the exit code that a POSIX shell would report for
+		 * a program that died from this signal.
+		 */
+		code -= 128;
+	} else if (WIFEXITED(status)) {
+		code = WEXITSTATUS(status);
+		/*
+		 * Convert special exit code when execvp failed.
+		 */
+		if (code == 127) {
+			code = -1;
+			failed_errno = ENOENT;
+		}
+	}
+	errno = failed_errno;
+	return code;
+}
diff --git a/src/plugins/select/cray/libemulate/Makefile.am b/src/plugins/select/cray/libemulate/Makefile.am
new file mode 100644
index 000000000..3e769a6aa
--- /dev/null
+++ b/src/plugins/select/cray/libemulate/Makefile.am
@@ -0,0 +1,16 @@
+# Makefile for building Cray/Basil XML-RPC low-level interface
+
+AUTOMAKE_OPTIONS = foreign
+
+INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common -I. -I../
+
+noinst_LTLIBRARIES = libalps.la
+
+libalps_la_SOURCES =		\
+	../basil_alps.h		\
+	alps_emulate.c		\
+	hilbert.c		\
+	hilbert.h
+libalps_la_CFLAGS  = $(MYSQL_CFLAGS)
+libalps_la_LIBADD  = $(MYSQL_LIBS) -lexpat
+libalps_la_LDFLAGS = $(LIB_LDFLAGS) -module --export-dynamic
diff --git a/src/plugins/select/cray/libemulate/Makefile.in b/src/plugins/select/cray/libemulate/Makefile.in
new file mode 100644
index 000000000..b1d1e89b8
--- /dev/null
+++ b/src/plugins/select/cray/libemulate/Makefile.in
@@ -0,0 +1,618 @@
+# Makefile.in generated by automake 1.11.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+# 2003, 2004, 2005, 2006, 2007, 2008, 2009  Free Software Foundation,
+# Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# Makefile for building Cray/Basil XML-RPC low-level interface
+
+VPATH = @srcdir@
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+target_triplet = @target@
+subdir = src/plugins/select/cray/libemulate
+DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
+	$(top_srcdir)/auxdir/libtool.m4 \
+	$(top_srcdir)/auxdir/ltoptions.m4 \
+	$(top_srcdir)/auxdir/ltsugar.m4 \
+	$(top_srcdir)/auxdir/ltversion.m4 \
+	$(top_srcdir)/auxdir/lt~obsolete.m4 \
+	$(top_srcdir)/auxdir/slurm.m4 \
+	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
+	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
+	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
+	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
+	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_databases.m4 \
+	$(top_srcdir)/auxdir/x_ac_debug.m4 \
+	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
+	$(top_srcdir)/auxdir/x_ac_federation.m4 \
+	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
+	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
+	$(top_srcdir)/auxdir/x_ac_munge.m4 \
+	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_pam.m4 \
+	$(top_srcdir)/auxdir/x_ac_printf_null.m4 \
+	$(top_srcdir)/auxdir/x_ac_ptrace.m4 \
+	$(top_srcdir)/auxdir/x_ac_readline.m4 \
+	$(top_srcdir)/auxdir/x_ac_setpgrp.m4 \
+	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
+	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
+	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
+	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
+	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+	$(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+LTLIBRARIES = $(noinst_LTLIBRARIES)
+am__DEPENDENCIES_1 =
+libalps_la_DEPENDENCIES = $(am__DEPENDENCIES_1)
+am_libalps_la_OBJECTS = libalps_la-alps_emulate.lo \
+	libalps_la-hilbert.lo
+libalps_la_OBJECTS = $(am_libalps_la_OBJECTS)
+libalps_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
+	$(LIBTOOLFLAGS) --mode=link $(CCLD) $(libalps_la_CFLAGS) \
+	$(CFLAGS) $(libalps_la_LDFLAGS) $(LDFLAGS) -o $@
+DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm
+depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp
+am__depfiles_maybe = depfiles
+am__mv = mv -f
+COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
+	$(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+	--mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+	$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+CCLD = $(CC)
+LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+	--mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \
+	$(LDFLAGS) -o $@
+SOURCES = $(libalps_la_SOURCES)
+DIST_SOURCES = $(libalps_la_SOURCES)
+ETAGS = etags
+CTAGS = ctags
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AR = @AR@
+AUTHD_CFLAGS = @AUTHD_CFLAGS@
+AUTHD_LIBS = @AUTHD_LIBS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
+BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
+BLUEGENE_LOADED = @BLUEGENE_LOADED@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CMD_LDFLAGS = @CMD_LDFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DSYMUTIL = @DSYMUTIL@
+DUMPBIN = @DUMPBIN@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+ELAN_LIBS = @ELAN_LIBS@
+EXEEXT = @EXEEXT@
+FEDERATION_LDFLAGS = @FEDERATION_LDFLAGS@
+FGREP = @FGREP@
+GREP = @GREP@
+GTK_CFLAGS = @GTK_CFLAGS@
+GTK_LIBS = @GTK_LIBS@
+HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@
+HAVEPGCONFIG = @HAVEPGCONFIG@
+HAVE_AIX = @HAVE_AIX@
+HAVE_ELAN = @HAVE_ELAN@
+HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
+HAVE_OPENSSL = @HAVE_OPENSSL@
+HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
+HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
+HWLOC_LDFLAGS = @HWLOC_LDFLAGS@
+HWLOC_LIBS = @HWLOC_LIBS@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+LD = @LD@
+LDFLAGS = @LDFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIB_LDFLAGS = @LIB_LDFLAGS@
+LIPO = @LIPO@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MKDIR_P = @MKDIR_P@
+MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@
+MUNGE_LDFLAGS = @MUNGE_LDFLAGS@
+MUNGE_LIBS = @MUNGE_LIBS@
+MYSQL_CFLAGS = @MYSQL_CFLAGS@
+MYSQL_LIBS = @MYSQL_LIBS@
+NCURSES = @NCURSES@
+NM = @NM@
+NMEDIT = @NMEDIT@
+NUMA_LIBS = @NUMA_LIBS@
+OBJDUMP = @OBJDUMP@
+OBJEXT = @OBJEXT@
+OTOOL = @OTOOL@
+OTOOL64 = @OTOOL64@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PAM_DIR = @PAM_DIR@
+PAM_LIBS = @PAM_LIBS@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PGSQL_CFLAGS = @PGSQL_CFLAGS@
+PGSQL_LIBS = @PGSQL_LIBS@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROCTRACKDIR = @PROCTRACKDIR@
+PROJECT = @PROJECT@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+RANLIB = @RANLIB@
+READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
+RELEASE = @RELEASE@
+SED = @SED@
+SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
+SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SLURMCTLD_PORT = @SLURMCTLD_PORT@
+SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
+SLURMDBD_PORT = @SLURMDBD_PORT@
+SLURMD_PORT = @SLURMD_PORT@
+SLURM_API_AGE = @SLURM_API_AGE@
+SLURM_API_CURRENT = @SLURM_API_CURRENT@
+SLURM_API_MAJOR = @SLURM_API_MAJOR@
+SLURM_API_REVISION = @SLURM_API_REVISION@
+SLURM_API_VERSION = @SLURM_API_VERSION@
+SLURM_MAJOR = @SLURM_MAJOR@
+SLURM_MICRO = @SLURM_MICRO@
+SLURM_MINOR = @SLURM_MINOR@
+SLURM_PREFIX = @SLURM_PREFIX@
+SLURM_VERSION_NUMBER = @SLURM_VERSION_NUMBER@
+SLURM_VERSION_STRING = @SLURM_VERSION_STRING@
+SO_LDFLAGS = @SO_LDFLAGS@
+SSL_CPPFLAGS = @SSL_CPPFLAGS@
+SSL_LDFLAGS = @SSL_LDFLAGS@
+SSL_LIBS = @SSL_LIBS@
+STRIP = @STRIP@
+UTIL_LIBS = @UTIL_LIBS@
+VERSION = @VERSION@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+lt_ECHO = @lt_ECHO@
+lua_CFLAGS = @lua_CFLAGS@
+lua_LIBS = @lua_LIBS@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target = @target@
+target_alias = @target_alias@
+target_cpu = @target_cpu@
+target_os = @target_os@
+target_vendor = @target_vendor@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+AUTOMAKE_OPTIONS = foreign
+INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common -I. -I../
+noinst_LTLIBRARIES = libalps.la
+libalps_la_SOURCES = \
+	../basil_alps.h		\
+	alps_emulate.c		\
+	hilbert.c		\
+	hilbert.h
+
+libalps_la_CFLAGS = $(MYSQL_CFLAGS)
+libalps_la_LIBADD = $(MYSQL_LIBS) -lexpat
+libalps_la_LDFLAGS = $(LIB_LDFLAGS) -module --export-dynamic
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .c .lo .o .obj
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am  $(am__configure_deps)
+	@for dep in $?; do \
+	  case '$(am__configure_deps)' in \
+	    *$$dep*) \
+	      ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+	        && { if test -f $@; then exit 0; else break; fi; }; \
+	      exit 1;; \
+	  esac; \
+	done; \
+	echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/plugins/select/cray/libemulate/Makefile'; \
+	$(am__cd) $(top_srcdir) && \
+	  $(AUTOMAKE) --foreign src/plugins/select/cray/libemulate/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+	@case '$?' in \
+	  *config.status*) \
+	    cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+	  *) \
+	    echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+	    cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+	esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+clean-noinstLTLIBRARIES:
+	-test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES)
+	@list='$(noinst_LTLIBRARIES)'; for p in $$list; do \
+	  dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \
+	  test "$$dir" != "$$p" || dir=.; \
+	  echo "rm -f \"$${dir}/so_locations\""; \
+	  rm -f "$${dir}/so_locations"; \
+	done
+libalps.la: $(libalps_la_OBJECTS) $(libalps_la_DEPENDENCIES) 
+	$(libalps_la_LINK)  $(libalps_la_OBJECTS) $(libalps_la_LIBADD) $(LIBS)
+
+mostlyclean-compile:
+	-rm -f *.$(OBJEXT)
+
+distclean-compile:
+	-rm -f *.tab.c
+
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libalps_la-alps_emulate.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libalps_la-hilbert.Plo@am__quote@
+
+.c.o:
+@am__fastdepCC_TRUE@	$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(COMPILE) -c $<
+
+.c.obj:
+@am__fastdepCC_TRUE@	$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(COMPILE) -c `$(CYGPATH_W) '$<'`
+
+.c.lo:
+@am__fastdepCC_TRUE@	$(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LTCOMPILE) -c -o $@ $<
+
+libalps_la-alps_emulate.lo: alps_emulate.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libalps_la_CFLAGS) $(CFLAGS) -MT libalps_la-alps_emulate.lo -MD -MP -MF $(DEPDIR)/libalps_la-alps_emulate.Tpo -c -o libalps_la-alps_emulate.lo `test -f 'alps_emulate.c' || echo '$(srcdir)/'`alps_emulate.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/libalps_la-alps_emulate.Tpo $(DEPDIR)/libalps_la-alps_emulate.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='alps_emulate.c' object='libalps_la-alps_emulate.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libalps_la_CFLAGS) $(CFLAGS) -c -o libalps_la-alps_emulate.lo `test -f 'alps_emulate.c' || echo '$(srcdir)/'`alps_emulate.c
+
+libalps_la-hilbert.lo: hilbert.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libalps_la_CFLAGS) $(CFLAGS) -MT libalps_la-hilbert.lo -MD -MP -MF $(DEPDIR)/libalps_la-hilbert.Tpo -c -o libalps_la-hilbert.lo `test -f 'hilbert.c' || echo '$(srcdir)/'`hilbert.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/libalps_la-hilbert.Tpo $(DEPDIR)/libalps_la-hilbert.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='hilbert.c' object='libalps_la-hilbert.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libalps_la_CFLAGS) $(CFLAGS) -c -o libalps_la-hilbert.lo `test -f 'hilbert.c' || echo '$(srcdir)/'`hilbert.c
+
+mostlyclean-libtool:
+	-rm -f *.lo
+
+clean-libtool:
+	-rm -rf .libs _libs
+
+ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
+	list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+	unique=`for i in $$list; do \
+	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+	  done | \
+	  $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+	      END { if (nonempty) { for (i in files) print i; }; }'`; \
+	mkid -fID $$unique
+tags: TAGS
+
+TAGS:  $(HEADERS) $(SOURCES)  $(TAGS_DEPENDENCIES) \
+		$(TAGS_FILES) $(LISP)
+	set x; \
+	here=`pwd`; \
+	list='$(SOURCES) $(HEADERS)  $(LISP) $(TAGS_FILES)'; \
+	unique=`for i in $$list; do \
+	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+	  done | \
+	  $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+	      END { if (nonempty) { for (i in files) print i; }; }'`; \
+	shift; \
+	if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+	  test -n "$$unique" || unique=$$empty_fix; \
+	  if test $$# -gt 0; then \
+	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	      "$$@" $$unique; \
+	  else \
+	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	      $$unique; \
+	  fi; \
+	fi
+ctags: CTAGS
+CTAGS:  $(HEADERS) $(SOURCES)  $(TAGS_DEPENDENCIES) \
+		$(TAGS_FILES) $(LISP)
+	list='$(SOURCES) $(HEADERS)  $(LISP) $(TAGS_FILES)'; \
+	unique=`for i in $$list; do \
+	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+	  done | \
+	  $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+	      END { if (nonempty) { for (i in files) print i; }; }'`; \
+	test -z "$(CTAGS_ARGS)$$unique" \
+	  || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+	     $$unique
+
+GTAGS:
+	here=`$(am__cd) $(top_builddir) && pwd` \
+	  && $(am__cd) $(top_srcdir) \
+	  && gtags -i $(GTAGS_ARGS) "$$here"
+
+distclean-tags:
+	-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+	@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	list='$(DISTFILES)'; \
+	  dist_files=`for file in $$list; do echo $$file; done | \
+	  sed -e "s|^$$srcdirstrip/||;t" \
+	      -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+	case $$dist_files in \
+	  */*) $(MKDIR_P) `echo "$$dist_files" | \
+			   sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+			   sort -u` ;; \
+	esac; \
+	for file in $$dist_files; do \
+	  if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+	  if test -d $$d/$$file; then \
+	    dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+	    if test -d "$(distdir)/$$file"; then \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+	      cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+	  else \
+	    test -f "$(distdir)/$$file" \
+	    || cp -p $$d/$$file "$(distdir)/$$file" \
+	    || exit 1; \
+	  fi; \
+	done
+check-am: all-am
+check: check-am
+all-am: Makefile $(LTLIBRARIES)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+	$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	  install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	  `test -z '$(STRIP)' || \
+	    echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+	-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+	@echo "This command is intended for maintainers to use"
+	@echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \
+	mostlyclean-am
+
+distclean: distclean-am
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+distclean-am: clean-am distclean-compile distclean-generic \
+	distclean-tags
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-compile mostlyclean-generic \
+	mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \
+	clean-libtool clean-noinstLTLIBRARIES ctags distclean \
+	distclean-compile distclean-generic distclean-libtool \
+	distclean-tags distdir dvi dvi-am html html-am info info-am \
+	install install-am install-data install-data-am install-dvi \
+	install-dvi-am install-exec install-exec-am install-html \
+	install-html-am install-info install-info-am install-man \
+	install-pdf install-pdf-am install-ps install-ps-am \
+	install-strip installcheck installcheck-am installdirs \
+	maintainer-clean maintainer-clean-generic mostlyclean \
+	mostlyclean-compile mostlyclean-generic mostlyclean-libtool \
+	pdf pdf-am ps ps-am tags uninstall uninstall-am
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/src/plugins/select/cray/libemulate/alps_emulate.c b/src/plugins/select/cray/libemulate/alps_emulate.c
new file mode 100644
index 000000000..9707e53d4
--- /dev/null
+++ b/src/plugins/select/cray/libemulate/alps_emulate.c
@@ -0,0 +1,609 @@
+/*****************************************************************************\
+ *  alps_emulate.c - simple ALPS emulator used for testing purposes
+ *****************************************************************************
+ *  Copyright (C) 2011 SchedMD <http://www.schedmd.com>.
+ *  Supported by the Oak Ridge National Laboratory Extreme Scale Systems Center
+ *  Written by Morris Jette <jette@schedmd.com>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifdef HAVE_CONFIG_H
+#  include "config.h"
+#  if HAVE_STDINT_H
+#    include <stdint.h>
+#  endif
+#  if HAVE_INTTYPES_H
+#    include <inttypes.h>
+#  endif
+#endif
+#include <stdlib.h>
+#include <unistd.h>
+
+#include "src/common/log.h"
+#include "src/common/node_conf.h"
+#include "src/common/xmalloc.h"
+#include "../basil_alps.h"
+#include "../parser_common.h"
+#include "hilbert.h"
+
+/* Global variables */
+const char *bv_names[BV_MAX];
+const char *bv_names_long[BV_MAX];
+const char *bm_names[BM_MAX];
+const char *be_names[BE_MAX];
+
+const char *nam_arch[BNA_MAX];
+const char *nam_memtype[BMT_MAX];
+const char *nam_labeltype[BLT_MAX];
+const char *nam_ldisp[BLD_MAX];
+
+const char *nam_noderole[BNR_MAX];
+const char *nam_nodestate[BNS_MAX];
+const char *nam_proc[BPT_MAX];
+const char *nam_rsvn_mode[BRM_MAX];
+const char *nam_gpc_mode[BGM_MAX];
+
+const char *nam_acceltype[BA_MAX];
+const char *nam_accelstate[BAS_MAX];
+
+/* If _ADD_DELAYS is set, then include sleep calls to emulate delays
+ * expected for ALPS/BASIL interactions */
+#define _ADD_DELAYS  0
+#define _DEBUG       0
+#define MAX_RESV_CNT 500
+#define NODES_PER_COORDINATE 1
+
+static MYSQL *mysql_handle = NULL;
+static MYSQL_BIND *my_bind_col = NULL;
+static struct node_record *my_node_ptr = NULL;
+static int my_node_inx = 0;
+
+static int hw_cabinet, hw_row, hw_cage, hw_slot, hw_cpu;
+static int coord[3], max_dim[3];
+
+static int sys_spur_cnt = 0, last_spur_inx = 0;
+static int *sys_coords = NULL;
+static coord_t *sys_hilbert;
+
+static int last_resv_id = 0;
+static uint32_t resv_jobid[MAX_RESV_CNT];
+
+
+/* Given a count of elements to distribute over a "dims" size space, 
+ * compute the minimum number of elements in each dimension to accommodate
+ * them assuming the number of elements in each dimension is similar (i.e.
+ * a cube rather than a long narrow box shape).
+ * IN spur_cnt - number of nodes at each coordinate
+ * IN/OUT coord - maximum coordinates in each dimension
+ * IN dims - number of dimensions to use */
+static void _get_dims(int spur_cnt, int *coord, int dims)
+{
+	int count = 1, i, j;
+	coord_t hilbert[3];
+
+	xfree(sys_coords);
+	xfree(sys_hilbert);
+	for (i = 0; i < dims; i++)
+		coord[i] = 1;
+
+	do {
+		/* Increase size of dimensions from high to low here and do so
+		 * by doubling sizes, but fill from low to high to improve
+		 * performance of Hilbert curve fitting for better job
+		 * locality */
+		for (i = (dims - 1); i >= 0; i--) {
+			if (count >= spur_cnt)
+				break;
+			count /= coord[i];
+			coord[i] *= 2;
+			count *= coord[i];
+		}
+	} while (count < spur_cnt);
+
+	/* Build table of possible coordinates */
+	sys_spur_cnt = spur_cnt;
+	sys_coords  = xmalloc(sizeof(int) * spur_cnt * dims);
+	/* We leave record zero at coordinate 000 */
+	for (i = 1; i < spur_cnt; i++) {
+		for (j = 0; j < dims; j++)
+			sys_coords[i*dims + j] = sys_coords[i*dims + j - dims];
+		for (j = 0; j < dims; j++) {
+			sys_coords[i*dims+j]++;
+			if (sys_coords[i*dims+j] < coord[j])
+				break;
+			sys_coords[i*dims+j] = 0;
+		}
+	}
+
+	/* For each coordinate, generate its Hilbert number */
+	sys_hilbert = xmalloc(sizeof(coord_t) * spur_cnt);
+	for (i = 0; i < spur_cnt; i++) {
+		for (j = 0; j < dims; j++)
+			hilbert[j] = sys_coords[i*dims + j];
+		AxestoTranspose(hilbert, 5, dims);
+		/* A variation on the below calculation would be required here
+		 * for other dimension counts */
+		sys_hilbert[i] =
+			((hilbert[0]>>4 & 1) << 14) +
+			((hilbert[1]>>4 & 1) << 13) +
+			((hilbert[2]>>4 & 1) << 12) +
+			((hilbert[0]>>3 & 1) << 11) +
+			((hilbert[1]>>3 & 1) << 10) +
+			((hilbert[2]>>3 & 1) <<  9) +
+			((hilbert[0]>>2 & 1) <<  8) +
+			((hilbert[1]>>2 & 1) <<  7) +
+			((hilbert[2]>>2 & 1) <<  6) +
+			((hilbert[0]>>1 & 1) <<  5) +
+			((hilbert[1]>>1 & 1) <<  4) +
+			((hilbert[2]>>1 & 1) <<  3) +
+			((hilbert[0]>>0 & 1) <<  2) +
+			((hilbert[1]>>0 & 1) <<  1) +
+			((hilbert[2]>>0 & 1) <<  0);
+	}
+
+	/* Sort the entries by increasing hilbert number */
+	for (i = 0; i < spur_cnt; i++) {
+		int tmp_int, low_inx = i;
+		for (j = i+1; j < spur_cnt; j++) {
+			if (sys_hilbert[j] < sys_hilbert[low_inx])
+				low_inx = j;
+		}
+		if (low_inx == i)
+			continue;
+		tmp_int = sys_hilbert[i];
+		sys_hilbert[i] = sys_hilbert[low_inx];
+		sys_hilbert[low_inx] = tmp_int;
+		for (j = 0; j < dims; j++) {
+			tmp_int = sys_coords[i*dims + j];
+			sys_coords[i*dims + j] = sys_coords[low_inx*dims + j];
+			sys_coords[low_inx*dims + j] = tmp_int;
+		}
+	}
+
+#if _DEBUG
+	for (i = 0; i < spur_cnt; i++) {
+		info("coord:%d:%d:%d hilbert:%d", sys_coords[i*dims],
+		     sys_coords[i*dims+1], sys_coords[i*dims+2],
+		     sys_hilbert[i]);
+	}
+#endif
+}
+
+/* increment coordinates for a node */
+static void _incr_dims(int *coord, int dims)
+{
+	int j;
+
+	last_spur_inx++;
+	if (last_spur_inx >= sys_spur_cnt) {
+		error("alps_emualte: spur count exceeded");
+		last_spur_inx = 0;
+	}
+
+	for (j = 0; j < dims; j++)
+		coord[j] = sys_coords[last_spur_inx*dims + j];
+}
+
+/* Initialize the hardware pointer records */
+static void _init_hw_recs(int dims)
+{
+	int j, spur_cnt;
+
+	hw_cabinet = 0;
+	hw_row = 0;
+	hw_cage = 0;
+	hw_slot = 0;
+	hw_cpu = 0;
+
+	my_node_ptr = node_record_table_ptr;
+	my_node_inx = 0;
+	spur_cnt = node_record_count + NODES_PER_COORDINATE - 1;
+	spur_cnt /= NODES_PER_COORDINATE;
+	_get_dims(spur_cnt, max_dim, 3);
+
+	last_spur_inx = 0;
+	for (j = 0; j < dims; j++)
+		coord[j] = sys_coords[last_spur_inx*dims + j];
+}
+
+/* Increment the hardware pointer records */
+static void _incr_hw_recs(void)
+{
+	if (++my_node_inx >= node_record_count)
+		return;	/* end of node table */
+
+	my_node_ptr++;
+	if ((my_node_inx % NODES_PER_COORDINATE) == 0)
+		_incr_dims(coord, 3);
+	hw_cpu++;
+	if (hw_cpu > 3) {
+		hw_cpu = 0;
+		hw_slot++;
+	}
+	if (hw_slot > 7) {
+		hw_slot = 0;
+		hw_cage++;
+	}
+	if (hw_cage > 2) {
+		hw_cage = 0;
+		hw_cabinet++;
+	}
+	if (hw_cabinet > 16) {
+		hw_cabinet = 0;
+		hw_row++;
+	}
+}
+
+extern void free_nodespec(struct nodespec *head)
+{
+#if _DEBUG
+	info("free_nodespec: start:%u end:%u", head->start, head->end);
+#endif
+
+	if (head) {
+		free_nodespec(head->next);
+		xfree(head);
+	}
+}
+
+/*
+ *	Routines to interact with SDB database (uses prepared statements)
+ */
+/** Connect to the XTAdmin table on the SDB */
+extern MYSQL *cray_connect_sdb(void)
+{
+#if _DEBUG
+	info("cray_connect_sdb");
+#endif
+#if _ADD_DELAYS
+	usleep(5000);
+#endif
+	if (mysql_handle)
+		error("cray_connect_sdb: Duplicate MySQL connection");
+	else
+		mysql_handle = (MYSQL *) xmalloc(1);
+
+	return mysql_handle;
+}
+
+/** Initialize and prepare statement */
+extern MYSQL_STMT *prepare_stmt(MYSQL *handle, const char *query,
+				MYSQL_BIND bind_parm[], unsigned long nparams,
+				MYSQL_BIND bind_cols[], unsigned long ncols)
+{
+#if _DEBUG
+	info("prepare_stmt: query:%s", query);
+#endif
+	if (handle != mysql_handle)
+		error("prepare_stmt: bad MySQL handle");
+	_init_hw_recs(3);
+
+	return (MYSQL_STMT *) query;
+}
+
+/** Execute and return the number of rows. */
+extern int exec_stmt(MYSQL_STMT *stmt, const char *query,
+		     MYSQL_BIND *bind_col, unsigned long ncols)
+{
+#if _DEBUG
+	info("exec_stmt");
+#endif
+#if _ADD_DELAYS
+	usleep(5000);
+#endif
+	my_bind_col = bind_col;
+
+	return 0;
+}
+
+extern int fetch_stmt(MYSQL_STMT *stmt)
+{
+#if _DEBUG
+	info("fetch_stmt");
+#endif
+#if _ADD_DELAYS
+	usleep(5000);
+#endif
+	if (my_node_inx >=node_record_count)
+		return 1;
+
+	strncpy(my_bind_col[COL_TYPE].buffer, "compute", BASIL_STRING_SHORT);
+	*((unsigned int *)my_bind_col[COL_CORES].buffer)  =
+			my_node_ptr->config_ptr->cpus;
+	*((unsigned int *)my_bind_col[COL_MEMORY].buffer) =
+			my_node_ptr->config_ptr->real_memory;
+
+	*((int *)my_bind_col[COL_CAB].buffer) = hw_cabinet;
+	*((int *)my_bind_col[COL_ROW].buffer) = hw_row;
+	*((int *)my_bind_col[COL_CAGE].buffer) = hw_cage;
+	*((int *)my_bind_col[COL_SLOT].buffer) = hw_slot;
+	*((int *)my_bind_col[COL_CPU].buffer) = hw_cpu;
+
+	*((int *)my_bind_col[COL_X].buffer) = coord[0];
+	*((int *)my_bind_col[COL_Y].buffer) = coord[1];
+	*((int *)my_bind_col[COL_Z].buffer) = coord[2];
+
+	*((my_bool *)my_bind_col[COL_MEMORY].is_null)  = (my_bool) 0;
+	*((my_bool *)my_bind_col[COL_CORES].is_null)  = (my_bool) 0;
+
+	*((my_bool *)my_bind_col[COL_CAB].is_null)  = (my_bool) 0;
+	*((my_bool *)my_bind_col[COL_ROW].is_null)  = (my_bool) 0;
+	*((my_bool *)my_bind_col[COL_CAGE].is_null)  = (my_bool) 0;
+	*((my_bool *)my_bind_col[COL_SLOT].is_null)  = (my_bool) 0;
+	*((my_bool *)my_bind_col[COL_CPU].is_null)  = (my_bool) 0;
+
+	*((my_bool *)my_bind_col[COL_X].is_null)  = (my_bool) 0;
+	*((my_bool *)my_bind_col[COL_Y].is_null)  = (my_bool) 0;
+	*((my_bool *)my_bind_col[COL_Z].is_null)  = (my_bool) 0;
+
+	_incr_hw_recs();
+
+	return 0;
+}
+
+my_bool free_stmt_result(MYSQL_STMT *stmt)
+{
+#if _DEBUG
+	info("free_stmt_result");
+#endif
+	return (my_bool) 0;
+}
+
+my_bool stmt_close(MYSQL_STMT *stmt)
+{
+#if _DEBUG
+	info("stmt_close");
+#endif
+	sys_spur_cnt = 0;
+	xfree(sys_coords);
+	xfree(sys_hilbert);
+
+	return (my_bool) 0;
+}
+
+void cray_close_sdb(MYSQL *handle)
+{
+#if _DEBUG
+	info("cray_close_sdb");
+#endif
+	xfree(mysql_handle);
+	return;
+}
+
+/** Find out interconnect chip: Gemini (XE) or SeaStar (XT) */
+extern int cray_is_gemini_system(MYSQL *handle)
+{
+#if _DEBUG
+	info("cray_is_gemini_system");
+#endif
+#if _ADD_DELAYS
+	usleep(5000);
+#endif
+	if (handle != mysql_handle)
+		error("cray_is_gemini_system: bad MySQL handle");
+	return 0;
+}
+
+/*
+ *	Basil XML-RPC API prototypes
+ */
+extern enum basil_version get_basil_version(void)
+{
+#if _DEBUG
+	info("basil_version get_basil_version");
+#endif
+	return BV_3_1;
+}
+
+extern int basil_request(struct basil_parse_data *bp)
+{
+#if _DEBUG
+	info("basil_request");
+#endif
+	return 0;
+}
+
+extern struct basil_inventory *get_full_inventory(enum basil_version version)
+{
+	int i;
+	char *end_ptr;
+	struct basil_inventory *inv;
+	struct node_record *node_ptr;
+	struct basil_node *basil_node_ptr, **last_basil_node_ptr;
+	struct basil_rsvn *basil_rsvn_ptr, **last_basil_rsvn_ptr;
+
+#if _DEBUG
+	info("get_full_inventory");
+#endif
+
+	inv = xmalloc(sizeof(struct basil_inventory));
+	inv->is_gemini   = true;
+	inv->batch_avail = node_record_count;
+	inv->batch_total = node_record_count;
+	inv->nodes_total = node_record_count;
+	inv->f = xmalloc(sizeof(struct basil_full_inventory));
+	last_basil_node_ptr = &inv->f->node_head;
+	for (i = 0, node_ptr = node_record_table_ptr; i <node_record_count;
+	     i++, node_ptr++) {
+		basil_node_ptr = xmalloc(sizeof(struct basil_node));
+		*last_basil_node_ptr = basil_node_ptr;
+		basil_node_ptr->node_id = strtol(node_ptr->name+3, &end_ptr,
+						 10);
+		if (end_ptr[0] != '\0') {
+			error("Invalid node name: %s", basil_node_ptr->name);
+			basil_node_ptr->node_id = i;
+		}
+		strncpy(basil_node_ptr->name, node_ptr->name,
+			BASIL_STRING_SHORT);
+		basil_node_ptr->state = BNS_UP;
+		basil_node_ptr->role  = BNR_BATCH;
+		basil_node_ptr->arch  = BNA_XT;
+		last_basil_node_ptr = &basil_node_ptr->next;
+	}
+	last_basil_rsvn_ptr = &inv->f->rsvn_head;
+	for (i = 0; i < MAX_RESV_CNT; i++) {
+		if (resv_jobid[i] == 0)
+			continue;
+		basil_rsvn_ptr = xmalloc(sizeof(struct basil_rsvn));
+		*last_basil_rsvn_ptr = basil_rsvn_ptr;
+		basil_rsvn_ptr->rsvn_id = i + 1;
+		last_basil_rsvn_ptr = &basil_rsvn_ptr->next;
+	}
+	return inv;
+}
+
+extern void   free_inv(struct basil_inventory *inv)
+{
+	struct basil_node *basil_node_ptr, *next_basil_node_ptr;
+	struct basil_rsvn *basil_rsvn_ptr, *next_basil_rsvn_ptr;
+#if _DEBUG
+	info("free_inv");
+#endif
+	if (inv) {
+		basil_node_ptr = inv->f->node_head;
+		while (basil_node_ptr) {
+			next_basil_node_ptr = basil_node_ptr->next;
+			xfree(basil_node_ptr);
+			basil_node_ptr = next_basil_node_ptr;
+		}
+		basil_rsvn_ptr = inv->f->rsvn_head;
+		while (basil_rsvn_ptr) {
+			next_basil_rsvn_ptr = basil_rsvn_ptr->next;
+			xfree(basil_rsvn_ptr);
+			basil_rsvn_ptr = next_basil_rsvn_ptr;
+		}
+		xfree(inv->f);
+		xfree(inv);
+	}
+}
+
+extern long basil_reserve(const char *user, const char *batch_id,
+			  uint32_t width, uint32_t depth, uint32_t nppn,
+			  uint32_t mem_mb, struct nodespec *ns_head,
+			  struct basil_accel_param *accel_head)
+{
+	int i;
+	uint32_t job_id;
+
+#if _DEBUG
+	struct nodespec *my_node_spec;
+	info("basil_reserve user:%s batch_id:%s width:%u depth:%u nppn:%u "
+	     "mem_mb:%u",
+	     user, batch_id, width, depth, nppn, mem_mb);
+	my_node_spec = ns_head;
+	while (my_node_spec) {
+		info("basil_reserve node_spec:start:%u,end:%u",
+		     my_node_spec->start, my_node_spec->end);
+		my_node_spec = my_node_spec->next;
+	}
+#endif
+#if _ADD_DELAYS
+	usleep(5000);
+#endif
+
+	job_id = atol(batch_id);
+	for (i = 0; i < MAX_RESV_CNT; i++) {
+		int my_resv_id;
+		if (resv_jobid[last_resv_id])
+			continue;
+		resv_jobid[last_resv_id] = job_id;
+		my_resv_id = ++last_resv_id;	/* one origin */
+		last_resv_id %= MAX_RESV_CNT;
+		return my_resv_id;
+	}
+
+	return 0;
+}
+
+extern int basil_confirm(uint32_t rsvn_id, int job_id, uint64_t pagg_id)
+{
+#if _DEBUG
+	info("basil_confirm: rsvn_id:%u", rsvn_id);
+#endif
+#if _ADD_DELAYS
+	usleep(5000);
+#endif
+	if ((job_id == 0) || (rsvn_id > MAX_RESV_CNT))
+		return -BE_NO_RESID;
+#if 0
+	/* This is executed from the slurmd, so we really can not confirm
+	 * here if the reservation was made by the slurmctld. Just assume
+	 * it is valid. */
+	if (resv_jobid[rsvn_id-1] != job_id)
+		return -1;
+#endif
+
+	return 0;
+}
+
+extern int basil_release(uint32_t rsvn_id)
+{
+#if _DEBUG
+	info("basil_release: rsvn_id:%u", rsvn_id);
+#endif
+#if _ADD_DELAYS
+	usleep(5000);
+#endif
+
+	resv_jobid[rsvn_id - 1] = 0;
+
+	return 0;
+}
+
+int basil_signal_apids(int32_t rsvn_id, int signal, struct basil_inventory *inv)
+{
+#if _DEBUG
+	info("basil_signal_apids: rsvn_id:%u signal:%d", rsvn_id, signal);
+#endif
+#if _ADD_DELAYS
+	usleep(5000);
+#endif
+
+	return 0;
+}
+
+extern bool node_is_allocated(const struct basil_node *node)
+{
+	char nid[9];	/* nid%05d\0 */
+	struct node_record *node_ptr;
+
+	snprintf(nid, sizeof(nid), "nid%05u", node->node_id);
+	node_ptr = find_node_record(nid);
+	if (node_ptr == NULL)
+		return false;
+
+	return IS_NODE_ALLOCATED(node_ptr);
+}
+
+int basil_switch(uint32_t rsvn_id, bool suspend)
+{
+	return 0;
+}
diff --git a/src/plugins/select/cray/libemulate/hilbert.c b/src/plugins/select/cray/libemulate/hilbert.c
new file mode 100644
index 000000000..6a26e722f
--- /dev/null
+++ b/src/plugins/select/cray/libemulate/hilbert.c
@@ -0,0 +1,88 @@
+//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+// Filename:  hilbert.c
+//
+// Purpose:   Hilbert and Linked-list utility procedures for BayeSys3.
+//
+// History:   TreeSys.c   17 Apr 1996 - 31 Dec 2002
+//            Peano.c     10 Apr 2001 - 11 Jan 2003
+//            merged       1 Feb 2003
+//            Arith debug 28 Aug 2003
+//            Hilbert.c   14 Oct 2003
+//                         2 Dec 2003
+//-----------------------------------------------------------------------------
+/*
+    Copyright (c) 1996-2003 Maximum Entropy Data Consultants Ltd,
+                            114c Milton Road, Cambridge CB4 1XE, England
+
+    This library is free software; you can redistribute it and/or
+    modify it under the terms of the GNU Lesser General Public
+    License as published by the Free Software Foundation; either
+    version 2.1 of the License, or (at your option) any later version.
+
+    This library is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+    Lesser General Public License for more details.
+
+    You should have received a copy of the GNU Lesser General Public
+    License along with this library; if not, write to the Free Software
+    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+#include "license.txt"
+*/
+
+#include "src/plugins/topology/3d_torus/hilbert.h"
+
+extern void TransposetoAxes(
+coord_t* X,            // I O  position   [n]
+int      b,            // I    # bits
+int      n)            // I    dimension
+{
+    coord_t  M, P, Q, t;
+    int      i;
+
+// Gray decode by  H ^ (H/2)
+    t = X[n-1] >> 1;
+    for( i = n-1; i; i-- )
+        X[i] ^= X[i-1];
+    X[0] ^= t;
+
+// Undo excess work
+    M = 2 << (b - 1);
+    for( Q = 2; Q != M; Q <<= 1 )
+    {
+        P = Q - 1;
+        for( i = n-1; i; i-- )
+            if( X[i] & Q ) X[0] ^= P;                              // invert
+            else{ t = (X[0] ^ X[i]) & P;  X[0] ^= t;  X[i] ^= t; } // exchange
+        if( X[0] & Q ) X[0] ^= P;                                  // invert
+    }
+}
+extern void AxestoTranspose(
+coord_t* X,            // I O  position   [n]
+int      b,            // I    # bits
+int      n)            // I    dimension
+{
+    coord_t  P, Q, t;
+    int      i;
+
+// Inverse undo
+    for( Q = 1 << (b - 1); Q > 1; Q >>= 1 )
+    {
+        P = Q - 1;
+        if( X[0] & Q ) X[0] ^= P;                                  // invert
+        for( i = 1; i < n; i++ )
+            if( X[i] & Q ) X[0] ^= P;                              // invert
+            else{ t = (X[0] ^ X[i]) & P;  X[0] ^= t;  X[i] ^= t; } // exchange
+    }
+
+// Gray encode (inverse of decode)
+    for( i = 1; i < n; i++ )
+        X[i] ^= X[i-1];
+    t = X[n-1];
+    for( i = 1; i < b; i <<= 1 )
+        X[n-1] ^= X[n-1] >> i;
+    t ^= X[n-1];
+    for( i = n-2; i >= 0; i-- )
+        X[i] ^= t;
+}
diff --git a/src/plugins/select/cray/libemulate/hilbert.h b/src/plugins/select/cray/libemulate/hilbert.h
new file mode 100644
index 000000000..fe7eeda63
--- /dev/null
+++ b/src/plugins/select/cray/libemulate/hilbert.h
@@ -0,0 +1,44 @@
+//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+// Filename:  hilbert.h
+//
+// Purpose:   Hilbert and Linked-list utility procedures for BayeSys3.
+//
+// History:   TreeSys.c   17 Apr 1996 - 31 Dec 2002
+//            Peano.c     10 Apr 2001 - 11 Jan 2003
+//            merged       1 Feb 2003
+//            Arith debug 28 Aug 2003
+//            Hilbert.c   14 Oct 2003
+//                         2 Dec 2003
+//-----------------------------------------------------------------------------
+/*
+    Copyright (c) 1996-2003 Maximum Entropy Data Consultants Ltd,
+                            114c Milton Road, Cambridge CB4 1XE, England
+
+    This library is free software; you can redistribute it and/or
+    modify it under the terms of the GNU Lesser General Public
+    License as published by the Free Software Foundation; either
+    version 2.1 of the License, or (at your option) any later version.
+
+    This library is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+    Lesser General Public License for more details.
+
+    You should have received a copy of the GNU Lesser General Public
+    License along with this library; if not, write to the Free Software
+    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+#include "license.txt"
+*/
+
+typedef unsigned int coord_t; // char,short,int for up to 8,16,32 bits per word
+
+extern void TransposetoAxes(
+coord_t* X,            // I O  position   [n]
+int      b,            // I    # bits
+int      n);           // I    dimension
+
+extern void AxestoTranspose(
+coord_t* X,            // I O  position   [n]
+int      b,            // I    # bits
+int      n);           // I    dimension
diff --git a/src/plugins/select/cray/nodespec.c b/src/plugins/select/cray/nodespec.c
new file mode 100644
index 000000000..f5ac145df
--- /dev/null
+++ b/src/plugins/select/cray/nodespec.c
@@ -0,0 +1,151 @@
+/*
+ * Strictly-ordered, singly-linked list to represent disjoint node ranges
+ * of the type 'a' (single node) or 'a-b' (range, with a < b).
+ *
+ * For example, '1,7-8,20,33-39'
+ *
+ * Copyright (c) 2009-2011 Centro Svizzero di Calcolo Scientifico (CSCS)
+ * Licensed under the GPLv2.
+ */
+#include "basil_alps.h"
+#define CRAY_MAX_DIGITS	5	/* nid%05d format */
+
+/* internal constructor */
+static struct nodespec *ns_new(uint32_t start, uint32_t end)
+{
+	struct nodespec *new = xmalloc(sizeof(*new));
+
+	if (new) {
+		new->start = start;
+		new->end   = end;
+	}
+	return new;
+}
+
+/**
+ * ns_add_range  -  Insert/merge new range into existing nodespec list.
+ * @head:       head of the ordered list
+ * @new_start:  start value of node range to insert
+ * @new_end:    end value of node range to insert
+ *
+ * Maintains @head as duplicate-free list, ordered in ascending order of
+ * node-specifier intervals, with a gap of at least 2 between adjacent entries.
+ * Returns 0 if ok, -1 on failure.
+ */
+static int ns_add_range(struct nodespec **head,
+			uint32_t new_start,
+			uint32_t new_end)
+{
+	struct nodespec *cur = *head, *next;
+
+	assert(new_start <= new_end);
+
+	if (cur == NULL || new_end + 1 < cur->start) {
+		*head = ns_new(new_start, new_end);
+		if (*head == NULL)
+			return -1;
+		(*head)->next = cur;
+		return 0;
+	}
+
+	for (next = cur->next;
+	     new_start > cur->end + 1;
+	     cur = next, next = cur->next)
+		if (next == NULL || new_end + 1 < next->start) {
+			next = ns_new(new_start, new_end);
+			if (next == NULL)
+				return -1;
+			next->next = cur->next;
+			cur->next  = next;
+			return 0;
+		}
+
+	/* new_start <= cur->end + 1 */
+	if (new_start < cur->start)
+		cur->start = new_start;
+
+	if (new_end <= cur->end)
+		return 0;
+	cur->end = new_end;
+
+	while ((next = cur->next) && next->start <= new_end + 1) {
+		if (next->end > new_end)
+			cur->end = next->end;
+		cur->next = next->next;
+		xfree(next);
+	}
+	/* next == NULL || next->start > new_end + 1 */
+
+	return 0;
+}
+
+/** Add a single node (1-element range) */
+int ns_add_node(struct nodespec **head, uint32_t node_id)
+{
+	return ns_add_range(head, node_id, node_id);
+}
+
+/* count the number of nodes starting at @head */
+static int ns_count_nodes(const struct nodespec *head)
+{
+	const struct nodespec *cur;
+	uint32_t node_count = 0;
+
+	for (cur = head; cur; cur = cur->next)
+		node_count += cur->end - cur->start + 1;
+
+	return node_count;
+}
+
+/**
+ * ns_ranged_string - Write compressed node specification to buffer.
+ * @head:   start of nodespec list
+ * @buf:    buffer to write to
+ * @buflen: size of @buf
+ * Returns number of characters written if successful, -1 on overflow.
+ */
+static ssize_t ns_ranged_string(const struct nodespec *head,
+				char *buf, size_t buflen)
+{
+	const struct nodespec *cur;
+	ssize_t n, len = 0;
+
+	for (cur = head; cur; cur = cur->next) {
+		if (cur != head) {
+			n = snprintf(buf + len, buflen - len, ",");
+			if (n < 0 || (len += n) >= buflen)
+				return -1;
+		}
+
+		n = snprintf(buf + len, buflen - len, "%u", cur->start);
+		if (n < 0 || (len += n) >= buflen)
+			return -1;
+
+		if (cur->start != cur->end) {
+			n = snprintf(buf + len, buflen - len, "-%u", cur->end);
+			if (n < 0 || (len += n) >= buflen)
+				return -1;
+		}
+	}
+	return len;
+}
+
+/* Compress @head into nodestring. Result must be xfree()d. */
+char *ns_to_string(const struct nodespec *head)
+{
+	char *buf = NULL;
+	size_t size = ns_count_nodes(head);
+
+	if (size) {
+		/* Over-estimation: using all digits, plus either '-' or '\0' */
+		size *= CRAY_MAX_DIGITS + 1;
+
+		buf = xmalloc(size);
+		if (buf == NULL)
+			fatal("can not allocate %d", (int)size);
+
+		if (ns_ranged_string(head, buf, size) < 0)
+			fatal("can not expand nodelist expression");
+	}
+	return buf;
+}
diff --git a/src/plugins/select/cray/other_select.c b/src/plugins/select/cray/other_select.c
index 33f79feb9..18ddd3a21 100644
--- a/src/plugins/select/cray/other_select.c
+++ b/src/plugins/select/cray/other_select.c
@@ -16,7 +16,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -78,15 +78,21 @@ static slurm_select_ops_t *_other_select_get_ops(slurm_select_context_t *c)
 		"select_p_state_save",
 		"select_p_state_restore",
 		"select_p_job_init",
+		"select_p_node_ranking",
 		"select_p_node_init",
 		"select_p_block_init",
 		"select_p_job_test",
 		"select_p_job_begin",
 		"select_p_job_ready",
+		"select_p_job_expand_allow",
+		"select_p_job_expand",
 		"select_p_job_resized",
+		"select_p_job_signal",
 		"select_p_job_fini",
 		"select_p_job_suspend",
 		"select_p_job_resume",
+		"select_p_step_pick_nodes",
+		"select_p_step_finish",
 		"select_p_pack_select_info",
                 "select_p_select_nodeinfo_pack",
                 "select_p_select_nodeinfo_unpack",
@@ -111,6 +117,10 @@ static slurm_select_ops_t *_other_select_get_ops(slurm_select_context_t *c)
 		"select_p_update_node_state",
 		"select_p_alter_node_cnt",
 		"select_p_reconfigure",
+		"select_p_resv_test",
+		"select_p_ba_init",
+		"select_p_ba_fini",
+		"select_p_ba_get_dims",
 	};
 	int n_syms = sizeof(syms) / sizeof(char *);
 
@@ -379,12 +389,39 @@ extern int other_job_ready(struct job_record *job_ptr)
 }
 
 /*
- * Modify internal data structures for a job that has changed size
- *	Only support jobs shrinking now.
+ * Test if expanding a job is permitted
+ */
+extern bool other_job_expand_allow(void)
+{
+	if (other_select_init() < 0)
+		return false;
+
+	return (*(other_select_context->ops.job_expand_allow))();
+}
+
+/*
+ * Move the resource allocated to one job into that of another job.
+ *	All resources are removed from "from_job_ptr" and moved into
+ *	"to_job_ptr". Also see other_job_resized().
+ * RET: 0 or an error code
+ */
+extern int other_job_expand(struct job_record *from_job_ptr,
+			    struct job_record *to_job_ptr)
+{
+	if (other_select_init() < 0)
+		return -1;
+
+	return (*(other_select_context->ops.job_expand))
+		(from_job_ptr, to_job_ptr);
+}
+
+/*
+ * Modify internal data structures for a job that has decreased job size.
+ *	Only support jobs shrinking. Also see other_job_expand();
  * RET: 0 or an error code
  */
 extern int other_job_resized(struct job_record *job_ptr,
-				struct node_record *node_ptr)
+			     struct node_record *node_ptr)
 {
 	if (other_select_init() < 0)
 		return -1;
@@ -393,6 +430,20 @@ extern int other_job_resized(struct job_record *job_ptr,
 		(job_ptr, node_ptr);
 }
 
+/*
+ * Pass job-step signal to other plugin.
+ * IN job_ptr - job to be signalled
+ * IN signal  - signal(7) number
+ */
+extern int other_job_signal(struct job_record *job_ptr, int signal)
+{
+	if (other_select_init() < 0)
+		return -1;
+
+	return (*(other_select_context->ops.job_signal))
+		(job_ptr, signal);
+}
+
 /*
  * Note termination of job is starting. Executed from slurmctld.
  * IN job_ptr - pointer to job being terminated
@@ -409,29 +460,69 @@ extern int other_job_fini(struct job_record *job_ptr)
 /*
  * Suspend a job. Executed from slurmctld.
  * IN job_ptr - pointer to job being suspended
+ * indf_susp IN - set if job is being suspended indefinitely by user
+ *                or admin, otherwise suspended for gang scheduling
  * RET SLURM_SUCCESS or error code
  */
-extern int other_job_suspend(struct job_record *job_ptr)
+extern int other_job_suspend(struct job_record *job_ptr, bool indf_susp)
 {
 	if (other_select_init() < 0)
 		return SLURM_ERROR;
 
 	return (*(other_select_context->ops.job_suspend))
-		(job_ptr);
+		(job_ptr, indf_susp);
 }
 
 /*
  * Resume a job. Executed from slurmctld.
+ * indf_susp IN - set if job is being resumed from indefinite suspend by user
+ *                or admin, otherwise resume from gang scheduling
  * IN job_ptr - pointer to job being resumed
  * RET SLURM_SUCCESS or error code
  */
-extern int other_job_resume(struct job_record *job_ptr)
+extern int other_job_resume(struct job_record *job_ptr, bool indf_susp)
 {
 	if (other_select_init() < 0)
 		return SLURM_ERROR;
 
 	return (*(other_select_context->ops.job_resume))
-		(job_ptr);
+		(job_ptr, indf_susp);
+}
+
+/*
+ * Select the "best" nodes for given job step from those available in
+ * a job allocation.
+ *
+ * IN/OUT job_ptr - pointer to job already allocated and running in a
+ *                  block where the step is to run.
+ *                  set's start_time when job expected to start
+ * OUT step_jobinfo - Fill in the resources to be used if not
+ *                    full size of job.
+ * IN node_count  - How many nodes we are looking for.
+ * RET map of slurm nodes to be used for step, NULL on failure
+ */
+extern bitstr_t *other_step_pick_nodes(struct job_record *job_ptr,
+				       select_jobinfo_t *jobinfo,
+				       uint32_t node_count)
+{
+	if (other_select_init() < 0)
+		return NULL;
+
+	return (*(other_select_context->ops.step_pick_nodes))
+		(job_ptr, jobinfo, node_count);
+}
+
+/*
+ * clear what happened in select_g_step_pick_nodes
+ * IN/OUT step_ptr - Flush the resources from the job and step.
+ */
+extern int other_step_finish(struct step_record *step_ptr)
+{
+	if (other_select_init() < 0)
+		return SLURM_ERROR;
+
+	return (*(other_select_context->ops.step_finish))
+		(step_ptr);
 }
 
 extern int other_pack_select_info(time_t last_query_time, uint16_t show_flags,
@@ -445,8 +536,8 @@ extern int other_pack_select_info(time_t last_query_time, uint16_t show_flags,
 }
 
 extern int other_select_nodeinfo_pack(select_nodeinfo_t *nodeinfo,
-					 Buf buffer,
-					 uint16_t protocol_version)
+				      Buf buffer,
+				      uint16_t protocol_version)
 {
 	if (other_select_init() < 0)
 		return SLURM_ERROR;
@@ -466,12 +557,12 @@ extern int other_select_nodeinfo_unpack(select_nodeinfo_t **nodeinfo,
 		(nodeinfo, buffer, protocol_version);
 }
 
-extern select_nodeinfo_t *other_select_nodeinfo_alloc(uint32_t size)
+extern select_nodeinfo_t *other_select_nodeinfo_alloc(void)
 {
 	if (other_select_init() < 0)
 		return NULL;
 
-	return (*(other_select_context->ops.nodeinfo_alloc))(size);
+	return (*(other_select_context->ops.nodeinfo_alloc))();
 }
 
 extern int other_select_nodeinfo_free(select_nodeinfo_t *nodeinfo)
@@ -698,13 +789,12 @@ extern int other_update_node_config (int index)
  * IN state  - state to update to
  * RETURN SLURM_SUCCESS on success || SLURM_ERROR else wise
  */
-extern int other_update_node_state (int index, uint16_t state)
+extern int other_update_node_state (struct node_record *node_ptr)
 {
 	if (other_select_init() < 0)
 		return SLURM_ERROR;
 
-	return (*(other_select_context->ops.update_node_state))
-		(index, state);
+	return (*(other_select_context->ops.update_node_state))(node_ptr);
 }
 
 /*
@@ -729,3 +819,45 @@ extern int other_reconfigure (void)
 
 	return (*(other_select_context->ops.reconfigure))();
 }
+
+/*
+ * other_resv_test - Identify the nodes which "best" satisfy a reservation
+ *	request. "best" is defined as either single set of consecutive nodes
+ *	satisfying the request and leaving the minimum number of unused nodes
+ *	OR the fewest number of consecutive node sets
+ * IN avail_bitmap - nodes available for the reservation
+ * IN node_cnt - count of required nodes
+ * RET - nodes selected for use by the reservation
+ */
+extern bitstr_t * other_resv_test(bitstr_t *avail_bitmap, uint32_t node_cnt)
+{
+	if (other_select_init() < 0)
+		return NULL;
+
+	return (*(other_select_context->ops.resv_test))
+		(avail_bitmap, node_cnt);
+}
+
+extern void other_ba_init(node_info_msg_t *node_info_ptr, bool sanity_check)
+{
+	if (other_select_init() < 0)
+		return;
+
+	(*(other_select_context->ops.ba_init))(node_info_ptr, sanity_check);
+}
+
+extern void other_ba_fini(void)
+{
+	if (other_select_init() < 0)
+		return;
+
+	(*(other_select_context->ops.ba_fini))();
+}
+
+extern int *other_ba_get_dims(void)
+{
+	if (other_select_init() < 0)
+		return NULL;
+
+	return (*(other_select_context->ops.ba_get_dims))();
+}
diff --git a/src/plugins/select/cray/other_select.h b/src/plugins/select/cray/other_select.h
index d6509eca5..ba2f746ce 100644
--- a/src/plugins/select/cray/other_select.h
+++ b/src/plugins/select/cray/other_select.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -40,8 +40,8 @@
 #ifndef _CRAY_OTHER_SELECT_H
 #define _CRAY_OTHER_SELECT_H
 
-#include <slurm/slurm.h>
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm.h"
+#include "slurm/slurm_errno.h"
 
 #include "src/common/slurm_xlator.h"	/* Must be first */
 #include "src/common/list.h"
@@ -128,7 +128,7 @@ extern int other_update_node_config(int index);
  * IN state  - state to update to
  * RETURN SLURM_SUCCESS on success || SLURM_ERROR else wise
  */
-extern int other_update_node_state(int index, uint16_t state);
+extern int other_update_node_state(struct node_record *node_ptr);
 
 /*
  * Alter the node count for a job given the type of system we are on
@@ -176,13 +176,34 @@ extern int other_job_begin(struct job_record *job_ptr);
 extern int other_job_ready(struct job_record *job_ptr);
 
 /*
- * Modify internal data structures for a job that has changed size
- *	Only support jobs shrinking now.
+ * Test if expanding a job is permitted
+ */
+extern bool other_job_expand_allow(void);
+
+/*
+ * Move the resource allocated to one job into that of another job.
+ *	All resources are removed from "from_job_ptr" and moved into
+ *	"to_job_ptr". Also see other_job_resized().
+ * RET: 0 or an error code
+ */
+extern int other_job_expand(struct job_record *from_job_ptr,
+			    struct job_record *to_job_ptr);
+
+/*
+ * Modify internal data structures for a job that has decreased job size.
+ *	Only support jobs shrinking. Also see other_job_expand();
  * RET: 0 or an error code
  */
 extern int other_job_resized(struct job_record *job_ptr,
 			     struct node_record *node_ptr);
 
+/*
+ * Pass job-step signal to other plugin.
+ * IN job_ptr - job to be signalled
+ * IN signal  - signal(7) number
+ */
+extern int other_job_signal(struct job_record *job_ptr, int signal);
+
 /*
  * Note termination of job is starting. Executed from slurmctld.
  * IN job_ptr - pointer to job being terminated
@@ -192,16 +213,40 @@ extern int other_job_fini(struct job_record *job_ptr);
 /*
  * Suspend a job. Executed from slurmctld.
  * IN job_ptr - pointer to job being suspended
+ * indf_susp IN - set if job is being suspended indefinitely by user
+ *                or admin, otherwise suspended for gang scheduling
  * RET SLURM_SUCCESS or error code
  */
-extern int other_job_suspend(struct job_record *job_ptr);
+extern int other_job_suspend(struct job_record *job_ptr, bool indf_susp);
 
 /*
  * Resume a job. Executed from slurmctld.
  * IN job_ptr - pointer to job being resumed
+ * indf_susp IN - set if job is being resumed from indefinite suspend by user
+ *                or admin, otherwise resume from gang scheduling
  * RET SLURM_SUCCESS or error code
  */
-extern int other_job_resume(struct job_record *job_ptr);
+extern int other_job_resume(struct job_record *job_ptr, bool indf_susp);
+
+/*
+ * Select the "best" nodes for given job from those available
+ * IN/OUT job_ptr - pointer to job already allocated and running in a
+ *                  block where the step is to run.
+ *                  sets start_time when job expected to start
+ * OUT step_jobinfo - Fill in the resources to be used if not
+ *                    full size of job.
+ * IN node_count  - How many nodes we are looking for.
+ * RET map of slurm nodes to be used for step, NULL on failure
+ */
+extern bitstr_t * other_step_pick_nodes(struct job_record *job_ptr,
+					select_jobinfo_t *jobinfo,
+					uint32_t node_count);
+
+/*
+ * clear what happened in select_g_step_pick_nodes
+ * IN/OUT step_ptr - Flush the resources from the job and step.
+ */
+extern int other_step_finish(struct step_record *step_ptr);
 
 /* allocate storage for a select job credential
  * RET jobinfo - storage for a select job credential
@@ -294,7 +339,7 @@ extern int other_select_nodeinfo_unpack(select_nodeinfo_t **nodeinfo,
 					Buf buffer,
 					uint16_t protocol_version);
 
-extern select_nodeinfo_t *other_select_nodeinfo_alloc(uint32_t size);
+extern select_nodeinfo_t *other_select_nodeinfo_alloc(void);
 
 extern int other_select_nodeinfo_free(select_nodeinfo_t *nodeinfo);
 
@@ -326,4 +371,10 @@ extern int other_pack_select_info(time_t last_query_time, uint16_t show_flags,
 /* Note reconfiguration or change in partition configuration */
 extern int other_reconfigure(void);
 
+extern bitstr_t * other_resv_test(bitstr_t *avail_bitmap, uint32_t node_cnt);
+
+extern void other_ba_init(node_info_msg_t *node_info_ptr, bool sanity_check);
+extern void other_ba_fini(void);
+extern int *other_ba_get_dims(void);
+
 #endif /* _CRAY_OTHER_SELECT_H */
diff --git a/src/plugins/select/cray/parser_common.h b/src/plugins/select/cray/parser_common.h
new file mode 100644
index 000000000..3a631cbd2
--- /dev/null
+++ b/src/plugins/select/cray/parser_common.h
@@ -0,0 +1,163 @@
+/*
+ * Routines and data structures common to libalps and libemulate
+ *
+ * Copyright (c) 2009-2011 Centro Svizzero di Calcolo Scientifico (CSCS)
+ * Licensed under the GPLv2.
+ */
+#ifndef __PARSER_COMMON_H__
+#define __PARSER_COMMON_H__
+
+#include "basil_alps.h"
+
+/*
+ * Global enum-to-string mapping tables
+ */
+
+/* Basil versions */
+const char *bv_names[BV_MAX] = {	/* Basil Protocol version */
+	[BV_1_0] = "1.0",
+	[BV_1_1] = "1.1",
+	[BV_1_2] = "1.1",
+	[BV_3_1] = "1.1",
+	[BV_4_0] = "1.2",
+	[BV_4_1] = "1.2"
+};
+
+const char *bv_names_long[BV_MAX] = {	/* Actual version name */
+	[BV_1_0] = "1.0",
+	[BV_1_1] = "1.1",
+	[BV_1_2] = "1.2",
+	[BV_3_1] = "3.1",
+	[BV_4_0] = "4.0",
+	[BV_4_1] = "4.1"
+};
+
+/* Basil methods */
+const char *bm_names[BM_MAX] = {
+	[BM_none]	= "NONE",
+	[BM_engine]	= "QUERY",
+	[BM_inventory]	= "QUERY",
+	[BM_reserve]	= "RESERVE",
+	[BM_confirm]	= "CONFIRM",
+	[BM_release]	= "RELEASE",
+	[BM_switch]	= "SWITCH",
+};
+
+/* Error codes */
+const char *be_names[BE_MAX] = {
+	[BE_NONE]	= "",
+	[BE_INTERNAL]	= "INTERNAL",
+	[BE_SYSTEM]	= "SYSTEM",
+	[BE_PARSER]	= "PARSER",
+	[BE_SYNTAX]	= "SYNTAX",
+	[BE_BACKEND]	= "BACKEND",
+	[BE_NO_RESID]	= "BACKEND",	/* backend can not locate resId */
+	[BE_UNKNOWN]	= "UNKNOWN"
+};
+
+const char *be_names_long[BE_MAX] = {
+	[BE_NONE]	= "no ALPS error",
+	[BE_INTERNAL]	= "internal error: unexpected condition encountered",
+	[BE_SYSTEM]	= "system call failed",
+	[BE_PARSER]	= "XML parser error",
+	[BE_SYNTAX]	= "improper XML content or structure",
+	[BE_BACKEND]	= "ALPS backend error",
+	[BE_NO_RESID]	= "ALPS resId entry does not (or no longer) exist",
+	[BE_UNKNOWN]	= "UNKNOWN ALPS ERROR"
+};
+
+/*
+ * RESERVE/INVENTORY data
+ */
+const char *nam_arch[BNA_MAX] = {
+	[BNA_NONE]	= "UNDEFINED",
+	[BNA_X2]	= "X2",
+	[BNA_XT]	= "XT",
+	[BNA_UNKNOWN]	= "UNKNOWN"
+};
+
+const char *nam_memtype[BMT_MAX] = {
+	[BMT_NONE]	= "UNDEFINED",
+	[BMT_OS]	= "OS",
+	[BMT_HUGEPAGE]	= "HUGEPAGE",
+	[BMT_VIRTUAL]	= "VIRTUAL",
+	[BMT_UNKNOWN]	= "UNKNOWN"
+};
+
+const char *nam_labeltype[BLT_MAX] = {
+	[BLT_NONE]	= "UNDEFINED",
+	[BLT_HARD]	= "HARD",
+	[BLT_SOFT]	= "SOFT",
+	[BLT_UNKNOWN]	= "UNKNOWN"
+};
+
+const char *nam_ldisp[BLD_MAX] = {
+	[BLD_NONE]	= "UNDEFINED",
+	[BLD_ATTRACT]	= "ATTRACT",
+	[BLD_REPEL]	= "REPEL",
+	[BLD_UNKNOWN]	= "UNKNOWN"
+};
+
+/*
+ * INVENTORY-only data
+ */
+const char *nam_noderole[BNR_MAX] = {
+	[BNR_NONE]	= "UNDEFINED",
+	[BNR_INTER]	= "INTERACTIVE",
+	[BNR_BATCH]	= "BATCH",
+	[BNR_UNKNOWN]	= "UNKNOWN"
+};
+
+const char *nam_nodestate[BNS_MAX] = {
+	[BNS_NONE]	= "UNDEFINED",
+	[BNS_UP]	= "UP",
+	[BNS_DOWN]	= "DOWN",
+	[BNS_UNAVAIL]	= "UNAVAILABLE",
+	[BNS_ROUTE]	= "ROUTING",
+	[BNS_SUSPECT]	= "SUSPECT",
+	[BNS_ADMINDOWN]	= "ADMIN",
+	[BNS_UNKNOWN]	= "UNKNOWN"
+};
+
+const char *nam_proc[BPT_MAX] = {
+	[BPT_NONE]	= "UNDEFINED",
+	[BPT_CRAY_X2]	= "cray_x2",
+	[BPT_X86_64]	= "x86_64",
+	[BPT_UNKNOWN]	= "UNKNOWN"
+};
+
+/*
+ * Enum-to-string mapping tables specific to Basil 3.1
+ */
+const char *nam_rsvn_mode[BRM_MAX] = {
+	[BRM_NONE]      = "UNDEFINED",
+	[BRM_EXCLUSIVE] = "EXCLUSIVE",
+	[BRM_SHARE]     = "SHARED",
+	[BRM_UNKNOWN]   = "UNKNOWN"
+};
+
+const char *nam_gpc_mode[BGM_MAX] = {
+	[BGM_NONE]      = "NONE",
+	[BRM_PROCESSOR] = "PROCESSOR",
+	[BRM_LOCAL]     = "LOCAL",
+	[BRM_GLOBAL]    = "GLOBAL",
+	[BGM_UNKNOWN]   = "UNKNOWN"
+};
+
+/*
+ * Enum-to-string mapping tables introduced in Alps 4.0
+ */
+const char *nam_acceltype[BA_MAX] = {
+	[BA_NONE]	= "UNDEFINED",
+	[BA_GPU]	= "GPU",
+	[BA_UNKNOWN]	= "UNKNOWN"
+};
+
+const char *nam_accelstate[BAS_MAX] = {
+	[BAS_NONE]	= "UNDEFINED",
+	[BAS_UP]	= "UP",
+	[BAS_DOWN]	= "DOWN",
+	[BAS_UNKNOWN]	= "UNKNOWN"
+};
+
+#endif /* __PARSER_COMMON_H__ */
diff --git a/src/plugins/select/cray/select_cray.c b/src/plugins/select/cray/select_cray.c
index 5eff8fbac..db7b76e7d 100644
--- a/src/plugins/select/cray/select_cray.c
+++ b/src/plugins/select/cray/select_cray.c
@@ -3,12 +3,13 @@
  *****************************************************************************
  *  Copyright (C) 2010 Lawrence Livermore National Security.
  *  Portions Copyright (C) 2010 SchedMD <http://www.schedmd.com>.
+ *  Supported by the Oak Ridge National Laboratory Extreme Scale Systems Center
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -52,9 +53,11 @@
 #include <sys/stat.h>
 #include <unistd.h>
 
+#include "src/common/slurm_xlator.h"	/* Must be first */
 #include "other_select.h"
+#include "basil_interface.h"
+#include "cray_config.h"
 
-#define NOT_FROM_CONTROLLER -2
 /* These are defined here so when we link with something other than
  * the slurmctld we will have these symbols defined.  They will get
  * overwritten when linking with the slurmctld.
@@ -62,38 +65,42 @@
 #if defined (__APPLE__)
 slurm_ctl_conf_t slurmctld_conf __attribute__((weak_import));
 struct node_record *node_record_table_ptr __attribute__((weak_import));
-int bg_recover __attribute__((weak_import)) = NOT_FROM_CONTROLLER;
 List part_list __attribute__((weak_import));
 List job_list __attribute__((weak_import));
 int node_record_count __attribute__((weak_import));
 time_t last_node_update __attribute__((weak_import));
 struct switch_record *switch_record_table __attribute__((weak_import));
 int switch_record_cnt __attribute__((weak_import));
+slurmdb_cluster_rec_t *working_cluster_rec  __attribute__((weak_import)) = NULL;
+void *acct_db_conn __attribute__((weak_import)) = NULL;
+bitstr_t *avail_node_bitmap __attribute__((weak_import)) = NULL;
 #else
 slurm_ctl_conf_t slurmctld_conf;
 struct node_record *node_record_table_ptr;
-int bg_recover = NOT_FROM_CONTROLLER;
 List part_list;
 List job_list;
 int node_record_count;
 time_t last_node_update;
 struct switch_record *switch_record_table;
 int switch_record_cnt;
+slurmdb_cluster_rec_t *working_cluster_rec = NULL;
+void *acct_db_conn = NULL;
+bitstr_t *avail_node_bitmap = NULL;
 #endif
 
-#define JOBINFO_MAGIC 0x8cb3
-#define NODEINFO_MAGIC 0x82a3
-
-struct select_jobinfo {
-	uint16_t		magic;		/* magic number */
-	select_jobinfo_t	*other_jobinfo;
-	uint32_t		reservation_id;	/* BASIL reservation ID */
-};
+/*
+ * SIGRTMIN isn't defined on osx, so lets keep it above the signals in use.
+ */
+#if !defined (SIGRTMIN) && defined (__APPLE__)
+#  define SIGRTMIN SIGUSR2+1
+#endif
 
-struct select_nodeinfo {
-	uint16_t magic;		/* magic number */
-	select_nodeinfo_t *other_nodeinfo;
-};
+/* All current (2011) XT/XE installations have a maximum dimension of 3,
+ * smaller systems deploy a 2D Torus which has no connectivity in
+ * X-dimension.  We know the highest system dimensions possible here
+ * are 3 so we set it to that.  Do not use SYSTEM_DIMENSIONS since
+ * that could easily be wrong if built on a non Cray system. */
+static int select_cray_dim_size[3] = {-1};
 
 /*
  * These variables are required by the generic plugin interface.  If they
@@ -125,9 +132,8 @@ struct select_nodeinfo {
  */
 const char plugin_name[]	= "Cray node selection plugin";
 const char plugin_type[]	= "select/cray";
-uint32_t plugin_id	        = 104;
-const uint32_t plugin_version	= 1;
-
+uint32_t plugin_id		= 104;
+const uint32_t plugin_version	= 100;
 
 /*
  * init() is called when the plugin is loaded, before any other functions
@@ -138,7 +144,7 @@ extern int init ( void )
 	/*
 	 * FIXME: At the moment the smallest Cray allocation unit are still
 	 * full nodes. Node sharing (even across NUMA sockets of the same
-	 * node) is, as of CLE 3.1 (summer 2010) still not supported, i.e.
+	 * node) is, as of CLE 3.x (Summer 2011) still not supported, i.e.
 	 * as per the LIMITATIONS section of the aprun(1) manpage of the
 	 * 3.1.27A release).
 	 * Hence for the moment we can only use select/linear.  If some
@@ -147,15 +153,13 @@ extern int init ( void )
 	 * if (slurmctld_conf.select_type_param & CR_CONS_RES)
 	 *	plugin_id = 105;
 	 */
-#ifndef HAVE_CRAY
-	if (bg_recover != NOT_FROM_CONTROLLER)
-		fatal("select/cray is incompatible with a non Cray system");
-#endif
+	create_config();
 	return SLURM_SUCCESS;
 }
 
 extern int fini ( void )
 {
+	destroy_config();
 	return SLURM_SUCCESS;
 }
 
@@ -179,8 +183,22 @@ extern int select_p_job_init(List job_list)
 	return other_job_init(job_list);
 }
 
+/*
+ * select_p_node_ranking - generate node ranking for Cray nodes
+ */
+extern bool select_p_node_ranking(struct node_record *node_ptr, int node_cnt)
+{
+	if (basil_node_ranking(node_ptr, node_cnt) < 0)
+		fatal("can not resolve node coordinates: ALPS problem?");
+	return true;
+}
+
 extern int select_p_node_init(struct node_record *node_ptr, int node_cnt)
 {
+	if (basil_geometry(node_ptr, node_cnt)) {
+		error("can not get initial ALPS node state");
+		return SLURM_ERROR;
+	}
 	return other_node_init(node_ptr, node_cnt);
 }
 
@@ -226,7 +244,6 @@ extern int select_p_job_test(struct job_record *job_ptr, bitstr_t *bitmap,
 			     List preemptee_candidates,
 			     List *preemptee_job_list)
 {
-
 	return other_job_test(job_ptr, bitmap, min_nodes, max_nodes,
 			      req_nodes, mode, preemptee_candidates,
 			      preemptee_job_list);
@@ -234,35 +251,136 @@ extern int select_p_job_test(struct job_record *job_ptr, bitstr_t *bitmap,
 
 extern int select_p_job_begin(struct job_record *job_ptr)
 {
+	xassert(job_ptr);
 
+	if (do_basil_reserve(job_ptr) != SLURM_SUCCESS) {
+		job_ptr->state_reason = WAIT_RESOURCES;
+		xfree(job_ptr->state_desc);
+		return SLURM_ERROR;
+	}
 	return other_job_begin(job_ptr);
 }
 
 extern int select_p_job_ready(struct job_record *job_ptr)
 {
+	int rc = SLURM_SUCCESS;
+
+	xassert(job_ptr);
+	/*
+	 * Convention:	this function may be called also from stepdmgr, to
+	 *		confirm the ALPS reservation of a batch job. In this
+	 *		case, job_ptr only has minimal information and sets
+	 *		job_state == NO_VAL to distinguish this call from one
+	 *		done by slurmctld. It also sets batch_flag == 0, which
+	 *		means that we need to confirm only if batch_flag is 0,
+	 *		and execute the other_job_ready() only in slurmctld.
+	 */
+	if (!job_ptr->batch_flag)
+		rc = do_basil_confirm(job_ptr);
+	if (rc != SLURM_SUCCESS || (job_ptr->job_state == (uint16_t)NO_VAL))
+		return rc;
 	return other_job_ready(job_ptr);
 }
 
-
 extern int select_p_job_resized(struct job_record *job_ptr,
 				struct node_record *node_ptr)
 {
-	return other_job_resized(job_ptr, node_ptr);
+	/* return other_job_resized(job_ptr, node_ptr); */
+	return ESLURM_NOT_SUPPORTED;
+}
+
+extern bool select_p_job_expand_allow(void)
+{
+	return false;
+}
+
+extern int select_p_job_expand(struct job_record *from_job_ptr,
+			       struct job_record *to_job_ptr)
+{
+	return ESLURM_NOT_SUPPORTED;
+}
+
+extern int select_p_job_signal(struct job_record *job_ptr, int signal)
+{
+	xassert(job_ptr);
+	/*
+	 * Release the ALPS reservation already here for those signals that are
+	 * likely to terminate the job. Otherwise there is a race condition if a
+	 * script has more than one aprun line: while the apkill of the current
+	 * aprun line is underway, the job script proceeds to run and executes
+	 * the next following aprun line, until reaching the end of the script.
+	 * This not only creates large delays, it can also mess up cleaning up
+	 * after the job. Releasing the reservation will stop any new aprun
+	 * lines from being executed.
+	 */
+	switch (signal) {
+		case SIGCONT:
+		case SIGSTOP:
+		case SIGTSTP:
+		case SIGTTIN:
+		case SIGTTOU:
+		case SIGURG:
+		case SIGCHLD:
+		case SIGWINCH:
+			break;
+		default:
+			if (signal < SIGRTMIN)
+				do_basil_release(job_ptr);
+	}
+
+	if (do_basil_signal(job_ptr, signal) != SLURM_SUCCESS)
+		return SLURM_ERROR;
+	return other_job_signal(job_ptr, signal);
 }
 
 extern int select_p_job_fini(struct job_record *job_ptr)
 {
+	if (job_ptr == NULL)
+		return SLURM_SUCCESS;
+	if (do_basil_release(job_ptr) != SLURM_SUCCESS)
+		return SLURM_ERROR;
+	/*
+	 * Convention: like select_p_job_ready, may be called also from
+	 *             stepdmgr, where job_state == NO_VAL is used to
+	 *             distinguish the context from that of slurmctld.
+	 */
+	if (job_ptr->job_state == (uint16_t)NO_VAL)
+		return SLURM_SUCCESS;
 	return other_job_fini(job_ptr);
 }
 
-extern int select_p_job_suspend(struct job_record *job_ptr)
+extern int select_p_job_suspend(struct job_record *job_ptr, bool indf_susp)
 {
-	return other_job_suspend(job_ptr);
+	if (job_ptr == NULL)
+		return SLURM_SUCCESS;
+
+	if (do_basil_switch(job_ptr, 1) != SLURM_SUCCESS)
+		return SLURM_ERROR;
+
+	return other_job_suspend(job_ptr, indf_susp);
+}
+
+extern int select_p_job_resume(struct job_record *job_ptr, bool indf_susp)
+{
+	if (job_ptr == NULL)
+		return SLURM_SUCCESS;
+
+	if (do_basil_switch(job_ptr, 0) != SLURM_SUCCESS)
+		return SLURM_ERROR;
+
+	return other_job_resume(job_ptr, indf_susp);
 }
 
-extern int select_p_job_resume(struct job_record *job_ptr)
+extern bitstr_t *select_p_step_pick_nodes(struct job_record *job_ptr,
+					  select_jobinfo_t *jobinfo,
+					  uint32_t node_count)
 {
-	return other_job_resume(job_ptr);
+	return other_step_pick_nodes(job_ptr, jobinfo, node_count);
+}
+
+extern int select_p_step_finish(struct step_record *step_ptr)
+{
+	return other_step_finish(step_ptr);
 }
 
 extern int select_p_pack_select_info(time_t last_query_time,
@@ -273,12 +391,12 @@ extern int select_p_pack_select_info(time_t last_query_time,
 				      protocol_version);
 }
 
-extern select_nodeinfo_t *select_p_select_nodeinfo_alloc(uint32_t size)
+extern select_nodeinfo_t *select_p_select_nodeinfo_alloc(void)
 {
 	select_nodeinfo_t *nodeinfo = xmalloc(sizeof(struct select_nodeinfo));
 
 	nodeinfo->magic = NODEINFO_MAGIC;
-	nodeinfo->other_nodeinfo = other_select_nodeinfo_alloc(size);
+	nodeinfo->other_nodeinfo = other_select_nodeinfo_alloc();
 
 	return nodeinfo;
 }
@@ -349,11 +467,11 @@ extern int select_p_select_nodeinfo_get(select_nodeinfo_t *nodeinfo,
 	select_nodeinfo_t **select_nodeinfo = (select_nodeinfo_t **) data;
 
 	if (nodeinfo == NULL) {
-		error("other_get_nodeinfo: nodeinfo not set");
+		error("select/cray nodeinfo_get: nodeinfo not set");
 		return SLURM_ERROR;
 	}
 	if (nodeinfo->magic != NODEINFO_MAGIC) {
-		error("set_nodeinfo: nodeinfo magic bad");
+		error("select/cray nodeinfo_get: nodeinfo magic bad");
 		return SLURM_ERROR;
 	}
 
@@ -385,13 +503,14 @@ extern int select_p_select_jobinfo_set(select_jobinfo_t *jobinfo,
 {
 	int rc = SLURM_SUCCESS;
 	uint32_t *uint32 = (uint32_t *) data;
+	uint64_t *uint64 = (uint64_t *) data;
 
 	if (jobinfo == NULL) {
-		error("set_jobinfo: jobinfo not set");
+		error("select/cray jobinfo_set: jobinfo not set");
 		return SLURM_ERROR;
 	}
 	if (jobinfo->magic != JOBINFO_MAGIC) {
-		error("set_jobinfo: jobinfo magic bad");
+		error("select/cray jobinfo_set: jobinfo magic bad");
 		return SLURM_ERROR;
 	}
 
@@ -399,6 +518,9 @@ extern int select_p_select_jobinfo_set(select_jobinfo_t *jobinfo,
 	case SELECT_JOBDATA_RESV_ID:
 		jobinfo->reservation_id = *uint32;
 		break;
+	case SELECT_JOBDATA_PAGG_ID:
+		jobinfo->confirm_cookie = *uint64;
+		break;
 	default:
 		rc = other_select_jobinfo_set(jobinfo, data_type, data);
 		break;
@@ -413,14 +535,15 @@ extern int select_p_select_jobinfo_get(select_jobinfo_t *jobinfo,
 {
 	int rc = SLURM_SUCCESS;
 	uint32_t *uint32 = (uint32_t *) data;
+	uint64_t *uint64 = (uint64_t *) data;
 	select_jobinfo_t **select_jobinfo = (select_jobinfo_t **) data;
 
 	if (jobinfo == NULL) {
-		error("get_jobinfo: jobinfo not set");
+		error("select/cray jobinfo_get: jobinfo not set");
 		return SLURM_ERROR;
 	}
 	if (jobinfo->magic != JOBINFO_MAGIC) {
-		error("get_jobinfo: jobinfo magic bad");
+		error("select/cray jobinfo_get: jobinfo magic bad");
 		return SLURM_ERROR;
 	}
 
@@ -431,6 +554,9 @@ extern int select_p_select_jobinfo_get(select_jobinfo_t *jobinfo,
 	case SELECT_JOBDATA_RESV_ID:
 		*uint32 = jobinfo->reservation_id;
 		break;
+	case SELECT_JOBDATA_PAGG_ID:
+		*uint64 = jobinfo->confirm_cookie;
+		break;
 	default:
 		rc = other_select_jobinfo_get(jobinfo, data_type, data);
 		break;
@@ -446,11 +572,12 @@ extern select_jobinfo_t *select_p_select_jobinfo_copy(select_jobinfo_t *jobinfo)
 	if (jobinfo == NULL)
 		;
 	else if (jobinfo->magic != JOBINFO_MAGIC)
-		error("copy_jobinfo: jobinfo magic bad");
+		error("select/cray jobinfo_copy: jobinfo magic bad");
 	else {
 		rc = xmalloc(sizeof(struct select_jobinfo));
 		rc->magic = JOBINFO_MAGIC;
 		rc->reservation_id = jobinfo->reservation_id;
+		rc->confirm_cookie = jobinfo->confirm_cookie;
 	}
 	return rc;
 }
@@ -461,7 +588,7 @@ extern int select_p_select_jobinfo_free(select_jobinfo_t *jobinfo)
 
 	if (jobinfo) {
 		if (jobinfo->magic != JOBINFO_MAGIC) {
-			error("free_jobinfo: jobinfo magic bad");
+			error("select/cray jobinfo_free: jobinfo magic bad");
 			return EINVAL;
 		}
 
@@ -483,6 +610,7 @@ extern int select_p_select_jobinfo_pack(select_jobinfo_t *jobinfo, Buf buffer,
 			return SLURM_SUCCESS;
 		}
 		pack32(jobinfo->reservation_id, buffer);
+		pack64(jobinfo->confirm_cookie, buffer);
 		rc = other_select_jobinfo_pack(jobinfo->other_jobinfo, buffer,
 					       protocol_version);
 	}
@@ -500,6 +628,7 @@ extern int select_p_select_jobinfo_unpack(select_jobinfo_t **jobinfo_pptr,
 	jobinfo->magic = JOBINFO_MAGIC;
 	if (protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
 		safe_unpack32(&jobinfo->reservation_id, buffer);
+		safe_unpack64(&jobinfo->confirm_cookie, buffer);
 		rc = other_select_jobinfo_unpack(&jobinfo->other_jobinfo,
 						 buffer, protocol_version);
 	}
@@ -521,39 +650,44 @@ extern char *select_p_select_jobinfo_sprint(select_jobinfo_t *jobinfo,
 {
 
 	if (buf == NULL) {
-		error("sprint_jobinfo: buf is null");
+		error("select/cray jobinfo_sprint: buf is null");
 		return NULL;
 	}
 
 	if ((mode != SELECT_PRINT_DATA)
 	    && jobinfo && (jobinfo->magic != JOBINFO_MAGIC)) {
-		error("sprint_jobinfo: jobinfo magic bad");
+		error("select/cray jobinfo_sprint: jobinfo magic bad");
 		return NULL;
 	}
 
 	if (jobinfo == NULL) {
 		if (mode != SELECT_PRINT_HEAD) {
-			error("sprint_jobinfo: jobinfo bad");
+			error("select/cray jobinfo_sprint: jobinfo bad");
 			return NULL;
 		}
 	}
 
 	switch (mode) {
+	/*
+	 * SLURM only knows the ALPS reservation ID. The application IDs (APIDs)
+	 * of the reservation need to be queried from the Inventory response.
+	 * The maximum known reservation ID is 4096, it wraps around after that.
+	 */
 	case SELECT_PRINT_HEAD:
-		snprintf(buf, size, "RESV_ID");
+		snprintf(buf, size, "ALPS");
 		break;
 	case SELECT_PRINT_DATA:
 		if (jobinfo->reservation_id)
-			snprintf(buf, size, "%7u", jobinfo->reservation_id);
+			snprintf(buf, size, "%4u", jobinfo->reservation_id);
 		else
-			snprintf(buf, size, "%7s", "none");
+			snprintf(buf, size, "%4s", "none");
 		break;
 	case SELECT_PRINT_MIXED:
 		if (jobinfo->reservation_id)
-			snprintf(buf, size, "Resv_ID=%u",
+			snprintf(buf, size, "resId=%u",
 				 jobinfo->reservation_id);
 		else
-			snprintf(buf, size, "Resv_ID=none");
+			snprintf(buf, size, "resId=none");
 		break;
 	case SELECT_PRINT_RESV_ID:
 		snprintf(buf, size, "%u", jobinfo->reservation_id);
@@ -574,32 +708,33 @@ extern char *select_p_select_jobinfo_xstrdup(select_jobinfo_t *jobinfo,
 
 	if ((mode != SELECT_PRINT_DATA)
 	    && jobinfo && (jobinfo->magic != JOBINFO_MAGIC)) {
-		error("xstrdup_jobinfo: jobinfo magic bad");
+		error("select/cray jobinfo_xstrdup: jobinfo magic bad");
 		return NULL;
 	}
 
 	if (jobinfo == NULL) {
 		if (mode != SELECT_PRINT_HEAD) {
-			error("xstrdup_jobinfo: jobinfo bad");
+			error("select/cray jobinfo_xstrdup: jobinfo bad");
 			return NULL;
 		}
 	}
 
 	switch (mode) {
+	/* See comment in select_p_select_jobinfo_sprint() regarding format. */
 	case SELECT_PRINT_HEAD:
-		xstrcat(buf, "RESV_ID");
+		xstrcat(buf, "ALPS");
 		break;
 	case SELECT_PRINT_DATA:
 		if (jobinfo->reservation_id)
-			xstrfmtcat(buf, "%7u", jobinfo->reservation_id);
+			xstrfmtcat(buf, "%4u", jobinfo->reservation_id);
 		else
-			xstrfmtcat(buf, "%7s", "none");
+			xstrfmtcat(buf, "%4s", "none");
 		break;
 	case SELECT_PRINT_MIXED:
 		if (jobinfo->reservation_id)
-			xstrfmtcat(buf, "Resv_ID=%u", jobinfo->reservation_id);
+			xstrfmtcat(buf, "resId=%u", jobinfo->reservation_id);
 		else
-			xstrcat(buf, "Resv_ID=none");
+			xstrcat(buf, "resId=none");
 		break;
 	case SELECT_PRINT_RESV_ID:
 		xstrfmtcat(buf, "%u", jobinfo->reservation_id);
@@ -635,9 +770,9 @@ extern int select_p_update_node_config(int index)
 	return other_update_node_config(index);
 }
 
-extern int select_p_update_node_state(int index, uint16_t state)
+extern int select_p_update_node_state(struct node_record *node_ptr)
 {
-	return other_update_node_state(index, state);
+	return other_update_node_state(node_ptr);
 }
 
 extern int select_p_alter_node_cnt(enum select_node_cnt type, void *data)
@@ -647,5 +782,73 @@ extern int select_p_alter_node_cnt(enum select_node_cnt type, void *data)
 
 extern int select_p_reconfigure(void)
 {
+	if (basil_inventory())
+		return SLURM_ERROR;
 	return other_reconfigure();
 }
+
+extern bitstr_t * select_p_resv_test(bitstr_t *avail_bitmap, uint32_t node_cnt)
+{
+	return other_resv_test(avail_bitmap, node_cnt);
+}
+
+extern void select_p_ba_init(node_info_msg_t *node_info_ptr, bool sanity_check)
+{
+	int i, j, offset;
+	int dims = slurmdb_setup_cluster_dims();
+
+	if (select_cray_dim_size[0] == -1) {
+		node_info_t *node_ptr;
+
+		/* init the rest of the dim sizes. All current (2011)
+		 * XT/XE installations have a maximum dimension of 3,
+		 * smaller systems deploy a 2D Torus which has no
+		 * connectivity in X-dimension.  Just in case they
+		 * decide to change it where we only get 2 instead of
+		 * 3 we will initialize it later. */
+		for (i = 1; i < dims; i++)
+			select_cray_dim_size[i] = -1;
+		for (i = 0; i < node_info_ptr->record_count; i++) {
+			node_ptr = &(node_info_ptr->node_array[i]);
+			if (!node_ptr->node_addr ||
+			    (strlen(node_ptr->node_addr) != dims))
+				continue;
+			for (j = 0; j < dims; j++) {
+				offset = select_char2coord(
+					node_ptr->node_addr[j]);
+				select_cray_dim_size[j] =
+					MAX((offset+1),
+					    select_cray_dim_size[j]);
+			}
+		}
+	}
+
+	/*
+	 * Override the generic setup of dim_size made in _setup_cluster_rec()
+	 * FIXME: use a better way, e.g. encoding the 3-dim triplet as a
+	 *        string which gets stored in a database (event_table?) entry.
+	 */
+	if (working_cluster_rec) {
+		xfree(working_cluster_rec->dim_size);
+		working_cluster_rec->dim_size = xmalloc(sizeof(int) * dims);
+		for (j = 0; j < dims; j++)
+			working_cluster_rec->dim_size[j] =
+				select_cray_dim_size[j];
+	}
+
+	other_ba_init(node_info_ptr, sanity_check);
+}
+
+extern int *select_p_ba_get_dims(void)
+{
+	/* Size of system in each dimension as set by basil_geometry(),
+	 * which might not be called yet */
+	if (select_cray_dim_size[0] != -1)
+		return select_cray_dim_size;
+	return NULL;
+}
+
+extern void select_p_ba_fini(void)
+{
+	other_ba_fini();
+}
diff --git a/src/plugins/select/linear/Makefile.in b/src/plugins/select/linear/Makefile.in
index 1c65209ff..82c029f56 100644
--- a/src/plugins/select/linear/Makefile.in
+++ b/src/plugins/select/linear/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -138,7 +140,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -175,6 +180,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -232,6 +238,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -267,6 +274,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/select/linear/select_linear.c b/src/plugins/select/linear/select_linear.c
index 48ae07e15..22b57e055 100644
--- a/src/plugins/select/linear/select_linear.c
+++ b/src/plugins/select/linear/select_linear.c
@@ -11,7 +11,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -54,8 +54,10 @@
 #include <sys/types.h>
 #include <sys/stat.h>
 #include <unistd.h>
-#include <slurm/slurm.h>
-#include <slurm/slurm_errno.h>
+#include <time.h>
+
+#include "slurm/slurm.h"
+#include "slurm/slurm_errno.h"
 
 #include "src/common/slurm_xlator.h"	/* Must be first */
 #include "src/common/gres.h"
@@ -115,6 +117,9 @@ static void _add_run_job(struct cr_record *cr_ptr, uint32_t job_id);
 static void _add_tot_job(struct cr_record *cr_ptr, uint32_t job_id);
 static void _build_select_struct(struct job_record *job_ptr, bitstr_t *bitmap);
 static int  _cr_job_list_sort(void *x, void *y);
+static job_resources_t *_create_job_resources(int node_cnt);
+static int _decr_node_job_cnt(int node_inx, struct job_record *job_ptr,
+			      char *pre_err);
 static void _dump_node_cr(struct cr_record *cr_ptr);
 static struct cr_record *_dup_cr(struct cr_record *cr_ptr);
 static int  _find_job_mate(struct job_record *job_ptr, bitstr_t *bitmap,
@@ -128,6 +133,8 @@ static int _job_count_bitmap(struct cr_record *cr_ptr,
 			     struct job_record *job_ptr,
 			     bitstr_t * bitmap, bitstr_t * jobmap,
 			     int run_job_cnt, int tot_job_cnt, uint16_t mode);
+static int _job_expand(struct job_record *from_job_ptr,
+		       struct job_record *to_job_ptr);
 static int _job_test(struct job_record *job_ptr, bitstr_t *bitmap,
 		     uint32_t min_nodes, uint32_t max_nodes,
 		     uint32_t req_nodes);
@@ -146,6 +153,8 @@ static int _run_now(struct job_record *job_ptr, bitstr_t *bitmap,
 		    int max_share, uint32_t req_nodes,
 		    List preemptee_candidates,
 		    List *preemptee_job_list);
+static int _sort_usable_nodes_dec(struct job_record *job_a,
+				  struct job_record *job_b);
 static bool _test_run_job(struct cr_record *cr_ptr, uint32_t job_id);
 static bool _test_tot_job(struct cr_record *cr_ptr, uint32_t job_id);
 static int _test_only(struct job_record *job_ptr, bitstr_t *bitmap,
@@ -157,7 +166,7 @@ static int _will_run_test(struct job_record *job_ptr, bitstr_t *bitmap,
 			  List preemptee_candidates,
 			  List *preemptee_job_list);
 
-extern select_nodeinfo_t *select_p_select_nodeinfo_alloc(uint32_t size);
+extern select_nodeinfo_t *select_p_select_nodeinfo_alloc();
 extern int select_p_select_nodeinfo_free(select_nodeinfo_t *nodeinfo);
 
 /*
@@ -191,7 +200,7 @@ extern int select_p_select_nodeinfo_free(select_nodeinfo_t *nodeinfo);
 const char plugin_name[]       	= "Linear node selection plugin";
 const char plugin_type[]       	= "select/linear";
 const uint32_t plugin_id	= 102;
-const uint32_t plugin_version	= 90;
+const uint32_t plugin_version	= 100;
 
 static struct node_record *select_node_ptr = NULL;
 static int select_node_cnt = 0;
@@ -516,6 +525,21 @@ static uint16_t _get_total_cpus(int index)
 		return node_ptr->cpus;
 }
 
+static job_resources_t *_create_job_resources(int node_cnt)
+{
+	job_resources_t *job_resrcs_ptr;
+
+	job_resrcs_ptr = create_job_resources();
+	job_resrcs_ptr->cpu_array_reps = xmalloc(sizeof(uint32_t) * node_cnt);
+	job_resrcs_ptr->cpu_array_value = xmalloc(sizeof(uint16_t) * node_cnt);
+	job_resrcs_ptr->cpus = xmalloc(sizeof(uint16_t) * node_cnt);
+	job_resrcs_ptr->cpus_used = xmalloc(sizeof(uint16_t) * node_cnt);
+	job_resrcs_ptr->memory_allocated = xmalloc(sizeof(uint32_t) * node_cnt);
+	job_resrcs_ptr->memory_used = xmalloc(sizeof(uint32_t) * node_cnt);
+	job_resrcs_ptr->nhosts = node_cnt;
+	return job_resrcs_ptr;
+}
+
 /* Build the full job_resources_t *structure for a job based upon the nodes
  *	allocated to it (the bitmap) and the job's memory requirement */
 static void _build_select_struct(struct job_record *job_ptr, bitstr_t *bitmap)
@@ -539,14 +563,7 @@ static void _build_select_struct(struct job_record *job_ptr, bitstr_t *bitmap)
 		free_job_resources(&job_ptr->job_resrcs);
 
 	node_cnt = bit_set_count(bitmap);
-	job_ptr->job_resrcs = job_resrcs_ptr = create_job_resources();
-	job_resrcs_ptr->cpu_array_reps = xmalloc(sizeof(uint32_t) * node_cnt);
-	job_resrcs_ptr->cpu_array_value = xmalloc(sizeof(uint16_t) * node_cnt);
-	job_resrcs_ptr->cpus = xmalloc(sizeof(uint16_t) * node_cnt);
-	job_resrcs_ptr->cpus_used = xmalloc(sizeof(uint16_t) * node_cnt);
-	job_resrcs_ptr->memory_allocated = xmalloc(sizeof(uint32_t) * node_cnt);
-	job_resrcs_ptr->memory_used = xmalloc(sizeof(uint32_t) * node_cnt);
-	job_resrcs_ptr->nhosts = node_cnt;
+	job_ptr->job_resrcs = job_resrcs_ptr = _create_job_resources(node_cnt);
 	job_resrcs_ptr->node_bitmap = bit_copy(bitmap);
 	job_resrcs_ptr->nodes = bitmap2node_name(bitmap);
 	if (job_resrcs_ptr->node_bitmap == NULL)
@@ -656,9 +673,14 @@ static int _job_count_bitmap(struct cr_record *cr_ptr,
 						 gres_list, use_total_gres,
 						 NULL, 0, 0, job_ptr->job_id,
 						 node_ptr->name);
-		if ((gres_cpus != NO_VAL) && (gres_cpus < cpu_cnt)) {
-			bit_clear(jobmap, i);
-			continue;
+		if (gres_cpus != NO_VAL) {
+			if ((gres_cpus < cpu_cnt) ||
+			    (gres_cpus < job_ptr->details->ntasks_per_node) ||
+			    ((job_ptr->details->cpus_per_task > 1) &&
+			     (gres_cpus < job_ptr->details->cpus_per_task))) {
+				bit_clear(jobmap, i);
+				continue;
+			}
 		}
 
 		if (mode == SELECT_MODE_TEST_ONLY) {
@@ -779,7 +801,7 @@ static int _job_test(struct job_record *job_ptr, bitstr_t *bitmap,
 	int rem_cpus, rem_nodes;	/* remaining resources desired */
 	int best_fit_nodes, best_fit_cpus, best_fit_req;
 	int best_fit_location = 0, best_fit_sufficient;
-	int avail_cpus, alloc_cpus = 0, total_cpus = 0;
+	int avail_cpus, total_cpus = 0;
 
 	if (bit_set_count(bitmap) < min_nodes)
 		return error_code;
@@ -828,7 +850,6 @@ static int _job_test(struct job_record *job_ptr, bitstr_t *bitmap,
 				rem_nodes--;
 				max_nodes--;
 				rem_cpus   -= avail_cpus;
-				alloc_cpus += avail_cpus;
 				total_cpus += _get_total_cpus(index);
 			} else {	 /* node not required (yet) */
 				bit_clear(bitmap, index);
@@ -956,7 +977,6 @@ static int _job_test(struct job_record *job_ptr, bitstr_t *bitmap,
 				max_nodes--;
 				avail_cpus = _get_avail_cpus(job_ptr, i);
 				rem_cpus   -= avail_cpus;
-				alloc_cpus += avail_cpus;
 				total_cpus += _get_total_cpus(i);
 			}
 			for (i = (best_fit_req - 1);
@@ -971,7 +991,6 @@ static int _job_test(struct job_record *job_ptr, bitstr_t *bitmap,
 				max_nodes--;
 				avail_cpus = _get_avail_cpus(job_ptr, i);
 				rem_cpus   -= avail_cpus;
-				alloc_cpus += avail_cpus;
 				total_cpus += _get_total_cpus(i);
 			}
 		} else {
@@ -987,7 +1006,6 @@ static int _job_test(struct job_record *job_ptr, bitstr_t *bitmap,
 				max_nodes--;
 				avail_cpus = _get_avail_cpus(job_ptr, i);
 				rem_cpus   -= avail_cpus;
-				alloc_cpus += avail_cpus;
 				total_cpus += _get_total_cpus(i);
 			}
 		}
@@ -1028,29 +1046,45 @@ static int _job_test_topo(struct job_record *job_ptr, bitstr_t *bitmap,
 {
 	bitstr_t **switches_bitmap;		/* nodes on this switch */
 	int       *switches_cpu_cnt;		/* total CPUs on switch */
-	int       *switches_node_cnt;		/* total nodes on switch */
+	uint32_t  *switches_node_cnt;		/* total nodes on switch */
 	int       *switches_required;		/* set if has required node */
 
-	bitstr_t  *avail_nodes_bitmap = NULL;	/* nodes on any switch */
 	bitstr_t  *req_nodes_bitmap   = NULL;
-	int rem_cpus, rem_nodes;	/* remaining resources desired */
-	int avail_cpus, alloc_cpus = 0, total_cpus = 0;
+	int rem_cpus;			/* remaining resources desired */
+	int avail_cpus, total_cpus = 0;
+	uint32_t want_nodes, alloc_nodes = 0;
 	int i, j, rc = SLURM_SUCCESS;
 	int best_fit_inx, first, last;
 	int best_fit_nodes, best_fit_cpus;
 	int best_fit_location = 0, best_fit_sufficient;
 	bool sufficient;
+	long time_waiting = 0;
+	int leaf_switch_count = 0;	/* Count of leaf node switches used */
+
+	if (job_ptr->req_switch) {
+		time_t     time_now;
+		time_now = time(NULL);
+		if (job_ptr->wait4switch_start == 0)
+			job_ptr->wait4switch_start = time_now;
+		time_waiting = time_now - job_ptr->wait4switch_start;
+	}
 
 	rem_cpus = job_ptr->details->min_cpus;
 	if (req_nodes > min_nodes)
-		rem_nodes = req_nodes;
+		want_nodes = req_nodes;
 	else
-		rem_nodes = min_nodes;
+		want_nodes = min_nodes;
 
+	/* Construct a set of switch array entries,
+	 * use the same indexes as switch_record_table in slurmctld */
+	switches_bitmap   = xmalloc(sizeof(bitstr_t *) * switch_record_cnt);
+	switches_cpu_cnt  = xmalloc(sizeof(int)        * switch_record_cnt);
+	switches_node_cnt = xmalloc(sizeof(uint32_t)   * switch_record_cnt);
+	switches_required = xmalloc(sizeof(int)        * switch_record_cnt);
 	if (job_ptr->details->req_node_bitmap) {
 		req_nodes_bitmap = bit_copy(job_ptr->details->req_node_bitmap);
 		i = bit_set_count(req_nodes_bitmap);
-		if (i > max_nodes) {
+		if (i > (int)max_nodes) {
 			info("job %u requires more nodes than currently "
 			     "available (%u>%u)",
 			     job_ptr->job_id, i, max_nodes);
@@ -1059,22 +1093,21 @@ static int _job_test_topo(struct job_record *job_ptr, bitstr_t *bitmap,
 		}
 	}
 
-	/* Construct a set of switch array entries,
-	 * use the same indexes as switch_record_table in slurmctld */
-	switches_bitmap   = xmalloc(sizeof(bitstr_t *) * switch_record_cnt);
-	switches_cpu_cnt  = xmalloc(sizeof(int)        * switch_record_cnt);
-	switches_node_cnt = xmalloc(sizeof(int)        * switch_record_cnt);
-	switches_required = xmalloc(sizeof(int)        * switch_record_cnt);
-	avail_nodes_bitmap = bit_alloc(node_record_count);
+	/* phase 1: make availability bitmaps for switches */
+#if SELECT_DEBUG
+	debug5("_job_test_topo: phase 1");
+#endif
+	sufficient = false;
 	for (i=0; i<switch_record_cnt; i++) {
 		switches_bitmap[i] = bit_copy(switch_record_table[i].
 					      node_bitmap);
 		bit_and(switches_bitmap[i], bitmap);
-		bit_or(avail_nodes_bitmap, switches_bitmap[i]);
-		switches_node_cnt[i] = bit_set_count(switches_bitmap[i]);
 		if (req_nodes_bitmap &&
-		    bit_overlap(req_nodes_bitmap, switches_bitmap[i])) {
-			switches_required[i] = 1;
+		    !bit_super_set(req_nodes_bitmap, switches_bitmap[i]))
+			switches_node_cnt[i] = 0;
+		else {
+			switches_node_cnt[i] = bit_set_count(switches_bitmap[i]);
+			sufficient = true;
 		}
 	}
 	bit_nclear(bitmap, 0, node_record_count - 1);
@@ -1085,130 +1118,48 @@ static int _job_test_topo(struct job_record *job_ptr, bitstr_t *bitmap,
 		char *node_names = NULL;
 		if (switches_node_cnt[i])
 			node_names = bitmap2node_name(switches_bitmap[i]);
-		debug("switch=%s nodes=%u:%s required:%u speed=%u",
+		debug("switch=%s nodes=%u:%s speed=%u",
 		      switch_record_table[i].name,
 		      switches_node_cnt[i], node_names,
-		      switches_required[i],
 		      switch_record_table[i].link_speed);
 		xfree(node_names);
 	}
 #endif
 
-	if (req_nodes_bitmap &&
-	    (!bit_super_set(req_nodes_bitmap, avail_nodes_bitmap))) {
+	/* check if requested nodes are available */
+	if (!sufficient) {
 		info("job %u requires nodes not available on any switch",
 		     job_ptr->job_id);
 		rc = EINVAL;
 		goto fini;
 	}
 
-	if (req_nodes_bitmap) {
-		/* Accumulate specific required resources, if any */
-		first = bit_ffs(req_nodes_bitmap);
-		last  = bit_fls(req_nodes_bitmap);
-		for (i=first; ((i<=last) && (first>=0)); i++) {
-			if (!bit_test(req_nodes_bitmap, i))
-				continue;
-			if (max_nodes <= 0) {
-				info("job %u requires nodes than allowed",
-				     job_ptr->job_id);
-				rc = EINVAL;
-				goto fini;
-			}
-			bit_set(bitmap, i);
-			bit_clear(avail_nodes_bitmap, i);
-			rem_nodes--;
-			max_nodes--;
-			avail_cpus = _get_avail_cpus(job_ptr, i);
-			rem_cpus   -= avail_cpus;
-			alloc_cpus += avail_cpus;
-			total_cpus += _get_total_cpus(i);
-			for (j=0; j<switch_record_cnt; j++) {
-				if (!bit_test(switches_bitmap[j], i))
-					continue;
-				bit_clear(switches_bitmap[j], i);
-				switches_node_cnt[j]--;
-			}
-		}
-		if ((rem_nodes <= 0) && (rem_cpus <= 0))
-			goto fini;
-
-		/* Accumulate additional resources from leafs that
-		 * contain required nodes */
-		for (j=0; j<switch_record_cnt; j++) {
-			if ((switch_record_table[j].level != 0) ||
-			    (switches_node_cnt[j] == 0) ||
-			    (switches_required[j] == 0)) {
-				continue;
-			}
-			while ((max_nodes > 0) &&
-			       ((rem_nodes > 0) || (rem_cpus > 0))) {
-				i = bit_ffs(switches_bitmap[j]);
-				if (i == -1)
-					break;
-				bit_clear(switches_bitmap[j], i);
-				switches_node_cnt[j]--;
-				if (bit_test(bitmap, i)) {
-					/* node on multiple leaf switches
-					 * and already selected */
-					continue;
-				}
-				bit_set(bitmap, i);
-				bit_clear(avail_nodes_bitmap, i);
-				rem_nodes--;
-				max_nodes--;
-				avail_cpus = _get_avail_cpus(job_ptr, i);
-				rem_cpus   -= avail_cpus;
-				alloc_cpus += avail_cpus;
-				total_cpus += _get_total_cpus(i);
-			}
-		}
-		if ((rem_nodes <= 0) && (rem_cpus <= 0))
-			goto fini;
-
-		/* Update bitmaps and node counts for higher-level switches */
-		for (j=0; j<switch_record_cnt; j++) {
-			if (switches_node_cnt[j] == 0)
-				continue;
-			first = bit_ffs(switches_bitmap[j]);
-			if (first < 0)
-				continue;
-			last  = bit_fls(switches_bitmap[j]);
-			for (i=first; i<=last; i++) {
-				if (!bit_test(switches_bitmap[j], i))
-					continue;
-				if (!bit_test(avail_nodes_bitmap, i)) {
-					/* cleared from lower level */
-					bit_clear(switches_bitmap[j], i);
-					switches_node_cnt[j]--;
-				} else {
-					switches_cpu_cnt[j] +=
-						_get_avail_cpus(job_ptr, i);
-				}
-			}
-		}
-	} else {
-		/* No specific required nodes, calculate CPU counts */
+	/* phase 2: accumulate all cpu resources for each switch */
+#if SELECT_DEBUG
+	debug5("_job_test_topo: phase 2");
+#endif
+	for (i = 0; i < node_record_count; i++) {
+		avail_cpus = _get_avail_cpus(job_ptr, i);
 		for (j=0; j<switch_record_cnt; j++) {
-			first = bit_ffs(switches_bitmap[j]);
-			if (first < 0)
-				continue;
-			last  = bit_fls(switches_bitmap[j]);
-			for (i=first; i<=last; i++) {
-				if (!bit_test(switches_bitmap[j], i))
-					continue;
-				switches_cpu_cnt[j] +=
-					_get_avail_cpus(job_ptr, i);
+			if (bit_test(switches_bitmap[j], i)) {
+				switches_cpu_cnt[j] += avail_cpus;
 			}
 		}
 	}
 
+	/* phase 3 */
+#if SELECT_DEBUG
+	debug5("_job_test_topo: phase 3");
+#endif
 	/* Determine lowest level switch satifying request with best fit */
 	best_fit_inx = -1;
 	for (j=0; j<switch_record_cnt; j++) {
+#if SELECT_DEBUG
+		debug5("checking switch %d: nodes %u cpus %d", j,
+		       switches_node_cnt[j], switches_cpu_cnt[j]);
+#endif
 		if ((switches_cpu_cnt[j]  < rem_cpus) ||
-		    (!_enough_nodes(switches_node_cnt[j], rem_nodes,
-				    min_nodes, req_nodes)))
+		    (switches_node_cnt[j] < min_nodes))
 			continue;
 		if ((best_fit_inx == -1) ||
 		    (switch_record_table[j].level <
@@ -1224,27 +1175,123 @@ static int _job_test_topo(struct job_record *job_ptr, bitstr_t *bitmap,
 		rc = EINVAL;
 		goto fini;
 	}
-	bit_and(avail_nodes_bitmap, switches_bitmap[best_fit_inx]);
 
+	/* phase 4: select resources from already allocated leaves */
+#if SELECT_DEBUG
+	debug5("_job_test_topo: phase 4");
+#endif
 	/* Identify usable leafs (within higher switch having best fit) */
 	for (j=0; j<switch_record_cnt; j++) {
-		if ((switch_record_table[j].level != 0) ||
+		if ((switch_record_table[j].level > 0) ||
 		    (!bit_super_set(switches_bitmap[j],
 				    switches_bitmap[best_fit_inx]))) {
 			switches_node_cnt[j] = 0;
+		} else if (req_nodes_bitmap) {
+			/* we have subnodes count zeroed yet so count them */
+			switches_node_cnt[j] = bit_set_count(switches_bitmap[j]);
+		}
+	}
+	/* set already allocated nodes and gather additional resources */
+	if (req_nodes_bitmap) {
+		/* Accumulate specific required resources, if any */
+		for (j=0; j<switch_record_cnt; j++) {
+			if (alloc_nodes > max_nodes)
+				break;
+			if (switches_node_cnt[j] == 0 ||
+			    bit_overlap(req_nodes_bitmap,
+					switches_bitmap[j]) == 0)
+				continue;
+
+			/* Use nodes from this leaf */
+			first = bit_ffs(switches_bitmap[j]);
+			if (first < 0) {
+				switches_node_cnt[j] = 0;
+				continue;
+			}
+			last  = bit_fls(switches_bitmap[j]);
+			for (i=first; i<=last; i++) {
+				if (!bit_test(switches_bitmap[j], i))
+					continue;
+				if (!bit_test(req_nodes_bitmap, i)) {
+					/* node wasn't requested */
+					continue;
+				}
+
+				bit_clear(switches_bitmap[j], i);
+				switches_node_cnt[j]--;
+				avail_cpus = _get_avail_cpus(job_ptr, i);
+				switches_cpu_cnt[j] -= avail_cpus;
+
+				if (bit_test(bitmap, i)) {
+					/* node on multiple leaf switches
+					 * and already selected */
+					continue;
+				}
+
+				switches_required[j] = 1;
+				bit_set(bitmap, i);
+				alloc_nodes++;
+				rem_cpus -= avail_cpus;
+				total_cpus += _get_total_cpus(i);
+			}
+		}
+		/* Accumulate additional resources from leafs that
+		 * contain required nodes */
+		for (j=0; j<switch_record_cnt; j++) {
+			if ((alloc_nodes > max_nodes) ||
+			    ((alloc_nodes >= want_nodes) && (rem_cpus <= 0)))
+				break;
+			if (switches_required[j] == 0)
+				continue;
+
+			/* Use nodes from this leaf */
+			first = bit_ffs(switches_bitmap[j]);
+			if (first < 0) {
+				switches_node_cnt[j] = 0;
+				continue;
+			}
+			last  = bit_fls(switches_bitmap[j]);
+			for (i=first; i<=last; i++) {
+				if (!bit_test(switches_bitmap[j], i))
+					continue;
+
+				/* there is no need here to reset anything
+				   for switch j as we disable it after cycle
+				   by setting switches_node_cnt[j] to 0 */
+				if (bit_test(bitmap, i)) {
+					/* node on multiple leaf switches
+					 * and already selected */
+					continue;
+				}
+
+				bit_set(bitmap, i);
+				alloc_nodes++;
+				rem_cpus -= _get_avail_cpus(job_ptr, i);
+				total_cpus += _get_total_cpus(i);
+				if ((alloc_nodes > max_nodes) ||
+				    ((alloc_nodes >= want_nodes) &&
+				     (rem_cpus <= 0)))
+					break;
+			}
+			switches_node_cnt[j] = 0; /* it's used up */
 		}
 	}
 
+	/* phase 5 */
+#if SELECT_DEBUG
+	debug5("_job_test_topo: phase 5");
+#endif
 	/* Select resources from these leafs on a best-fit basis */
-	while ((max_nodes > 0) && ((rem_nodes > 0) || (rem_cpus > 0))) {
+	/* Compute best-switch nodes available array */
+	while ((alloc_nodes <= max_nodes) &&
+	       ((alloc_nodes < want_nodes) || (rem_cpus > 0))) {
 		best_fit_cpus = best_fit_nodes = best_fit_sufficient = 0;
+		i = min_nodes - alloc_nodes; /* use it as a temp. int */
 		for (j=0; j<switch_record_cnt; j++) {
 			if (switches_node_cnt[j] == 0)
 				continue;
 			sufficient = (switches_cpu_cnt[j] >= rem_cpus) &&
-				_enough_nodes(switches_node_cnt[j],
-					      rem_nodes, min_nodes,
-					      req_nodes);
+				     ((int)switches_node_cnt[j] >= i);
 			/* If first possibility OR */
 			/* first set large enough for request OR */
 			/* tightest fit (less resource waste) OR */
@@ -1253,48 +1300,73 @@ static int _job_test_topo(struct job_record *job_ptr, bitstr_t *bitmap,
 			    (sufficient && (best_fit_sufficient == 0)) ||
 			    (sufficient &&
 			     (switches_cpu_cnt[j] < best_fit_cpus)) ||
-			    ((sufficient == 0) &&
+			    (!sufficient &&
 			     (switches_cpu_cnt[j] > best_fit_cpus))) {
 				best_fit_cpus =  switches_cpu_cnt[j];
 				best_fit_nodes = switches_node_cnt[j];
 				best_fit_location = j;
 				best_fit_sufficient = sufficient;
+				leaf_switch_count++;
 			}
 		}
+#if SELECT_DEBUG
+		debug5("found switch %d for allocation: nodes %d cpus %d "
+		       "allocated %u", best_fit_location, best_fit_nodes,
+		       best_fit_cpus, alloc_nodes);
+#endif
 		if (best_fit_nodes == 0)
 			break;
+
 		/* Use select nodes from this leaf */
 		first = bit_ffs(switches_bitmap[best_fit_location]);
+		if (first < 0) {
+			switches_node_cnt[best_fit_location] = 0;
+			continue;
+		}
 		last  = bit_fls(switches_bitmap[best_fit_location]);
-		for (i=first; ((i<=last) && (first>=0)); i++) {
+		for (i=first; i<=last; i++) {
 			if (!bit_test(switches_bitmap[best_fit_location], i))
 				continue;
 
-			bit_clear(switches_bitmap[best_fit_location], i);
-			switches_node_cnt[best_fit_location]--;
-			avail_cpus = _get_avail_cpus(job_ptr, i);
-			switches_cpu_cnt[best_fit_location] -= avail_cpus;
-
 			if (bit_test(bitmap, i)) {
 				/* node on multiple leaf switches
 				 * and already selected */
 				continue;
 			}
 
+			switches_required[best_fit_location] = 1;
 			bit_set(bitmap, i);
-			rem_nodes--;
-			max_nodes--;
-			rem_cpus   -= avail_cpus;
-			alloc_cpus += avail_cpus;
+			alloc_nodes++;
+			rem_cpus -= _get_avail_cpus(job_ptr, i);
 			total_cpus += _get_total_cpus(i);
-			if ((max_nodes <= 0) ||
-			    ((rem_nodes <= 0) && (rem_cpus <= 0)))
+			if ((alloc_nodes > max_nodes) ||
+			    ((alloc_nodes >= want_nodes) && (rem_cpus <= 0)))
 				break;
 		}
 		switches_node_cnt[best_fit_location] = 0;
+		if (job_ptr->req_switch > 0) {
+			if (time_waiting > job_ptr->wait4switch) {
+				job_ptr->best_switch = true;
+				debug3("Job=%u Waited %ld sec for switches use=%d",
+					job_ptr->job_id, time_waiting,
+					leaf_switch_count);
+			} else if (leaf_switch_count > job_ptr->req_switch) {
+				/* Allocation is for more than requested number
+				 * of switches */
+				job_ptr->best_switch = false;
+				debug3("Job=%u waited %ld sec for switches=%u "
+					"found=%d wait %u",
+					job_ptr->job_id, time_waiting,
+					job_ptr->req_switch,
+					leaf_switch_count,
+					job_ptr->wait4switch);
+			} else {
+				job_ptr->best_switch = true;
+			}
+		}
 	}
-	if ((rem_cpus <= 0) &&
-	    _enough_nodes(0, rem_nodes, min_nodes, req_nodes)) {
+	if ((alloc_nodes <= max_nodes) && (rem_cpus <= 0) &&
+	    (alloc_nodes >= min_nodes)) {
 		rc = SLURM_SUCCESS;
 	} else
 		rc = EINVAL;
@@ -1302,8 +1374,9 @@ static int _job_test_topo(struct job_record *job_ptr, bitstr_t *bitmap,
 fini:	if (rc == SLURM_SUCCESS) {
 		/* Job's total_cpus is needed for SELECT_MODE_WILL_RUN */
 		job_ptr->total_cpus = total_cpus;
-	}
-	FREE_NULL_BITMAP(avail_nodes_bitmap);
+	} else if (alloc_nodes > max_nodes)
+		info("job %u requires more nodes than allowed",
+		     job_ptr->job_id);
 	FREE_NULL_BITMAP(req_nodes_bitmap);
 	for (i=0; i<switch_record_cnt; i++)
 		FREE_NULL_BITMAP(switches_bitmap[i]);
@@ -1476,93 +1549,212 @@ static int _rm_job_from_nodes(struct cr_record *cr_ptr,
 	return rc;
 }
 
-/*
- * deallocate resources that were assigned to this job on one node
- */
-static int _rm_job_from_one_node(struct job_record *job_ptr,
-				 struct node_record *node_ptr, char *pre_err)
+/* Move all resources from one job to another */
+static int _job_expand(struct job_record *from_job_ptr,
+		       struct job_record *to_job_ptr)
 {
-	int i, node_inx, node_offset, rc = SLURM_SUCCESS;
-	struct part_cr_record *part_cr_ptr;
-	job_resources_t *job_resrcs_ptr;
-	uint32_t job_memory, job_memory_cpu = 0, job_memory_node = 0;
-	bool exclusive, is_job_running;
+	int i, node_cnt, rc = SLURM_SUCCESS;
+	struct node_record *node_ptr;
+	job_resources_t *from_job_resrcs_ptr, *to_job_resrcs_ptr,
+		        *new_job_resrcs_ptr;
+	bool from_node_used, to_node_used;
+	int from_node_offset, to_node_offset, new_node_offset;
 	int first_bit, last_bit;
-	uint16_t cpu_cnt;
-	List gres_list;
+	bitstr_t *tmp_bitmap, *tmp_bitmap2;
 
+	xassert(from_job_ptr);
+	xassert(to_job_ptr);
 	if (cr_ptr == NULL) {
-		error("%s: cr_ptr not initialized", pre_err);
+		error("select/linear: cr_ptr not initialized");
 		return SLURM_ERROR;
 	}
 
-	if (_test_tot_job(cr_ptr, job_ptr->job_id) == 0) {
-		info("select/linear: job %u has no resources allocated",
-		     job_ptr->job_id);
+	if (from_job_ptr->job_id == to_job_ptr->job_id) {
+		error("select/linear: attempt to merge job %u with self",
+		      from_job_ptr->job_id);
 		return SLURM_ERROR;
 	}
-
-	if (job_ptr->details &&
-	    job_ptr->details->pn_min_memory && (cr_type == CR_MEMORY)) {
-		if (job_ptr->details->pn_min_memory & MEM_PER_CPU) {
-			job_memory_cpu = job_ptr->details->pn_min_memory &
-				(~MEM_PER_CPU);
-		} else
-			job_memory_node = job_ptr->details->pn_min_memory;
-	}
-	if ((job_ptr->job_resrcs == NULL) ||
-	    (job_ptr->job_resrcs->cpus == NULL)) {
-		error("job %u lacks a job_resources struct", job_ptr->job_id);
+	if (_test_tot_job(cr_ptr, from_job_ptr->job_id) == 0) {
+		info("select/linear: job %u has no resources allocated",
+		     from_job_ptr->job_id);
 		return SLURM_ERROR;
 	}
-	job_resrcs_ptr = job_ptr->job_resrcs;
-	node_inx = node_ptr - node_record_table_ptr;
-	if (!bit_test(job_resrcs_ptr->node_bitmap, node_inx)) {
-		error("job %u allocated nodes (%s) which have been removed "
-		      "from slurm.conf",
-		      job_ptr->job_id, node_ptr->name);
+	if (_test_tot_job(cr_ptr, to_job_ptr->job_id) == 0) {
+		info("select/linear: job %u has no resources allocated",
+		     to_job_ptr->job_id);
 		return SLURM_ERROR;
 	}
-	first_bit = bit_ffs(job_resrcs_ptr->node_bitmap);
-	last_bit  = node_inx;
-	node_offset = -1;
-	for (i = first_bit; i <= node_inx; i++) {
-		if (!bit_test(job_resrcs_ptr->node_bitmap, i))
-			continue;
-		node_offset++;
+
+	from_job_resrcs_ptr = from_job_ptr->job_resrcs;
+	if ((from_job_resrcs_ptr == NULL) ||
+	    (from_job_resrcs_ptr->cpus == NULL) ||
+	    (from_job_resrcs_ptr->node_bitmap == NULL)) {
+		error("select/linear: job %u lacks a job_resources struct",
+		      from_job_ptr->job_id);
+		return SLURM_ERROR;
 	}
-	if (job_resrcs_ptr->cpus[node_offset] == 0) {
-		error("duplicate relinquish of node %s by job %u",
-		      node_ptr->name, job_ptr->job_id);
+	to_job_resrcs_ptr = to_job_ptr->job_resrcs;
+	if ((to_job_resrcs_ptr == NULL) ||
+	    (to_job_resrcs_ptr->cpus == NULL) ||
+	    (to_job_resrcs_ptr->node_bitmap == NULL)) {
+		error("select/linear: job %u lacks a job_resources struct",
+		      to_job_ptr->job_id);
 		return SLURM_ERROR;
 	}
-	job_resrcs_ptr->cpus[node_offset] = 0;
-	build_job_resources_cpu_array(job_resrcs_ptr);
 
-	is_job_running = _test_run_job(cr_ptr, job_ptr->job_id);
-	if (select_fast_schedule)
-		cpu_cnt = node_ptr->config_ptr->cpus;
-	else
-		cpu_cnt = node_ptr->cpus;
-	if (job_memory_cpu)
-		job_memory = job_memory_cpu * cpu_cnt;
-	else
-		job_memory = job_memory_node;
-	if (cr_ptr->nodes[node_inx].alloc_memory >= job_memory)
-		cr_ptr->nodes[node_inx].alloc_memory -= job_memory;
-	else {
-		cr_ptr->nodes[node_inx].alloc_memory = 0;
-		error("%s: memory underflow for node %s",
-		      pre_err, node_ptr->name);
+	(void) _rm_job_from_nodes(cr_ptr, from_job_ptr, "select_p_job_expand",
+				  true);
+	(void) _rm_job_from_nodes(cr_ptr, to_job_ptr,   "select_p_job_expand",
+				  true);
+
+	if (to_job_resrcs_ptr->core_bitmap_used) {
+		i = bit_size(to_job_resrcs_ptr->core_bitmap_used);
+		bit_nclear(to_job_resrcs_ptr->core_bitmap_used, 0, i-1);
 	}
 
-	if (cr_ptr->nodes[i].gres_list)
-		gres_list = cr_ptr->nodes[i].gres_list;
-	else
-		gres_list = node_ptr->gres_list;
-	gres_plugin_job_dealloc(job_ptr->gres_list, gres_list, node_offset,
-				job_ptr->job_id, node_ptr->name);
-	gres_plugin_node_state_log(gres_list, node_ptr->name);
+	tmp_bitmap = bit_copy(to_job_resrcs_ptr->node_bitmap);
+	if (!tmp_bitmap)
+		fatal("bit_copy: malloc failure");
+	bit_or(tmp_bitmap, from_job_resrcs_ptr->node_bitmap);
+	tmp_bitmap2 = bit_copy(to_job_ptr->node_bitmap);
+	if (!tmp_bitmap)
+		fatal("bit_copy: malloc failure");
+	bit_or(tmp_bitmap2, from_job_ptr->node_bitmap);
+	bit_and(tmp_bitmap, tmp_bitmap2);
+	bit_free(tmp_bitmap2);
+	node_cnt = bit_set_count(tmp_bitmap);
+	new_job_resrcs_ptr = _create_job_resources(node_cnt);
+	new_job_resrcs_ptr->ncpus = from_job_resrcs_ptr->ncpus +
+				    to_job_resrcs_ptr->ncpus;
+	new_job_resrcs_ptr->node_req = to_job_resrcs_ptr->node_req;
+	new_job_resrcs_ptr->node_bitmap = tmp_bitmap;
+	new_job_resrcs_ptr->nodes = bitmap2node_name(new_job_resrcs_ptr->
+						     node_bitmap);
+	build_job_resources(new_job_resrcs_ptr, node_record_table_ptr,
+			    select_fast_schedule);
+	xfree(to_job_ptr->node_addr);
+	to_job_ptr->node_addr = xmalloc(sizeof(slurm_addr_t) * node_cnt);
+	to_job_ptr->total_cpus = 0;
+
+	first_bit = MIN(bit_ffs(from_job_resrcs_ptr->node_bitmap),
+			bit_ffs(to_job_resrcs_ptr->node_bitmap));
+	last_bit  = MAX(bit_fls(from_job_resrcs_ptr->node_bitmap),
+			bit_fls(to_job_resrcs_ptr->node_bitmap));
+	from_node_offset = to_node_offset = new_node_offset = -1;
+	for (i = first_bit; i <= last_bit; i++) {
+		from_node_used = to_node_used = false;
+		if (bit_test(from_job_resrcs_ptr->node_bitmap, i)) {
+			from_node_used = bit_test(from_job_ptr->node_bitmap,i);
+			from_node_offset++;
+		}
+		if (bit_test(to_job_resrcs_ptr->node_bitmap, i)) {
+			to_node_used = bit_test(to_job_ptr->node_bitmap, i);
+			to_node_offset++;
+		}
+		if (!from_node_used && !to_node_used)
+			continue;
+		new_node_offset++;
+		node_ptr = node_record_table_ptr + i;
+		memcpy(&to_job_ptr->node_addr[new_node_offset],
+                       &node_ptr->slurm_addr, sizeof(slurm_addr_t));
+		if (from_node_used) {
+			/* Merge alloc info from both "from" and "to" jobs,
+			 * leave "from" job with no allocated CPUs or memory */
+			new_job_resrcs_ptr->cpus[new_node_offset] =
+				from_job_resrcs_ptr->cpus[from_node_offset];
+			from_job_resrcs_ptr->cpus[from_node_offset] = 0;
+			/* new_job_resrcs_ptr->cpus_used[new_node_offset] =
+				from_job_resrcs_ptr->
+				cpus_used[from_node_offset]; Should be 0 */
+			new_job_resrcs_ptr->memory_allocated[new_node_offset] =
+				from_job_resrcs_ptr->
+				memory_allocated[from_node_offset];
+			/* new_job_resrcs_ptr->memory_used[new_node_offset] =
+				from_job_resrcs_ptr->
+				memory_used[from_node_offset]; Should be 0 */
+			job_resources_bits_copy(new_job_resrcs_ptr,
+						new_node_offset,
+						from_job_resrcs_ptr,
+						from_node_offset);
+		}
+		if (to_node_used) {
+			/* Merge alloc info from both "from" and "to" jobs */
+
+			/* DO NOT double count the allocated CPUs in partition
+			 * with Shared nodes */
+			new_job_resrcs_ptr->cpus[new_node_offset] =
+				to_job_resrcs_ptr->cpus[to_node_offset];
+			new_job_resrcs_ptr->cpus_used[new_node_offset] +=
+				to_job_resrcs_ptr->cpus_used[to_node_offset];
+			new_job_resrcs_ptr->memory_allocated[new_node_offset]+=
+				to_job_resrcs_ptr->
+				memory_allocated[to_node_offset];
+			new_job_resrcs_ptr->memory_used[new_node_offset] +=
+				to_job_resrcs_ptr->memory_used[to_node_offset];
+			job_resources_bits_copy(new_job_resrcs_ptr,
+						new_node_offset,
+						to_job_resrcs_ptr,
+						to_node_offset);
+		}
+
+		to_job_ptr->total_cpus += new_job_resrcs_ptr->
+					  cpus[new_node_offset];
+	}
+	build_job_resources_cpu_array(new_job_resrcs_ptr);
+	gres_plugin_job_merge(from_job_ptr->gres_list,
+			      from_job_resrcs_ptr->node_bitmap,
+			      to_job_ptr->gres_list,
+			      to_job_resrcs_ptr->node_bitmap);
+
+	/* Now swap data: "new" -> "to" and clear "from" */
+	free_job_resources(&to_job_ptr->job_resrcs);
+	to_job_ptr->job_resrcs = new_job_resrcs_ptr;
+
+	to_job_ptr->cpu_cnt = to_job_ptr->total_cpus;
+	if (to_job_ptr->details) {
+		to_job_ptr->details->min_cpus = to_job_ptr->total_cpus;
+		to_job_ptr->details->max_cpus = to_job_ptr->total_cpus;
+	}
+	from_job_ptr->total_cpus   = 0;
+	from_job_resrcs_ptr->ncpus = 0;
+	if (from_job_ptr->details) {
+		from_job_ptr->details->min_cpus = 0;
+		from_job_ptr->details->max_cpus = 0;
+	}
+
+	from_job_ptr->total_nodes   = 0;
+	from_job_resrcs_ptr->nhosts = 0;
+	from_job_ptr->node_cnt      = 0;
+	if (from_job_ptr->details)
+		from_job_ptr->details->min_nodes = 0;
+	to_job_ptr->total_nodes     = new_job_resrcs_ptr->nhosts;
+	to_job_ptr->node_cnt        = new_job_resrcs_ptr->nhosts;
+
+	bit_or(to_job_ptr->node_bitmap, from_job_ptr->node_bitmap);
+	bit_nclear(from_job_ptr->node_bitmap, 0, (node_record_count - 1));
+	bit_nclear(from_job_resrcs_ptr->node_bitmap, 0,
+		  (node_record_count - 1));
+
+	xfree(to_job_ptr->nodes);
+	to_job_ptr->nodes = xstrdup(new_job_resrcs_ptr->nodes);
+	xfree(from_job_ptr->nodes);
+	from_job_ptr->nodes = xstrdup("");
+	xfree(from_job_resrcs_ptr->nodes);
+	from_job_resrcs_ptr->nodes = xstrdup("");
+
+	_add_job_to_nodes(cr_ptr, to_job_ptr, "select_p_job_expand", 1);
+
+	return rc;
+}
+
+/* Decrement a partitions running and total job counts as needed to enforce the
+ * limit of jobs per node per partition (the partition's Shared=# parameter) */
+static int _decr_node_job_cnt(int node_inx, struct job_record *job_ptr,
+			      char *pre_err)
+{
+	struct node_record *node_ptr = node_record_table_ptr + node_inx;
+	struct part_cr_record *part_cr_ptr;
+	bool exclusive, is_job_running;
 
 	exclusive = (job_ptr->details->shared == 0);
 	if (exclusive) {
@@ -1573,6 +1765,8 @@ static int _rm_job_from_one_node(struct job_record *job_ptr,
 			      pre_err, node_ptr->name);
 		}
 	}
+
+	is_job_running = _test_run_job(cr_ptr, job_ptr->job_id);
 	part_cr_ptr = cr_ptr->nodes[node_inx].parts;
 	while (part_cr_ptr) {
 		if (part_cr_ptr->part_ptr != job_ptr->part_ptr) {
@@ -1599,20 +1793,104 @@ static int _rm_job_from_one_node(struct job_record *job_ptr,
 			error("%s: run_job_cnt out of sync for node %s",
 			      pre_err, node_ptr->name);
 		}
-		break;
+		return SLURM_SUCCESS;
 	}
-	if (part_cr_ptr == NULL) {
-		if (job_ptr->part_ptr) {
-			error("%s: Could not find partition %s for node %s",
-			      pre_err, job_ptr->part_ptr->name, node_ptr->name);
-		} else {
-			error("%s: no partition ptr given for job %u and node %s",
-			      pre_err, job_ptr->job_id, node_ptr->name);
-		}
-		rc = SLURM_ERROR;
+
+	if (job_ptr->part_ptr) {
+		error("%s: Could not find partition %s for node %s",
+		      pre_err, job_ptr->part_ptr->name, node_ptr->name);
+	} else {
+		error("%s: no partition ptr given for job %u and node %s",
+		      pre_err, job_ptr->job_id, node_ptr->name);
 	}
+	return SLURM_ERROR;
+}
 
-	return rc;
+/*
+ * deallocate resources that were assigned to this job on one node
+ */
+static int _rm_job_from_one_node(struct job_record *job_ptr,
+				 struct node_record *node_ptr, char *pre_err)
+{
+	int i, node_inx, node_offset;
+	job_resources_t *job_resrcs_ptr;
+	uint32_t job_memory, job_memory_cpu = 0, job_memory_node = 0;
+	int first_bit;
+	uint16_t cpu_cnt;
+	List gres_list;
+
+	if (cr_ptr == NULL) {
+		error("%s: cr_ptr not initialized", pre_err);
+		return SLURM_ERROR;
+	}
+
+	if (_test_tot_job(cr_ptr, job_ptr->job_id) == 0) {
+		info("select/linear: job %u has no resources allocated",
+		     job_ptr->job_id);
+		return SLURM_ERROR;
+	}
+
+	if (job_ptr->details &&
+	    job_ptr->details->pn_min_memory && (cr_type == CR_MEMORY)) {
+		if (job_ptr->details->pn_min_memory & MEM_PER_CPU) {
+			job_memory_cpu = job_ptr->details->pn_min_memory &
+				(~MEM_PER_CPU);
+		} else
+			job_memory_node = job_ptr->details->pn_min_memory;
+	}
+	if ((job_ptr->job_resrcs == NULL) ||
+	    (job_ptr->job_resrcs->cpus == NULL)) {
+		error("job %u lacks a job_resources struct", job_ptr->job_id);
+		return SLURM_ERROR;
+	}
+	job_resrcs_ptr = job_ptr->job_resrcs;
+	node_inx = node_ptr - node_record_table_ptr;
+	if (!bit_test(job_resrcs_ptr->node_bitmap, node_inx)) {
+		error("job %u allocated nodes (%s) which have been removed "
+		      "from slurm.conf",
+		      job_ptr->job_id, node_ptr->name);
+		return SLURM_ERROR;
+	}
+	first_bit = bit_ffs(job_resrcs_ptr->node_bitmap);
+	node_offset = -1;
+	for (i = first_bit; i <= node_inx; i++) {
+		if (!bit_test(job_resrcs_ptr->node_bitmap, i))
+			continue;
+		node_offset++;
+	}
+	if (job_resrcs_ptr->cpus[node_offset] == 0) {
+		error("duplicate relinquish of node %s by job %u",
+		      node_ptr->name, job_ptr->job_id);
+		return SLURM_ERROR;
+	}
+	job_resrcs_ptr->cpus[node_offset] = 0;
+	build_job_resources_cpu_array(job_resrcs_ptr);
+
+	if (select_fast_schedule)
+		cpu_cnt = node_ptr->config_ptr->cpus;
+	else
+		cpu_cnt = node_ptr->cpus;
+	if (job_memory_cpu)
+		job_memory = job_memory_cpu * cpu_cnt;
+	else
+		job_memory = job_memory_node;
+	if (cr_ptr->nodes[node_inx].alloc_memory >= job_memory)
+		cr_ptr->nodes[node_inx].alloc_memory -= job_memory;
+	else {
+		cr_ptr->nodes[node_inx].alloc_memory = 0;
+		error("%s: memory underflow for node %s",
+		      pre_err, node_ptr->name);
+	}
+
+	if (cr_ptr->nodes[i].gres_list)
+		gres_list = cr_ptr->nodes[i].gres_list;
+	else
+		gres_list = node_ptr->gres_list;
+	gres_plugin_job_dealloc(job_ptr->gres_list, gres_list, node_offset,
+				job_ptr->job_id, node_ptr->name);
+	gres_plugin_node_state_log(gres_list, node_ptr->name);
+
+	return _decr_node_job_cnt(node_inx, job_ptr, pre_err);
 }
 
 /*
@@ -1863,6 +2141,8 @@ static void _init_node_cr(void)
 
 	/* build partition records */
 	part_iterator = list_iterator_create(part_list);
+	if (part_iterator == NULL)
+		fatal("list_iterator_create: malloc failure");
 	while ((part_ptr = (struct part_record *) list_next(part_iterator))) {
 		for (i = 0; i < select_node_cnt; i++) {
 			if (part_ptr->node_bitmap == NULL)
@@ -2033,6 +2313,21 @@ static int _test_only(struct job_record *job_ptr, bitstr_t *bitmap,
 	return rc;
 }
 
+/*
+ * Sort the usable_node element to put jobs in the correct
+ * preemption order.
+ */
+static int _sort_usable_nodes_dec(struct job_record *job_a,
+				  struct job_record *job_b)
+{
+	if (job_a->details->usable_nodes > job_b->details->usable_nodes)
+		return -1;
+	else if (job_a->details->usable_nodes < job_b->details->usable_nodes)
+		return 1;
+
+	return 0;
+}
+
 /* Allocate resources for a job now, if possible */
 static int _run_now(struct job_record *job_ptr, bitstr_t *bitmap,
 		    uint32_t min_nodes, uint32_t max_nodes,
@@ -2046,6 +2341,7 @@ static int _run_now(struct job_record *job_ptr, bitstr_t *bitmap,
 	struct job_record *tmp_job_ptr;
 	ListIterator job_iterator, preemptee_iterator;
 	struct cr_record *exp_cr;
+	uint16_t pass_count = 0;
 
 	orig_map = bit_copy(bitmap);
 	if (!orig_map)
@@ -2089,39 +2385,59 @@ static int _run_now(struct job_record *job_ptr, bitstr_t *bitmap,
 		}
 	}
 
-	if ((rc != SLURM_SUCCESS) && preemptee_candidates &&
+top:	if ((rc != SLURM_SUCCESS) && preemptee_candidates &&
 	    (exp_cr = _dup_cr(cr_ptr))) {
 		/* Remove all preemptable jobs from simulated environment */
-		job_iterator = list_iterator_create(job_list);
+		job_iterator = list_iterator_create(preemptee_candidates);
+		if (job_iterator == NULL)
+			fatal ("memory allocation failure in linear");
 		while ((tmp_job_ptr = (struct job_record *)
 			list_next(job_iterator))) {
+			bool remove_all = false;
+			uint16_t mode;
+
 			if (!IS_JOB_RUNNING(tmp_job_ptr) &&
 			    !IS_JOB_SUSPENDED(tmp_job_ptr))
 				continue;
-			if (_is_preemptable(tmp_job_ptr,
-					    preemptee_candidates)) {
-				bool remove_all = false;
-				uint16_t mode;
-				mode = slurm_job_preempt_mode(tmp_job_ptr);
-				if ((mode == PREEMPT_MODE_REQUEUE)    ||
-				    (mode == PREEMPT_MODE_CHECKPOINT) ||
-				    (mode == PREEMPT_MODE_CANCEL))
-					remove_all = true;
-				/* Remove preemptable job now */
-				_rm_job_from_nodes(exp_cr, tmp_job_ptr,
-						   "_run_now",
-						   remove_all);
-				j = _job_count_bitmap(exp_cr, job_ptr,
-						      orig_map, bitmap,
-						      (max_share - 1),
-						      NO_SHARE_LIMIT,
-						      SELECT_MODE_RUN_NOW);
-				if (j < min_nodes)
-					continue;
-				rc = _job_test(job_ptr, bitmap, min_nodes,
-					       max_nodes, req_nodes);
-				if (rc == SLURM_SUCCESS)
+			mode = slurm_job_preempt_mode(tmp_job_ptr);
+			if ((mode == PREEMPT_MODE_REQUEUE)    ||
+			    (mode == PREEMPT_MODE_CHECKPOINT) ||
+			    (mode == PREEMPT_MODE_CANCEL))
+				remove_all = true;
+			/* Remove preemptable job now */
+			_rm_job_from_nodes(exp_cr, tmp_job_ptr,
+					   "_run_now",
+					   remove_all);
+			j = _job_count_bitmap(exp_cr, job_ptr,
+					      orig_map, bitmap,
+					      (max_share - 1),
+					      NO_SHARE_LIMIT,
+					      SELECT_MODE_RUN_NOW);
+			tmp_job_ptr->details->usable_nodes =
+				bit_overlap(bitmap, tmp_job_ptr->node_bitmap);
+			if (j < min_nodes)
+				continue;
+			rc = _job_test(job_ptr, bitmap, min_nodes,
+				       max_nodes, req_nodes);
+			/*
+			 * If successful, set the last job's usable count to a
+			 * large value so that it will be first after sorting.
+			 * Note: usable_count is only used for sorting purposes
+			 */
+			if (rc == SLURM_SUCCESS) {
+				if (pass_count++ ||
+				    (list_count(preemptee_candidates) == 1))
 					break;
+				tmp_job_ptr->details->usable_nodes = 9999;
+				while ((tmp_job_ptr = (struct job_record *)
+					list_next(job_iterator))) {
+					tmp_job_ptr->details->usable_nodes = 0;
+				}
+				list_sort(preemptee_candidates,
+					  (ListCmpF)_sort_usable_nodes_dec);
+				rc = EINVAL;
+				list_iterator_destroy(job_iterator);
+				goto top;
 			}
 		}
 		list_iterator_destroy(job_iterator);
@@ -2142,6 +2458,8 @@ static int _run_now(struct job_record *job_ptr, bitstr_t *bitmap,
 				if (bit_overlap(bitmap,
 						tmp_job_ptr->node_bitmap) == 0)
 					continue;
+				if (tmp_job_ptr->details->usable_nodes == 0)
+					continue;
 				list_append(*preemptee_job_list,
 					    tmp_job_ptr);
 			}
@@ -2347,11 +2665,20 @@ extern int select_p_state_restore(char *dir_name)
 	return SLURM_SUCCESS;
 }
 
-extern int select_p_job_init(List job_list)
+/*
+ * Note the initialization of job records, issued upon restart of
+ * slurmctld and used to synchronize any job state.
+ */
+extern int select_p_job_init(List job_list_arg)
 {
 	return SLURM_SUCCESS;
 }
 
+extern bool select_p_node_ranking(struct node_record *node_ptr, int node_cnt)
+{
+	return false;
+}
+
 extern int select_p_node_init(struct node_record *node_ptr, int node_cnt)
 {
 	if (node_ptr == NULL) {
@@ -2379,7 +2706,7 @@ extern int select_p_node_init(struct node_record *node_ptr, int node_cnt)
 	return SLURM_SUCCESS;
 }
 
-extern int select_p_block_init(List part_list)
+extern int select_p_block_init(List block_list)
 {
 	return SLURM_SUCCESS;
 }
@@ -2451,6 +2778,8 @@ extern int select_p_job_test(struct job_record *job_ptr, bitstr_t *bitmap,
 		rc = _will_run_test(job_ptr, bitmap, min_nodes, max_nodes,
 				    max_share, req_nodes,
 				    preemptee_candidates, preemptee_job_list);
+		if (!job_ptr->best_switch)
+			rc = SLURM_ERROR;
 	} else if (mode == SELECT_MODE_TEST_ONLY) {
 		rc = _test_only(job_ptr, bitmap, min_nodes, max_nodes,
 				req_nodes, max_share);
@@ -2458,6 +2787,8 @@ extern int select_p_job_test(struct job_record *job_ptr, bitstr_t *bitmap,
 		rc = _run_now(job_ptr, bitmap, min_nodes, max_nodes,
 			      max_share, req_nodes,
 			      preemptee_candidates, preemptee_job_list);
+		if (!job_ptr->best_switch)
+			rc = SLURM_ERROR;
 	} else
 		fatal("select_p_job_test: Mode %d is invalid", mode);
 
@@ -2466,6 +2797,11 @@ extern int select_p_job_test(struct job_record *job_ptr, bitstr_t *bitmap,
 	return rc;
 }
 
+/*
+ * Note initiation of job is about to begin. Called immediately
+ * after select_p_job_test(). Executed from slurmctld.
+ * IN job_ptr - pointer to job being initiated
+ */
 extern int select_p_job_begin(struct job_record *job_ptr)
 {
 	int rc = SLURM_SUCCESS;
@@ -2495,13 +2831,18 @@ extern int select_p_job_begin(struct job_record *job_ptr)
 	slurm_mutex_lock(&cr_mutex);
 	if (cr_ptr == NULL)
 		_init_node_cr();
-	_add_job_to_nodes(cr_ptr, job_ptr, "select_p_job_begin", 1);
+	if (rc == SLURM_SUCCESS)
+		rc = _add_job_to_nodes(cr_ptr, job_ptr, "select_p_job_begin", 1);
 	gres_plugin_job_state_log(job_ptr->gres_list, job_ptr->job_id);
 	slurm_mutex_unlock(&cr_mutex);
 	return rc;
 }
 
-/* Determine if allocated nodes are usable (powered up) */
+/*
+ * Determine if allocated nodes are usable (powered up)
+ * IN job_ptr - pointer to job being tested
+ * RET -1 on error, 1 if ready to execute, 0 otherwise
+ */
 extern int select_p_job_ready(struct job_record *job_ptr)
 {
 	int i, i_first, i_last;
@@ -2528,6 +2869,30 @@ extern int select_p_job_ready(struct job_record *job_ptr)
 	return READY_NODE_STATE;
 }
 
+
+extern bool select_p_job_expand_allow(void)
+{
+	return true;
+}
+
+extern int select_p_job_expand(struct job_record *from_job_ptr,
+			       struct job_record *to_job_ptr)
+{
+	int rc;
+
+	slurm_mutex_lock(&cr_mutex);
+	if (cr_ptr == NULL)
+		_init_node_cr();
+	rc = _job_expand(from_job_ptr, to_job_ptr);
+	slurm_mutex_unlock(&cr_mutex);
+	return rc;
+}
+
+/*
+ * Modify internal data structures for a job that has changed size
+ *      Only support jobs shrinking now.
+ * RET: 0 or an error code
+ */
 extern int select_p_job_resized(struct job_record *job_ptr,
 				struct node_record *node_ptr)
 {
@@ -2555,6 +2920,15 @@ extern int select_p_job_resized(struct job_record *job_ptr,
 	return rc;
 }
 
+extern int select_p_job_signal(struct job_record *job_ptr, int signal)
+{
+	return SLURM_SUCCESS;
+}
+
+/*
+ * Note termination of job is starting. Executed from slurmctld.
+ * IN job_ptr - pointer to job being terminated
+ */
 extern int select_p_job_fini(struct job_record *job_ptr)
 {
 	int rc = SLURM_SUCCESS;
@@ -2578,28 +2952,66 @@ extern int select_p_job_fini(struct job_record *job_ptr)
 	slurm_mutex_lock(&cr_mutex);
 	if (cr_ptr == NULL)
 		_init_node_cr();
-	_rm_job_from_nodes(cr_ptr, job_ptr, "select_p_job_fini", true);
+	if (_rm_job_from_nodes(cr_ptr, job_ptr, "select_p_job_fini", true) !=
+	    SLURM_SUCCESS)
+		rc = SLURM_ERROR;
 	slurm_mutex_unlock(&cr_mutex);
 	return rc;
 }
 
-extern int select_p_job_suspend(struct job_record *job_ptr)
+/*
+ * Suspend a job. Executed from slurmctld.
+ * IN job_ptr - pointer to job being suspended
+ * IN indf_susp - set if job is being suspended indefinitely by user
+ *                or admin, otherwise suspended for gang scheduling
+ * RET SLURM_SUCCESS or error code
+ */
+extern int select_p_job_suspend(struct job_record *job_ptr, bool indf_susp)
 {
+	int rc;
+
+	if (!indf_susp)
+		return SLURM_SUCCESS;
+
 	slurm_mutex_lock(&cr_mutex);
 	if (cr_ptr == NULL)
 		_init_node_cr();
-	_rm_job_from_nodes(cr_ptr, job_ptr, "select_p_job_suspend", false);
+	rc = _rm_job_from_nodes(cr_ptr, job_ptr, "select_p_job_suspend", false);
 	slurm_mutex_unlock(&cr_mutex);
-	return SLURM_SUCCESS;
+	return rc;
 }
 
-extern int select_p_job_resume(struct job_record *job_ptr)
+/*
+ * Resume a job. Executed from slurmctld.
+ * IN job_ptr - pointer to job being resumed
+ * IN indf_susp - set if job is being resumed from indefinite suspend by user
+ *                or admin, otherwise resume from gang scheduling
+ * RET SLURM_SUCCESS or error code
+ */
+extern int select_p_job_resume(struct job_record *job_ptr, bool indf_susp)
 {
+	int rc;
+
+	if (!indf_susp)
+		return SLURM_SUCCESS;
+
 	slurm_mutex_lock(&cr_mutex);
 	if (cr_ptr == NULL)
 		_init_node_cr();
-	_add_job_to_nodes(cr_ptr, job_ptr, "select_p_job_resume", 0);
+	rc = _add_job_to_nodes(cr_ptr, job_ptr, "select_p_job_resume", 0);
 	slurm_mutex_unlock(&cr_mutex);
+	return rc;
+}
+
+extern bitstr_t *select_p_step_pick_nodes(struct job_record *job_ptr,
+					  select_jobinfo_t *jobinfo,
+					  uint32_t node_count)
+{
+	return NULL;
+}
+
+extern int select_p_step_finish(struct step_record *step_ptr)
+{
 	return SLURM_SUCCESS;
 }
 
@@ -2626,7 +3038,7 @@ extern int select_p_select_nodeinfo_unpack(select_nodeinfo_t **nodeinfo,
 {
 	select_nodeinfo_t *nodeinfo_ptr = NULL;
 
-	nodeinfo_ptr = select_p_select_nodeinfo_alloc(NO_VAL);
+	nodeinfo_ptr = select_p_select_nodeinfo_alloc();
 	*nodeinfo = nodeinfo_ptr;
 
 	safe_unpack16(&nodeinfo_ptr->alloc_cpus, buffer);
@@ -2641,7 +3053,7 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
-extern select_nodeinfo_t *select_p_select_nodeinfo_alloc(uint32_t size)
+extern select_nodeinfo_t *select_p_select_nodeinfo_alloc(void)
 {
 	select_nodeinfo_t *nodeinfo = xmalloc(sizeof(struct select_nodeinfo));
 
@@ -2763,11 +3175,22 @@ extern int select_p_select_nodeinfo_get(select_nodeinfo_t *nodeinfo,
 	return rc;
 }
 
+/*
+ * allocate storage for a select job credential
+ * RET        - storage for a select job credential
+ * NOTE: storage must be freed using select_p_select_jobinfo_free
+ */
 extern select_jobinfo_t *select_p_select_jobinfo_alloc(void)
 {
-	return SLURM_SUCCESS;
+	return NULL;
 }
 
+/*
+ * fill in a previously allocated select job credential
+ * IN/OUT jobinfo  - updated select job credential
+ * IN data_type - type of data to enter into job credential
+ * IN data - the data to enter into job credential
+ */
 extern int select_p_select_jobinfo_set(select_jobinfo_t *jobinfo,
 				       enum select_jobdata_type data_type,
 				       void *data)
@@ -2775,6 +3198,13 @@ extern int select_p_select_jobinfo_set(select_jobinfo_t *jobinfo,
 	return SLURM_SUCCESS;
 }
 
+/*
+ * get data from a select job credential
+ * IN jobinfo  - updated select job credential
+ * IN data_type - type of data to enter into job credential
+ * OUT data - the data to get from job credential, caller must xfree
+ *      data for data_type == SELECT_JOBDATA_PART_ID
+ */
 extern int select_p_select_jobinfo_get (select_jobinfo_t *jobinfo,
 					enum select_jobdata_type data_type,
 					void *data)
@@ -2782,23 +3212,49 @@ extern int select_p_select_jobinfo_get (select_jobinfo_t *jobinfo,
 	return SLURM_ERROR;
 }
 
+/*
+ * copy a select job credential
+ * IN jobinfo - the select job credential to be copied
+ * RET        - the copy or NULL on failure
+ * NOTE: returned value must be freed using select_p_select_jobinfo_free
+ */
 extern select_jobinfo_t *select_p_select_jobinfo_copy(
 	select_jobinfo_t *jobinfo)
 {
 	return NULL;
 }
 
+/*
+ * free storage previously allocated for a select job credential
+ * IN jobinfo  - the select job credential to be freed
+ * RET         - slurm error code
+ */
 extern int select_p_select_jobinfo_free  (select_jobinfo_t *jobinfo)
 {
 	return SLURM_SUCCESS;
 }
 
+/*
+ * pack a select job credential into a buffer in machine independent form
+ * IN jobinfo  - the select job credential to be saved
+ * OUT buffer  - buffer with select credential appended
+ * IN protocol_version - slurm protocol version of client
+ * RET         - slurm error code
+ */
 extern int  select_p_select_jobinfo_pack(select_jobinfo_t *jobinfo, Buf buffer,
 					 uint16_t protocol_version)
 {
 	return SLURM_SUCCESS;
 }
 
+/*
+ * unpack a select job credential from a buffer
+ * OUT jobinfo - the select job credential read
+ * IN  buffer  - buffer with select credential read from current pointer loc
+ * IN protocol_version - slurm protocol version of client
+ * RET         - slurm error code
+ * NOTE: returned value must be freed using select_p_select_jobinfo_free
+ */
 extern int  select_p_select_jobinfo_unpack(select_jobinfo_t **jobinfo,
 					   Buf buffer,
 					   uint16_t protocol_version)
@@ -2822,17 +3278,17 @@ extern char *select_p_select_jobinfo_xstrdup(select_jobinfo_t *jobinfo,
 	return NULL;
 }
 
-extern int select_p_update_block (update_part_msg_t *part_desc_ptr)
+extern int select_p_update_block (update_block_msg_t *block_desc_ptr)
 {
 	return SLURM_SUCCESS;
 }
 
-extern int select_p_update_sub_node (update_part_msg_t *part_desc_ptr)
+extern int select_p_update_sub_node (update_block_msg_t *block_desc_ptr)
 {
 	return SLURM_SUCCESS;
 }
 
-extern int select_p_get_info_from_plugin (enum select_jobdata_type info,
+extern int select_p_get_info_from_plugin (enum select_plugindata_info dinfo,
 					  struct job_record *job_ptr,
 					  void *data)
 {
@@ -2844,7 +3300,7 @@ extern int select_p_update_node_config (int index)
 	return SLURM_SUCCESS;
 }
 
-extern int select_p_update_node_state (int index, uint16_t state)
+extern int select_p_update_node_state (struct node_record *node_ptr)
 {
 	return SLURM_SUCCESS;
 }
@@ -2864,3 +3320,166 @@ extern int select_p_reconfigure(void)
 
 	return SLURM_SUCCESS;
 }
+
+/*
+ * select_p_resv_test - Identify the nodes which "best" satisfy a reservation
+ *	request. "best" is defined as either single set of consecutive nodes
+ *	satisfying the request and leaving the minimum number of unused nodes
+ *	OR the fewest number of consecutive node sets
+ * IN avail_bitmap - nodes available for the reservation
+ * IN node_cnt - count of required nodes
+ * RET - nodes selected for use by the reservation
+ */
+extern bitstr_t * select_p_resv_test(bitstr_t *avail_bitmap, uint32_t node_cnt)
+{
+	bitstr_t **switches_bitmap;		/* nodes on this switch */
+	int       *switches_cpu_cnt;		/* total CPUs on switch */
+	int       *switches_node_cnt;		/* total nodes on switch */
+	int       *switches_required;		/* set if has required node */
+
+	bitstr_t  *avail_nodes_bitmap = NULL;	/* nodes on any switch */
+	int rem_nodes;			/* remaining resources desired */
+	int i, j;
+	int best_fit_inx, first, last;
+	int best_fit_nodes;
+	int best_fit_location = 0, best_fit_sufficient;
+	bool sufficient;
+
+	xassert(avail_bitmap);
+	if (!switch_record_cnt || !switch_record_table)
+		return bit_pick_cnt(avail_bitmap, node_cnt);
+
+	/* Use topology state information */
+	if (bit_set_count(avail_bitmap) < node_cnt)
+		return avail_nodes_bitmap;
+	rem_nodes = node_cnt;
+
+	/* Construct a set of switch array entries,
+	 * use the same indexes as switch_record_table in slurmctld */
+	switches_bitmap   = xmalloc(sizeof(bitstr_t *) * switch_record_cnt);
+	switches_cpu_cnt  = xmalloc(sizeof(int)        * switch_record_cnt);
+	switches_node_cnt = xmalloc(sizeof(int)        * switch_record_cnt);
+	switches_required = xmalloc(sizeof(int)        * switch_record_cnt);
+	for (i=0; i<switch_record_cnt; i++) {
+		switches_bitmap[i] = bit_copy(switch_record_table[i].
+					      node_bitmap);
+		bit_and(switches_bitmap[i], avail_bitmap);
+		switches_node_cnt[i] = bit_set_count(switches_bitmap[i]);
+	}
+
+#if SELECT_DEBUG
+	/* Don't compile this, it slows things down too much */
+	for (i=0; i<switch_record_cnt; i++) {
+		char *node_names = NULL;
+		if (switches_node_cnt[i])
+			node_names = bitmap2node_name(switches_bitmap[i]);
+		debug("switch=%s nodes=%u:%s required:%u speed=%u",
+		      switch_record_table[i].name,
+		      switches_node_cnt[i], node_names,
+		      switches_required[i],
+		      switch_record_table[i].link_speed);
+		xfree(node_names);
+	}
+#endif
+
+	/* Determine lowest level switch satifying request with best fit */
+	best_fit_inx = -1;
+	for (j=0; j<switch_record_cnt; j++) {
+		if (switches_node_cnt[j] < rem_nodes)
+			continue;
+		if ((best_fit_inx == -1) ||
+		    (switch_record_table[j].level <
+		     switch_record_table[best_fit_inx].level) ||
+		    ((switch_record_table[j].level ==
+		      switch_record_table[best_fit_inx].level) &&
+		     (switches_node_cnt[j] < switches_node_cnt[best_fit_inx])))
+			best_fit_inx = j;
+	}
+	if (best_fit_inx == -1) {
+		debug("select_p_resv_test: could not find resources for "
+		      "reservation");
+		goto fini;
+	}
+
+	/* Identify usable leafs (within higher switch having best fit) */
+	for (j=0; j<switch_record_cnt; j++) {
+		if ((switch_record_table[j].level != 0) ||
+		    (!bit_super_set(switches_bitmap[j],
+				    switches_bitmap[best_fit_inx]))) {
+			switches_node_cnt[j] = 0;
+		}
+	}
+
+	/* Select resources from these leafs on a best-fit basis */
+	avail_nodes_bitmap = bit_alloc(node_record_count);
+	while (rem_nodes > 0) {
+		best_fit_nodes = best_fit_sufficient = 0;
+		for (j=0; j<switch_record_cnt; j++) {
+			if (switches_node_cnt[j] == 0)
+				continue;
+			sufficient = (switches_node_cnt[j] >= rem_nodes);
+			/* If first possibility OR */
+			/* first set large enough for request OR */
+			/* tightest fit (less resource waste) OR */
+			/* nothing yet large enough, but this is biggest */
+			if ((best_fit_nodes == 0) ||
+			    (sufficient && (best_fit_sufficient == 0)) ||
+			    (sufficient &&
+			     (switches_node_cnt[j] < best_fit_nodes)) ||
+			    ((sufficient == 0) &&
+			     (switches_node_cnt[j] > best_fit_nodes))) {
+				best_fit_nodes = switches_node_cnt[j];
+				best_fit_location = j;
+				best_fit_sufficient = sufficient;
+			}
+		}
+		if (best_fit_nodes == 0)
+			break;
+		/* Use select nodes from this leaf */
+		first = bit_ffs(switches_bitmap[best_fit_location]);
+		last  = bit_fls(switches_bitmap[best_fit_location]);
+		for (i=first; ((i<=last) && (first>=0)); i++) {
+			if (!bit_test(switches_bitmap[best_fit_location], i))
+				continue;
+
+			bit_clear(switches_bitmap[best_fit_location], i);
+			switches_node_cnt[best_fit_location]--;
+
+			if (bit_test(avail_nodes_bitmap, i)) {
+				/* node on multiple leaf switches
+				 * and already selected */
+				continue;
+			}
+
+			bit_set(avail_nodes_bitmap, i);
+			if (--rem_nodes <= 0)
+				break;
+		}
+		switches_node_cnt[best_fit_location] = 0;
+	}
+	if (rem_nodes > 0)	/* insufficient resources */
+		FREE_NULL_BITMAP(avail_nodes_bitmap);
+
+fini:	for (i=0; i<switch_record_cnt; i++)
+		FREE_NULL_BITMAP(switches_bitmap[i]);
+	xfree(switches_bitmap);
+	xfree(switches_cpu_cnt);
+	xfree(switches_node_cnt);
+	xfree(switches_required);
+
+	return avail_nodes_bitmap;
+}
+
+extern void select_p_ba_init(void)
+{
+	return;
+}
+extern void select_p_ba_fini(void)
+{
+	return;
+}
+
+extern int *select_p_ba_get_dims(void)
+{
+	return NULL;
+}
diff --git a/src/plugins/select/linear/select_linear.h b/src/plugins/select/linear/select_linear.h
index 956b56261..db146f19c 100644
--- a/src/plugins/select/linear/select_linear.h
+++ b/src/plugins/select/linear/select_linear.h
@@ -6,7 +6,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/switch/Makefile.in b/src/plugins/switch/Makefile.in
index cc3dd2a13..71d3d0ba5 100644
--- a/src/plugins/switch/Makefile.in
+++ b/src/plugins/switch/Makefile.in
@@ -62,6 +62,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -72,6 +73,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -133,7 +135,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -170,6 +175,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -227,6 +233,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -262,6 +269,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/switch/elan/Makefile.in b/src/plugins/switch/elan/Makefile.in
index 5c1403650..6bc8e38d7 100644
--- a/src/plugins/switch/elan/Makefile.in
+++ b/src/plugins/switch/elan/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -144,7 +146,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -181,6 +186,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -238,6 +244,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -273,6 +280,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/switch/elan/qsw.c b/src/plugins/switch/elan/qsw.c
index 0ae92b8e5..6516f2719 100644
--- a/src/plugins/switch/elan/qsw.c
+++ b/src/plugins/switch/elan/qsw.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -104,7 +104,7 @@
 #include <rms/rmscall.h>
 #include <elanhosts.h>
 
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm_errno.h"
 
 #include "src/common/slurm_xlator.h"
 
diff --git a/src/plugins/switch/elan/qsw.h b/src/plugins/switch/elan/qsw.h
index c0eb3b558..d488ec067 100644
--- a/src/plugins/switch/elan/qsw.h
+++ b/src/plugins/switch/elan/qsw.h
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/switch/elan/switch_elan.c b/src/plugins/switch/elan/switch_elan.c
index 7701ab983..8e1415aa4 100644
--- a/src/plugins/switch/elan/switch_elan.c
+++ b/src/plugins/switch/elan/switch_elan.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -53,7 +53,7 @@
 #include <unistd.h>
 #include <dlfcn.h>
 
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm_errno.h"
 
 #include "src/common/slurm_xlator.h"
 
diff --git a/src/plugins/switch/federation/Makefile.in b/src/plugins/switch/federation/Makefile.in
index 2557267c1..1946b68d9 100644
--- a/src/plugins/switch/federation/Makefile.in
+++ b/src/plugins/switch/federation/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -147,7 +149,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -184,6 +189,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -241,6 +247,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -276,6 +283,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/switch/federation/federation.c b/src/plugins/switch/federation/federation.c
index c58c6cf8a..749b4469d 100644
--- a/src/plugins/switch/federation/federation.c
+++ b/src/plugins/switch/federation/federation.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -52,7 +52,8 @@
 #endif
 
 #include <sys/stat.h>
-#include <slurm/slurm_errno.h>
+
+#include "slurm/slurm_errno.h"
 #include "src/common/slurm_xlator.h"
 #include "src/common/read_config.h"
 #include "src/plugins/switch/federation/federation.h"
@@ -517,7 +518,7 @@ static int _parse_fed_file(hostlist_t *adapter_list)
 		fed_conf = _get_fed_conf();
 
 	tbl = s_p_hashtbl_create(options);
-	if(s_p_parse_file(tbl, NULL, fed_conf) == SLURM_ERROR)
+	if(s_p_parse_file(tbl, NULL, fed_conf, false) == SLURM_ERROR)
 		fatal("something wrong with opening/reading federation "
 		      "conf file");
 
diff --git a/src/plugins/switch/federation/federation.h b/src/plugins/switch/federation/federation.h
index 0ad4e8145..d9e3e0849 100644
--- a/src/plugins/switch/federation/federation.h
+++ b/src/plugins/switch/federation/federation.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/switch/federation/federation_keys.h b/src/plugins/switch/federation/federation_keys.h
index b0400284a..3553b2152 100644
--- a/src/plugins/switch/federation/federation_keys.h
+++ b/src/plugins/switch/federation/federation_keys.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/switch/federation/switch_federation.c b/src/plugins/switch/federation/switch_federation.c
index 60518421e..6a794f566 100644
--- a/src/plugins/switch/federation/switch_federation.c
+++ b/src/plugins/switch/federation/switch_federation.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -49,7 +49,7 @@
 #include <regex.h>
 #include <stdlib.h>
 
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm_errno.h"
 #include "src/common/macros.h"
 #include "src/common/slurm_xlator.h"
 #include "src/plugins/switch/federation/federation.h"
diff --git a/src/plugins/switch/none/Makefile.in b/src/plugins/switch/none/Makefile.in
index be393c916..31eed6fe6 100644
--- a/src/plugins/switch/none/Makefile.in
+++ b/src/plugins/switch/none/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -138,7 +140,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -175,6 +180,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -232,6 +238,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -267,6 +274,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/switch/none/switch_none.c b/src/plugins/switch/none/switch_none.c
index 0f3203102..1194b3600 100644
--- a/src/plugins/switch/none/switch_none.c
+++ b/src/plugins/switch/none/switch_none.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -44,7 +44,7 @@
 #include <signal.h>
 #include <sys/types.h>
 
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm_errno.h"
 #include "src/common/slurm_xlator.h"
 
 /*
diff --git a/src/plugins/task/Makefile.am b/src/plugins/task/Makefile.am
index 05e6cd8c9..58d5b9ef2 100644
--- a/src/plugins/task/Makefile.am
+++ b/src/plugins/task/Makefile.am
@@ -1,3 +1,3 @@
 # Makefile for task plugins
 
-SUBDIRS = affinity none
+SUBDIRS = affinity none cgroup
diff --git a/src/plugins/task/Makefile.in b/src/plugins/task/Makefile.in
index f947ba2d2..373a25ce6 100644
--- a/src/plugins/task/Makefile.in
+++ b/src/plugins/task/Makefile.in
@@ -62,6 +62,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -72,6 +73,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -133,7 +135,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -170,6 +175,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -227,6 +233,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -262,6 +269,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
@@ -314,7 +322,7 @@ target_vendor = @target_vendor@
 top_build_prefix = @top_build_prefix@
 top_builddir = @top_builddir@
 top_srcdir = @top_srcdir@
-SUBDIRS = affinity none
+SUBDIRS = affinity none cgroup
 all: all-recursive
 
 .SUFFIXES:
diff --git a/src/plugins/task/affinity/Makefile.in b/src/plugins/task/affinity/Makefile.in
index ab9fe2986..cc3b5976a 100644
--- a/src/plugins/task/affinity/Makefile.in
+++ b/src/plugins/task/affinity/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -150,7 +152,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -187,6 +192,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -244,6 +250,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -279,6 +286,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/task/affinity/affinity.c b/src/plugins/task/affinity/affinity.c
index 529cf1f38..cb474284f 100644
--- a/src/plugins/task/affinity/affinity.c
+++ b/src/plugins/task/affinity/affinity.c
@@ -5,7 +5,7 @@
  *  Copyright (C) 2005-2006 Hewlett-Packard Development Company, L.P.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/task/affinity/affinity.h b/src/plugins/task/affinity/affinity.h
index 8e96683bc..74a62df8c 100644
--- a/src/plugins/task/affinity/affinity.h
+++ b/src/plugins/task/affinity/affinity.h
@@ -5,7 +5,7 @@
  *  Copyright (C) 2005 Hewlett-Packard Development Company, L.P.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -75,7 +75,7 @@
 #  include <stdlib.h>
 #endif
 
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm_errno.h"
 #include "src/common/slurm_xlator.h"
 #include "src/slurmd/slurmd/slurmd.h"
 #include "src/slurmd/slurmstepd/slurmstepd_job.h"
diff --git a/src/plugins/task/affinity/cpuset.c b/src/plugins/task/affinity/cpuset.c
index 5ff993d9d..976c860ce 100644
--- a/src/plugins/task/affinity/cpuset.c
+++ b/src/plugins/task/affinity/cpuset.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/task/affinity/dist_tasks.c b/src/plugins/task/affinity/dist_tasks.c
index 578e698cb..34dc8d40c 100644
--- a/src/plugins/task/affinity/dist_tasks.c
+++ b/src/plugins/task/affinity/dist_tasks.c
@@ -5,7 +5,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -119,6 +119,7 @@ static void _lllp_free_masks(const uint32_t maxtasks, bitstr_t **masks)
 {
     	int i;
 	bitstr_t *bitmask;
+
 	for (i = 0; i < maxtasks; i++) {
 		bitmask = masks[i];
 		FREE_NULL_BITMAP(bitmask);
@@ -168,8 +169,7 @@ void batch_bind(batch_job_launch_msg_t *req)
 	bitstr_t *req_map, *hw_map;
 	slurm_cred_arg_t arg;
 	uint16_t sockets=0, cores=0, num_cpus;
-	int start, p, t, task_cnt=0;
-	char *str;
+	int start, task_cnt=0;
 
 	if (slurm_cred_get_args(req->cred, &arg) != SLURM_SUCCESS) {
 		error("task/affinity: job lacks a credential");
@@ -181,6 +181,11 @@ void batch_bind(batch_job_launch_msg_t *req)
 		slurm_cred_free_args(&arg);
 		return;
 	}
+	if ((sockets * cores) == 0) {
+		error("task/affinity: socket and core count both zero");
+		slurm_cred_free_args(&arg);
+		return;
+	}
 
 	num_cpus  = MIN((sockets * cores),
 			 (conf->sockets * conf->cores));
@@ -194,6 +199,19 @@ void batch_bind(batch_job_launch_msg_t *req)
 		return;
 	}
 
+#ifdef HAVE_FRONT_END
+{
+	/* Since the front-end nodes are a shared resource, we limit each job
+	 * to one CPU based upon monotonically increasing sequence number */
+	static int last_id = 0;
+	bit_set(hw_map, ((last_id++) % conf->block_map_size));
+	task_cnt = 1;
+}
+#else
+{
+	char *str;
+	int t, p;
+
 	/* Transfer core_bitmap data to local req_map.
 	 * The MOD function handles the case where fewer processes
 	 * physically exist than are configured (slurmd is out of
@@ -202,6 +220,7 @@ void batch_bind(batch_job_launch_msg_t *req)
 		if (bit_test(arg.job_core_bitmap, p))
 			bit_set(req_map, (p % num_cpus));
 	}
+
 	str = (char *)bit_fmt_hexmask(req_map);
 	debug3("task/affinity: job %u CPU mask from slurmctld: %s",
 		req->job_id, str);
@@ -224,6 +243,8 @@ void batch_bind(batch_job_launch_msg_t *req)
 			task_cnt++;
 		}
 	}
+}
+#endif
 	if (task_cnt) {
 		req->cpu_bind_type = CPU_BIND_MASK;
 		if (conf->task_plugin_param & CPU_BIND_VERBOSE)
@@ -319,6 +340,7 @@ void lllp_distribution(launch_tasks_request_msg_t *req, uint32_t node_id)
 		      "nodes:%d sockets:%d:%d cores:%d:%d threads:%d",
 		      max_tasks, whole_nodes, whole_sockets ,part_sockets,
 		      whole_cores, part_cores, whole_threads);
+
 		if ((max_tasks == whole_sockets) && (part_sockets == 0)) {
 			req->cpu_bind_type |= CPU_BIND_TO_SOCKETS;
 			goto make_auto;
@@ -408,7 +430,8 @@ void lllp_distribution(launch_tasks_request_msg_t *req, uint32_t node_id)
 		      req->job_id, buf_type);
 		error("Verify socket/core/thread counts in configuration");
 	}
-	_lllp_free_masks(maxtasks, masks);
+	if (masks)
+		_lllp_free_masks(maxtasks, masks);
 }
 
 
@@ -614,6 +637,7 @@ static bitstr_t *_get_avail_map(launch_tasks_request_msg_t *req,
 		 * requested */
 		for (t = 0; t < (*hw_threads); t++) {
 			uint16_t bit = new_p * (*hw_threads) + t;
+			bit %= conf->block_map_size;
 			bit_set(hw_map, bit);
 		}
 	}
@@ -649,39 +673,38 @@ static void _blot_mask(bitstr_t *mask, uint16_t blot)
 	}
 }
 
-/* helper function for _expand_masks() 
- * foreach task, consider which other tasks have set bits on the same socket */
+/* helper function for _expand_masks()
+ * for each task, consider which other bits are set in avail_map
+ * on the same socket */
 static void _blot_mask_sockets(const uint32_t maxtasks, const uint32_t task,
-			       bitstr_t **masks, uint16_t blot)
+			       bitstr_t **masks, uint16_t blot,
+			       bitstr_t *avail_map)
 {
-        uint16_t i, j, size = 0;
-        uint32_t q;
-
-        if (!masks[task])
-                return;
-
-        size = bit_size(masks[task]);
-        for (i = 0; i < size; i++) {
-                if (bit_test(masks[task], i)) {
-			/* check if other tasks have set bits on this socket */
-                        uint16_t start = (i / blot) * blot;
-                        for (j = start; j < start+blot; j++) {
-                                for (q = 0; q < maxtasks; q++) {
-                                        if ((q != task) &&
-					    bit_test(masks[q], j)) {
-						bit_set(masks[task], j);
-					}
-				}
+  	uint16_t i, j, size = 0;
+
+	if (!masks[task])
+ 		return;
+
+	size = bit_size(masks[task]);
+	for (i = 0; i < size; i++) {
+		if (bit_test(masks[task], i)) {
+			/* check if other bits are set in avail_map on this
+			 * socket and set each corresponding bit in masks */
+			uint16_t start = (i / blot) * blot;
+			for (j = start; j < start+blot; j++) {
+				if (bit_test(avail_map, j))
+					bit_set(masks[task], j);
 			}
 		}
 	}
 }
 
-/* foreach mask, expand the mask around the set bits to include the
+/* for each mask, expand the mask around the set bits to include the
  * complete resource to which the set bits are to be bound */
 static void _expand_masks(uint16_t cpu_bind_type, const uint32_t maxtasks,
 			  bitstr_t **masks, uint16_t hw_sockets,
-			  uint16_t hw_cores, uint16_t hw_threads)
+			  uint16_t hw_cores, uint16_t hw_threads,
+			  bitstr_t *avail_map)
 {
 	uint32_t i;
 
@@ -699,7 +722,8 @@ static void _expand_masks(uint16_t cpu_bind_type, const uint32_t maxtasks,
 		if (hw_threads*hw_cores < 2)
 			return;
 		for (i = 0; i < maxtasks; i++) {
-			_blot_mask_sockets(maxtasks, i, masks, hw_threads*hw_cores);
+   			_blot_mask_sockets(maxtasks, i, masks,
+					   hw_threads*hw_cores, avail_map);
 		}
 		return;
 	}
@@ -778,12 +802,12 @@ static int _task_layout_lllp_multi(launch_tasks_request_msg_t *req,
 				break;
 		}
 	}
-	FREE_NULL_BITMAP(avail_map);
 
 	/* last step: expand the masks to bind each task
 	 * to the requested resource */
 	_expand_masks(req->cpu_bind_type, max_tasks, masks,
-			hw_sockets, hw_cores, hw_threads);
+			hw_sockets, hw_cores, hw_threads, avail_map);
+	FREE_NULL_BITMAP(avail_map);
 
 	return SLURM_SUCCESS;
 }
@@ -794,7 +818,7 @@ static int _task_layout_lllp_multi(launch_tasks_request_msg_t *req,
  * task_layout_lllp_cyclic creates a cyclic distribution at the
  * lowest level of logical processor which is either socket, core or
  * thread depending on the system architecture. The Cyclic algorithm
- * is the same as the the Cyclic distribution performed in srun.
+ * is the same as the Cyclic distribution performed in srun.
  *
  *  Distribution at the lllp:
  *  -m hostfile|plane|block|cyclic:block|cyclic
@@ -817,6 +841,7 @@ static int _task_layout_lllp_cyclic(launch_tasks_request_msg_t *req,
 	uint16_t c, i, s, t, hw_sockets = 0, hw_cores = 0, hw_threads = 0;
 	int size, max_tasks = req->tasks_to_launch[(int)node_id];
 	int max_cpus = max_tasks * req->cpus_per_task;
+	int avail_size;
 	bitstr_t *avail_map;
 	bitstr_t **masks = NULL;
 
@@ -825,6 +850,7 @@ static int _task_layout_lllp_cyclic(launch_tasks_request_msg_t *req,
 	avail_map = _get_avail_map(req, &hw_sockets, &hw_cores, &hw_threads);
 	if (!avail_map)
 		return SLURM_ERROR;
+	avail_size = bit_size(avail_map);
 
 	*masks_p = xmalloc(max_tasks * sizeof(bitstr_t*));
 	masks = *masks_p;
@@ -855,6 +881,8 @@ static int _task_layout_lllp_cyclic(launch_tasks_request_msg_t *req,
 				for (s = 0; s < hw_sockets; s++) {
 					uint16_t bit = s*(hw_cores*hw_threads) +
 						       c*(hw_threads) + t;
+					/* In case hardware and config differ */
+					bit %= avail_size;
 					if (bit_test(avail_map, bit) == 0)
 						continue;
 					if (masks[taskcount] == NULL) {
@@ -864,6 +892,7 @@ static int _task_layout_lllp_cyclic(launch_tasks_request_msg_t *req,
 								  block_map_size);
 					}
 					bit_set(masks[taskcount], bit);
+
 					if (++i < req->cpus_per_task)
 						continue;
 					i = 0;
@@ -877,12 +906,12 @@ static int _task_layout_lllp_cyclic(launch_tasks_request_msg_t *req,
 				break;
 		}
 	}
-	FREE_NULL_BITMAP(avail_map);
 
 	/* last step: expand the masks to bind each task
 	 * to the requested resource */
 	_expand_masks(req->cpu_bind_type, max_tasks, masks,
-			hw_sockets, hw_cores, hw_threads);
+			hw_sockets, hw_cores, hw_threads, avail_map);
+	FREE_NULL_BITMAP(avail_map);
 
 	return SLURM_SUCCESS;
 }
@@ -893,7 +922,7 @@ static int _task_layout_lllp_cyclic(launch_tasks_request_msg_t *req,
  * task_layout_lllp_block will create a block distribution at the
  * lowest level of logical processor which is either socket, core or
  * thread depending on the system architecture. The Block algorithm
- * is the same as the the Block distribution performed in srun.
+ * is the same as the Block distribution performed in srun.
  *
  *  Distribution at the lllp:
  *  -m hostfile|plane|block|cyclic:block|cyclic
@@ -1021,12 +1050,12 @@ static int _task_layout_lllp_block(launch_tasks_request_msg_t *req,
 	}
 
 	xfree(task_array);
-	FREE_NULL_BITMAP(avail_map);
 
 	/* last step: expand the masks to bind each task
 	 * to the requested resource */
 	_expand_masks(req->cpu_bind_type, max_tasks, masks,
-			hw_sockets, hw_cores, hw_threads);
+			hw_sockets, hw_cores, hw_threads, avail_map);
+	FREE_NULL_BITMAP(avail_map);
 
 	return SLURM_SUCCESS;
 }
diff --git a/src/plugins/task/affinity/dist_tasks.h b/src/plugins/task/affinity/dist_tasks.h
index dc08e8af0..48efce041 100644
--- a/src/plugins/task/affinity/dist_tasks.h
+++ b/src/plugins/task/affinity/dist_tasks.h
@@ -4,7 +4,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/task/affinity/numa.c b/src/plugins/task/affinity/numa.c
index 902db390c..41b570c84 100644
--- a/src/plugins/task/affinity/numa.c
+++ b/src/plugins/task/affinity/numa.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/task/affinity/schedutils.c b/src/plugins/task/affinity/schedutils.c
index ebdf1c58f..d4e15a73c 100644
--- a/src/plugins/task/affinity/schedutils.c
+++ b/src/plugins/task/affinity/schedutils.c
@@ -6,7 +6,7 @@
  *  Copyright (C) 2004 Robert Love
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/plugins/task/affinity/task_affinity.c b/src/plugins/task/affinity/task_affinity.c
index a7f15d2cc..88d3db62e 100644
--- a/src/plugins/task/affinity/task_affinity.c
+++ b/src/plugins/task/affinity/task_affinity.c
@@ -10,7 +10,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -434,3 +434,11 @@ extern int task_post_term (slurmd_job_t *job)
 	return SLURM_SUCCESS;
 }
 
+/*
+ * task_post_step() is called after termination of the step
+ * (all the task)
+ */
+extern int task_post_step (slurmd_job_t *job)
+{
+	return SLURM_SUCCESS;
+}
diff --git a/src/plugins/task/cgroup/Makefile.am b/src/plugins/task/cgroup/Makefile.am
new file mode 100644
index 000000000..1813b9a4f
--- /dev/null
+++ b/src/plugins/task/cgroup/Makefile.am
@@ -0,0 +1,17 @@
+# Makefile for task/cgroup plugin
+
+AUTOMAKE_OPTIONS = foreign
+
+PLUGIN_FLAGS = -module -avoid-version --export-dynamic
+
+INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common
+
+pkglib_LTLIBRARIES = task_cgroup.la
+
+# cgroup task plugin.
+task_cgroup_la_SOURCES = 	task_cgroup.h task_cgroup.c \
+				task_cgroup_cpuset.h task_cgroup_cpuset.c \
+				task_cgroup_memory.h task_cgroup_memory.c \
+				task_cgroup_devices.h task_cgroup_devices.c
+task_cgroup_la_CPPFLAGS = $(HWLOC_CPPFLAGS)
+task_cgroup_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) $(HWLOC_LDFLAGS) $(HWLOC_LIBS)
diff --git a/src/plugins/select/bluegene/plugin/Makefile.in b/src/plugins/task/cgroup/Makefile.in
similarity index 55%
rename from src/plugins/select/bluegene/plugin/Makefile.in
rename to src/plugins/task/cgroup/Makefile.in
index ea6392831..fe8779529 100644
--- a/src/plugins/select/bluegene/plugin/Makefile.in
+++ b/src/plugins/task/cgroup/Makefile.in
@@ -15,8 +15,7 @@
 
 @SET_MAKE@
 
-# Makefile for select/bluegene plugin
-
+# Makefile for task/cgroup plugin
 
 VPATH = @srcdir@
 pkgdatadir = $(datadir)/@PACKAGE@
@@ -38,9 +37,7 @@ POST_UNINSTALL = :
 build_triplet = @build@
 host_triplet = @host@
 target_triplet = @target@
-@BLUEGENE_LOADED_TRUE@sbin_PROGRAMS = slurm_prolog$(EXEEXT) \
-@BLUEGENE_LOADED_TRUE@	slurm_epilog$(EXEEXT) sfree$(EXEEXT)
-subdir = src/plugins/select/bluegene/plugin
+subdir = src/plugins/task/cgroup
 DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in
 ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
 am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
@@ -66,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -76,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -105,88 +104,17 @@ am__nobase_list = $(am__nobase_strip_setup); \
 am__base_list = \
   sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
   sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
-am__installdirs = "$(DESTDIR)$(pkglibdir)" "$(DESTDIR)$(sbindir)"
+am__installdirs = "$(DESTDIR)$(pkglibdir)"
 LTLIBRARIES = $(pkglib_LTLIBRARIES)
-libsched_if_la_LIBADD =
-am__libsched_if_la_SOURCES_DIST = libsched_if64.c
-@BGL_LOADED_FALSE@@BLUEGENE_LOADED_TRUE@am_libsched_if_la_OBJECTS =  \
-@BGL_LOADED_FALSE@@BLUEGENE_LOADED_TRUE@	libsched_if64.lo
-libsched_if_la_OBJECTS = $(am_libsched_if_la_OBJECTS)
-libsched_if_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
-	$(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
-	$(libsched_if_la_LDFLAGS) $(LDFLAGS) -o $@
-@BGL_LOADED_FALSE@@BLUEGENE_LOADED_TRUE@am_libsched_if_la_rpath =  \
-@BGL_LOADED_FALSE@@BLUEGENE_LOADED_TRUE@	-rpath $(pkglibdir)
-libsched_if64_la_LIBADD =
-am__libsched_if64_la_SOURCES_DIST = libsched_if64.c
-@BGL_LOADED_TRUE@@BLUEGENE_LOADED_TRUE@am_libsched_if64_la_OBJECTS =  \
-@BGL_LOADED_TRUE@@BLUEGENE_LOADED_TRUE@	libsched_if64.lo
-libsched_if64_la_OBJECTS = $(am_libsched_if64_la_OBJECTS)
-libsched_if64_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
+task_cgroup_la_LIBADD =
+am_task_cgroup_la_OBJECTS = task_cgroup_la-task_cgroup.lo \
+	task_cgroup_la-task_cgroup_cpuset.lo \
+	task_cgroup_la-task_cgroup_memory.lo \
+	task_cgroup_la-task_cgroup_devices.lo
+task_cgroup_la_OBJECTS = $(am_task_cgroup_la_OBJECTS)
+task_cgroup_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
 	$(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
-	$(libsched_if64_la_LDFLAGS) $(LDFLAGS) -o $@
-@BGL_LOADED_TRUE@@BLUEGENE_LOADED_TRUE@am_libsched_if64_la_rpath =  \
-@BGL_LOADED_TRUE@@BLUEGENE_LOADED_TRUE@	-rpath $(pkglibdir)
-@BLUEGENE_LOADED_TRUE@select_bluegene_la_DEPENDENCIES = ../block_allocator/libbluegene_block_allocator.la
-am__select_bluegene_la_SOURCES_DIST = select_bluegene.c jobinfo.c \
-	jobinfo.h nodeinfo.c nodeinfo.h ../wrap_rm_api.h \
-	bg_boot_time.h bg_job_place.c bg_job_place.h bg_job_run.c \
-	bg_job_run.h bg_block_info.c bg_block_info.h \
-	bg_record_functions.c bg_record_functions.h bluegene.c \
-	bluegene.h state_test.c state_test.h bg_switch_connections.c \
-	block_sys.c dynamic_block.c dynamic_block.h defined_block.c \
-	defined_block.h
-@BLUEGENE_LOADED_FALSE@am_select_bluegene_la_OBJECTS =  \
-@BLUEGENE_LOADED_FALSE@	select_bluegene.lo jobinfo.lo \
-@BLUEGENE_LOADED_FALSE@	nodeinfo.lo
-@BLUEGENE_LOADED_TRUE@am_select_bluegene_la_OBJECTS =  \
-@BLUEGENE_LOADED_TRUE@	select_bluegene.lo bg_job_place.lo \
-@BLUEGENE_LOADED_TRUE@	bg_job_run.lo bg_block_info.lo \
-@BLUEGENE_LOADED_TRUE@	bg_record_functions.lo bluegene.lo \
-@BLUEGENE_LOADED_TRUE@	state_test.lo bg_switch_connections.lo \
-@BLUEGENE_LOADED_TRUE@	block_sys.lo dynamic_block.lo \
-@BLUEGENE_LOADED_TRUE@	defined_block.lo jobinfo.lo nodeinfo.lo
-select_bluegene_la_OBJECTS = $(am_select_bluegene_la_OBJECTS)
-select_bluegene_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
-	$(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
-	$(select_bluegene_la_LDFLAGS) $(LDFLAGS) -o $@
-@BGL_LOADED_FALSE@@BLUEGENE_LOADED_TRUE@am_select_bluegene_la_rpath =  \
-@BGL_LOADED_FALSE@@BLUEGENE_LOADED_TRUE@	-rpath $(pkglibdir)
-@BGL_LOADED_TRUE@@BLUEGENE_LOADED_TRUE@am_select_bluegene_la_rpath =  \
-@BGL_LOADED_TRUE@@BLUEGENE_LOADED_TRUE@	-rpath $(pkglibdir)
-@BLUEGENE_LOADED_FALSE@am_select_bluegene_la_rpath = -rpath \
-@BLUEGENE_LOADED_FALSE@	$(pkglibdir)
-PROGRAMS = $(sbin_PROGRAMS)
-am__sfree_SOURCES_DIST = sfree.c sfree.h opts.c \
-	../block_allocator/bridge_linker.c \
-	../block_allocator/bridge_linker.h
-@BLUEGENE_LOADED_TRUE@am_sfree_OBJECTS = sfree.$(OBJEXT) \
-@BLUEGENE_LOADED_TRUE@	opts.$(OBJEXT) bridge_linker.$(OBJEXT)
-sfree_OBJECTS = $(am_sfree_OBJECTS)
-@BLUEGENE_LOADED_TRUE@am__DEPENDENCIES_1 =  \
-@BLUEGENE_LOADED_TRUE@	$(top_builddir)/src/api/libslurm.o
-@BLUEGENE_LOADED_TRUE@sfree_DEPENDENCIES = $(am__DEPENDENCIES_1)
-sfree_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
-	--mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(sfree_LDFLAGS) \
-	$(LDFLAGS) -o $@
-am__slurm_epilog_SOURCES_DIST = slurm_epilog.c
-@BLUEGENE_LOADED_TRUE@am_slurm_epilog_OBJECTS =  \
-@BLUEGENE_LOADED_TRUE@	slurm_epilog.$(OBJEXT)
-slurm_epilog_OBJECTS = $(am_slurm_epilog_OBJECTS)
-@BLUEGENE_LOADED_TRUE@slurm_epilog_DEPENDENCIES =  \
-@BLUEGENE_LOADED_TRUE@	$(am__DEPENDENCIES_1)
-slurm_epilog_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
-	$(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
-	$(slurm_epilog_LDFLAGS) $(LDFLAGS) -o $@
-am__slurm_prolog_SOURCES_DIST = slurm_prolog.c
-@BLUEGENE_LOADED_TRUE@am_slurm_prolog_OBJECTS =  \
-@BLUEGENE_LOADED_TRUE@	slurm_prolog.$(OBJEXT)
-slurm_prolog_OBJECTS = $(am_slurm_prolog_OBJECTS)
-@BLUEGENE_LOADED_TRUE@slurm_prolog_DEPENDENCIES =  \
-@BLUEGENE_LOADED_TRUE@	$(am__DEPENDENCIES_1)
-slurm_prolog_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
-	$(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
-	$(slurm_prolog_LDFLAGS) $(LDFLAGS) -o $@
+	$(task_cgroup_la_LDFLAGS) $(LDFLAGS) -o $@
 DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm
 depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp
 am__depfiles_maybe = depfiles
@@ -200,14 +128,8 @@ CCLD = $(CC)
 LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
 	--mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \
 	$(LDFLAGS) -o $@
-SOURCES = $(libsched_if_la_SOURCES) $(libsched_if64_la_SOURCES) \
-	$(select_bluegene_la_SOURCES) $(sfree_SOURCES) \
-	$(slurm_epilog_SOURCES) $(slurm_prolog_SOURCES)
-DIST_SOURCES = $(am__libsched_if_la_SOURCES_DIST) \
-	$(am__libsched_if64_la_SOURCES_DIST) \
-	$(am__select_bluegene_la_SOURCES_DIST) \
-	$(am__sfree_SOURCES_DIST) $(am__slurm_epilog_SOURCES_DIST) \
-	$(am__slurm_prolog_SOURCES_DIST)
+SOURCES = $(task_cgroup_la_SOURCES)
+DIST_SOURCES = $(task_cgroup_la_SOURCES)
 ETAGS = etags
 CTAGS = ctags
 DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
@@ -221,7 +143,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -232,7 +157,7 @@ CCDEPMODE = @CCDEPMODE@
 CFLAGS = @CFLAGS@
 CMD_LDFLAGS = @CMD_LDFLAGS@
 CPP = @CPP@
-CPPFLAGS = -DBLUEGENE_CONFIG_FILE=\"$(sysconfdir)/bluegene.conf\"
+CPPFLAGS = @CPPFLAGS@
 CXX = @CXX@
 CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
@@ -258,6 +183,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -315,6 +241,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -350,6 +277,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
@@ -403,60 +331,18 @@ top_build_prefix = @top_build_prefix@
 top_builddir = @top_builddir@
 top_srcdir = @top_srcdir@
 AUTOMAKE_OPTIONS = foreign
-PLUGIN_FLAGS = -module -avoid-version --export-dynamic -lm
-INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common $(BG_INCLUDES)
-@BGL_LOADED_FALSE@@BLUEGENE_LOADED_TRUE@pkglib_LTLIBRARIES = select_bluegene.la libsched_if.la
-@BGL_LOADED_TRUE@@BLUEGENE_LOADED_TRUE@pkglib_LTLIBRARIES = select_bluegene.la libsched_if64.la
-
-# These are needed for pack/unpack of structures for cross-cluster stuff
-@BLUEGENE_LOADED_FALSE@pkglib_LTLIBRARIES = select_bluegene.la
-@BLUEGENE_LOADED_FALSE@select_bluegene_la_SOURCES = select_bluegene.c \
-@BLUEGENE_LOADED_FALSE@			jobinfo.c jobinfo.h\
-@BLUEGENE_LOADED_FALSE@			nodeinfo.c nodeinfo.h\
-@BLUEGENE_LOADED_FALSE@			../wrap_rm_api.h
-
-
-# Blue Gene node selection plugin.
-@BLUEGENE_LOADED_TRUE@select_bluegene_la_SOURCES = select_bluegene.c \
-@BLUEGENE_LOADED_TRUE@				bg_boot_time.h \
-@BLUEGENE_LOADED_TRUE@				bg_job_place.c bg_job_place.h \
-@BLUEGENE_LOADED_TRUE@				bg_job_run.c bg_job_run.h \
-@BLUEGENE_LOADED_TRUE@				bg_block_info.c bg_block_info.h \
-@BLUEGENE_LOADED_TRUE@				bg_record_functions.c bg_record_functions.h \
-@BLUEGENE_LOADED_TRUE@				bluegene.c bluegene.h \
-@BLUEGENE_LOADED_TRUE@				state_test.c state_test.h \
-@BLUEGENE_LOADED_TRUE@				bg_switch_connections.c \
-@BLUEGENE_LOADED_TRUE@				block_sys.c \
-@BLUEGENE_LOADED_TRUE@				dynamic_block.c dynamic_block.h \
-@BLUEGENE_LOADED_TRUE@				defined_block.c defined_block.h \
-@BLUEGENE_LOADED_TRUE@				jobinfo.c jobinfo.h\
-@BLUEGENE_LOADED_TRUE@				nodeinfo.c nodeinfo.h\
-@BLUEGENE_LOADED_TRUE@				../wrap_rm_api.h
-
-@BLUEGENE_LOADED_FALSE@select_bluegene_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
-@BLUEGENE_LOADED_TRUE@select_bluegene_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
-@BLUEGENE_LOADED_TRUE@select_bluegene_la_LIBADD = ../block_allocator/libbluegene_block_allocator.la
-
-# MPIRUN dynamic lib.
-@BGL_LOADED_TRUE@@BLUEGENE_LOADED_TRUE@libsched_if64_la_SOURCES = libsched_if64.c
-@BGL_LOADED_TRUE@@BLUEGENE_LOADED_TRUE@libsched_if64_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
-@BGL_LOADED_FALSE@@BLUEGENE_LOADED_TRUE@libsched_if_la_SOURCES = libsched_if64.c
-@BGL_LOADED_FALSE@@BLUEGENE_LOADED_TRUE@libsched_if_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
-@BLUEGENE_LOADED_TRUE@convenience_libs = \
-@BLUEGENE_LOADED_TRUE@	$(top_builddir)/src/api/libslurm.o -ldl
-
-@BLUEGENE_LOADED_TRUE@sfree_LDADD = $(convenience_libs)
-@BLUEGENE_LOADED_TRUE@slurm_prolog_LDADD = $(convenience_libs)
-@BLUEGENE_LOADED_TRUE@slurm_epilog_LDADD = $(convenience_libs)
-@BLUEGENE_LOADED_TRUE@sfree_SOURCES = sfree.c sfree.h opts.c \
-@BLUEGENE_LOADED_TRUE@		../block_allocator/bridge_linker.c \
-@BLUEGENE_LOADED_TRUE@		../block_allocator/bridge_linker.h
-
-@BLUEGENE_LOADED_TRUE@slurm_prolog_SOURCES = slurm_prolog.c
-@BLUEGENE_LOADED_TRUE@slurm_epilog_SOURCES = slurm_epilog.c
-@BLUEGENE_LOADED_TRUE@sfree_LDFLAGS = -export-dynamic -lm $(CMD_LDFLAGS)
-@BLUEGENE_LOADED_TRUE@slurm_prolog_LDFLAGS = -export-dynamic $(CMD_LDFLAGS)
-@BLUEGENE_LOADED_TRUE@slurm_epilog_LDFLAGS = -export-dynamic $(CMD_LDFLAGS)
+PLUGIN_FLAGS = -module -avoid-version --export-dynamic
+INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common
+pkglib_LTLIBRARIES = task_cgroup.la
+
+# cgroup task plugin.
+task_cgroup_la_SOURCES = task_cgroup.h task_cgroup.c \
+				task_cgroup_cpuset.h task_cgroup_cpuset.c \
+				task_cgroup_memory.h task_cgroup_memory.c \
+				task_cgroup_devices.h task_cgroup_devices.c
+
+task_cgroup_la_CPPFLAGS = $(HWLOC_CPPFLAGS)
+task_cgroup_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) $(HWLOC_LDFLAGS) $(HWLOC_LIBS)
 all: all-am
 
 .SUFFIXES:
@@ -470,9 +356,9 @@ $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am  $(am__confi
 	      exit 1;; \
 	  esac; \
 	done; \
-	echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/plugins/select/bluegene/plugin/Makefile'; \
+	echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/plugins/task/cgroup/Makefile'; \
 	$(am__cd) $(top_srcdir) && \
-	  $(AUTOMAKE) --foreign src/plugins/select/bluegene/plugin/Makefile
+	  $(AUTOMAKE) --foreign src/plugins/task/cgroup/Makefile
 .PRECIOUS: Makefile
 Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
 	@case '$?' in \
@@ -522,64 +408,8 @@ clean-pkglibLTLIBRARIES:
 	  echo "rm -f \"$${dir}/so_locations\""; \
 	  rm -f "$${dir}/so_locations"; \
 	done
-libsched_if.la: $(libsched_if_la_OBJECTS) $(libsched_if_la_DEPENDENCIES) 
-	$(libsched_if_la_LINK) $(am_libsched_if_la_rpath) $(libsched_if_la_OBJECTS) $(libsched_if_la_LIBADD) $(LIBS)
-libsched_if64.la: $(libsched_if64_la_OBJECTS) $(libsched_if64_la_DEPENDENCIES) 
-	$(libsched_if64_la_LINK) $(am_libsched_if64_la_rpath) $(libsched_if64_la_OBJECTS) $(libsched_if64_la_LIBADD) $(LIBS)
-select_bluegene.la: $(select_bluegene_la_OBJECTS) $(select_bluegene_la_DEPENDENCIES) 
-	$(select_bluegene_la_LINK) $(am_select_bluegene_la_rpath) $(select_bluegene_la_OBJECTS) $(select_bluegene_la_LIBADD) $(LIBS)
-install-sbinPROGRAMS: $(sbin_PROGRAMS)
-	@$(NORMAL_INSTALL)
-	test -z "$(sbindir)" || $(MKDIR_P) "$(DESTDIR)$(sbindir)"
-	@list='$(sbin_PROGRAMS)'; test -n "$(sbindir)" || list=; \
-	for p in $$list; do echo "$$p $$p"; done | \
-	sed 's/$(EXEEXT)$$//' | \
-	while read p p1; do if test -f $$p || test -f $$p1; \
-	  then echo "$$p"; echo "$$p"; else :; fi; \
-	done | \
-	sed -e 'p;s,.*/,,;n;h' -e 's|.*|.|' \
-	    -e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \
-	sed 'N;N;N;s,\n, ,g' | \
-	$(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \
-	  { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
-	    if ($$2 == $$4) files[d] = files[d] " " $$1; \
-	    else { print "f", $$3 "/" $$4, $$1; } } \
-	  END { for (d in files) print "f", d, files[d] }' | \
-	while read type dir files; do \
-	    if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
-	    test -z "$$files" || { \
-	    echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files '$(DESTDIR)$(sbindir)$$dir'"; \
-	    $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files "$(DESTDIR)$(sbindir)$$dir" || exit $$?; \
-	    } \
-	; done
-
-uninstall-sbinPROGRAMS:
-	@$(NORMAL_UNINSTALL)
-	@list='$(sbin_PROGRAMS)'; test -n "$(sbindir)" || list=; \
-	files=`for p in $$list; do echo "$$p"; done | \
-	  sed -e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \
-	      -e 's/$$/$(EXEEXT)/' `; \
-	test -n "$$list" || exit 0; \
-	echo " ( cd '$(DESTDIR)$(sbindir)' && rm -f" $$files ")"; \
-	cd "$(DESTDIR)$(sbindir)" && rm -f $$files
-
-clean-sbinPROGRAMS:
-	@list='$(sbin_PROGRAMS)'; test -n "$$list" || exit 0; \
-	echo " rm -f" $$list; \
-	rm -f $$list || exit $$?; \
-	test -n "$(EXEEXT)" || exit 0; \
-	list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \
-	echo " rm -f" $$list; \
-	rm -f $$list
-sfree$(EXEEXT): $(sfree_OBJECTS) $(sfree_DEPENDENCIES) 
-	@rm -f sfree$(EXEEXT)
-	$(sfree_LINK) $(sfree_OBJECTS) $(sfree_LDADD) $(LIBS)
-slurm_epilog$(EXEEXT): $(slurm_epilog_OBJECTS) $(slurm_epilog_DEPENDENCIES) 
-	@rm -f slurm_epilog$(EXEEXT)
-	$(slurm_epilog_LINK) $(slurm_epilog_OBJECTS) $(slurm_epilog_LDADD) $(LIBS)
-slurm_prolog$(EXEEXT): $(slurm_prolog_OBJECTS) $(slurm_prolog_DEPENDENCIES) 
-	@rm -f slurm_prolog$(EXEEXT)
-	$(slurm_prolog_LINK) $(slurm_prolog_OBJECTS) $(slurm_prolog_LDADD) $(LIBS)
+task_cgroup.la: $(task_cgroup_la_OBJECTS) $(task_cgroup_la_DEPENDENCIES) 
+	$(task_cgroup_la_LINK) -rpath $(pkglibdir) $(task_cgroup_la_OBJECTS) $(task_cgroup_la_LIBADD) $(LIBS)
 
 mostlyclean-compile:
 	-rm -f *.$(OBJEXT)
@@ -587,25 +417,10 @@ mostlyclean-compile:
 distclean-compile:
 	-rm -f *.tab.c
 
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/bg_block_info.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/bg_job_place.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/bg_job_run.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/bg_record_functions.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/bg_switch_connections.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/block_sys.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/bluegene.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/bridge_linker.Po@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/defined_block.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/dynamic_block.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/jobinfo.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libsched_if64.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/nodeinfo.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/opts.Po@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/select_bluegene.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sfree.Po@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/slurm_epilog.Po@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/slurm_prolog.Po@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/state_test.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/task_cgroup_la-task_cgroup.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/task_cgroup_la-task_cgroup_cpuset.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/task_cgroup_la-task_cgroup_devices.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/task_cgroup_la-task_cgroup_memory.Plo@am__quote@
 
 .c.o:
 @am__fastdepCC_TRUE@	$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
@@ -628,19 +443,33 @@ distclean-compile:
 @AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
 @am__fastdepCC_FALSE@	$(LTCOMPILE) -c -o $@ $<
 
-bridge_linker.o: ../block_allocator/bridge_linker.c
-@am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bridge_linker.o -MD -MP -MF $(DEPDIR)/bridge_linker.Tpo -c -o bridge_linker.o `test -f '../block_allocator/bridge_linker.c' || echo '$(srcdir)/'`../block_allocator/bridge_linker.c
-@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/bridge_linker.Tpo $(DEPDIR)/bridge_linker.Po
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='../block_allocator/bridge_linker.c' object='bridge_linker.o' libtool=no @AMDEPBACKSLASH@
+task_cgroup_la-task_cgroup.lo: task_cgroup.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(task_cgroup_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT task_cgroup_la-task_cgroup.lo -MD -MP -MF $(DEPDIR)/task_cgroup_la-task_cgroup.Tpo -c -o task_cgroup_la-task_cgroup.lo `test -f 'task_cgroup.c' || echo '$(srcdir)/'`task_cgroup.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/task_cgroup_la-task_cgroup.Tpo $(DEPDIR)/task_cgroup_la-task_cgroup.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='task_cgroup.c' object='task_cgroup_la-task_cgroup.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(task_cgroup_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o task_cgroup_la-task_cgroup.lo `test -f 'task_cgroup.c' || echo '$(srcdir)/'`task_cgroup.c
+
+task_cgroup_la-task_cgroup_cpuset.lo: task_cgroup_cpuset.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(task_cgroup_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT task_cgroup_la-task_cgroup_cpuset.lo -MD -MP -MF $(DEPDIR)/task_cgroup_la-task_cgroup_cpuset.Tpo -c -o task_cgroup_la-task_cgroup_cpuset.lo `test -f 'task_cgroup_cpuset.c' || echo '$(srcdir)/'`task_cgroup_cpuset.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/task_cgroup_la-task_cgroup_cpuset.Tpo $(DEPDIR)/task_cgroup_la-task_cgroup_cpuset.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='task_cgroup_cpuset.c' object='task_cgroup_la-task_cgroup_cpuset.lo' libtool=yes @AMDEPBACKSLASH@
 @AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bridge_linker.o `test -f '../block_allocator/bridge_linker.c' || echo '$(srcdir)/'`../block_allocator/bridge_linker.c
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(task_cgroup_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o task_cgroup_la-task_cgroup_cpuset.lo `test -f 'task_cgroup_cpuset.c' || echo '$(srcdir)/'`task_cgroup_cpuset.c
 
-bridge_linker.obj: ../block_allocator/bridge_linker.c
-@am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT bridge_linker.obj -MD -MP -MF $(DEPDIR)/bridge_linker.Tpo -c -o bridge_linker.obj `if test -f '../block_allocator/bridge_linker.c'; then $(CYGPATH_W) '../block_allocator/bridge_linker.c'; else $(CYGPATH_W) '$(srcdir)/../block_allocator/bridge_linker.c'; fi`
-@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/bridge_linker.Tpo $(DEPDIR)/bridge_linker.Po
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='../block_allocator/bridge_linker.c' object='bridge_linker.obj' libtool=no @AMDEPBACKSLASH@
+task_cgroup_la-task_cgroup_memory.lo: task_cgroup_memory.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(task_cgroup_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT task_cgroup_la-task_cgroup_memory.lo -MD -MP -MF $(DEPDIR)/task_cgroup_la-task_cgroup_memory.Tpo -c -o task_cgroup_la-task_cgroup_memory.lo `test -f 'task_cgroup_memory.c' || echo '$(srcdir)/'`task_cgroup_memory.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/task_cgroup_la-task_cgroup_memory.Tpo $(DEPDIR)/task_cgroup_la-task_cgroup_memory.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='task_cgroup_memory.c' object='task_cgroup_la-task_cgroup_memory.lo' libtool=yes @AMDEPBACKSLASH@
 @AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o bridge_linker.obj `if test -f '../block_allocator/bridge_linker.c'; then $(CYGPATH_W) '../block_allocator/bridge_linker.c'; else $(CYGPATH_W) '$(srcdir)/../block_allocator/bridge_linker.c'; fi`
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(task_cgroup_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o task_cgroup_la-task_cgroup_memory.lo `test -f 'task_cgroup_memory.c' || echo '$(srcdir)/'`task_cgroup_memory.c
+
+task_cgroup_la-task_cgroup_devices.lo: task_cgroup_devices.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(task_cgroup_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT task_cgroup_la-task_cgroup_devices.lo -MD -MP -MF $(DEPDIR)/task_cgroup_la-task_cgroup_devices.Tpo -c -o task_cgroup_la-task_cgroup_devices.lo `test -f 'task_cgroup_devices.c' || echo '$(srcdir)/'`task_cgroup_devices.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/task_cgroup_la-task_cgroup_devices.Tpo $(DEPDIR)/task_cgroup_la-task_cgroup_devices.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='task_cgroup_devices.c' object='task_cgroup_la-task_cgroup_devices.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(task_cgroup_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o task_cgroup_la-task_cgroup_devices.lo `test -f 'task_cgroup_devices.c' || echo '$(srcdir)/'`task_cgroup_devices.c
 
 mostlyclean-libtool:
 	-rm -f *.lo
@@ -732,9 +561,9 @@ distdir: $(DISTFILES)
 	done
 check-am: all-am
 check: check-am
-all-am: Makefile $(LTLIBRARIES) $(PROGRAMS)
+all-am: Makefile $(LTLIBRARIES)
 installdirs:
-	for dir in "$(DESTDIR)$(pkglibdir)" "$(DESTDIR)$(sbindir)"; do \
+	for dir in "$(DESTDIR)$(pkglibdir)"; do \
 	  test -z "$$dir" || $(MKDIR_P) "$$dir"; \
 	done
 install: install-am
@@ -765,7 +594,7 @@ maintainer-clean-generic:
 clean: clean-am
 
 clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \
-	clean-sbinPROGRAMS mostlyclean-am
+	mostlyclean-am
 
 distclean: distclean-am
 	-rm -rf ./$(DEPDIR)
@@ -791,7 +620,7 @@ install-dvi: install-dvi-am
 
 install-dvi-am:
 
-install-exec-am: install-pkglibLTLIBRARIES install-sbinPROGRAMS
+install-exec-am: install-pkglibLTLIBRARIES
 
 install-html: install-html-am
 
@@ -831,30 +660,24 @@ ps: ps-am
 
 ps-am:
 
-uninstall-am: uninstall-pkglibLTLIBRARIES uninstall-sbinPROGRAMS
+uninstall-am: uninstall-pkglibLTLIBRARIES
 
 .MAKE: install-am install-strip
 
 .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \
-	clean-libtool clean-pkglibLTLIBRARIES clean-sbinPROGRAMS ctags \
-	distclean distclean-compile distclean-generic \
-	distclean-libtool distclean-tags distdir dvi dvi-am html \
-	html-am info info-am install install-am install-data \
-	install-data-am install-dvi install-dvi-am install-exec \
-	install-exec-am install-html install-html-am install-info \
-	install-info-am install-man install-pdf install-pdf-am \
-	install-pkglibLTLIBRARIES install-ps install-ps-am \
-	install-sbinPROGRAMS install-strip installcheck \
+	clean-libtool clean-pkglibLTLIBRARIES ctags distclean \
+	distclean-compile distclean-generic distclean-libtool \
+	distclean-tags distdir dvi dvi-am html html-am info info-am \
+	install install-am install-data install-data-am install-dvi \
+	install-dvi-am install-exec install-exec-am install-html \
+	install-html-am install-info install-info-am install-man \
+	install-pdf install-pdf-am install-pkglibLTLIBRARIES \
+	install-ps install-ps-am install-strip installcheck \
 	installcheck-am installdirs maintainer-clean \
 	maintainer-clean-generic mostlyclean mostlyclean-compile \
 	mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \
-	tags uninstall uninstall-am uninstall-pkglibLTLIBRARIES \
-	uninstall-sbinPROGRAMS
-
+	tags uninstall uninstall-am uninstall-pkglibLTLIBRARIES
 
-@BLUEGENE_LOADED_TRUE@force:
-@BLUEGENE_LOADED_TRUE@$(select_bluegene_la_LIBADD) $(sfree_LDADD) : force
-@BLUEGENE_LOADED_TRUE@	@cd `dirname $@` && $(MAKE) `basename $@`
 
 # Tell versions [3.59,3.63) of GNU make to not export all variables.
 # Otherwise a system limit (for SysV at least) may be exceeded.
diff --git a/src/plugins/task/cgroup/task_cgroup.c b/src/plugins/task/cgroup/task_cgroup.c
new file mode 100644
index 000000000..3bea7f401
--- /dev/null
+++ b/src/plugins/task/cgroup/task_cgroup.c
@@ -0,0 +1,319 @@
+/*****************************************************************************\
+ *  task_cgroup.c - Library for task pre-launch and post_termination functions
+ *	            for containment using linux cgroup subsystems
+ *****************************************************************************
+ *  Copyright (C) 2009 CEA/DAM/DIF
+ *  Written by Matthieu Hautreux <matthieu.hautreux@cea.fr>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#if     HAVE_CONFIG_H
+#  include "config.h"
+#endif
+
+#include <signal.h>
+#include <sys/types.h>
+
+#include "slurm/slurm_errno.h"
+#include "src/common/slurm_xlator.h"
+#include "src/slurmd/slurmstepd/slurmstepd_job.h"
+#include "src/slurmd/slurmd/slurmd.h"
+#include "src/common/xcgroup.h"
+#include "src/common/xstring.h"
+#include "src/common/xcgroup_read_config.h"
+
+#include "task_cgroup.h"
+#include "task_cgroup_cpuset.h"
+#include "task_cgroup_memory.h"
+#include "task_cgroup_devices.h"
+
+/*
+ * These variables are required by the generic plugin interface.  If they
+ * are not found in the plugin, the plugin loader will ignore it.
+ *
+ * plugin_name - a string giving a human-readable description of the
+ * plugin.  There is no maximum length, but the symbol must refer to
+ * a valid string.
+ *
+ * plugin_type - a string suggesting the type of the plugin or its
+ * applicability to a particular form of data or method of data handling.
+ * If the low-level plugin API is used, the contents of this string are
+ * unimportant and may be anything.  SLURM uses the higher-level plugin
+ * interface which requires this string to be of the form
+ *
+ *      <application>/<method>
+ *
+ * where <application> is a description of the intended application of
+ * the plugin (e.g., "task" for task control) and <method> is a description
+ * of how this plugin satisfies that application.  SLURM will only load
+ * a task plugin if the plugin_type string has a prefix of "task/".
+ *
+ * plugin_version - an unsigned 32-bit integer giving the version number
+ * of the plugin.  If major and minor revisions are desired, the major
+ * version number may be multiplied by a suitable magnitude constant such
+ * as 100 or 1000.  Various SLURM versions will likely require a certain
+ * minimum versions for their plugins as this API matures.
+ */
+const char plugin_name[]        = "Tasks containment using linux cgroup";
+const char plugin_type[]        = "task/cgroup";
+const uint32_t plugin_version   = 100;
+
+static bool use_cpuset  = false;
+static bool use_memory  = false;
+static bool use_devices = false;
+
+static slurm_cgroup_conf_t slurm_cgroup_conf;
+
+/*
+ * init() is called when the plugin is loaded, before any other functions
+ *	are called.  Put global initialization here.
+ */
+extern int init (void)
+{
+
+	/* read cgroup configuration */
+	if (read_slurm_cgroup_conf(&slurm_cgroup_conf))
+		return SLURM_ERROR;
+
+	/* enable subsystems based on conf */
+	if (slurm_cgroup_conf.constrain_cores) {
+		use_cpuset = true;
+		task_cgroup_cpuset_init(&slurm_cgroup_conf);
+		debug("%s: now constraining jobs allocated cores",
+		      plugin_type);
+	}
+
+	if (slurm_cgroup_conf.constrain_ram_space ||
+	     slurm_cgroup_conf.constrain_swap_space) {
+		use_memory = true;
+		task_cgroup_memory_init(&slurm_cgroup_conf);
+		debug("%s: now constraining jobs allocated memory",
+		      plugin_type);
+	}
+
+	if (slurm_cgroup_conf.constrain_devices) {
+		use_devices = true;
+		task_cgroup_devices_init(&slurm_cgroup_conf);
+		debug("%s: now constraining jobs allocated devices",
+		      plugin_type);
+	}
+
+	verbose("%s: loaded", plugin_type);
+	return SLURM_SUCCESS;
+}
+
+/*
+ * fini() is called when the plugin is removed. Clear any allocated
+ *	storage here.
+ */
+extern int fini (void)
+{
+
+	if (use_cpuset) {
+		task_cgroup_cpuset_fini(&slurm_cgroup_conf);
+	}
+	if (use_memory) {
+		task_cgroup_memory_fini(&slurm_cgroup_conf);
+	}
+	if (use_devices) {
+		task_cgroup_devices_fini(&slurm_cgroup_conf);
+	}
+
+	/* unload configuration */
+	free_slurm_cgroup_conf(&slurm_cgroup_conf);
+
+	return SLURM_SUCCESS;
+}
+
+/*
+ * task_slurmd_batch_request()
+ */
+extern int task_slurmd_batch_request (uint32_t job_id,
+				      batch_job_launch_msg_t *req)
+{
+	return SLURM_SUCCESS;
+}
+
+/*
+ * task_slurmd_launch_request()
+ */
+extern int task_slurmd_launch_request (uint32_t job_id,
+				       launch_tasks_request_msg_t *req,
+				       uint32_t node_id)
+{
+	return SLURM_SUCCESS;
+}
+
+/*
+ * task_slurmd_reserve_resources()
+ */
+extern int task_slurmd_reserve_resources (uint32_t job_id,
+					  launch_tasks_request_msg_t *req,
+					  uint32_t node_id)
+{
+	return SLURM_SUCCESS;
+}
+
+/*
+ * task_slurmd_suspend_job()
+ */
+extern int task_slurmd_suspend_job (uint32_t job_id)
+{
+	return SLURM_SUCCESS;
+}
+
+/*
+ * task_slurmd_resume_job()
+ */
+extern int task_slurmd_resume_job (uint32_t job_id)
+{
+	return SLURM_SUCCESS;
+}
+
+/*
+ * task_slurmd_release_resources()
+ */
+extern int task_slurmd_release_resources (uint32_t job_id)
+{
+	return SLURM_SUCCESS;
+}
+
+/*
+ * task_pre_setuid() is called before setting the UID for the
+ * user to launch his jobs. Use this to create the CPUSET directory
+ * and set the owner appropriately.
+ */
+extern int task_pre_setuid (slurmd_job_t *job)
+{
+
+	if (use_cpuset) {
+		/* we create the cpuset container as we are still root */
+		task_cgroup_cpuset_create(job);
+	}
+
+	if (use_memory) {
+		/* we create the memory container as we are still root */
+		task_cgroup_memory_create(job);
+	}
+
+	if (use_devices) {
+		task_cgroup_devices_create(job);
+		/* here we should create the devices container as we are root */
+	}
+
+	return SLURM_SUCCESS;
+}
+
+/*
+ * task_pre_launch() is called prior to exec of application task.
+ *	It is followed by TaskProlog program (from slurm.conf) and
+ *	--task-prolog (from srun command line).
+ */
+extern int task_pre_launch (slurmd_job_t *job)
+{
+
+	if (use_cpuset) {
+		/* attach the task ? not necessary but in case of future mods */
+		task_cgroup_cpuset_attach_task(job);
+
+		/* set affinity if requested */
+		if (slurm_cgroup_conf.task_affinity)
+			task_cgroup_cpuset_set_task_affinity(job);
+	}
+
+	if (use_memory) {
+		/* attach the task ? not necessary but in case of future mods */
+		task_cgroup_memory_attach_task(job);
+	}
+
+	if (use_devices) {
+		task_cgroup_devices_attach_task(job);
+	}
+
+	return SLURM_SUCCESS;
+}
+
+/*
+ * task_term() is called after termination of application task.
+ *	It is preceded by --task-epilog (from srun command line)
+ *	followed by TaskEpilog program (from slurm.conf).
+ */
+extern int task_post_term (slurmd_job_t *job)
+{
+	return SLURM_SUCCESS;
+}
+
+/*
+ * task_post_step() is called after termination of the step
+ * (all the task)
+ */
+extern int task_post_step (slurmd_job_t *job)
+{
+	fini();
+	return SLURM_SUCCESS;
+}
+
+extern char* task_cgroup_create_slurm_cg (xcgroup_ns_t* ns) {
+
+	/* we do it here as we do not have access to the conf structure */
+	/* in libslurm (src/common/xcgroup.c) */
+	xcgroup_t slurm_cg;
+	char* pre = (char*) xstrdup(slurm_cgroup_conf.cgroup_prepend);
+#ifdef MULTIPLE_SLURMD
+	if ( conf->node_name != NULL )
+		xstrsubstitute(pre,"%n", conf->node_name);
+	else {
+		xfree(pre);
+		pre = (char*) xstrdup("/slurm");
+	}
+#endif
+
+	/* create slurm cgroup in the ns (it could already exist) */
+	if (xcgroup_create(ns,&slurm_cg,pre,
+			   getuid(), getgid()) != XCGROUP_SUCCESS) {
+		xfree(pre);
+		return pre;
+	}
+	if (xcgroup_instanciate(&slurm_cg) != XCGROUP_SUCCESS) {
+		error("unable to build slurm cgroup for ns %s: %m",
+		      ns->subsystems);
+		xcgroup_destroy(&slurm_cg);
+		xfree(pre);
+		return pre;
+	}
+	else {
+		debug3("slurm cgroup %s successfully created for ns %s: %m",
+		       pre,ns->subsystems);
+		xcgroup_destroy(&slurm_cg);
+	}
+
+	return pre;
+}
diff --git a/src/plugins/task/cgroup/task_cgroup.h b/src/plugins/task/cgroup/task_cgroup.h
new file mode 100644
index 000000000..a65d3a4f2
--- /dev/null
+++ b/src/plugins/task/cgroup/task_cgroup.h
@@ -0,0 +1,46 @@
+/*****************************************************************************\
+ *  task_cgroup.h - cgroup common primitives for task/cgroup
+ *****************************************************************************
+ *  Copyright (C) 2009 CEA/DAM/DIF
+ *  Written by Matthieu Hautreux <matthieu.hautreux@cea.fr>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#if HAVE_CONFIG_H
+#   include "config.h"
+#endif
+
+#ifndef _TASK_CGROUP_H_
+#define _TASK_CGROUP_H_
+
+extern char* task_cgroup_create_slurm_cg (xcgroup_ns_t* ns);
+
+#endif
diff --git a/src/plugins/task/cgroup/task_cgroup_cpuset.c b/src/plugins/task/cgroup/task_cgroup_cpuset.c
new file mode 100644
index 000000000..78df78aea
--- /dev/null
+++ b/src/plugins/task/cgroup/task_cgroup_cpuset.c
@@ -0,0 +1,749 @@
+/***************************************************************************** \
+ *  task_cgroup_cpuset.c - cpuset cgroup subsystem for task/cgroup
+ *****************************************************************************
+ *  Copyright (C) 2009 CEA/DAM/DIF
+ *  Written by Matthieu Hautreux <matthieu.hautreux@cea.fr>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#if HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#define _GNU_SOURCE
+#include <sched.h>
+#include <sys/types.h>
+
+#include "slurm/slurm_errno.h"
+#include "slurm/slurm.h"
+#include "src/slurmd/slurmstepd/slurmstepd_job.h"
+#include "src/slurmd/slurmd/slurmd.h"
+
+#include "src/common/xstring.h"
+#include "src/common/xcgroup_read_config.h"
+#include "src/common/xcgroup.h"
+#include "src/common/xcpuinfo.h"
+
+#include "task_cgroup.h"
+
+#ifdef HAVE_HWLOC
+#include <hwloc.h>
+#include <hwloc/glibc-sched.h>
+#endif
+
+#ifndef PATH_MAX
+#define PATH_MAX 256
+#endif
+
+static char user_cgroup_path[PATH_MAX];
+static char job_cgroup_path[PATH_MAX];
+static char jobstep_cgroup_path[PATH_MAX];
+
+static xcgroup_ns_t cpuset_ns;
+
+static xcgroup_t user_cpuset_cg;
+static xcgroup_t job_cpuset_cg;
+static xcgroup_t step_cpuset_cg;
+
+static int _xcgroup_cpuset_init(xcgroup_t* cg);
+
+extern int task_cgroup_cpuset_init(slurm_cgroup_conf_t *slurm_cgroup_conf)
+{
+	char release_agent_path[PATH_MAX];
+
+	/* initialize cpuinfo internal data */
+	if (xcpuinfo_init() != XCPUINFO_SUCCESS) {
+		return SLURM_ERROR;
+	}
+
+	/* initialize user/job/jobstep cgroup relative paths */
+	user_cgroup_path[0]='\0';
+	job_cgroup_path[0]='\0';
+	jobstep_cgroup_path[0]='\0';
+
+	/* initialize cpuset cgroup namespace */
+	release_agent_path[0]='\0';
+	if (snprintf(release_agent_path,PATH_MAX,"%s/release_cpuset",
+		      slurm_cgroup_conf->cgroup_release_agent) >= PATH_MAX) {
+		error("task/cgroup: unable to build cpuset release agent path");
+		goto error;
+	}
+	if (xcgroup_ns_create(slurm_cgroup_conf, &cpuset_ns, "/cpuset", "",
+			       "cpuset",release_agent_path) !=
+	     XCGROUP_SUCCESS) {
+		error("task/cgroup: unable to create cpuset namespace");
+		goto error;
+	}
+
+	/* check that cpuset cgroup namespace is available */
+	if (! xcgroup_ns_is_available(&cpuset_ns)) {
+		if (slurm_cgroup_conf->cgroup_automount) {
+			if (xcgroup_ns_mount(&cpuset_ns)) {
+				error("task/cgroup: unable to mount cpuset "
+				      "namespace");
+				goto clean;
+			}
+			info("task/cgroup: cpuset namespace is now mounted");
+		} else {
+			error("task/cgroup: cpuset namespace not mounted. "
+			      "aborting");
+			goto clean;
+		}
+	}
+
+	return SLURM_SUCCESS;
+
+clean:
+	xcgroup_ns_destroy(&cpuset_ns);
+
+error:
+	xcpuinfo_fini();
+	return SLURM_ERROR;
+}
+
+extern int task_cgroup_cpuset_fini(slurm_cgroup_conf_t *slurm_cgroup_conf)
+{
+
+	if (user_cgroup_path[0] != '\0')
+		xcgroup_destroy(&user_cpuset_cg);
+	if (job_cgroup_path[0] != '\0')
+		xcgroup_destroy(&job_cpuset_cg);
+	if (jobstep_cgroup_path[0] != '\0')
+		xcgroup_destroy(&step_cpuset_cg);
+
+	user_cgroup_path[0]='\0';
+	job_cgroup_path[0]='\0';
+	jobstep_cgroup_path[0]='\0';
+
+	xcgroup_ns_destroy(&cpuset_ns);
+
+	xcpuinfo_fini();
+	return SLURM_SUCCESS;
+}
+
+extern int task_cgroup_cpuset_create(slurmd_job_t *job)
+{
+	int rc;
+	int fstatus = SLURM_ERROR;
+
+	xcgroup_t cpuset_cg;
+
+	uint32_t jobid = job->jobid;
+	uint32_t stepid = job->stepid;
+	uid_t uid = job->uid;
+	uid_t gid = job->gid;
+	char* user_alloc_cores = NULL;
+	char* job_alloc_cores = NULL;
+	char* step_alloc_cores = NULL;
+
+	char* cpus = NULL;
+	size_t cpus_size;
+
+	char* slurm_cgpath ;
+	xcgroup_t slurm_cg;
+
+	/* create slurm root cg in this cg namespace */
+	slurm_cgpath = task_cgroup_create_slurm_cg(&cpuset_ns);
+	if ( slurm_cgpath == NULL ) {
+		return SLURM_ERROR;
+	}
+
+	/* check that this cgroup has cpus allowed or initialize them */
+	if (xcgroup_load(&cpuset_ns,&slurm_cg,slurm_cgpath)
+	    != XCGROUP_SUCCESS) {
+		error("task/cgroup: unable to load slurm cpuset xcgroup");
+		xfree(slurm_cgpath);
+		return SLURM_ERROR;
+	}
+	rc = xcgroup_get_param(&slurm_cg,"cpuset.cpus",&cpus,&cpus_size);
+	if (rc != XCGROUP_SUCCESS || cpus_size == 1) {
+		/* initialize the cpuset as it was nonexistent */
+		if (_xcgroup_cpuset_init(&slurm_cg) !=
+		    XCGROUP_SUCCESS) {
+			xfree(slurm_cgpath);
+			xcgroup_destroy(&slurm_cg);
+			return SLURM_ERROR;
+		}
+	}
+	xfree(cpus);
+
+	/* build user cgroup relative path if not set (should not be) */
+	if (*user_cgroup_path == '\0') {
+		if (snprintf(user_cgroup_path, PATH_MAX,
+			     "%s/uid_%u", slurm_cgpath, uid) >= PATH_MAX) {
+			error("unable to build uid %u cgroup relative "
+			      "path : %m", uid);
+			xfree(slurm_cgpath);
+			return SLURM_ERROR;
+		}
+	}
+	xfree(slurm_cgpath);
+
+	/* build job cgroup relative path if not set (should not be) */
+	if (*job_cgroup_path == '\0') {
+		if (snprintf(job_cgroup_path,PATH_MAX,"%s/job_%u",
+			      user_cgroup_path,jobid) >= PATH_MAX) {
+			error("task/cgroup: unable to build job %u cpuset "
+			      "cg relative path : %m",jobid);
+			return SLURM_ERROR;
+		}
+	}
+
+	/* build job step cgroup relative path (should not be) */
+	if (*jobstep_cgroup_path == '\0') {
+		if (stepid == NO_VAL) {
+			if (snprintf(jobstep_cgroup_path, PATH_MAX,
+				     "%s/step_batch", job_cgroup_path)
+			    >= PATH_MAX) {
+				error("task/cgroup: unable to build job step"
+				      " %u.batch cpuset cg relative path: %m",
+				      jobid);
+				return SLURM_ERROR;
+			}
+		} else {
+			if (snprintf(jobstep_cgroup_path, PATH_MAX, "%s/step_%u",
+				     job_cgroup_path, stepid) >= PATH_MAX) {
+				error("task/cgroup: unable to build job step"
+				      " %u.%u cpuset cg relative path: %m",
+				      jobid, stepid);
+				return SLURM_ERROR;
+			}
+		}
+	}
+
+	/*
+	 * create cpuset root cg and lock it
+	 *
+	 * we will keep the lock until the end to avoid the effect of a release
+	 * agent that would remove an existing cgroup hierarchy while we are
+	 * setting it up. As soon as the step cgroup is created, we can release
+	 * the lock.
+	 * Indeed, consecutive slurm steps could result in cg being removed
	 * between the next EEXIST instantiation and the first addition of
+	 * a task. The release_agent will have to lock the root cpuset cgroup
+	 * to avoid this scenario.
+	 */
+	if (xcgroup_create(&cpuset_ns,&cpuset_cg,"",0,0) != XCGROUP_SUCCESS) {
+		error("task/cgroup: unable to create root cpuset xcgroup");
+		return SLURM_ERROR;
+	}
+	if (xcgroup_lock(&cpuset_cg) != XCGROUP_SUCCESS) {
+		xcgroup_destroy(&cpuset_cg);
+		error("task/cgroup: unable to lock root cpuset cg");
+		return SLURM_ERROR;
+	}
+
+	/*
+	 * build job and job steps allocated cores lists
+	 */
+	debug("task/cgroup: job abstract cores are '%s'",
+	      job->job_alloc_cores);
+	debug("task/cgroup: step abstract cores are '%s'",
+	      job->step_alloc_cores);
+	if (xcpuinfo_abs_to_mac(job->job_alloc_cores,
+				 &job_alloc_cores) != XCPUINFO_SUCCESS) {
+		error("task/cgroup: unable to build job physical cores");
+		goto error;
+	}
+	if (xcpuinfo_abs_to_mac(job->step_alloc_cores,
+				 &step_alloc_cores) != XCPUINFO_SUCCESS) {
+		error("task/cgroup: unable to build step physical cores");
+		goto error;
+	}
+	debug("task/cgroup: job physical cores are '%s'",
+	      job->job_alloc_cores);
+	debug("task/cgroup: step physical cores are '%s'",
+	      job->step_alloc_cores);
+
+	/*
+	 * create user cgroup in the cpuset ns (it could already exist)
+	 */
+	if (xcgroup_create(&cpuset_ns,&user_cpuset_cg,
+			    user_cgroup_path,
+			    getuid(),getgid()) != XCGROUP_SUCCESS) {
+		goto error;
+	}
+	if (xcgroup_instanciate(&user_cpuset_cg) != XCGROUP_SUCCESS) {
+		xcgroup_destroy(&user_cpuset_cg);
+		goto error;
+	}
+
+	/*
+	 * check that user's cpuset cgroup is consistent and add the job cores
+	 */
+	rc = xcgroup_get_param(&user_cpuset_cg,"cpuset.cpus",&cpus,&cpus_size);
+	if (rc != XCGROUP_SUCCESS || cpus_size == 1) {
+		/* initialize the cpuset as it was nonexistent */
+		if (_xcgroup_cpuset_init(&user_cpuset_cg) !=
+		     XCGROUP_SUCCESS) {
+			xcgroup_delete(&user_cpuset_cg);
+			xcgroup_destroy(&user_cpuset_cg);
+			goto error;
+		}
+	}
+	user_alloc_cores = xstrdup(job_alloc_cores);
+	if (cpus != NULL && cpus_size > 1) {
+		cpus[cpus_size-1]='\0';
+		xstrcat(user_alloc_cores,",");
+		xstrcat(user_alloc_cores,cpus);
+	}
+	xcgroup_set_param(&user_cpuset_cg,"cpuset.cpus",user_alloc_cores);
+	xfree(cpus);
+
+	/*
+	 * create job cgroup in the cpuset ns (it could already exist)
+	 */
+	if (xcgroup_create(&cpuset_ns,&job_cpuset_cg,
+			    job_cgroup_path,
+			    getuid(),getgid()) != XCGROUP_SUCCESS) {
+		xcgroup_destroy(&user_cpuset_cg);
+		goto error;
+	}
+	if (xcgroup_instanciate(&job_cpuset_cg) != XCGROUP_SUCCESS) {
+		xcgroup_destroy(&user_cpuset_cg);
+		xcgroup_destroy(&job_cpuset_cg);
+		goto error;
+	}
+	if (_xcgroup_cpuset_init(&job_cpuset_cg) != XCGROUP_SUCCESS) {
+		xcgroup_destroy(&user_cpuset_cg);
+		xcgroup_destroy(&job_cpuset_cg);
+		goto error;
+	}
+	xcgroup_set_param(&job_cpuset_cg,"cpuset.cpus",job_alloc_cores);
+
+	/*
+	 * create step cgroup in the cpuset ns (it should not exist)
+	 * use job's user uid/gid to enable tasks cgroups creation by
+	 * the user inside the step cgroup owned by root
+	 */
+	if (xcgroup_create(&cpuset_ns,&step_cpuset_cg,
+			    jobstep_cgroup_path,
+			    uid,gid) != XCGROUP_SUCCESS) {
+		/* do not delete user/job cgroup as */
+		/* they can exist for other steps */
+		xcgroup_destroy(&user_cpuset_cg);
+		xcgroup_destroy(&job_cpuset_cg);
+		goto error;
+	}
+	if (xcgroup_instanciate(&step_cpuset_cg) != XCGROUP_SUCCESS) {
+		xcgroup_destroy(&user_cpuset_cg);
+		xcgroup_destroy(&job_cpuset_cg);
+		xcgroup_destroy(&step_cpuset_cg);
+		goto error;
+	}
+	if (_xcgroup_cpuset_init(&step_cpuset_cg) != XCGROUP_SUCCESS) {
+		xcgroup_destroy(&user_cpuset_cg);
+		xcgroup_destroy(&job_cpuset_cg);
+		xcgroup_delete(&step_cpuset_cg);
+		xcgroup_destroy(&step_cpuset_cg);
+		goto error;
+	}
+	xcgroup_set_param(&step_cpuset_cg,"cpuset.cpus",step_alloc_cores);
+
+	/* attach the slurmstepd to the step cpuset cgroup */
+	pid_t pid = getpid();
+	rc = xcgroup_add_pids(&step_cpuset_cg,&pid,1);
+	if (rc != XCGROUP_SUCCESS) {
+		error("task/cgroup: unable to add slurmstepd to cpuset cg '%s'",
+		      step_cpuset_cg.path);
+		fstatus = SLURM_ERROR;
+	} else
+		fstatus = SLURM_SUCCESS;
+
+error:
+	xcgroup_unlock(&cpuset_cg);
+	xcgroup_destroy(&cpuset_cg);
+
+	xfree(user_alloc_cores);
+	xfree(job_alloc_cores);
+	xfree(step_alloc_cores);
+
+	return fstatus;
+}
+
+extern int task_cgroup_cpuset_attach_task(slurmd_job_t *job)
+{
+	int fstatus = SLURM_ERROR;
+
+	/* tasks are automatically attached as slurmstepd is in the step cg */
+	fstatus = SLURM_SUCCESS;
+
+	return fstatus;
+}
+
+/* affinity should be set using sched_setaffinity to not force */
+/* user to have to play with the cgroup hierarchy to modify it */
+extern int task_cgroup_cpuset_set_task_affinity(slurmd_job_t *job)
+{
+	int fstatus = SLURM_ERROR;
+
+#ifndef HAVE_HWLOC
+
+	error("task/cgroup: plugin not compiled with hwloc support, "
+	      "skipping affinity.");
+	return fstatus;
+
+#else
+	uint32_t i;
+	uint32_t nldoms;
+	uint32_t nsockets;
+	uint32_t ncores;
+	uint32_t npus;
+	uint32_t nobj;
+
+	uint32_t pfirst,plast;
+	uint32_t taskid = job->envtp->localid;
+	uint32_t jntasks = job->node_tasks;
+	uint32_t jnpus = jntasks * job->cpus_per_task;
+	pid_t    pid = job->envtp->task_pid;
+
+	cpu_bind_type_t bind_type;
+	int verbose;
+
+	hwloc_topology_t topology;
+#if HWLOC_API_VERSION <= 0x00010000
+	hwloc_cpuset_t cpuset,ct;
+#else
+	hwloc_bitmap_t cpuset,ct;
+#endif
+	hwloc_obj_t obj;
+	struct hwloc_obj *pobj;
+	hwloc_obj_type_t hwtype;
+	hwloc_obj_type_t req_hwtype;
+	int hwdepth;
+
+	size_t tssize;
+	cpu_set_t ts;
+
+	bind_type = job->cpu_bind_type ;
+	if (conf->task_plugin_param & CPU_BIND_VERBOSE ||
+	    bind_type & CPU_BIND_VERBOSE)
+		verbose = 1 ;
+
+	if (bind_type & CPU_BIND_NONE) {
+		if (verbose)
+			info("task/cgroup: task[%u] is requesting no affinity",
+			     taskid);
+		return 0;
+	} else if (bind_type & CPU_BIND_TO_THREADS) {
+		if (verbose)
+			info("task/cgroup: task[%u] is requesting "
+			     "thread level binding",taskid);
+		req_hwtype = HWLOC_OBJ_PU;
+	} else if (bind_type & CPU_BIND_TO_CORES) {
+		if (verbose)
+			info("task/cgroup: task[%u] is requesting "
+			     "core level binding",taskid);
+		req_hwtype = HWLOC_OBJ_CORE;
+	} else if (bind_type & CPU_BIND_TO_SOCKETS) {
+		if (verbose)
+			info("task/cgroup: task[%u] is requesting "
+			     "socket level binding",taskid);
+		req_hwtype = HWLOC_OBJ_SOCKET;
+	} else if (bind_type & CPU_BIND_TO_LDOMS) {
+		if (verbose)
+			info("task/cgroup: task[%u] is requesting "
+			     "ldom level binding",taskid);
+		req_hwtype = HWLOC_OBJ_NODE;
+	} else {
+		if (verbose)
+			info("task/cgroup: task[%u] using core level binding"
+			     " by default",taskid);
+		req_hwtype = HWLOC_OBJ_CORE;
+	}
+
+	/* Allocate and initialize hwloc objects */
+	hwloc_topology_init(&topology);
+#if HWLOC_API_VERSION <= 0x00010000
+	cpuset = hwloc_cpuset_alloc() ;
+#else
+	cpuset = hwloc_bitmap_alloc() ;
+#endif
+
+	/*
+	 * Perform the topology detection. It will only get allowed PUs.
+	 * Detect in the same time the granularity to use for binding.
+	 * The granularity can be relaxed from threads to cores if enough
+	 * cores are available as with hyperthread support, ntasks-per-core
+	 * param can let us have access to more threads per core for each
+	 * task
+	 * Revert back to machine granularity if no finer-grained granularity
+	 * matching the request is found. This will result in no affinity
+	 * applied.
+	 * The detected granularity will be used to find where to best place
+	 * the task, then the cpu_bind option will be used to relax the
+	 * affinity constraint and use more PUs. (i.e. use a core granularity
+	 * to dispatch the tasks across the sockets and then provide access
+	 * to each task to the cores of its socket.)
+	 */
+	hwloc_topology_load(topology);
+	npus = (uint32_t) hwloc_get_nbobjs_by_type(topology,
+						   HWLOC_OBJ_PU);
+	ncores = (uint32_t) hwloc_get_nbobjs_by_type(topology,
+						     HWLOC_OBJ_CORE);
+	nsockets = (uint32_t) hwloc_get_nbobjs_by_type(topology,
+						       HWLOC_OBJ_SOCKET);
+	nldoms = (uint32_t) hwloc_get_nbobjs_by_type(topology,
+						     HWLOC_OBJ_NODE);
+	hwtype = HWLOC_OBJ_MACHINE;
+	nobj = 1;
+	if (npus >= jnpus || bind_type & CPU_BIND_TO_THREADS) {
+		hwtype = HWLOC_OBJ_PU;
+		nobj = npus;
+	}
+	if (ncores >= jnpus || bind_type & CPU_BIND_TO_CORES) {
+		hwtype = HWLOC_OBJ_CORE;
+		nobj = ncores;
+	}
+	if (nsockets >= jntasks &&
+	     bind_type & CPU_BIND_TO_SOCKETS) {
+		hwtype = HWLOC_OBJ_SOCKET;
+		nobj = nsockets;
+	}
+	/*
+	 * HWLOC returns all the NUMA nodes available regardless of the
+	 * number of underlying sockets available (regardless of the allowed
+	 * resources). So there is no guarantee that each ldom will be populated
+	 * with usable sockets. So add a simple check that at least ensure that
+	 * we have as many sockets as ldoms before moving to ldoms granularity
+	 */
+	if (nldoms >= jntasks &&
+	     nsockets >= nldoms &&
+	     bind_type & CPU_BIND_TO_LDOMS) {
+		hwtype = HWLOC_OBJ_NODE;
+		nobj = nldoms;
+	}
+
+	/*
+	 * Perform a block binding on the detected object respecting the
+	 * granularity.
+	 * If not enough objects to do the job, revert to no affinity mode
+	 */
+	if (hwloc_compare_types(hwtype,HWLOC_OBJ_MACHINE) == 0) {
+
+		info("task/cgroup: task[%u] disabling affinity because of %s "
+		     "granularity",taskid,hwloc_obj_type_string(hwtype));
+
+	} else if (hwloc_compare_types(hwtype,HWLOC_OBJ_CORE) >= 0 &&
+		    jnpus > nobj) {
+
+		info("task/cgroup: task[%u] not enough %s objects, disabling "
+		     "affinity",taskid,hwloc_obj_type_string(hwtype));
+
+	} else {
+
+		if (verbose) {
+			info("task/cgroup: task[%u] using %s granularity",
+			     taskid,hwloc_obj_type_string(hwtype));
+		}
+		if (hwloc_compare_types(hwtype,HWLOC_OBJ_CORE) >= 0) {
+			/* cores or threads granularity */
+			pfirst = taskid *  job->cpus_per_task ;
+			plast = pfirst + job->cpus_per_task - 1;
+		} else {
+			/* sockets or ldoms granularity */
+			pfirst = taskid;
+			plast = pfirst;
+		}
+
+		hwdepth = hwloc_get_type_depth(topology,hwtype);
+		for (i = pfirst; i <= plast && i < nobj ; i++) {
+			obj = hwloc_get_obj_by_depth(topology,hwdepth,(int)i);
+
+			/* if requested binding overlap the granularity */
+			/* use the ancestor cpuset instead of the object one */
+			if (hwloc_compare_types(hwtype,req_hwtype) > 0) {
+
+				/* Get the parent object of req_hwtype or the */
+				/* one just above if not found (meaning of >0)*/
+				/* (useful for ldoms binding with !NUMA nodes)*/
+				pobj = obj->parent;
+				while (pobj != NULL &&
+					hwloc_compare_types(pobj->type,
+							    req_hwtype) > 0)
+					pobj = pobj->parent;
+
+				if (pobj != NULL) {
+					if (verbose)
+						info("task/cgroup: task[%u] "
+						     "higher level %s found",
+						     taskid,
+						     hwloc_obj_type_string(
+							     pobj->type));
+#if HWLOC_API_VERSION <= 0x00010000
+					ct = hwloc_cpuset_dup(pobj->
+							      allowed_cpuset);
+					hwloc_cpuset_or(cpuset,cpuset,ct);
+					hwloc_cpuset_free(ct);
+#else
+					ct = hwloc_bitmap_dup(pobj->
+							      allowed_cpuset);
+					hwloc_bitmap_or(cpuset,cpuset,ct);
+					hwloc_bitmap_free(ct);
+#endif
+				} else {
+					/* should not be executed */
+					if (verbose)
+						info("task/cgroup: task[%u] "
+						     "no higher level found",
+						     taskid);
+#if HWLOC_API_VERSION <= 0x00010000
+					ct = hwloc_cpuset_dup(obj->
+							      allowed_cpuset);
+					hwloc_cpuset_or(cpuset,cpuset,ct);
+					hwloc_cpuset_free(ct);
+#else
+					ct = hwloc_bitmap_dup(obj->
+							      allowed_cpuset);
+					hwloc_bitmap_or(cpuset,cpuset,ct);
+					hwloc_bitmap_free(ct);
+#endif
+				}
+
+			} else {
+#if HWLOC_API_VERSION <= 0x00010000
+				ct = hwloc_cpuset_dup(obj->allowed_cpuset);
+				hwloc_cpuset_or(cpuset,cpuset,ct);
+				hwloc_cpuset_free(ct);
+#else
+				ct = hwloc_bitmap_dup(obj->allowed_cpuset);
+				hwloc_bitmap_or(cpuset,cpuset,ct);
+				hwloc_bitmap_free(ct);
+#endif
+			}
+		}
+
+		char *str;
+#if HWLOC_API_VERSION <= 0x00010000
+		hwloc_cpuset_asprintf(&str,cpuset);
+#else
+		hwloc_bitmap_asprintf(&str,cpuset);
+#endif
+		tssize = sizeof(cpu_set_t);
+		if (hwloc_cpuset_to_glibc_sched_affinity(topology,cpuset,
+							  &ts,tssize) == 0) {
+			fstatus = SLURM_SUCCESS;
+			if (sched_setaffinity(pid,tssize,&ts)) {
+				error("task/cgroup: task[%u] unable to set "
+				      "taskset '%s'",taskid,str);
+				fstatus = SLURM_ERROR;
+			} else if (verbose) {
+				info("task/cgroup: task[%u] taskset '%s' is set"
+				     ,taskid,str);
+			}
+		} else {
+			error("task/cgroup: task[%u] unable to build "
+			      "taskset '%s'",taskid,str);
+			fstatus = SLURM_ERROR;
+		}
+		free(str);
+
+	}
+
+	/* Destroy hwloc objects */
+#if HWLOC_API_VERSION <= 0x00010000
+	hwloc_cpuset_free(cpuset);
+#else
+	hwloc_bitmap_free(cpuset);
+#endif
+	hwloc_topology_destroy(topology);
+
+	return fstatus;
+#endif
+
+}
+
+
+/* when cgroups are configured with cpuset, at least
+ * cpuset.cpus and cpuset.mems must be set or the cgroup
+ * will not be available at all.
+ * we duplicate the ancestor configuration in the init step */
+static int _xcgroup_cpuset_init(xcgroup_t* cg)
+{
+	int fstatus,i;
+
+	char* cpuset_metafiles[] = {
+		"cpuset.cpus",
+		"cpuset.mems"
+	};
+	char* cpuset_meta;
+	char* cpuset_conf;
+	size_t csize;
+
+	xcgroup_t acg;
+	char* acg_name;
+	char* p;
+
+	fstatus = XCGROUP_ERROR;
+
+	/* load ancestor cg */
+	acg_name = (char*) xstrdup(cg->name);
+	p = rindex(acg_name,'/');
+	if (p == NULL) {
+		debug2("task/cgroup: unable to get ancestor path for "
+		       "cpuset cg '%s' : %m",cg->path);
+		return fstatus;
+	} else
+		*p = '\0';
+	if (xcgroup_load(cg->ns,&acg,acg_name) != XCGROUP_SUCCESS) {
+		debug2("task/cgroup: unable to load ancestor for "
+		       "cpuset cg '%s' : %m",cg->path);
+		return fstatus;
+	}
+
+	/* inherits ancestor params */
+	for (i = 0 ; i < 2 ; i++) {
+		cpuset_meta = cpuset_metafiles[i];
+		if (xcgroup_get_param(&acg,cpuset_meta,
+				       &cpuset_conf,&csize)
+		     != XCGROUP_SUCCESS) {
+			debug2("task/cgroup: assuming no cpuset cg "
+			       "support for '%s'",acg.path);
+			xcgroup_destroy(&acg);
+			return fstatus;
+		}
+		if (csize > 0)
+			cpuset_conf[csize-1]='\0';
+		if (xcgroup_set_param(cg,cpuset_meta,cpuset_conf)
+		     != XCGROUP_SUCCESS) {
+			debug2("task/cgroup: unable to write %s configuration "
+			       "(%s) for cpuset cg '%s'",cpuset_meta,
+			       cpuset_conf,cg->path);
+			xcgroup_destroy(&acg);
+			xfree(cpuset_conf);
+			return fstatus;
+		}
+		xfree(cpuset_conf);
+	}
+
+	xcgroup_destroy(&acg);
+	return XCGROUP_SUCCESS;
+}
diff --git a/src/plugins/task/cgroup/task_cgroup_cpuset.h b/src/plugins/task/cgroup/task_cgroup_cpuset.h
new file mode 100644
index 000000000..a2452b55c
--- /dev/null
+++ b/src/plugins/task/cgroup/task_cgroup_cpuset.h
@@ -0,0 +1,61 @@
+/*****************************************************************************\
+ *  task_cgroup_cpuset.h - cpuset cgroup subsystem primitives for task/cgroup
+ *****************************************************************************
+ *  Copyright (C) 2009 CEA/DAM/DIF
+ *  Written by Matthieu Hautreux <matthieu.hautreux@cea.fr>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#if HAVE_CONFIG_H
+#   include "config.h"
+#endif
+
+#ifndef _TASK_CGROUP_CPUSET_H_
+#define _TASK_CGROUP_CPUSET_H_
+
+#include "src/common/xcgroup_read_config.h"
+
+/* initialize cpuset subsystem of task/cgroup */
+extern int task_cgroup_cpuset_init(slurm_cgroup_conf_t *slurm_cgroup_conf);
+
+/* release cpuset subsystem resources */
+extern int task_cgroup_cpuset_fini(slurm_cgroup_conf_t *slurm_cgroup_conf);
+
+/* create user/job/jobstep cpuset cgroups */
+extern int task_cgroup_cpuset_create(slurmd_job_t *job);
+
+/* create a task cgroup and attach the task to it */
+extern int task_cgroup_cpuset_attach_task(slurmd_job_t *job);
+
+/* set a task affinity based on its local id and job information */
+extern int task_cgroup_cpuset_set_task_affinity(slurmd_job_t *job);
+
+#endif
diff --git a/src/plugins/task/cgroup/task_cgroup_devices.c b/src/plugins/task/cgroup/task_cgroup_devices.c
new file mode 100644
index 000000000..6a39f87ea
--- /dev/null
+++ b/src/plugins/task/cgroup/task_cgroup_devices.c
@@ -0,0 +1,509 @@
+/***************************************************************************** \
+ *  task_cgroup_devices.c - devices cgroup subsystem for task/cgroup
+ *****************************************************************************
+ *  Copyright (C) 2011 BULL
+ *  Written by Yiannis Georgiou <yiannis.georgiou@bull.fr>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#if HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#define _GNU_SOURCE
+#include <sched.h>
+#include <glob.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <slurm/slurm_errno.h>
+#include <slurm/slurm.h>
+#include "src/slurmd/slurmstepd/slurmstepd_job.h"
+#include "src/slurmd/slurmd/slurmd.h"
+
+#include "src/common/xstring.h"
+#include "src/common/xcgroup_read_config.h"
+#include "src/common/xcgroup.h"
+#include "src/common/xcpuinfo.h"
+
+#include "src/common/gres.h"
+#include "src/common/list.h"
+
+#include "task_cgroup.h"
+
+#ifndef PATH_MAX
+#define PATH_MAX 256
+#endif
+
+static char user_cgroup_path[PATH_MAX];
+static char job_cgroup_path[PATH_MAX];
+static char jobstep_cgroup_path[PATH_MAX];
+static char cgroup_allowed_devices_file[PATH_MAX];
+
+static xcgroup_ns_t devices_ns;
+
+static xcgroup_t user_devices_cg;
+static xcgroup_t job_devices_cg;
+static xcgroup_t step_devices_cg;
+
+static void _calc_device_major(char *dev_path[PATH_MAX],
+			       char *dev_major[PATH_MAX],
+			       int lines);
+
+static int read_allowed_devices_file(char *allowed_devices[PATH_MAX]);
+
+extern int task_cgroup_devices_init(slurm_cgroup_conf_t *slurm_cgroup_conf)
+{
+	char release_agent_path[PATH_MAX];
+
+	/* initialize cpuinfo internal data */
+	if ( xcpuinfo_init() != XCPUINFO_SUCCESS )
+		return SLURM_ERROR;
+
+	/* initialize user/job/jobstep cgroup relative paths */
+	user_cgroup_path[0] = '\0';
+	job_cgroup_path[0] = '\0';
+	jobstep_cgroup_path[0] = '\0';
+	/* initialize devices cgroup namespace */
+	release_agent_path[0] = '\0';
+	/* initialize allowed_devices_filename */
+	cgroup_allowed_devices_file[0] = '\0';
+
+	strcpy(cgroup_allowed_devices_file, slurm_cgroup_conf->allowed_devices_file);
+	
+	if ( snprintf(release_agent_path,PATH_MAX,"%s/release_devices",
+		      slurm_cgroup_conf->cgroup_release_agent) >= PATH_MAX ) {
+		error("task/cgroup: unable to build devices release agent path");
+		goto error;
+	}
+	if (xcgroup_ns_create(slurm_cgroup_conf, &devices_ns, "/devices","",
+			       "devices",release_agent_path) != 
+	     XCGROUP_SUCCESS ) {
+		error("task/cgroup: unable to create devices namespace");
+		goto error;
+	}
+
+	/* check that devices cgroup namespace is available */
+	if ( ! xcgroup_ns_is_available(&devices_ns) ) {
+		if ( slurm_cgroup_conf->cgroup_automount ) {
+			if ( xcgroup_ns_mount(&devices_ns) ) {
+				error("task/cgroup: unable to mount devices "
+				      "namespace");
+				goto clean;
+			}
+			info("task/cgroup: devices namespace is now mounted");
+		}
+		else {
+			error("task/cgroup: devices namespace not mounted. "
+			      "aborting");
+			goto clean;
+		}
+	}
+
+	return SLURM_SUCCESS;
+
+clean:
+	xcgroup_ns_destroy(&devices_ns);
+
+error:
+	xcpuinfo_fini();
+	return SLURM_ERROR;
+}
+
+extern int task_cgroup_devices_fini(slurm_cgroup_conf_t *slurm_cgroup_conf)
+{
+
+	if ( user_cgroup_path[0] != '\0' )
+		xcgroup_destroy(&user_devices_cg);
+	if ( job_cgroup_path[0] != '\0' )
+		xcgroup_destroy(&job_devices_cg);
+	if ( jobstep_cgroup_path[0] != '\0' )
+		xcgroup_destroy(&step_devices_cg);
+		
+	user_cgroup_path[0] = '\0';
+	job_cgroup_path[0] = '\0';
+	jobstep_cgroup_path[0] = '\0';
+
+	cgroup_allowed_devices_file[0] = '\0';
+
+	xcgroup_ns_destroy(&devices_ns);
+	
+	xcpuinfo_fini();
+	return SLURM_SUCCESS;
+}
+
+extern int task_cgroup_devices_create(slurmd_job_t *job)
+{
+	int f, k, rc, gres_conf_lines, allow_lines;
+	int fstatus = SLURM_ERROR;
+	char *gres_name[PATH_MAX];
+	char *gres_cgroup[PATH_MAX], *dev_path[PATH_MAX]; 
+	char *allowed_devices[PATH_MAX], *allowed_dev_major[PATH_MAX];
+	
+	int *gres_bit_alloc = NULL;
+	int *gres_step_bit_alloc = NULL;
+	int *gres_count = NULL;
+
+	xcgroup_t devices_cg;
+	uint32_t jobid = job->jobid;
+	uint32_t stepid = job->stepid;
+	uid_t uid = job->uid;
+	uid_t gid = job->gid;
+
+	List job_gres_list = job->job_gres_list;
+	List step_gres_list = job->step_gres_list;
+
+	char* slurm_cgpath ;
+
+	/* create slurm root cg in this cg namespace */
+	slurm_cgpath = task_cgroup_create_slurm_cg(&devices_ns);
+	if ( slurm_cgpath == NULL ) {
+		return SLURM_ERROR;
+	}
+
+	/* build user cgroup relative path if not set (should not be) */
+	if (*user_cgroup_path == '\0') {
+		if (snprintf(user_cgroup_path, PATH_MAX,
+			     "%s/uid_%u", slurm_cgpath, uid) >= PATH_MAX) {
+			error("unable to build uid %u cgroup relative "
+			      "path : %m", uid);
+			xfree(slurm_cgpath);
+			return SLURM_ERROR;
+		}
+	}
+	xfree(slurm_cgpath);
+
+	/* build job cgroup relative path if not set (should not be) */
+	if ( *job_cgroup_path == '\0' ) {
+		if ( snprintf(job_cgroup_path,PATH_MAX, "%s/job_%u",
+			      user_cgroup_path,jobid) >= PATH_MAX ) {
+			error("task/cgroup: unable to build job %u devices "
+			      "cg relative path : %m", jobid);
+			return SLURM_ERROR;
+		}
+	}
+	
+	/* build job step cgroup relative path (should not be) */
+	if ( *jobstep_cgroup_path == '\0' ) {
+		if ( snprintf(jobstep_cgroup_path,PATH_MAX, "%s/step_%u",
+			      job_cgroup_path,stepid) >= PATH_MAX ) {
+			error("task/cgroup: unable to build job step %u "
+			      "devices cg relative path : %m",stepid);
+			return SLURM_ERROR;
+		}
+	}
+
+	/*
+	 * create devices root cg and lock it
+	 *
+	 * we will keep the lock until the end to avoid the effect of a release
+	 * agent that would remove an existing cgroup hierarchy while we are 
+	 * setting it up. As soon as the step cgroup is created, we can release
+	 * the lock.
+	 * Indeed, consecutive slurm steps could result in cg being removed 
+	 * between the next EEXIST instantiation and the first addition of 
+	 * a task. The release_agent will have to lock the root devices cgroup
+	 * to avoid this scenario.
+	 */
+	if ( xcgroup_create(&devices_ns, &devices_cg, "", 0, 0) !=
+	     XCGROUP_SUCCESS ) {
+		error("task/cgroup: unable to create root devices xcgroup");
+		return SLURM_ERROR;
+	}
+	if ( xcgroup_lock(&devices_cg) != XCGROUP_SUCCESS ) {
+		xcgroup_destroy(&devices_cg);
+		error("task/cgroup: unable to lock root devices cg");
+		return SLURM_ERROR;
+	}
+
+	info("task/cgroup: manage devices jor job '%u'",jobid);
+
+	 /* 
+	  * collect info concerning the gres.conf file 
+	  * the gres devices paths and the gres names
+	  */
+	gres_conf_lines = gres_plugin_node_config_devices_path(dev_path,
+							       gres_name,
+							       PATH_MAX);	
+
+	/* 
+	 * create the entry for cgroup devices subsystem with major minor
+	 */
+	_calc_device_major(dev_path,gres_cgroup,gres_conf_lines);
+
+	allow_lines = read_allowed_devices_file(allowed_devices);
+
+	/* 
+         * create the entry with major minor for the default allowed devices
+         * read from the file
+         */                      
+	_calc_device_major(allowed_devices,allowed_dev_major,allow_lines);
+
+	gres_count = xmalloc ( sizeof (int) * (gres_conf_lines) );
+
+	/* 
+	 * calculate the number of gres.conf records for each gres name
+	 *
+	 */			
+	f = 0;
+	gres_count[f] = 1;
+	for (k = 0; k < gres_conf_lines; k++) {
+		if ((k+1 < gres_conf_lines) &&
+		    (strcmp(gres_name[k],gres_name[k+1]) == 0))
+			gres_count[f]++;
+		if ((k+1 < gres_conf_lines) &&
+		    (strcmp(gres_name[k],gres_name[k+1]) != 0)) {
+			f++;
+			gres_count[f] = 1;
+		}
+	}
+
+	/* 
+	 * create user cgroup in the devices ns (it could already exist)
+	 */
+	if ( xcgroup_create(&devices_ns,&user_devices_cg,
+			    user_cgroup_path,
+			    getuid(),getgid()) != XCGROUP_SUCCESS ) {
+		goto error;
+	}
+	if ( xcgroup_instanciate(&user_devices_cg) != XCGROUP_SUCCESS ) {
+		xcgroup_destroy(&user_devices_cg);
+		goto error;
+	}
+
+
+	/* TODO
+	 * check that user's devices cgroup is consistent and allow the
+	 * appropriate devices
+	 */
+
+
+	/*
+	 * create job cgroup in the devices ns (it could already exist)
+	 */
+	if ( xcgroup_create(&devices_ns,&job_devices_cg,
+			    job_cgroup_path,
+			    getuid(), getgid()) != XCGROUP_SUCCESS ) {
+		xcgroup_destroy(&user_devices_cg);
+		goto error;
+	}
+	if ( xcgroup_instanciate(&job_devices_cg) != XCGROUP_SUCCESS ) {
+		xcgroup_destroy(&user_devices_cg);
+		xcgroup_destroy(&job_devices_cg);
+		goto error;
+	}
+
+	gres_bit_alloc = xmalloc ( sizeof (int) * (gres_conf_lines + 1));
+	
+	/* fetch information concerning the gres devices allocation for the job */
+	gres_plugin_job_state_file(job_gres_list, gres_bit_alloc, gres_count);
+
+	/* 
+	 * with the current cgroup devices subsystem design (whitelist only supported)
+	 * we need to allow all different devices that are supposed to be allowed by 
+	 * default. 	 
+	 */     
+	for (k = 0; k < allow_lines; k++) {
+		info("Default access allowed to device %s", allowed_dev_major[k]);
+		xcgroup_set_param(&job_devices_cg,"devices.allow",
+			allowed_dev_major[k]);
+	}
+
+	/* 
+         * allow or deny access to devices according to gres permissions for the job       
+         */			
+	for (k = 0; k < gres_conf_lines; k++) {
+		if (gres_bit_alloc[k] == 1) {
+			info("Allowing access to device %s", gres_cgroup[k]);
+			xcgroup_set_param(&job_devices_cg, "devices.allow",
+                                          gres_cgroup[k]);
+		} else {
+			info("Not allowing access to device %s", gres_cgroup[k]);
+			xcgroup_set_param(&job_devices_cg, "devices.deny",
+					  gres_cgroup[k]);
+		}
+	}
+
+	/* 
+	 * create step cgroup in the devices ns (it should not exist)
+	 * use job's user uid/gid to enable tasks cgroups creation by
+	 * the user inside the step cgroup owned by root
+	 */
+	if ( xcgroup_create(&devices_ns,&step_devices_cg,
+			    jobstep_cgroup_path,
+			    uid,gid) != XCGROUP_SUCCESS ) {
+		/* do not delete user/job cgroup as */
+		/* they can exist for other steps */
+		xcgroup_destroy(&user_devices_cg);
+		xcgroup_destroy(&job_devices_cg);
+		goto error;
+	}
+	if ( xcgroup_instanciate(&step_devices_cg) != XCGROUP_SUCCESS ) {
+		xcgroup_destroy(&user_devices_cg);
+		xcgroup_destroy(&job_devices_cg);
+		xcgroup_destroy(&step_devices_cg);
+		goto error;
+	}
+
+	
+	gres_step_bit_alloc = xmalloc ( sizeof (int) * (gres_conf_lines + 1));
+
+	/* fetch information concerning the gres devices allocation for the step */
+	gres_plugin_step_state_file(step_gres_list, gres_step_bit_alloc,
+				    gres_count);
+
+	
+	/* 
+         * with the current cgroup devices subsystem design (whitelist only supported)
+         * we need to allow all different devices that are supposed to be allowed by 
+         * default.      
+         */
+	for (k = 0; k < allow_lines; k++) {    
+		info("Default access allowed to device %s", allowed_dev_major[k]);
+                xcgroup_set_param(&step_devices_cg,"devices.allow",
+			allowed_dev_major[k]);
+        }
+
+	/* 
+     	 * allow or deny access to devices according to gres permissions for the step
+         */
+	for (k = 0; k < gres_conf_lines; k++) {
+		if (gres_step_bit_alloc[k] == 1){
+			info("Allowing access to device %s for step",
+			     gres_cgroup[k]);
+			xcgroup_set_param(&step_devices_cg, "devices.allow",
+                                          gres_cgroup[k]);
+		} else {
+			info("Not allowing access to device %s for step",
+			     gres_cgroup[k]);
+			xcgroup_set_param(&step_devices_cg, "devices.deny",
+					  gres_cgroup[k]);
+		}
+	}
+	
+	/* attach the slurmstepd to the step devices cgroup */
+	pid_t pid = getpid();
+	rc = xcgroup_add_pids(&step_devices_cg,&pid,1);
+	if ( rc != XCGROUP_SUCCESS ) {
+		error("task/cgroup: unable to add slurmstepd to devices cg '%s'",
+		      step_devices_cg.path);
+		fstatus = SLURM_ERROR;
+	} else {
+		fstatus = SLURM_SUCCESS;
+	}
+
+error:
+	xcgroup_unlock(&devices_cg);
+	xcgroup_destroy(&devices_cg);
+
+	xfree(gres_step_bit_alloc);
+	xfree(gres_bit_alloc);
+	return fstatus;
+}
+
+extern int task_cgroup_devices_attach_task(slurmd_job_t *job)
+{
+	int fstatus = SLURM_ERROR;
+
+	/* tasks are automatically attached as slurmstepd is in the step cg */
+	fstatus = SLURM_SUCCESS;
+
+	return fstatus;
+}
+
+static void _calc_device_major(char *dev_path[PATH_MAX],
+				char *dev_major[PATH_MAX],
+				int lines)
+{
+
+	int k, major, minor;
+	char str1[256], str2[256];
+	struct stat fs;
+
+	if (lines > PATH_MAX) {
+		error("task/cgroup: more devices configured than table size "
+		      "(%d > %d)", lines, PATH_MAX);
+		lines = PATH_MAX;
+	}
+	for (k = 0; k < lines; k++) {
+		stat(dev_path[k], &fs);		
+		major = (int)major(fs.st_rdev);
+		minor = (int)minor(fs.st_rdev);
+		debug3("device : %s major %d, minor %d\n", 
+			dev_path[k], major, minor);
+		if (S_ISBLK(fs.st_mode)) {
+			sprintf(str1, "b %d:", major);
+			//info("device is block ");
+		}
+		if (S_ISCHR(fs.st_mode)) {
+			sprintf(str1, "c %d:", major);
+			//info("device is character ");
+		}
+		sprintf(str2, "%d rwm", minor);
+		strcat(str1, str2);
+		dev_major[k] = xstrdup((char *)str1);		
+	}
+}
+
+
+static int read_allowed_devices_file(char **allowed_devices)
+{
+	
+	FILE *file = fopen (cgroup_allowed_devices_file, "r" );
+	int i, l, num_lines = 0;
+	char line[256];
+	glob_t globbuf;
+
+	for( i=0; i<256; i++ )
+		line[i] = '\0';
+
+	if ( file != NULL ){
+		while ( fgets ( line, sizeof line, file ) != NULL ){
+			line[strlen(line)-1] = '\0';
+			
+			/* global pattern matching and return the list of matches*/
+			if(glob(line, GLOB_NOSORT, NULL, &globbuf) != 0){
+				debug3("Device %s does not exist", line);	
+			}else{
+				for(l=0; l < globbuf.gl_pathc; l++){
+					allowed_devices[num_lines] =
+						xstrdup(globbuf.gl_pathv[l]);
+					num_lines++;
+				}
+			}
+		}
+		fclose ( file );
+	}
+	else
+		perror (cgroup_allowed_devices_file);
+
+	return num_lines;
+}
+
diff --git a/src/plugins/task/cgroup/task_cgroup_devices.h b/src/plugins/task/cgroup/task_cgroup_devices.h
new file mode 100644
index 000000000..f69a3d809
--- /dev/null
+++ b/src/plugins/task/cgroup/task_cgroup_devices.h
@@ -0,0 +1,58 @@
+/*****************************************************************************\
+ *  task_cgroup_devices.h - devices cgroup subsystem primitives for task/cgroup
+ *****************************************************************************
+ *  Copyright (C) 2009 CEA/DAM/DIF
+ *  Written by Matthieu Hautreux <matthieu.hautreux@cea.fr>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#if HAVE_CONFIG_H
+#   include "config.h"
+#endif
+
+#ifndef _TASK_CGROUP_DEVICES_H_
+#define _TASK_CGROUP_DEVICES_H_
+
+#include "src/common/xcgroup_read_config.h"
+
+/* initialize devices subsystem of task/cgroup */
+extern int task_cgroup_devices_init(slurm_cgroup_conf_t *slurm_cgroup_conf);
+
+/* release devices subsystem resources */
+extern int task_cgroup_devices_fini(slurm_cgroup_conf_t *slurm_cgroup_conf);
+
+/* create user/job/jobstep devices cgroups */
+extern int task_cgroup_devices_create(slurmd_job_t *job);
+
+/* create a task cgroup and attach the task to it */
+extern int task_cgroup_devices_attach_task(slurmd_job_t *job);
+
+#endif
diff --git a/src/plugins/task/cgroup/task_cgroup_memory.c b/src/plugins/task/cgroup/task_cgroup_memory.c
new file mode 100644
index 000000000..a7e0b0de7
--- /dev/null
+++ b/src/plugins/task/cgroup/task_cgroup_memory.c
@@ -0,0 +1,424 @@
+/***************************************************************************** \
+ *  task_cgroup_memory.c - memory cgroup subsystem for task/cgroup
+ *****************************************************************************
+ *  Copyright (C) 2009 CEA/DAM/DIF
+ *  Written by Matthieu Hautreux <matthieu.hautreux@cea.fr>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#if HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <sys/types.h>
+#include <stdlib.h>		/* getenv     */
+
+#include "slurm/slurm_errno.h"
+#include "slurm/slurm.h"
+#include "src/slurmd/slurmstepd/slurmstepd_job.h"
+#include "src/slurmd/slurmd/slurmd.h"
+
+#include "src/common/xstring.h"
+#include "src/common/xcgroup_read_config.h"
+#include "src/common/xcgroup.h"
+
+#include "task_cgroup.h"
+
+#ifndef PATH_MAX
+#define PATH_MAX 256
+#endif
+
+extern slurmd_conf_t *conf;
+
+static char user_cgroup_path[PATH_MAX];
+static char job_cgroup_path[PATH_MAX];
+static char jobstep_cgroup_path[PATH_MAX];
+
+static xcgroup_ns_t memory_ns;
+
+static xcgroup_t user_memory_cg;
+static xcgroup_t job_memory_cg;
+static xcgroup_t step_memory_cg;
+
+static float allowed_ram_space;   /* Allowed RAM in percent       */
+static float allowed_swap_space;  /* Allowed Swap percent         */
+
+static uint64_t max_ram;        /* Upper bound for memory.limit_in_bytes  */
+static uint64_t max_swap;       /* Upper bound for swap                   */
+static uint64_t totalram;       /* Total real memory available on node    */
+static uint64_t min_ram_space;  /* Don't constrain RAM below this value       */
+
+static uint64_t percent_in_bytes (uint64_t mb, float percent)
+{
+	return ((mb * 1024 * 1024) * (percent / 100.0));
+}
+
+extern int task_cgroup_memory_init(slurm_cgroup_conf_t *slurm_cgroup_conf)
+{
+	char release_agent_path[PATH_MAX];
+
+	/* initialize user/job/jobstep cgroup relative paths */
+	user_cgroup_path[0]='\0';
+	job_cgroup_path[0]='\0';
+	jobstep_cgroup_path[0]='\0';
+
+	/* initialize memory cgroup namespace */
+	release_agent_path[0]='\0';
+	if (snprintf(release_agent_path,PATH_MAX,"%s/release_memory",
+		      slurm_cgroup_conf->cgroup_release_agent) >= PATH_MAX) {
+		error("task/cgroup: unable to build memory release agent path");
+		goto error;
+	}
+	if (xcgroup_ns_create(slurm_cgroup_conf, &memory_ns, "/memory", "",
+			       "memory",release_agent_path) !=
+	     XCGROUP_SUCCESS) {
+		error("task/cgroup: unable to create memory namespace");
+		goto error;
+	}
+
+	/* check that memory cgroup namespace is available */
+	if (! xcgroup_ns_is_available(&memory_ns)) {
+		if (slurm_cgroup_conf->cgroup_automount) {
+			if (xcgroup_ns_mount(&memory_ns)) {
+				error("task/cgroup: unable to mount memory "
+				      "namespace");
+				goto clean;
+			}
+			info("task/cgroup: memory namespace is now mounted");
+		} else {
+			error("task/cgroup: memory namespace not mounted. "
+			      "aborting");
+			goto clean;
+		}
+	}
+
+	allowed_ram_space = slurm_cgroup_conf->allowed_ram_space;
+	allowed_swap_space = slurm_cgroup_conf->allowed_swap_space;
+
+	if ((totalram = (uint64_t) conf->real_memory_size) == 0)
+		error ("task/cgroup: Unable to get RealMemory size");
+
+	max_ram = percent_in_bytes(totalram, slurm_cgroup_conf->max_ram_percent);
+	max_swap = percent_in_bytes(totalram, slurm_cgroup_conf->max_swap_percent);
+	max_swap += max_ram;
+	min_ram_space = slurm_cgroup_conf->min_ram_space * 1024 * 1024;
+
+	debug ("task/cgroup/memory: total:%luM allowed:%.4g%%, swap:%.4g%%, "
+	      "max:%.4g%%(%luM) max+swap:%.4g%%(%luM) min:%uM",
+	      (unsigned long) totalram,
+	      allowed_ram_space,
+	      allowed_swap_space,
+	      slurm_cgroup_conf->max_ram_percent,
+	      (unsigned long) (max_ram/(1024*1024)),
+	      slurm_cgroup_conf->max_swap_percent,
+	      (unsigned long) (max_swap/(1024*1024)),
+	      (unsigned) slurm_cgroup_conf->min_ram_space);
+
+        /*
+         *  Warning: OOM Killer must be disabled for slurmstepd
+         *  or it would be destroyed if the application uses
+         *  more memory than permitted
+         *
+         *  If an env value is already set for slurmstepd
+         *  OOM killer behavior, keep it, otherwise set the
+         *  -17 value, which means do not let OOM killer kill it
+         *
+         *  FYI, setting "export SLURMSTEPD_OOM_ADJ=-17"
+         *  in /etc/sysconfig/slurm would be the same
+         */
+        setenv("SLURMSTEPD_OOM_ADJ","-17",0);
+
+	return SLURM_SUCCESS;
+
+clean:
+	xcgroup_ns_destroy(&memory_ns);
+
+error:
+	return SLURM_ERROR;
+}
+
+extern int task_cgroup_memory_fini(slurm_cgroup_conf_t *slurm_cgroup_conf)
+{
+	xcgroup_t memory_cg;
+
+	if (user_cgroup_path[0] == '\0' ||
+	     job_cgroup_path[0] == '\0' ||
+	     jobstep_cgroup_path[0] == '\0')
+		return SLURM_SUCCESS;
+
+	/*
+	 * Move the slurmstepd back to the root memory cg and force empty
+	 * the step cgroup to move its allocated pages to its parent.
+	 * The release_agent will asynchronously be called for the step
+	 * cgroup. It will do the necessary cleanup.
+	 * It should be good if this force_empty mech could be done directly
+	 * by the memcg implementation at the end of the last task managed
+	 * by a cgroup. It is too difficult and near impossible to handle
+	 * that cleanup correctly with current memcg.
+	 */
+	if (xcgroup_create(&memory_ns,&memory_cg,"",0,0) == XCGROUP_SUCCESS) {
+		xcgroup_set_uint32_param(&memory_cg,"tasks",getpid());
+		xcgroup_destroy(&memory_cg);
+		xcgroup_set_param(&step_memory_cg,"memory.force_empty","1");
+	}
+
+	xcgroup_destroy(&user_memory_cg);
+	xcgroup_destroy(&job_memory_cg);
+	xcgroup_destroy(&step_memory_cg);
+
+	user_cgroup_path[0]='\0';
+	job_cgroup_path[0]='\0';
+	jobstep_cgroup_path[0]='\0';
+
+	xcgroup_ns_destroy(&memory_ns);
+
+	return SLURM_SUCCESS;
+}
+
+/*
+ *  Return configured memory limit in bytes given a memory limit in MB.
+ */
+static uint64_t mem_limit_in_bytes (uint64_t mem)
+{
+	/* 
+	 *  If mem == 0 then assume there was no SLURM limit imposed
+	 *   on the amount of memory for job or step. Use the total
+	 *   amount of available RAM instead.
+	 */
+	if (mem == 0)
+		mem = totalram * 1024 * 1024;
+	else
+		mem = percent_in_bytes (mem, allowed_ram_space);
+	if (mem < min_ram_space)
+		return (min_ram_space);
+	if (mem > max_ram)
+		return (max_ram);
+	return (mem);
+}
+
+/*
+ *  Return configured swap limit in bytes given a memory limit in MB.
+ *
+ *   Swap limit is calculated as:
+ *
+ *     mem_limit_in_bytes + (configured_swap_percent * allocated_mem_in_bytes)
+ */
+static uint64_t swap_limit_in_bytes (uint64_t mem)
+{
+	uint64_t swap;
+	/*
+	 *  If mem == 0 assume "unlimited" and use totalram.
+	 */
+	swap = percent_in_bytes (mem ? mem : totalram, allowed_swap_space);
+	mem = mem_limit_in_bytes (mem) + swap;
+	if (mem < min_ram_space)
+		return (min_ram_space);
+	if (mem > max_swap)
+		return (max_swap);
+	return (mem);
+}
+
+static int memcg_initialize (xcgroup_ns_t *ns, xcgroup_t *cg,
+		char *path, uint64_t mem_limit, uid_t uid, gid_t gid)
+{
+	uint64_t mlb = mem_limit_in_bytes (mem_limit);
+	uint64_t mls = swap_limit_in_bytes  (mem_limit);
+
+	if (xcgroup_create (ns, cg, path, uid, gid) != XCGROUP_SUCCESS)
+		return -1;
+
+	if (xcgroup_instanciate (cg) != XCGROUP_SUCCESS) {
+		xcgroup_destroy (cg);
+		return -1;
+	}
+
+	xcgroup_set_param (cg, "memory.use_hierarchy","1");
+	xcgroup_set_uint64_param (cg, "memory.limit_in_bytes", mlb);
+	xcgroup_set_uint64_param (cg, "memory.memsw.limit_in_bytes", mls);
+
+	info ("task/cgroup: %s: alloc=%luMB mem.limit=%luMB memsw.limit=%luMB",
+		path,
+		(unsigned long) mem_limit,
+		(unsigned long) mlb/(1024*1024),
+		(unsigned long) mls/(1024*1024));
+
+	return 0;
+}
+
+extern int task_cgroup_memory_create(slurmd_job_t *job)
+{
+	int rc;
+	int fstatus = SLURM_ERROR;
+
+	xcgroup_t memory_cg;
+
+	uint32_t jobid = job->jobid;
+	uint32_t stepid = job->stepid;
+	uid_t uid = job->uid;
+	gid_t gid = job->gid;
+	pid_t pid;
+
+	char* slurm_cgpath ;
+
+	/* create slurm root cg in this cg namespace */
+	slurm_cgpath = task_cgroup_create_slurm_cg(&memory_ns);
+	if ( slurm_cgpath == NULL ) {
+		return SLURM_ERROR;
+	}
+
+	/* build user cgroup relative path if not set (should not be) */
+	if (*user_cgroup_path == '\0') {
+		if (snprintf(user_cgroup_path, PATH_MAX,
+			     "%s/uid_%u", slurm_cgpath, uid) >= PATH_MAX) {
+			error("unable to build uid %u cgroup relative "
+			      "path : %m", uid);
+			xfree(slurm_cgpath);
+			return SLURM_ERROR;
+		}
+	}
+	xfree(slurm_cgpath);
+
+	/* build job cgroup relative path if not set (should not be) */
+	if (*job_cgroup_path == '\0') {
+		if (snprintf(job_cgroup_path,PATH_MAX,"%s/job_%u",
+			      user_cgroup_path,jobid) >= PATH_MAX) {
+			error("task/cgroup: unable to build job %u memory "
+			      "cg relative path : %m",jobid);
+			return SLURM_ERROR;
+		}
+	}
+
+	/* build job step cgroup relative path (should not be) */
+	if (*jobstep_cgroup_path == '\0') {
+		if (snprintf(jobstep_cgroup_path,PATH_MAX,"%s/step_%u",
+			      job_cgroup_path,stepid) >= PATH_MAX) {
+			error("task/cgroup: unable to build job step %u memory "
+			      "cg relative path : %m",stepid);
+			return SLURM_ERROR;
+		}
+	}
+
+	/*
+	 * create memory root cg and lock it
+	 *
+	 * we will keep the lock until the end to avoid the effect of a release
+	 * agent that would remove an existing cgroup hierarchy while we are
+	 * setting it up. As soon as the step cgroup is created, we can release
+	 * the lock.
+	 * Indeed, consecutive slurm steps could result in cg being removed
+	 * between the next EEXIST instantiation and the first addition of
+	 * a task. The release_agent will have to lock the root memory cgroup
+	 * to avoid this scenario.
+	 */
+	if (xcgroup_create(&memory_ns,&memory_cg,"",0,0) != XCGROUP_SUCCESS) {
+		error("task/cgroup: unable to create root memory xcgroup");
+		return SLURM_ERROR;
+	}
+	if (xcgroup_lock(&memory_cg) != XCGROUP_SUCCESS) {
+		xcgroup_destroy(&memory_cg);
+		error("task/cgroup: unable to lock root memory cg");
+		return SLURM_ERROR;
+	}
+
+	/*
+	 * Create user cgroup in the memory ns (it could already exist)
+	 * Ask for hierarchical memory accounting starting from the user
+	 * container in order to track the memory consumption up to the
+	 * user.
+	 * We do not set any limits at this level for now. It could be
+	 * interesting to do it in the future but memcg cleanup mech
+	 * are not working well so it will be really difficult to manage
+	 * addition/removal of memory amounts at this level. (kernel 2.6.34)
+	 */
+	if (xcgroup_create(&memory_ns,&user_memory_cg,
+			    user_cgroup_path,
+			    getuid(),getgid()) != XCGROUP_SUCCESS) {
+		goto error;
+	}
+	if (xcgroup_instanciate(&user_memory_cg) != XCGROUP_SUCCESS) {
+		xcgroup_destroy(&user_memory_cg);
+		goto error;
+	}
+	xcgroup_set_param(&user_memory_cg,"memory.use_hierarchy","1");
+
+	/*
+	 * Create job cgroup in the memory ns (it could already exist)
+	 * and set the associated memory limits.
+	 * Ask for hierarchical memory accounting starting from the job
+	 * container in order to guarantee that a job will stay on track
+	 * regardless of the consumption of each step.
+	 */
+	if (memcg_initialize (&memory_ns, &job_memory_cg, job_cgroup_path,
+	                      job->job_mem, getuid(), getgid()) < 0) {
+		xcgroup_destroy (&user_memory_cg);
+		goto error;
+	}
+
+	/*
+	 * Create step cgroup in the memory ns (it should not exist)
+	 * and set the associated memory limits.
+	 */
+	if (memcg_initialize (&memory_ns, &step_memory_cg, jobstep_cgroup_path,
+	                      job->step_mem, uid, gid) < 0) {
+		xcgroup_destroy(&user_memory_cg);
+		xcgroup_destroy(&job_memory_cg);
+		goto error;
+	}
+
+	/*
+	 * Attach the slurmstepd to the step memory cgroup
+	 */
+	pid = getpid();
+	rc = xcgroup_add_pids(&step_memory_cg,&pid,1);
+	if (rc != XCGROUP_SUCCESS) {
+		error("task/cgroup: unable to add slurmstepd to memory cg '%s'",
+		      step_memory_cg.path);
+		fstatus = SLURM_ERROR;
+	} else
+		fstatus = SLURM_SUCCESS;
+
+error:
+	xcgroup_unlock(&memory_cg);
+	xcgroup_destroy(&memory_cg);
+
+	return fstatus;
+}
+
+extern int task_cgroup_memory_attach_task(slurmd_job_t *job)
+{
+	int fstatus = SLURM_ERROR;
+
+	/* tasks are automatically attached as slurmstepd is in the step cg */
+	fstatus = SLURM_SUCCESS;
+
+	return fstatus;
+}
+
diff --git a/src/plugins/task/cgroup/task_cgroup_memory.h b/src/plugins/task/cgroup/task_cgroup_memory.h
new file mode 100644
index 000000000..27ab417c9
--- /dev/null
+++ b/src/plugins/task/cgroup/task_cgroup_memory.h
@@ -0,0 +1,58 @@
+/*****************************************************************************\
+ *  task_cgroup_memory.h - memory cgroup subsystem primitives for task/cgroup
+ *****************************************************************************
+ *  Copyright (C) 2009 CEA/DAM/DIF
+ *  Written by Matthieu Hautreux <matthieu.hautreux@cea.fr>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#if HAVE_CONFIG_H
+#   include "config.h"
+#endif
+
+#ifndef _TASK_CGROUP_MEMORY_H_
+#define _TASK_CGROUP_MEMORY_H_
+
+#include "src/common/xcgroup_read_config.h"
+
+/* initialize memory subsystem of task/cgroup */
+extern int task_cgroup_memory_init(slurm_cgroup_conf_t *slurm_cgroup_conf);
+
+/* release memory subsystem resources */
+extern int task_cgroup_memory_fini(slurm_cgroup_conf_t *slurm_cgroup_conf);
+
+/* create user/job/jobstep memory cgroups */
+extern int task_cgroup_memory_create(slurmd_job_t *job);
+
+/* create a task cgroup and attach the task to it */
+extern int task_cgroup_memory_attach_task(slurmd_job_t *job);
+
+#endif
diff --git a/src/plugins/task/none/Makefile.in b/src/plugins/task/none/Makefile.in
index 1243edbf8..04934397f 100644
--- a/src/plugins/task/none/Makefile.in
+++ b/src/plugins/task/none/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -138,7 +140,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -175,6 +180,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -232,6 +238,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -267,6 +274,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/task/none/task_none.c b/src/plugins/task/none/task_none.c
index e252e37f2..e2f3a035e 100644
--- a/src/plugins/task/none/task_none.c
+++ b/src/plugins/task/none/task_none.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -45,7 +45,7 @@
 #include <signal.h>
 #include <sys/types.h>
 
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm_errno.h"
 #include "src/common/slurm_xlator.h"
 #include "src/slurmd/slurmstepd/slurmstepd_job.h"
 
@@ -193,3 +193,11 @@ extern int task_post_term (slurmd_job_t *job)
 	return SLURM_SUCCESS;
 }
 
+/*
+ * task_post_step() is called after termination of the step
+ * (all the task)
+ */
+extern int task_post_step (slurmd_job_t *job)
+{
+	return SLURM_SUCCESS;
+}
diff --git a/src/plugins/topology/3d_torus/Makefile.in b/src/plugins/topology/3d_torus/Makefile.in
index 6a139572c..592dc2f36 100644
--- a/src/plugins/topology/3d_torus/Makefile.in
+++ b/src/plugins/topology/3d_torus/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -139,7 +141,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -176,6 +181,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -233,6 +239,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -268,6 +275,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/topology/3d_torus/hilbert_slurm.c b/src/plugins/topology/3d_torus/hilbert_slurm.c
index 1ecf8ba10..f80984e41 100644
--- a/src/plugins/topology/3d_torus/hilbert_slurm.c
+++ b/src/plugins/topology/3d_torus/hilbert_slurm.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -44,27 +44,19 @@
 
 #include "src/plugins/topology/3d_torus/hilbert.h"
 #include "src/slurmctld/slurmctld.h"
+#include "src/common/node_select.h"
 
 #define _DEBUG 0
 
-static int _coord(char coord)
-{
-	if ((coord >= '0') && (coord <= '9'))
-		return (coord - '0');
-	if ((coord >= 'A') && (coord <= 'Z'))
-		return (coord - 'A' + 10);
-	return -1;
-}
-
 /* Using the node record table, generate a Hilbert integer for each node
  * based upon its coordinates and sort the records in that order. This must
  * be called once, immediately after reading the slurm.conf file. */
 extern void nodes_to_hilbert_curve(void)
 {
-	int coord_inx, i, j, k, max_coord = 0, min_inx;
-	uint32_t min_val;
+	static bool first_run = true;
+	int coord_inx, i, j, k, max_coord = 0;
 	int *coords;
-	struct node_record *node_ptr, *node_ptr2;
+	struct node_record *node_ptr;
 	coord_t hilbert[3];
 	int dims = 3;
 #ifdef HAVE_SUN_CONST
@@ -76,6 +68,12 @@ extern void nodes_to_hilbert_curve(void)
 #endif	/* SYSTEM_DIMENSIONS != 3) */
 #endif	/* !HAVE_SUN_CONST */
 
+	/* We can only re-order the nodes once at slurmctld startup.
+	 * After that time, many bitmaps are created based upon the
+	 * index of each node name in the array. */
+	if (!first_run)
+		return;
+
 	/* Get the coordinates for each node based upon its numeric suffix */
 	coords = xmalloc(sizeof(int) * node_record_count * dims);
 	for (i=0, coord_inx=0, node_ptr=node_record_table_ptr;
@@ -87,7 +85,8 @@ extern void nodes_to_hilbert_curve(void)
 		}
 		j -= offset;
 		for (k=dims; k; k--) {
-			coords[coord_inx] = _coord(node_ptr->name[j-k]);
+			coords[coord_inx] = select_char2coord(
+				node_ptr->name[j-k]);
 			if (coords[coord_inx] < 0) {
 				fatal("hostname %s lacks valid numeric suffix",
 				      node_ptr->name);
@@ -127,64 +126,4 @@ extern void nodes_to_hilbert_curve(void)
 			((hilbert[1]>>0 & 1) <<  1) +
 			((hilbert[2]>>0 & 1) <<  0);
 	}
-
-	/* Now we need to sort the node records. We only need to move a few
-	 * fields since the others were all initialized to identical values.
-	 * The fields needing to be copied are those set by the function
-	 * _build_single_nodeline_info() in src/common/read_conf.c */
-	for (i=0; i<node_record_count; i++) {
-		min_val = node_record_table_ptr[i].node_rank;
-		min_inx = i;
-		for (j=(i+1); j<node_record_count; j++) {
-			if (node_record_table_ptr[j].node_rank < min_val) {
-				min_val = node_record_table_ptr[j].node_rank;
-				min_inx = j;
-			}
-		}
-		if (min_inx != i) {	/* swap records */
-			char *tmp_str;
-			uint16_t tmp_uint16;
-			uint32_t tmp_uint32;
-
-			node_ptr =  node_record_table_ptr + i;
-			node_ptr2 = node_record_table_ptr + min_inx;
-
-			tmp_str = node_ptr->name;
-			node_ptr->name  = node_ptr2->name;
-			node_ptr2->name = tmp_str;
-
-			tmp_str = node_ptr->comm_name;
-			node_ptr->comm_name  = node_ptr2->comm_name;
-			node_ptr2->comm_name = tmp_str;
-
-			tmp_uint32 = node_ptr->node_rank;
-			node_ptr->node_rank  = node_ptr2->node_rank;
-			node_ptr2->node_rank = tmp_uint32;
-
-			tmp_str = node_ptr->features;
-			node_ptr->features  = node_ptr2->features;
-			node_ptr2->features = tmp_str;
-
-			tmp_uint16 = node_ptr->port;
-			node_ptr->port  = node_ptr2->port;
-			node_ptr2->port = tmp_uint16;
-
-			tmp_str = node_ptr->reason;
-			node_ptr->reason  = node_ptr2->reason;
-			node_ptr2->reason = tmp_str;
-
-			tmp_uint32 = node_ptr->weight;
-			node_ptr->weight  = node_ptr2->weight;
-			node_ptr2->weight = tmp_uint32;
-		}
-	}
-
-#if _DEBUG
-	/* Log the results */
-	for (i=0, node_ptr=node_record_table_ptr; i<node_record_count;
-	     i++, node_ptr++) {
-		info("%s: %u", node_ptr->name, node_ptr->node_rank);
-	}
-#endif
 }
-
diff --git a/src/plugins/topology/3d_torus/topology_3d_torus.c b/src/plugins/topology/3d_torus/topology_3d_torus.c
index 09e52d677..4f1d5102c 100644
--- a/src/plugins/topology/3d_torus/topology_3d_torus.c
+++ b/src/plugins/topology/3d_torus/topology_3d_torus.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -44,7 +44,7 @@
 #include <signal.h>
 #include <sys/types.h>
 
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm_errno.h"
 #include "src/common/bitstring.h"
 #include "src/common/log.h"
 #include "src/common/node_conf.h"
@@ -79,7 +79,7 @@
  */
 const char plugin_name[]        = "topology 3d_torus plugin";
 const char plugin_type[]        = "topology/3d_torus";
-const uint32_t plugin_version   = 100;
+const uint32_t plugin_version   = 101;
 
 extern void nodes_to_hilbert_curve(void);
 
@@ -107,19 +107,20 @@ extern int fini(void)
  *	after a system startup or reconfiguration.
  */
 extern int topo_build_config(void)
-{	static bool first_run = true;
-
-	/* We can only re-order the nodes once at slurmctld startup.
-	 * After that time, many bitmaps are created based upon the
-	 * index of each node name in the array. */
-	if (!first_run)
-		return SLURM_SUCCESS;
-	first_run = false;
+{
+	return SLURM_SUCCESS;
+}
 
-#ifndef HAVE_BG
-	nodes_to_hilbert_curve();
+/*
+ * topo_generate_node_ranking  -  populate node_rank fields
+ */
+extern bool topo_generate_node_ranking(void)
+{
+#ifdef HAVE_BG
+	return false;
 #endif
-	return SLURM_SUCCESS;
+	nodes_to_hilbert_curve();
+	return true;
 }
 
 /*
@@ -130,8 +131,10 @@ extern int topo_build_config(void)
  */
 extern int topo_get_node_addr(char* node_name, char** paddr, char** ppattern)
 {
+#ifndef HAVE_FRONT_END
 	if (find_node_record(node_name) == NULL)
 		return SLURM_ERROR;
+#endif
 
 	*paddr = xstrdup(node_name);
 	*ppattern = xstrdup("node");
diff --git a/src/plugins/topology/Makefile.in b/src/plugins/topology/Makefile.in
index ae1ee97d8..26e6592fb 100644
--- a/src/plugins/topology/Makefile.in
+++ b/src/plugins/topology/Makefile.in
@@ -62,6 +62,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -72,6 +73,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -133,7 +135,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -170,6 +175,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -227,6 +233,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -262,6 +269,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/topology/node_rank/Makefile.in b/src/plugins/topology/node_rank/Makefile.in
index d2b0cba2b..70c70aa43 100644
--- a/src/plugins/topology/node_rank/Makefile.in
+++ b/src/plugins/topology/node_rank/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -138,7 +140,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -175,6 +180,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -232,6 +238,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -267,6 +274,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/topology/node_rank/topology_node_rank.c b/src/plugins/topology/node_rank/topology_node_rank.c
index c740d4446..b9cabef6d 100644
--- a/src/plugins/topology/node_rank/topology_node_rank.c
+++ b/src/plugins/topology/node_rank/topology_node_rank.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -46,7 +46,8 @@
 #include <stdlib.h>
 #include <sys/types.h>
 
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm_errno.h"
+
 #include "src/common/bitstring.h"
 #include "src/common/log.h"
 #include "src/common/slurm_topology.h"
@@ -84,7 +85,7 @@
  */
 const char plugin_name[]        = "topology node_rank plugin";
 const char plugin_type[]        = "topology/node_rank";
-const uint32_t plugin_version   = 100;
+const uint32_t plugin_version   = 101;
 
 /*
  * init() is called when the plugin is loaded, before any other functions
@@ -111,80 +112,17 @@ extern int fini(void)
  */
 extern int topo_build_config(void)
 {
-	static bool first_run = true;
-	struct node_record *node_ptr, *node_ptr2;
-	int i, j, min_inx;
-	uint32_t min_val;
-
-	/* We can only re-order the nodes once at slurmctld startup.
-	 * After that time, many bitmaps are created based upon the
-	 * index of each node name in the array. */
-	if (!first_run)
-		return SLURM_SUCCESS;
-	first_run = false;
-
-	/* Now we need to sort the node records. We only need to move a few
-	 * fields since the others were all initialized to identical values.
-	 * The fields needing to be copied are those set by the function
-	 * _build_single_nodeline_info() in src/common/read_conf.c */
-	for (i=0; i<node_record_count; i++) {
-		min_val = node_record_table_ptr[i].node_rank;
-		min_inx = i;
-		for (j=(i+1); j<node_record_count; j++) {
-			if (node_record_table_ptr[j].node_rank < min_val) {
-				min_val = node_record_table_ptr[j].node_rank;
-				min_inx = j;
-			}
-		}
-		if (min_inx != i) {	/* swap records */
-			char *tmp_str;
-			uint16_t tmp_uint16;
-			uint32_t tmp_uint32;
-
-			node_ptr =  node_record_table_ptr + i;
-			node_ptr2 = node_record_table_ptr + min_inx;
-
-			tmp_str = node_ptr->name;
-			node_ptr->name  = node_ptr2->name;
-			node_ptr2->name = tmp_str;
-
-			tmp_str = node_ptr->comm_name;
-			node_ptr->comm_name  = node_ptr2->comm_name;
-			node_ptr2->comm_name = tmp_str;
-
-			tmp_uint32 = node_ptr->node_rank;
-			node_ptr->node_rank  = node_ptr2->node_rank;
-			node_ptr2->node_rank = tmp_uint32;
-
-			tmp_str = node_ptr->features;
-			node_ptr->features  = node_ptr2->features;
-			node_ptr2->features = tmp_str;
-
-			tmp_uint16 = node_ptr->port;
-			node_ptr->port  = node_ptr2->port;
-			node_ptr2->port = tmp_uint16;
-
-			tmp_str = node_ptr->reason;
-			node_ptr->reason  = node_ptr2->reason;
-			node_ptr2->reason = tmp_str;
-
-			tmp_uint32 = node_ptr->weight;
-			node_ptr->weight  = node_ptr2->weight;
-			node_ptr2->weight = tmp_uint32;
-		}
-	}
-
-#if _DEBUG
-	/* Log the results */
-	for (i=0, node_ptr=node_record_table_ptr; i<node_record_count;
-	     i++, node_ptr++) {
-		info("%s: %u", node_ptr->name, node_ptr->node_rank);
-	}
-#endif
-
 	return SLURM_SUCCESS;
 }
 
+/*
+ * topo_generate_node_ranking  -  populate node_rank fields
+ */
+extern bool topo_generate_node_ranking(void)
+{
+	return false;		/* XXX nothing coded yet */
+}
+
 /*
  * topo_get_node_addr - build node address and the associated pattern
  *      based on the topology information
@@ -195,8 +133,10 @@ extern int topo_build_config(void)
  */
 extern int topo_get_node_addr(char* node_name, char** paddr, char** ppattern)
 {
+#ifndef HAVE_FRONT_END
 	if (find_node_record(node_name) == NULL)
 		return SLURM_ERROR;
+#endif
 
 	*paddr = xstrdup(node_name);
 	*ppattern = xstrdup("node");
diff --git a/src/plugins/topology/none/Makefile.in b/src/plugins/topology/none/Makefile.in
index 17785eeb1..5838ab1a7 100644
--- a/src/plugins/topology/none/Makefile.in
+++ b/src/plugins/topology/none/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -138,7 +140,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -175,6 +180,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -232,6 +238,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -267,6 +274,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/topology/none/topology_none.c b/src/plugins/topology/none/topology_none.c
index a6c77efd0..67ab6582f 100644
--- a/src/plugins/topology/none/topology_none.c
+++ b/src/plugins/topology/none/topology_none.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -43,7 +43,7 @@
 #include <signal.h>
 #include <sys/types.h>
 
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm_errno.h"
 #include "src/common/log.h"
 #include "src/common/node_conf.h"
 #include "src/common/xstring.h"
@@ -77,7 +77,7 @@
  */
 const char plugin_name[]        = "topology NONE plugin";
 const char plugin_type[]        = "topology/none";
-const uint32_t plugin_version   = 100;
+const uint32_t plugin_version   = 101;
 
 /*
  * init() is called when the plugin is loaded, before any other functions
@@ -107,6 +107,14 @@ extern int topo_build_config(void)
 	return SLURM_SUCCESS;
 }
 
+/*
+ * topo_generate_node_ranking  -  this plugin does not set any node_rank fields
+ */
+extern bool topo_generate_node_ranking(void)
+{
+	return false;
+}
+
 /*
  * topo_get_node_addr - build node address and the associated pattern
  *      based on the topology information
@@ -115,8 +123,10 @@ extern int topo_build_config(void)
  */
 extern int topo_get_node_addr(char* node_name, char** paddr, char** ppattern)
 {
+#ifndef HAVE_FRONT_END
 	if (find_node_record(node_name) == NULL)
 		return SLURM_ERROR;
+#endif
 
 	*paddr = xstrdup(node_name);
 	*ppattern = xstrdup("node");
diff --git a/src/plugins/topology/tree/Makefile.in b/src/plugins/topology/tree/Makefile.in
index 2682ef926..d12e50563 100644
--- a/src/plugins/topology/tree/Makefile.in
+++ b/src/plugins/topology/tree/Makefile.in
@@ -63,6 +63,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,6 +74,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -138,7 +140,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -175,6 +180,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -232,6 +238,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -267,6 +274,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/plugins/topology/tree/topology_tree.c b/src/plugins/topology/tree/topology_tree.c
index 07ea2bab1..383d4729e 100644
--- a/src/plugins/topology/tree/topology_tree.c
+++ b/src/plugins/topology/tree/topology_tree.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -45,7 +45,7 @@
 #include <stdlib.h>
 #include <sys/types.h>
 
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm_errno.h"
 #include "src/common/bitstring.h"
 #include "src/common/log.h"
 #include "src/common/slurm_topology.h"
@@ -81,7 +81,7 @@
  */
 const char plugin_name[]        = "topology tree plugin";
 const char plugin_type[]        = "topology/tree";
-const uint32_t plugin_version   = 100;
+const uint32_t plugin_version   = 101;
 
 typedef struct slurm_conf_switches {
 	uint32_t link_speed;	/* link speed, arbitrary units */
@@ -139,6 +139,13 @@ extern int topo_build_config(void)
 	return SLURM_SUCCESS;
 }
 
+/*
+ * topo_generate_node_ranking  -  this plugin does not set any node_rank fields
+ */
+extern bool topo_generate_node_ranking(void)
+{
+	return false;
+}
 
 /*
  * topo_get_node_addr - build node address and the associated pattern
@@ -219,7 +226,7 @@ static void _validate_switches(void)
 {
 	slurm_conf_switches_t *ptr, **ptr_array;
 	int depth, i, j;
-	struct switch_record *switch_ptr;
+	struct switch_record *switch_ptr, *prior_ptr;
 	hostlist_t hl, invalid_hl = NULL;
 	char *child;
 	bitstr_t *multi_homed_bitmap = NULL;	/* nodes on >1 leaf switch */
@@ -242,6 +249,14 @@ static void _validate_switches(void)
 	for (i=0; i<switch_record_cnt; i++, switch_ptr++) {
 		ptr = ptr_array[i];
 		switch_ptr->name = xstrdup(ptr->switch_name);
+		/* See if switch name has already been defined. */
+		prior_ptr = switch_record_table;
+		for (j=0; j<i; j++, prior_ptr++) {
+			if (strcmp(switch_ptr->name, prior_ptr->name) == 0) {
+				fatal("Switch (%s) has already been defined",
+				      prior_ptr->name);
+			}
+		}
 		switch_ptr->link_speed = ptr->link_speed;
 		if (ptr->nodes) {
 			switch_ptr->level = 0;	/* leaf switch */
@@ -449,7 +464,8 @@ extern int  _read_topo_file(slurm_conf_switches_t **ptr_array[])
 		topo_conf = _get_topo_conf();
 
 	conf_hashtbl = s_p_hashtbl_create(switch_options);
-	if (s_p_parse_file(conf_hashtbl, NULL, topo_conf) == SLURM_ERROR) {
+	if (s_p_parse_file(conf_hashtbl, NULL, topo_conf, false) ==
+	    SLURM_ERROR) {
 		fatal("something wrong with opening/reading %s: %m",
 		      topo_conf);
 	}
diff --git a/src/sacct/Makefile.in b/src/sacct/Makefile.in
index 8bc495557..82bdf5314 100644
--- a/src/sacct/Makefile.in
+++ b/src/sacct/Makefile.in
@@ -66,6 +66,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -76,6 +77,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -122,7 +124,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -159,6 +164,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -216,6 +222,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -251,6 +258,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/sacct/options.c b/src/sacct/options.c
index 5d3edce57..316f67d11 100644
--- a/src/sacct/options.c
+++ b/src/sacct/options.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/sacct/print.c b/src/sacct/print.c
index 398647e41..09ec1a888 100644
--- a/src/sacct/print.c
+++ b/src/sacct/print.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -208,7 +208,7 @@ void print_fields(type_t type, void *object)
 			default:
 				break;
 			}
-			if(tmp_dub != (double)NO_VAL)
+			if (!fuzzy_equal(tmp_dub, NO_VAL))
 				tmp_char = _elapsed_time((long)tmp_dub, 0);
 
 			field->print_routine(field,
@@ -229,7 +229,7 @@ void print_fields(type_t type, void *object)
 			default:
 				break;
 			}
-			if(tmp_dub != (double)NO_VAL)
+			if (!fuzzy_equal(tmp_dub, NO_VAL))
 				convert_num_unit((float)tmp_dub,
 						 outbuf, sizeof(outbuf),
 						 UNIT_KILO);
@@ -251,7 +251,7 @@ void print_fields(type_t type, void *object)
 			default:
 				break;
 			}
-			if(tmp_dub != (double)NO_VAL)
+			if (!fuzzy_equal(tmp_dub, NO_VAL))
 				convert_num_unit((float)tmp_dub,
 						 outbuf, sizeof(outbuf),
 						 UNIT_KILO);
@@ -273,7 +273,7 @@ void print_fields(type_t type, void *object)
 			default:
 				break;
 			}
-			if(tmp_dub != (double)NO_VAL)
+			if (!fuzzy_equal(tmp_dub, NO_VAL))
 				convert_num_unit((float)tmp_dub,
 						 outbuf, sizeof(outbuf),
 						 UNIT_KILO);
@@ -315,6 +315,21 @@ void print_fields(type_t type, void *object)
 					     tmp_char,
 					     (curr_inx == field_count));
 			break;
+		case PRINT_COMMENT:
+			switch(type) {
+			case JOB:
+				tmp_char = job->derived_es;
+				break;
+			case JOBSTEP:
+			case JOBCOMP:
+			default:
+				tmp_char = NULL;
+				break;
+			}
+			field->print_routine(field,
+					     tmp_char,
+					     (curr_inx == field_count));
+			break;
 		case PRINT_CPU_TIME:
 			switch(type) {
 			case JOB:
@@ -374,21 +389,6 @@ void print_fields(type_t type, void *object)
 					     outbuf,
 					     (curr_inx == field_count));
 			break;
-		case PRINT_DERIVED_ES:
-			switch(type) {
-			case JOB:
-				tmp_char = job->derived_es;
-				break;
-			case JOBSTEP:
-			case JOBCOMP:
-			default:
-				tmp_char = NULL;
-				break;
-			}
-			field->print_routine(field,
-					     tmp_char,
-					     (curr_inx == field_count));
-			break;
 		case PRINT_ELAPSED:
 			switch(type) {
 			case JOB:
@@ -786,7 +786,7 @@ void print_fields(type_t type, void *object)
 			default:
 				break;
 			}
-			if(tmp_dub != (double)NO_VAL)
+			if (!fuzzy_equal(tmp_dub, NO_VAL))
 				tmp_char = _elapsed_time((long)tmp_dub, 0);
 			field->print_routine(field,
 					     tmp_char,
@@ -1107,7 +1107,7 @@ void print_fields(type_t type, void *object)
 			}
 
 			if (((tmp_int & JOB_STATE_BASE) == JOB_CANCELLED) &&
-			    (tmp_int2 != NO_VAL))
+			    (tmp_int2 != -1))
 				snprintf(outbuf, FORMAT_STRING_SIZE,
 					 "%s by %d",
 					 job_state_string(tmp_int),
diff --git a/src/sacct/process.c b/src/sacct/process.c
index f81d28ede..899580321 100644
--- a/src/sacct/process.c
+++ b/src/sacct/process.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/sacct/sacct.c b/src/sacct/sacct.c
index 8a985d53c..769bae2ed 100644
--- a/src/sacct/sacct.c
+++ b/src/sacct/sacct.c
@@ -6,7 +6,7 @@
  *  Copyright (C) 2005 Hewlett-Packard Development Company, L.P.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -53,10 +53,10 @@ print_field_t fields[] = {
 	{10, "AveVMSize", print_fields_str, PRINT_AVEVSIZE},
 	{16, "BlockID", print_fields_str, PRINT_BLOCKID},
 	{10, "Cluster", print_fields_str, PRINT_CLUSTER},
+	{14, "Comment", print_fields_str, PRINT_COMMENT},
 	{10, "CPUTime", print_fields_time_from_secs, PRINT_CPU_TIME},
 	{10, "CPUTimeRAW", print_fields_int, PRINT_CPU_TIME_RAW},
 	{15, "DerivedExitCode", print_fields_str, PRINT_DERIVED_EC},
-	{14, "DerivedExitStr", print_fields_str, PRINT_DERIVED_ES},
 	{10, "Elapsed", print_fields_time_from_secs, PRINT_ELAPSED},
 	{19, "Eligible", print_fields_date, PRINT_ELIGIBLE},
 	{19, "End", print_fields_date, PRINT_END},
diff --git a/src/sacct/sacct.h b/src/sacct/sacct.h
index 870f70a95..74985e8d2 100644
--- a/src/sacct/sacct.h
+++ b/src/sacct/sacct.h
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -101,10 +101,10 @@ typedef enum {
 		PRINT_AVEVSIZE,
 		PRINT_BLOCKID,
 		PRINT_CLUSTER,
+		PRINT_COMMENT,
 		PRINT_CPU_TIME,
 		PRINT_CPU_TIME_RAW,
 		PRINT_DERIVED_EC,
-		PRINT_DERIVED_ES,
 		PRINT_ELAPSED,
 		PRINT_ELIGIBLE,
 		PRINT_END,
diff --git a/src/sacctmgr/Makefile.in b/src/sacctmgr/Makefile.in
index 054d932a5..d1c399533 100644
--- a/src/sacctmgr/Makefile.in
+++ b/src/sacctmgr/Makefile.in
@@ -64,6 +64,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -74,6 +75,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -127,7 +129,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -164,6 +169,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -221,6 +227,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -256,6 +263,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/sacctmgr/account_functions.c b/src/sacctmgr/account_functions.c
index 76a25c619..7f0a65b37 100644
--- a/src/sacctmgr/account_functions.c
+++ b/src/sacctmgr/account_functions.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/sacctmgr/archive_functions.c b/src/sacctmgr/archive_functions.c
index dcbd77b58..0dbafb0a7 100644
--- a/src/sacctmgr/archive_functions.c
+++ b/src/sacctmgr/archive_functions.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -141,7 +141,6 @@ static int _set_cond(int *start, int argc, char *argv[],
 	int set = 0;
 	int end = 0;
 	int command_len = 0;
-	int option = 0;
  	uint32_t tmp;
 	slurmdb_job_cond_t *job_cond = NULL;
 
@@ -159,8 +158,7 @@ static int _set_cond(int *start, int argc, char *argv[],
 			command_len=strlen(argv[i]);
 		else {
 			command_len=end-1;
-			if(argv[i][end] == '=') {
-				option = (int)argv[i][end-1];
+			if (argv[i][end] == '=') {
 				end++;
 			}
 		}
@@ -478,7 +476,7 @@ extern int sacctmgr_archive_load(int argc, char *argv[])
 	int rc = SLURM_SUCCESS;
 	slurmdb_archive_rec_t *arch_rec =
 		xmalloc(sizeof(slurmdb_archive_rec_t));
-	int i=0, command_len = 0, option = 0;
+	int i=0, command_len = 0;
 	struct stat st;
 
 	for (i=0; i<argc; i++) {
@@ -487,8 +485,7 @@ extern int sacctmgr_archive_load(int argc, char *argv[])
 			command_len=strlen(argv[i]);
 		else {
 			command_len=end-1;
-			if(argv[i][end] == '=') {
-				option = (int)argv[i][end-1];
+			if (argv[i][end] == '=') {
 				end++;
 			}
 		}
diff --git a/src/sacctmgr/association_functions.c b/src/sacctmgr/association_functions.c
index ad10cae8b..c0fd5f761 100644
--- a/src/sacctmgr/association_functions.c
+++ b/src/sacctmgr/association_functions.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/sacctmgr/cluster_functions.c b/src/sacctmgr/cluster_functions.c
index 3f7d0b6b4..c558fb871 100644
--- a/src/sacctmgr/cluster_functions.c
+++ b/src/sacctmgr/cluster_functions.c
@@ -2,14 +2,14 @@
  *  cluster_functions.c - functions dealing with clusters in the
  *                        accounting system.
  *****************************************************************************
- *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Copyright (C) 2002-2007 The Regents of the University of California.
+ *  Copyright (C) 2008-2011 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -53,7 +53,6 @@ static int _set_cond(int *start, int argc, char *argv[],
 	int a_set = 0;
 	int end = 0;
 	int command_len = 0;
-	int option = 0;
 
 	with_deleted = 0;
 	without_limits = 0;
@@ -64,8 +63,7 @@ static int _set_cond(int *start, int argc, char *argv[],
 			command_len=strlen(argv[i]);
 		else {
 			command_len=end-1;
-			if(argv[i][end] == '=') {
-				option = (int)argv[i][end-1];
+			if (argv[i][end] == '=') {
 				end++;
 			}
 		}
@@ -927,23 +925,21 @@ extern int sacctmgr_dump_cluster (int argc, char *argv[])
 	FILE *fd = NULL;
 	char *class_str = NULL;
 
-	for (i=0; i<argc; i++) {
+	for (i = 0; i < argc; i++) {
 		int end = parse_option_end(argv[i]);
-		int option = 0;
 
-		if(!end)
-			command_len=strlen(argv[i]);
+		if (!end)
+			command_len = strlen(argv[i]);
 		else {
-			command_len=end-1;
-			if(argv[i][end] == '=') {
-				option = (int)argv[i][end-1];
+			command_len = end - 1;
+			if (argv[i][end] == '=') {
 				end++;
 			}
 		}
-		if(!end || !strncasecmp(argv[i], "Cluster",
+		if (!end || !strncasecmp(argv[i], "Cluster",
 					 MAX(command_len, 1))) {
-			if(cluster_name) {
-				exit_code=1;
+			if (cluster_name) {
+				exit_code = 1;
 				fprintf(stderr,
 					" Can only do one cluster at a time.  "
 					"Already doing %s\n", cluster_name);
@@ -951,9 +947,9 @@ extern int sacctmgr_dump_cluster (int argc, char *argv[])
 			}
 			cluster_name = xstrdup(argv[i]+end);
 		} else if (!strncasecmp(argv[i], "File",
-					 MAX(command_len, 1))) {
-			if(file_name) {
-				exit_code=1;
+					MAX(command_len, 1))) {
+			if (file_name) {
+				exit_code = 1;
 				fprintf(stderr,
 					" File name already set to %s\n",
 					file_name);
@@ -961,14 +957,15 @@ extern int sacctmgr_dump_cluster (int argc, char *argv[])
 			}
 			file_name = xstrdup(argv[i]+end);
 		} else {
-			exit_code=1;
+			exit_code = 1;
 			fprintf(stderr, " Unknown option: %s\n", argv[i]);
 		}
 	}
 
-	if(!cluster_name) {
-		exit_code=1;
+	if (!cluster_name) {
+		exit_code = 1;
 		fprintf(stderr, " We need a cluster to dump.\n");
+		xfree(file_name);
 		return SLURM_ERROR;
 	} else {
 		List temp_list = NULL;
@@ -981,30 +978,32 @@ extern int sacctmgr_dump_cluster (int argc, char *argv[])
 
 		temp_list = acct_storage_g_get_clusters(db_conn, my_uid,
 							&cluster_cond);
-		list_destroy(cluster_cond.cluster_list);
-		if(!temp_list) {
-			exit_code=1;
+		FREE_NULL_LIST(cluster_cond.cluster_list);
+		if (!temp_list) {
+			exit_code = 1;
 			fprintf(stderr,
 				" Problem getting clusters from database.  "
 				"Contact your admin.\n");
 			xfree(cluster_name);
+			xfree(file_name);
 			return SLURM_ERROR;
 		}
 
 		cluster_rec = list_peek(temp_list);
-		if(!cluster_rec) {
-			exit_code=1;
+		if (!cluster_rec) {
+			exit_code = 1;
 			fprintf(stderr, " Cluster %s doesn't exist.\n",
 				cluster_name);
 			xfree(cluster_name);
-			list_destroy(temp_list);
+			xfree(file_name);
+			FREE_NULL_LIST(temp_list);
 			return SLURM_ERROR;
 		}
 		class_str = get_classification_str(cluster_rec->classification);
-		list_destroy(temp_list);
+		FREE_NULL_LIST(temp_list);
 	}
 
-	if(!file_name) {
+	if (!file_name) {
 		file_name = xstrdup_printf("./%s.cfg", cluster_name);
 		printf(" No filename given, using %s.\n", file_name);
 	}
@@ -1023,38 +1022,38 @@ extern int sacctmgr_dump_cluster (int argc, char *argv[])
 	user_cond.assoc_cond = &assoc_cond;
 
 	user_list = acct_storage_g_get_users(db_conn, my_uid, &user_cond);
-	/* If not running with the DBD this can be set which will mess
-	   other things up.
-	*/
+	/* If not running with the DBD assoc_cond.user_list can be set,
+	 * which will mess other things up.
+	 */
 	if (assoc_cond.user_list) {
-		list_destroy(assoc_cond.user_list);
+		FREE_NULL_LIST(assoc_cond.user_list);
 		assoc_cond.user_list = NULL;
 	}
 
 	/* make sure this person running is an admin */
 	user_name = uid_to_string(my_uid);
-	if(!(user = sacctmgr_find_user_from_list(user_list, user_name))) {
-		exit_code=1;
+	if (!(user = sacctmgr_find_user_from_list(user_list, user_name))) {
+		exit_code = 1;
 		fprintf(stderr, " Your uid (%u) is not in the "
 			"accounting system, can't dump cluster.\n", my_uid);
+		FREE_NULL_LIST(assoc_cond.cluster_list);
 		xfree(cluster_name);
+		xfree(file_name);
+		FREE_NULL_LIST(user_list);
 		xfree(user_name);
-		if(user_list)
-			list_destroy(user_list);
-		list_destroy(assoc_cond.cluster_list);
 		return SLURM_ERROR;
 
 	} else {
-		if(my_uid != slurm_get_slurm_user_id() && my_uid != 0
-		   && user->admin_level < SLURMDB_ADMIN_SUPER_USER) {
-			exit_code=1;
+		if (my_uid != slurm_get_slurm_user_id() && my_uid != 0
+		    && user->admin_level < SLURMDB_ADMIN_SUPER_USER) {
+			exit_code = 1;
 			fprintf(stderr, " Your user does not have sufficient "
 				"privileges to dump clusters.\n");
+			FREE_NULL_LIST(assoc_cond.cluster_list);
 			xfree(cluster_name);
-			if(user_list)
-				list_destroy(user_list);
+			xfree(file_name);
+			FREE_NULL_LIST(user_list);
 			xfree(user_name);
-			list_destroy(assoc_cond.cluster_list);
 			return SLURM_ERROR;
 		}
 	}
@@ -1063,18 +1062,20 @@ extern int sacctmgr_dump_cluster (int argc, char *argv[])
 	/* assoc_cond is set up above */
 	assoc_list = acct_storage_g_get_associations(db_conn, my_uid,
 						     &assoc_cond);
-	list_destroy(assoc_cond.cluster_list);
-	if(!assoc_list) {
-		exit_code=1;
+	FREE_NULL_LIST(assoc_cond.cluster_list);
+	if (!assoc_list) {
+		exit_code = 1;
 		fprintf(stderr, " Problem with query.\n");
 		xfree(cluster_name);
+		xfree(file_name);
 		return SLURM_ERROR;
-	} else if(!list_count(assoc_list)) {
-		exit_code=1;
+	} else if (!list_count(assoc_list)) {
+		exit_code = 1;
 		fprintf(stderr, " Cluster %s returned nothing.\n",
 			cluster_name);
-		list_destroy(assoc_list);
+		FREE_NULL_LIST(assoc_list);
 		xfree(cluster_name);
+		xfree(file_name);
 		return SLURM_ERROR;
 	}
 
@@ -1085,60 +1086,74 @@ extern int sacctmgr_dump_cluster (int argc, char *argv[])
 
 	if ((fd = fopen(file_name,"w")) == NULL) {
 		fprintf(stderr, "Can't open file %s, %m\n", file_name);
-		list_destroy(assoc_list);
+		FREE_NULL_LIST(acct_list);
+		FREE_NULL_LIST(assoc_list);
 		xfree(cluster_name);
+		xfree(file_name);
+		FREE_NULL_LIST(slurmdb_hierarchical_rec_list);
 		return SLURM_ERROR;
 	}
 
 	/* Add header */
-	if(fprintf(fd,
-		   "# To edit this file start with a cluster line "
-		   "for the new cluster\n"
-		   "# Cluster - cluster_name:MaxNodesPerJob=50\n"
-		   "# Followed by Accounts you want in this fashion "
-		   "(root is created by default)...\n"
-		   "# Parent - root\n"
-		   "# Account - cs:MaxNodesPerJob=5:MaxJobs=4:"
-		   "MaxProcSecondsPerJob=20:FairShare=399:"
-		   "MaxWallDurationPerJob=40:Description='Computer Science':"
-		   "Organization='LC'\n"
-		   "# Any of the options after a ':' can be left out and "
-		   "they can be in any order.\n"
-		   "# If you want to add any sub accounts just list the "
-		   "Parent THAT HAS ALREADY \n"
-		   "# BEEN CREATED before the account line in this fashion...\n"
-		   "# Parent - cs\n"
-		   "# Account - test:MaxNodesPerJob=1:MaxJobs=1:"
-		   "MaxProcSecondsPerJob=1:FairShare=1:MaxWallDurationPerJob=1:"
-		   "Description='Test Account':Organization='Test'\n"
-		   "# To add users to a account add a line like this after a "
-		   "Parent - line\n"
-		   "# User - lipari:MaxNodesPerJob=2:MaxJobs=3:"
-		   "MaxProcSecondsPerJob=4:FairShare=1:"
-		   "MaxWallDurationPerJob=1\n") < 0) {
-		exit_code=1;
+	if (fprintf(fd,
+		    "# To edit this file start with a cluster line "
+		    "for the new cluster\n"
+		    "# Cluster - cluster_name:MaxNodesPerJob=50\n"
+		    "# Followed by Accounts you want in this fashion "
+		    "(root is created by default)...\n"
+		    "# Parent - root\n"
+		    "# Account - cs:MaxNodesPerJob=5:MaxJobs=4:"
+		    "MaxProcSecondsPerJob=20:FairShare=399:"
+		    "MaxWallDurationPerJob=40:Description='Computer Science':"
+		    "Organization='LC'\n"
+		    "# Any of the options after a ':' can be left out and "
+		    "they can be in any order.\n"
+		    "# If you want to add any sub accounts just list the "
+		    "Parent THAT HAS ALREADY \n"
+		    "# BEEN CREATED before the account line in this "
+		    "fashion...\n"
+		    "# Parent - cs\n"
+		    "# Account - test:MaxNodesPerJob=1:MaxJobs=1:"
+		    "MaxProcSecondsPerJob=1:FairShare=1:"
+		    "MaxWallDurationPerJob=1:"
+		    "Description='Test Account':Organization='Test'\n"
+		    "# To add users to a account add a line like this after a "
+		    "Parent - line\n"
+		    "# User - lipari:MaxNodesPerJob=2:MaxJobs=3:"
+		    "MaxProcSecondsPerJob=4:FairShare=1:"
+		    "MaxWallDurationPerJob=1\n") < 0) {
+		exit_code = 1;
 		fprintf(stderr, "Can't write to file");
+		FREE_NULL_LIST(acct_list);
+		FREE_NULL_LIST(assoc_list);
 		xfree(cluster_name);
+		xfree(file_name);
+		FREE_NULL_LIST(slurmdb_hierarchical_rec_list);
 		return SLURM_ERROR;
 	}
 
 	line = xstrdup_printf("Cluster - %s", cluster_name);
 
-	if(class_str)
+	if (class_str)
 		xstrfmtcat(line, ":Classification=%s", class_str);
 
 	slurmdb_hierarchical_rec = list_peek(slurmdb_hierarchical_rec_list);
 	assoc = slurmdb_hierarchical_rec->assoc;
-	if(strcmp(assoc->acct, "root"))
+	if (strcmp(assoc->acct, "root")) {
 		fprintf(stderr, "Root association not on the top it was %s\n",
 			assoc->acct);
-	else
+	} else
 		print_file_add_limits_to_line(&line, assoc);
 
-	if(fprintf(fd, "%s\n", line) < 0) {
-		exit_code=1;
+	if (fprintf(fd, "%s\n", line) < 0) {
+		exit_code = 1;
 		fprintf(stderr, " Can't write to file");
+		FREE_NULL_LIST(acct_list);
+		FREE_NULL_LIST(assoc_list);
+		xfree(cluster_name);
+		xfree(file_name);
 		xfree(line);
+		FREE_NULL_LIST(slurmdb_hierarchical_rec_list);
 		return SLURM_ERROR;
 	}
 	info("%s", line);
@@ -1147,10 +1162,11 @@ extern int sacctmgr_dump_cluster (int argc, char *argv[])
 	print_file_slurmdb_hierarchical_rec_list(
 		fd, slurmdb_hierarchical_rec_list, user_list, acct_list);
 
+	FREE_NULL_LIST(acct_list);
+	FREE_NULL_LIST(assoc_list);
 	xfree(cluster_name);
 	xfree(file_name);
-	list_destroy(slurmdb_hierarchical_rec_list);
-	list_destroy(assoc_list);
+	FREE_NULL_LIST(slurmdb_hierarchical_rec_list);
 	fclose(fd);
 
 	return SLURM_SUCCESS;
diff --git a/src/sacctmgr/common.c b/src/sacctmgr/common.c
index 3379b57b8..b9c74902d 100644
--- a/src/sacctmgr/common.c
+++ b/src/sacctmgr/common.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -273,6 +273,11 @@ static print_field_t *_get_print_field(char *object)
 		field->name = xstrdup("Flags");
 		field->len = 20;
 		field->print_routine = print_fields_str;
+	} else if (!strncasecmp("GraceTime", object, MAX(command_len, 3))) {
+		field->type = PRINT_GRACE;
+		field->name = xstrdup("GraceTime");
+		field->len = 10;
+		field->print_routine = print_fields_time_from_secs;
 	} else if (!strncasecmp("GrpCPUMins", object, MAX(command_len, 7))) {
 		field->type = PRINT_GRPCM;
 		field->name = xstrdup("GrpCPUMins");
@@ -339,6 +344,12 @@ static print_field_t *_get_print_field(char *object)
 		field->name = xstrdup("MaxCPUs");
 		field->len = 8;
 		field->print_routine = print_fields_uint;
+	} else if (!strncasecmp("MaxCPUsPerUser", object,
+				MAX(command_len, 11))) {
+		field->type = PRINT_MAXCU;
+		field->name = xstrdup("MaxCPUsPerUser");
+		field->len = 14;
+		field->print_routine = print_fields_uint;
 	} else if (!strncasecmp("MaxJobs", object, MAX(command_len, 4))) {
 		field->type = PRINT_MAXJ;
 		field->name = xstrdup("MaxJobs");
@@ -350,6 +361,12 @@ static print_field_t *_get_print_field(char *object)
 		field->name = xstrdup("MaxNodes");
 		field->len = 8;
 		field->print_routine = print_fields_uint;
+	} else if (!strncasecmp("MaxNodesPerUser", object,
+				MAX(command_len, 12))) {
+		field->type = PRINT_MAXNU;
+		field->name = xstrdup("MaxNodesPerUser");
+		field->len = 15;
+		field->print_routine = print_fields_uint;
 	} else if (!strncasecmp("MaxSubmitJobs", object, MAX(command_len, 4))) {
 		field->type = PRINT_MAXS;
 		field->name = xstrdup("MaxSubmit");
@@ -566,10 +583,20 @@ extern int sacctmgr_remove_assoc_usage(slurmdb_association_cond_t *assoc_cond)
 	slurmdb_cluster_cond_t cluster_cond;
 	int rc = SLURM_SUCCESS;
 
-	if (!assoc_cond->cluster_list ||
-	    !list_count(assoc_cond->cluster_list)) {
-		error("A cluster name is required to remove usage");
-		return SLURM_ERROR;
+	if (!assoc_cond->cluster_list)
+		assoc_cond->cluster_list = list_create(slurm_destroy_char);
+
+	if (!list_count(assoc_cond->cluster_list)) {
+		char *temp = slurm_get_cluster_name();
+		if (temp) {
+			printf("No cluster specified, resetting "
+			       "on local cluster %s\n", temp);
+			list_append(assoc_cond->cluster_list, temp);
+		}
+		if (!list_count(assoc_cond->cluster_list)) {
+			error("A cluster name is required to remove usage");
+			return SLURM_ERROR;
+		}
 	}
 
 	if(!commit_check("Would you like to reset usage?")) {
@@ -1383,6 +1410,11 @@ extern void sacctmgr_print_qos_limits(slurmdb_qos_rec_t *qos)
 	if (qos->preempt_list && !g_qos_list)
 		g_qos_list = acct_storage_g_get_qos(db_conn, my_uid, NULL);
 
+	if (qos->grace_time != NO_VAL)
+		printf("  GraceTime       = %d\n", qos->grace_time);
+	else
+		printf("  GraceTime       = NONE\n");
+
 	if (qos->grp_cpu_mins == INFINITE)
 		printf("  GrpCPUMins     = NONE\n");
 	else if (qos->grp_cpu_mins != NO_VAL)
@@ -1430,6 +1462,11 @@ extern void sacctmgr_print_qos_limits(slurmdb_qos_rec_t *qos)
 	else if (qos->max_cpus_pj != NO_VAL)
 		printf("  MaxCPUs        = %u\n", qos->max_cpus_pj);
 
+	if (qos->max_cpus_pu == INFINITE)
+		printf("  MaxCPUsPerUser        = NONE\n");
+	else if (qos->max_cpus_pu != NO_VAL)
+		printf("  MaxCPUsPerUser        = %u\n", qos->max_cpus_pu);
+
 	if (qos->max_jobs_pu == INFINITE)
 		printf("  MaxJobs        = NONE\n");
 	else if (qos->max_jobs_pu != NO_VAL)
@@ -1440,6 +1477,11 @@ extern void sacctmgr_print_qos_limits(slurmdb_qos_rec_t *qos)
 	else if (qos->max_nodes_pj != NO_VAL)
 		printf("  MaxNodes       = %u\n", qos->max_nodes_pj);
 
+	if (qos->max_nodes_pu == INFINITE)
+		printf("  MaxNodesPerUser       = NONE\n");
+	else if (qos->max_nodes_pu != NO_VAL)
+		printf("  MaxNodesPerUser       = %u\n", qos->max_nodes_pu);
+
 	if (qos->max_submit_jobs_pu == INFINITE)
 		printf("  MaxSubmitJobs  = NONE\n");
 	else if (qos->max_submit_jobs_pu != NO_VAL)
@@ -1464,6 +1506,11 @@ extern void sacctmgr_print_qos_limits(slurmdb_qos_rec_t *qos)
 		}
 	}
 
+	if (qos->preempt_mode) {
+		printf("  PreemptMode    = %s\n",
+		       preempt_mode_string(qos->preempt_mode));
+	}
+
 	if (qos->priority == INFINITE)
 		printf("  Priority       = NONE\n");
 	else if (qos->priority != NO_VAL)
diff --git a/src/sacctmgr/config_functions.c b/src/sacctmgr/config_functions.c
index 0dd4c7a47..163d321d1 100644
--- a/src/sacctmgr/config_functions.c
+++ b/src/sacctmgr/config_functions.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/sacctmgr/event_functions.c b/src/sacctmgr/event_functions.c
index f5cbf720e..8936ff809 100644
--- a/src/sacctmgr/event_functions.c
+++ b/src/sacctmgr/event_functions.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -276,7 +276,6 @@ static int _set_cond(int *start, int argc, char *argv[],
 	int i, end = 0;
 	int set = 0;
 	int command_len = 0;
-	int option = 0;
 	int local_cluster_flag = 0;
 	int all_time_flag = 0;
 
@@ -288,8 +287,7 @@ static int _set_cond(int *start, int argc, char *argv[],
 			command_len=strlen(argv[i]);
 		else {
 			command_len=end-1;
-			if(argv[i][end] == '=') {
-				option = (int)argv[i][end-1];
+			if (argv[i][end] == '=') {
 				end++;
 			}
 		}
diff --git a/src/sacctmgr/file_functions.c b/src/sacctmgr/file_functions.c
index 13ccade8f..0cf44e771 100644
--- a/src/sacctmgr/file_functions.c
+++ b/src/sacctmgr/file_functions.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -1151,7 +1151,6 @@ static int _mod_acct(sacctmgr_file_opts_t *file_opts,
 static int _mod_user(sacctmgr_file_opts_t *file_opts,
 		     slurmdb_user_rec_t *user, char *cluster, char *parent)
 {
-	int rc;
 	int set = 0;
 	int changed = 0;
 	char *def_acct = NULL, *def_wckey = NULL, *my_info = NULL;
@@ -1253,9 +1252,9 @@ static int _mod_user(sacctmgr_file_opts_t *file_opts,
 		slurmdb_coord_rec_t *coord = NULL;
 		int first = 1;
 		notice_thread_init();
-		rc = acct_storage_g_add_coord(db_conn, my_uid,
-					      file_opts->coord_list,
-					      &user_cond);
+		(void) acct_storage_g_add_coord(db_conn, my_uid,
+					        file_opts->coord_list,
+					        &user_cond);
 		notice_thread_fini();
 
 		user->coord_accts = list_create(slurmdb_destroy_coord_rec);
@@ -1313,9 +1312,8 @@ static int _mod_user(sacctmgr_file_opts_t *file_opts,
 
 		if (list_count(add_list)) {
 			notice_thread_init();
-			rc = acct_storage_g_add_coord(db_conn, my_uid,
-						      add_list,
-						      &user_cond);
+			(void) acct_storage_g_add_coord(db_conn, my_uid,
+							add_list, &user_cond);
 			notice_thread_fini();
 			set = 1;
 		}
@@ -1352,8 +1350,7 @@ static int _mod_user(sacctmgr_file_opts_t *file_opts,
 		printf(" for user '%s'\n", user->name);
 		set = 1;
 		notice_thread_init();
-		rc = acct_storage_g_add_wckeys(db_conn, my_uid,
-					       user->wckey_list);
+		acct_storage_g_add_wckeys(db_conn, my_uid, user->wckey_list);
 		notice_thread_fini();
 	} else if ((user->wckey_list && list_count(user->wckey_list))
 		   && (file_opts->wckey_list
@@ -1392,8 +1389,7 @@ static int _mod_user(sacctmgr_file_opts_t *file_opts,
 
 		if (list_count(add_list)) {
 			notice_thread_init();
-			rc = acct_storage_g_add_wckeys(db_conn, my_uid,
-						       add_list);
+			acct_storage_g_add_wckeys(db_conn, my_uid, add_list);
 			notice_thread_fini();
 			set = 1;
 		}
@@ -1869,14 +1865,12 @@ extern void load_sacctmgr_cfg_file (int argc, char *argv[])
 	acct_storage_g_commit(db_conn, 0);
 
 	for (i=0; i<argc; i++) {
-		int option = 0;
 		int end = parse_option_end(argv[i]);
 		if (!end)
 			command_len=strlen(argv[i]);
 		else {
 			command_len=end-1;
 			if (argv[i][end] == '=') {
-				option = (int)argv[i][end-1];
 				end++;
 			}
 		}
diff --git a/src/sacctmgr/job_functions.c b/src/sacctmgr/job_functions.c
index 68b422623..5ccfe6104 100644
--- a/src/sacctmgr/job_functions.c
+++ b/src/sacctmgr/job_functions.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -109,7 +109,6 @@ static int _set_rec(int *start, int argc, char *argv[],
 	int set = 0;
 	int end = 0;
 	int command_len = 0;
-	int option = 0;
 
 	for (i=(*start); i<argc; i++) {
 		end = parse_option_end(argv[i]);
@@ -117,8 +116,7 @@ static int _set_rec(int *start, int argc, char *argv[],
 			command_len=strlen(argv[i]);
 		else {
 			command_len=end-1;
-			if(argv[i][end] == '=') {
-				option = (int)argv[i][end-1];
+			if (argv[i][end] == '=') {
 				end++;
 			}
 		}
@@ -142,7 +140,9 @@ static int _set_rec(int *start, int argc, char *argv[],
 				     "DerivedExitCode") == SLURM_SUCCESS) {
 				set = 1;
 			}
-		} else if ((!strncasecmp(argv[i], "DerivedExitString",
+		} else if ((!strncasecmp(argv[i], "Comment",
+					 MAX(command_len, 7))) ||
+			   (!strncasecmp(argv[i], "DerivedExitString",
 					 MAX(command_len, 12))) ||
 			   (!strncasecmp(argv[i], "DerivedES",
 					 MAX(command_len, 9)))) {
diff --git a/src/sacctmgr/problem_functions.c b/src/sacctmgr/problem_functions.c
index 32762a41a..653f385f2 100644
--- a/src/sacctmgr/problem_functions.c
+++ b/src/sacctmgr/problem_functions.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -46,15 +46,14 @@ static int _set_cond(int *start, int argc, char *argv[],
 	int i, end = 0;
 	int set = 0;
 	int command_len = 0;
-	int option = 0;
+
 	for (i=(*start); i<argc; i++) {
 		end = parse_option_end(argv[i]);
 		if(!end)
 			command_len=strlen(argv[i]);
 		else {
 			command_len=end-1;
-			if(argv[i][end] == '=') {
-				option = (int)argv[i][end-1];
+			if (argv[i][end] == '=') {
 				end++;
 			}
 		}
diff --git a/src/sacctmgr/qos_functions.c b/src/sacctmgr/qos_functions.c
index a2a7c71a9..5ce45a4f9 100644
--- a/src/sacctmgr/qos_functions.c
+++ b/src/sacctmgr/qos_functions.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -118,7 +118,6 @@ static int _set_cond(int *start, int argc, char *argv[],
 	int set = 0;
 	int end = 0;
 	int command_len = 0;
-	int option = 0;
 
 	if(!qos_cond) {
 		error("No qos_cond given");
@@ -131,8 +130,7 @@ static int _set_cond(int *start, int argc, char *argv[],
 			command_len=strlen(argv[i]);
 		else {
 			command_len=end-1;
-			if(argv[i][end] == '=') {
-				option = (int)argv[i][end-1];
+			if (argv[i][end] == '=') {
 				end++;
 			}
 		}
@@ -205,6 +203,10 @@ static int _set_cond(int *start, int argc, char *argv[],
 					" Bad Preempt Mode given: %s\n",
 					argv[i]);
 				exit_code = 1;
+			} else if (qos_cond->preempt_mode == PREEMPT_MODE_SUSPEND) {
+				printf("PreemptType and PreemptMode "
+					"values incompatible\n");
+				exit_code = 1;
 			} else
 				set = 1;
 		} else {
@@ -277,6 +279,14 @@ static int _set_rec(int *start, int argc, char *argv[],
 				exit_code = 1;
 			} else
 				set = 1;
+		} else if (!strncasecmp (argv[i], "GraceTime",
+					 MAX(command_len, 3))) {
+			if (!qos)
+				continue;
+			if (get_uint(argv[i]+end, &qos->grace_time,
+			             "GraceTime") == SLURM_SUCCESS) {
+				set = 1;
+			}
 		} else if (!strncasecmp (argv[i], "GrpCPUMins",
 					 MAX(command_len, 7))) {
 			if(!qos)
@@ -285,12 +295,19 @@ static int _set_rec(int *start, int argc, char *argv[],
 				       &qos->grp_cpu_mins,
 				       "GrpCPUMins") == SLURM_SUCCESS)
 				set = 1;
-		} else if (!strncasecmp (argv[i], "GrpCpus",
+		} else if (!strncasecmp (argv[i], "GrpCPURunMins",
+					 MAX(command_len, 7))) {
+			if(!qos)
+				continue;
+			if (get_uint64(argv[i]+end, &qos->grp_cpu_run_mins,
+				       "GrpCPURunMins") == SLURM_SUCCESS)
+				set = 1;
+		} else if (!strncasecmp (argv[i], "GrpCPUs",
 					 MAX(command_len, 7))) {
 			if(!qos)
 				continue;
 			if (get_uint(argv[i]+end, &qos->grp_cpus,
-			    "GrpCpus") == SLURM_SUCCESS)
+				     "GrpCPUs") == SLURM_SUCCESS)
 				set = 1;
 		} else if (!strncasecmp (argv[i], "GrpJobs",
 					 MAX(command_len, 4))) {
@@ -335,12 +352,19 @@ static int _set_rec(int *start, int argc, char *argv[],
 				       &qos->max_cpu_mins_pj,
 				       "MaxCPUMins") == SLURM_SUCCESS)
 				set = 1;
-		} else if (!strncasecmp (argv[i], "MaxCpusPerJob",
+		} else if (!strncasecmp (argv[i], "MaxCPUsPerJob",
 					 MAX(command_len, 7))) {
 			if(!qos)
 				continue;
 			if (get_uint(argv[i]+end, &qos->max_cpus_pj,
-			    "MaxCpus") == SLURM_SUCCESS)
+			    "MaxCPUs") == SLURM_SUCCESS)
+				set = 1;
+		} else if (!strncasecmp (argv[i], "MaxCPUsPerUser",
+					 MAX(command_len, 11))) {
+			if(!qos)
+				continue;
+			if (get_uint(argv[i]+end, &qos->max_cpus_pu,
+			    "MaxCPUsPerUser") == SLURM_SUCCESS)
 				set = 1;
 		} else if (!strncasecmp (argv[i], "MaxJobsPerUser",
 					 MAX(command_len, 4))) {
@@ -357,6 +381,14 @@ static int _set_rec(int *start, int argc, char *argv[],
 			    &qos->max_nodes_pj,
 			    "MaxNodes") == SLURM_SUCCESS)
 				set = 1;
+		} else if (!strncasecmp (argv[i], "MaxNodesPerUser",
+					 MAX(command_len, 8))) {
+			if(!qos)
+				continue;
+			if (get_uint(argv[i]+end,
+			    &qos->max_nodes_pu,
+			    "MaxNodesPerUser") == SLURM_SUCCESS)
+				set = 1;
 		} else if (!strncasecmp (argv[i], "MaxSubmitJobsPerUser",
 					 MAX(command_len, 4))) {
 			if(!qos)
@@ -388,6 +420,10 @@ static int _set_rec(int *start, int argc, char *argv[],
 					" Bad Preempt Mode given: %s\n",
 					argv[i]);
 				exit_code = 1;
+			} else if (qos->preempt_mode == PREEMPT_MODE_SUSPEND) {
+				printf("PreemptType and PreemptMode "
+					"values incompatible\n");
+				exit_code = 1;
 			} else
 				set = 1;
 		/* Preempt needs to follow PreemptMode */
@@ -582,6 +618,7 @@ extern int sacctmgr_add_qos(int argc, char *argv[])
 			else
 				qos->description = xstrdup(name);
 
+			qos->grace_time = start_qos->grace_time;
 			qos->grp_cpu_mins = start_qos->grp_cpu_mins;
 			qos->grp_cpus = start_qos->grp_cpus;
 			qos->grp_jobs = start_qos->grp_jobs;
@@ -591,13 +628,16 @@ extern int sacctmgr_add_qos(int argc, char *argv[])
 
 			qos->max_cpu_mins_pj = start_qos->max_cpu_mins_pj;
 			qos->max_cpus_pj = start_qos->max_cpus_pj;
+			qos->max_cpus_pu = start_qos->max_cpus_pu;
 			qos->max_jobs_pu = start_qos->max_jobs_pu;
 			qos->max_nodes_pj = start_qos->max_nodes_pj;
+			qos->max_nodes_pu = start_qos->max_nodes_pu;
 			qos->max_submit_jobs_pu = start_qos->max_submit_jobs_pu;
 			qos->max_wall_pj = start_qos->max_wall_pj;
 
 			qos->preempt_list =
 				copy_char_list(start_qos->preempt_list);
+			qos->preempt_mode = start_qos->preempt_mode;
 
 			qos->priority = start_qos->priority;
 
@@ -687,7 +727,8 @@ extern int sacctmgr_list_qos(int argc, char *argv[])
 		list_destroy(format_list);
 		return SLURM_ERROR;
 	} else if(!list_count(format_list)) {
-		slurm_addto_char_list(format_list, "Name,Prio,Preempt,PreemptM,"
+		slurm_addto_char_list(format_list,
+				      "Name,Prio,GraceT,Preempt,PreemptM,"
 				      "Flags%40,UsageThres,GrpCPUs,GrpCPUMins,"
 				      "GrpJ,GrpN,GrpS,GrpW,"
 				      "MaxCPUs,MaxCPUMins,MaxJ,MaxN,MaxS,MaxW");
@@ -740,12 +781,23 @@ extern int sacctmgr_list_qos(int argc, char *argv[])
 					field, qos->usage_thres,
 					(curr_inx == field_count));
 				break;
+			case PRINT_GRACE:
+				field->print_routine(
+					field, qos->grace_time,
+					(curr_inx == field_count));
+				break;
 			case PRINT_GRPCM:
 				field->print_routine(
 					field,
 					qos->grp_cpu_mins,
 					(curr_inx == field_count));
 				break;
+			case PRINT_GRPCRM:
+				field->print_routine(
+					field,
+					qos->grp_cpu_run_mins,
+					(curr_inx == field_count));
+				break;
 			case PRINT_GRPC:
 				field->print_routine(field,
 						     qos->grp_cpus,
@@ -788,6 +840,11 @@ extern int sacctmgr_list_qos(int argc, char *argv[])
 						     qos->max_cpus_pj,
 						     (curr_inx == field_count));
 				break;
+			case PRINT_MAXCU:
+				field->print_routine(field,
+						     qos->max_cpus_pu,
+						     (curr_inx == field_count));
+				break;
 			case PRINT_MAXJ:
 				field->print_routine(field,
 						     qos->max_jobs_pu,
@@ -798,6 +855,11 @@ extern int sacctmgr_list_qos(int argc, char *argv[])
 						     qos->max_nodes_pj,
 						     (curr_inx == field_count));
 				break;
+			case PRINT_MAXNU:
+				field->print_routine(field,
+						     qos->max_nodes_pu,
+						     (curr_inx == field_count));
+				break;
 			case PRINT_MAXS:
 				field->print_routine(field,
 						     qos->max_submit_jobs_pu,
diff --git a/src/sacctmgr/sacctmgr.c b/src/sacctmgr/sacctmgr.c
index bfb40b190..4359e19fa 100644
--- a/src/sacctmgr/sacctmgr.c
+++ b/src/sacctmgr/sacctmgr.c
@@ -10,7 +10,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -856,24 +856,26 @@ sacctmgr [<OPTION>] [<COMMAND>]                                            \n\
        delete coordinator - Accounts=, and Names=                          \n\
                                                                            \n\
        list events        - All_Clusters, All_Time, Clusters=, End=, Events=,\n\
-                            Format=, MaxCpus=, MinCpus=, Nodes=, Reason=,  \n\
+                            Format=, MaxCPUs=, MinCPUs=, Nodes=, Reason=,  \n\
                             Start=, States=, and User=                     \n\
                                                                            \n\
-       modify job         - (set options) DerivedExitCode=, DerivedExitString=\n\
+       modify job         - (set options) DerivedExitCode=, Comment=       \n\
                             (where options) JobID=, Cluster=               \n\
                                                                            \n\
        list qos           - Descriptions=, Format=, Id=, Names=,           \n\
                             PreemptMode=, and WithDeleted                  \n\
-       add qos            - Description=, Flags=, GrpCPUMins=, GrpCPUs=,   \n\
-                            GrpJobs=, GrpNodes=, GrpSubmitJob=, GrpWall=,  \n\
-                            MaxCPUMins=, MaxCPUs=, MaxJobs=, MaxNodes=,    \n\
-                            MaxSubmitJobs=, MaxWall=, Names=, Preempt=,    \n\
-                            PreemptMode=, Priority=, UsageFactor=,         \n\
-                            and UsageThreshold=                            \n\
-       modify qos         - (set options) Description=, Flags=,            \n\
+       add qos            - Description=, Flags=, GraceTime=, GrpCPUMins=, \n\
+                            GGrpCPUs=, GrpJobs=, GrpNodes=, GrpSubmitJob=, \n\
+                            GrpWall=,MaxCPUMins=, MaxCPUs=, MaxCPUsPerUser=,\n\
+                            MaxJobs=, MaxNodesPerUser=, MaxCPUsPerUser=,   \n\
+                            MaxNodes=, MaxSubmitJobs=, MaxWall=, Names=,   \n\
+                            Preempt=, PreemptMode=, Priority=,             \n\
+                            UsageFactor=, and UsageThreshold=              \n\
+       modify qos         - (set options) Description=, Flags=, GraceTime=,\n\
                             GrpCPUMins=, GrpCPUs=, GrpJobs=, GrpNodes=,    \n\
                             GrpSubmitJob=, GrpWall=, MaxCPUMins=, MaxCPUs=,\n\
-                            MaxJobs=, MaxNodes=, MaxSubmitJobs=,           \n\
+                            MaxCPUsPerUser=, MaxJobs=, MaxNodes=,          \n\
+                            MaxNodesPerUser=, MaxSubmitJobs=,              \n\
                             MaxWall=, Names=, Preempt=, PreemptMode=,      \n\
                             Priority=, UsageFactor=, and UsageThreshold=   \n\
                             (where options) Descriptions=, ID=, Names=     \n\
@@ -939,10 +941,11 @@ sacctmgr [<OPTION>] [<COMMAND>]                                            \n\
                             Event, EventRaw, NodeName, Reason, Start,      \n\
                             State, StateRaw, User                          \n\
                                                                            \n\
-       QOS                - Description, Flags, GrpCPUMins, GrpCPUs,       \n\
-                            GrpJobs, GrpNodes, GrpSubmitJob, GrpWall, ID,  \n\
-                            MaxCPUMins, MaxCPUs, MaxJobs, MaxNodes,        \n\
-                            MaxSubmitJobs, MaxWall, Name,                  \n\
+       QOS                - Description, Flags, GraceTime, GrpCPUMins,     \n\
+                            GrpCPUs,GrpJobs, GrpNodes, GrpSubmitJob,       \n\
+                            GrpWall, ID,MaxCPUMins, MaxCPUs,               \n\
+                            MaxCPUsPerUser, MaxJobs, MaxNodes,             \n\
+                            MaxNodesPerUser, MaxSubmitJobs, MaxWall, Name, \n\
                             Preempt, PreemptMode, Priority, UsageFactor,   \n\
                             UsageThreshold                                 \n\
                                                                            \n\
diff --git a/src/sacctmgr/sacctmgr.h b/src/sacctmgr/sacctmgr.h
index 053c732a9..41ac729d8 100644
--- a/src/sacctmgr/sacctmgr.h
+++ b/src/sacctmgr/sacctmgr.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -76,7 +76,7 @@
 #  endif
 #endif  /* HAVE_INTTYPES_H */
 
-#include <slurm/slurm.h>
+#include "slurm/slurm.h"
 
 #include "src/common/jobacct_common.h"
 #include "src/common/parse_time.h"
@@ -113,8 +113,10 @@ typedef enum {
 	PRINT_MAXCM,
 	PRINT_MAXCRM,
 	PRINT_MAXC,
+	PRINT_MAXCU,
 	PRINT_MAXJ,
 	PRINT_MAXN,
+	PRINT_MAXNU,
 	PRINT_MAXS,
 	PRINT_MAXW,
 
@@ -145,7 +147,8 @@ typedef enum {
 	PRINT_DWCKEY,
 
 	/* QOS */
-	PRINT_PREE = 6000,
+	PRINT_GRACE = 6000,
+	PRINT_PREE,
 	PRINT_PREEM,
 	PRINT_PRIO,
 	PRINT_UF,
diff --git a/src/sacctmgr/txn_functions.c b/src/sacctmgr/txn_functions.c
index 48f792560..a44cab648 100644
--- a/src/sacctmgr/txn_functions.c
+++ b/src/sacctmgr/txn_functions.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -48,7 +48,6 @@ static int _set_cond(int *start, int argc, char *argv[],
 	int i, end = 0;
 	int set = 0;
 	int command_len = 0;
-	int option = 0;
 
 	for (i=(*start); i<argc; i++) {
 		end = parse_option_end(argv[i]);
@@ -56,8 +55,7 @@ static int _set_cond(int *start, int argc, char *argv[],
 			command_len=strlen(argv[i]);
 		else {
 			command_len=end-1;
-			if(argv[i][end] == '=') {
-				option = (int)argv[i][end-1];
+			if (argv[i][end] == '=') {
 				end++;
 			}
 		}
diff --git a/src/sacctmgr/user_functions.c b/src/sacctmgr/user_functions.c
index e59aa2d26..c3a82979b 100644
--- a/src/sacctmgr/user_functions.c
+++ b/src/sacctmgr/user_functions.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/sacctmgr/wckey_functions.c b/src/sacctmgr/wckey_functions.c
index dde9f8d4b..31a9e958a 100644
--- a/src/sacctmgr/wckey_functions.c
+++ b/src/sacctmgr/wckey_functions.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -48,7 +48,6 @@ static int _set_cond(int *start, int argc, char *argv[],
 	int set = 0;
 	int end = 0;
 	int command_len = 0;
-	int option = 0;
 
 	if(!wckey_cond) {
 		error("No wckey_cond given");
@@ -61,8 +60,7 @@ static int _set_cond(int *start, int argc, char *argv[],
 			command_len=strlen(argv[i]);
 		else {
 			command_len=end-1;
-			if(argv[i][end] == '=') {
-				option = (int)argv[i][end-1];
+			if (argv[i][end] == '=') {
 				end++;
 			}
 		}
diff --git a/src/salloc/Makefile.am b/src/salloc/Makefile.am
index 2461f542c..01ed5e93f 100644
--- a/src/salloc/Makefile.am
+++ b/src/salloc/Makefile.am
@@ -14,6 +14,10 @@ convenience_libs = $(top_builddir)/src/api/libslurm.o -ldl
 salloc_LDADD = \
 	$(convenience_libs)
 
+if HAVE_REAL_CRAY
+  salloc_LDADD += -ljob
+endif
+
 salloc_LDFLAGS = -export-dynamic $(CMD_LDFLAGS)
 
 force:
diff --git a/src/salloc/Makefile.in b/src/salloc/Makefile.in
index 5b7ccfe92..27b84d078 100644
--- a/src/salloc/Makefile.in
+++ b/src/salloc/Makefile.in
@@ -38,6 +38,7 @@ build_triplet = @build@
 host_triplet = @host@
 target_triplet = @target@
 bin_PROGRAMS = salloc$(EXEEXT)
+@HAVE_REAL_CRAY_TRUE@am__append_1 = -ljob
 subdir = src/salloc
 DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in
 ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
@@ -64,6 +65,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -74,6 +76,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -87,7 +90,8 @@ PROGRAMS = $(bin_PROGRAMS)
 am_salloc_OBJECTS = salloc.$(OBJEXT) opt.$(OBJEXT)
 salloc_OBJECTS = $(am_salloc_OBJECTS)
 am__DEPENDENCIES_1 = $(top_builddir)/src/api/libslurm.o
-salloc_DEPENDENCIES = $(am__DEPENDENCIES_1)
+am__DEPENDENCIES_2 =
+salloc_DEPENDENCIES = $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_2)
 salloc_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
 	--mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(salloc_LDFLAGS) \
 	$(LDFLAGS) -o $@
@@ -119,7 +123,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -156,6 +163,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -213,6 +221,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -248,6 +257,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
@@ -305,9 +315,7 @@ CLEANFILES = core.*
 INCLUDES = -I$(top_srcdir) $(BG_INCLUDES)
 salloc_SOURCES = salloc.c salloc.h opt.c opt.h
 convenience_libs = $(top_builddir)/src/api/libslurm.o -ldl
-salloc_LDADD = \
-	$(convenience_libs)
-
+salloc_LDADD = $(convenience_libs) $(am__append_1)
 salloc_LDFLAGS = -export-dynamic $(CMD_LDFLAGS)
 all: all-am
 
diff --git a/src/salloc/opt.c b/src/salloc/opt.c
index 1ba81d0f6..a546c4619 100644
--- a/src/salloc/opt.c
+++ b/src/salloc/opt.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -112,6 +112,7 @@
 #define OPT_IMMEDIATE   0x12
 #define OPT_WCKEY       0x14
 #define OPT_SIGNAL      0x15
+#define OPT_KILL_CMD    0x16
 
 /* generic getopt_long flags, integers and *not* valid characters */
 #define LONG_OPT_CPU_BIND    0x101
@@ -160,6 +161,7 @@
 #define LONG_OPT_TIME_MIN        0x140
 #define LONG_OPT_GRES            0x141
 #define LONG_OPT_WAIT_ALL_NODES  0x142
+#define LONG_OPT_REQ_SWITCH      0x143
 
 /*---- global variables, defined in opt.h ----*/
 opt_t opt;
@@ -281,7 +283,7 @@ static void _opt_default()
 
 	opt.ntasks = 1;
 	opt.ntasks_set = false;
-	opt.cpus_per_task = 1;
+	opt.cpus_per_task = 0;
 	opt.cpus_set = false;
 	opt.min_nodes = 1;
 	opt.max_nodes = 0;
@@ -338,11 +340,12 @@ static void _opt_default()
 	opt.nodelist	    = NULL;
 	opt.exc_nodes	    = NULL;
 
-	for (i=0; i<SYSTEM_DIMENSIONS; i++)
+	for (i=0; i<HIGHEST_DIMENSIONS; i++) {
+		opt.conn_type[i]    = (uint16_t) NO_VAL;
 		opt.geometry[i]	    = (uint16_t) NO_VAL;
+	}
 	opt.reboot          = false;
 	opt.no_rotate	    = false;
-	opt.conn_type	    = (uint16_t) NO_VAL;
 
 	opt.euid	    = (uid_t) -1;
 	opt.egid	    = (gid_t) -1;
@@ -355,6 +358,8 @@ static void _opt_default()
 	opt.reservation     = NULL;
 	opt.wait_all_nodes  = (uint16_t) NO_VAL;
 	opt.wckey           = NULL;
+	opt.req_switch      = -1;
+	opt.wait4switch     = -1;
 }
 
 /*---[ env var processing ]-----------------------------------------------*/
@@ -386,6 +391,7 @@ env_vars_t env_vars[] = {
   {"SALLOC_GEOMETRY",      OPT_GEOMETRY,   NULL,               NULL          },
   {"SALLOC_IMMEDIATE",     OPT_IMMEDIATE,  NULL,               NULL          },
   {"SALLOC_JOBID",         OPT_JOBID,      NULL,               NULL          },
+  {"SALLOC_KILL_CMD",      OPT_KILL_CMD,   NULL,               NULL          },
   {"SALLOC_MEM_BIND",      OPT_MEM_BIND,   NULL,               NULL          },
   {"SALLOC_NETWORK",       OPT_STRING    , &opt.network,       NULL          },
   {"SALLOC_NO_BELL",       OPT_NO_BELL,    NULL,               NULL          },
@@ -398,6 +404,8 @@ env_vars_t env_vars[] = {
   {"SALLOC_WAIT",          OPT_IMMEDIATE,  NULL,               NULL          },
   {"SALLOC_WAIT_ALL_NODES",OPT_INT,        &opt.wait_all_nodes,NULL          },
   {"SALLOC_WCKEY",         OPT_STRING,     &opt.wckey,         NULL          },
+  {"SALLOC_REQ_SWITCH",    OPT_INT,        &opt.req_switch,    NULL          },
+  {"SALLOC_WAIT4SWITCH",   OPT_INT,        &opt.wait4switch,   NULL          },
   {NULL, 0, NULL, NULL}
 };
 
@@ -481,7 +489,7 @@ _process_env_var(env_vars_t *e, const char *val)
 		break;
 
 	case OPT_CONN_TYPE:
-		opt.conn_type = verify_conn_type(val);
+		verify_conn_type(val, opt.conn_type);
 		break;
 
 	case OPT_NO_ROTATE:
@@ -540,6 +548,16 @@ _process_env_var(env_vars_t *e, const char *val)
 			exit(error_exit);
 		}
 		break;
+	case OPT_KILL_CMD:
+		if (val) {
+			opt.kill_command_signal = sig_name2num((char *) val);
+			if (opt.kill_command_signal == 0) {
+				error("Invalid signal name %s", val);
+				exit(error_exit);
+			}
+		}
+		opt.kill_command_signal_set = true;
+		break;
 	default:
 		/* do nothing */
 		break;
@@ -654,10 +672,12 @@ void set_options(const int argc, char **argv)
 		{"uid",           required_argument, 0, LONG_OPT_UID},
 		{"wait-all-nodes",required_argument, 0, LONG_OPT_WAIT_ALL_NODES},
 		{"wckey",         required_argument, 0, LONG_OPT_WCKEY},
+		{"switches",      required_argument, 0, LONG_OPT_REQ_SWITCH},
 		{NULL,            0,                 0, 0}
 	};
 	char *opt_string =
 		"+A:B:c:C:d:D:F:g:hHIJ:kK::L:m:n:N:Op:P:QRst:uU:vVw:W:x:";
+	char *pos_delimit;
 
 	struct option *optz = spank_option_table_create(long_options);
 
@@ -698,8 +718,7 @@ void set_options(const int argc, char **argv)
 			break;
 		case 'c':
 			opt.cpus_set = true;
-			opt.cpus_per_task =
-				_get_int(optarg, "cpus-per-task");
+			opt.cpus_per_task = _get_int(optarg, "cpus-per-task");
 			break;
 		case 'C':
 			xfree(opt.constraints);
@@ -932,7 +951,7 @@ void set_options(const int argc, char **argv)
 			}
 			break;
 		case LONG_OPT_CONNTYPE:
-			opt.conn_type = verify_conn_type(optarg);
+			verify_conn_type(optarg, opt.conn_type);
 			break;
 		case LONG_OPT_BEGIN:
 			opt.begin = parse_time(optarg, 0);
@@ -1117,6 +1136,16 @@ void set_options(const int argc, char **argv)
 		case LONG_OPT_WAIT_ALL_NODES:
 			opt.wait_all_nodes = strtol(optarg, NULL, 10);
 			break;
+		case LONG_OPT_REQ_SWITCH:
+			pos_delimit = strstr(optarg,"@");
+			if (pos_delimit != NULL) {
+				pos_delimit[0] = '\0';
+				pos_delimit++;
+				opt.wait4switch = time_str2mins(pos_delimit) *
+						   60;
+			}
+			opt.req_switch = _get_int(optarg, "switches");
+			break;
 		default:
 			if (spank_process_option(opt_char, optarg) < 0) {
 				error("Unrecognized command line parameter %c",
@@ -1231,7 +1260,7 @@ static bool _opt_verify(void)
 		opt.ntasks_set = 1;
 	}
 
-	if (opt.mincpus < opt.cpus_per_task)
+	if (opt.cpus_set && (opt.mincpus < opt.cpus_per_task))
 		opt.mincpus = opt.cpus_per_task;
 
 	if ((opt.euid != (uid_t) -1) && (opt.euid != opt.uid))
@@ -1253,7 +1282,7 @@ static bool _opt_verify(void)
 		verified = false;
 	}
 
-	if (opt.cpus_per_task <= 0) {
+	if (opt.cpus_set && (opt.cpus_per_task <= 0)) {
 		error("invalid number of cpus per task (-c %d)",
 		      opt.cpus_per_task);
 		verified = false;
@@ -1266,7 +1295,40 @@ static bool _opt_verify(void)
 		verified = false;
 	}
 
-#ifdef HAVE_BGL
+#if defined(HAVE_CRAY)
+	if (getenv("BASIL_RESERVATION_ID") != NULL) {
+		error("BASIL_RESERVATION_ID already set - running salloc "
+		      "within salloc?");
+		return false;
+	}
+	if (opt.no_shell) {
+		/*
+		 * As long as we are not using srun instead of aprun, this flag
+		 * makes no difference for the operational behaviour of aprun.
+		 */
+		error("--no-shell mode is not supported on Cray (due to srun)");
+		return false;
+	}
+	if (opt.shared && opt.shared != (uint16_t)NO_VAL) {
+		info("Space sharing nodes is not supported on Cray systems");
+		opt.shared = false;
+	}
+	if (opt.overcommit) {
+		info("Oversubscribing is not supported on Cray.");
+		opt.overcommit = false;
+	}
+	if (!opt.wait_all_nodes)
+		info("Cray needs --wait-all-nodes to wait on ALPS reservation");
+	opt.wait_all_nodes = true;
+	if (opt.kill_command_signal_set) {
+		/*
+		 * Disabled to avoid that the user supplies a weaker signal that
+		 * could cause the child processes not to terminate.
+		 */
+		info("The --kill-command is not supported on Cray.");
+		opt.kill_command_signal_set = false;
+	}
+#elif defined(HAVE_BGL)
 	if (opt.blrtsimage && strchr(opt.blrtsimage, ' ')) {
 		error("invalid BlrtsImage given '%s'", opt.blrtsimage);
 		verified = false;
@@ -1620,8 +1682,9 @@ static char *print_constraints()
 
 #define tf_(b) (b == true) ? "true" : "false"
 
-static void _opt_list()
+static void _opt_list(void)
 {
+	int i;
 	char *str;
 
 	info("defined options for program `%s'", opt.progname);
@@ -1674,8 +1737,11 @@ static void _opt_list()
 	str = print_constraints();
 	info("constraints    : %s", str);
 	xfree(str);
-	if (opt.conn_type != (uint16_t) NO_VAL)
-		info("conn_type      : %u", opt.conn_type);
+	for (i = 0; i < HIGHEST_DIMENSIONS; i++) {
+		if (opt.conn_type[i] == (uint16_t) NO_VAL)
+			break;
+		info("conn_type[%d]   : %u", i, opt.conn_type[i]);
+	}
 	str = print_geometry(opt.geometry);
 	info("geometry       : %s", str);
 	xfree(str);
@@ -1720,6 +1786,8 @@ static void _opt_list()
 	     opt.mem_bind == NULL ? "default" : opt.mem_bind);
 	str = print_commandline(command_argc, command_argv);
 	info("user command   : `%s'", str);
+	info("switch         : %d", opt.req_switch);
+	info("wait-for-switch: %d", opt.wait4switch);
 	xfree(str);
 
 }
@@ -1750,6 +1818,7 @@ static void _usage(void)
 "              [--network=type] [--mem-per-cpu=MB] [--qos=qos]\n"
 "              [--cpu_bind=...] [--mem_bind=...] [--reservation=name]\n"
 "              [--time-min=minutes] [--gres=list]\n"
+"              [--switch=max-switches[@max-time-to-wait]]\n"
 "              [executable [args...]]\n");
 }
 
@@ -1784,7 +1853,7 @@ static void _help(void)
 "      --mail-user=user        who to send email notification for job state\n"
 "                              changes\n"
 "  -n, --tasks=N               number of processors required\n"
-"      --nice[=value]          decrease secheduling priority by value\n"
+"      --nice[=value]          decrease scheduling priority by value\n"
 "      --no-bell               do NOT ring the terminal bell\n"
 "      --ntasks-per-node=n     number of tasks to invoke on each node\n"
 "  -N, --nodes=N               number of nodes on which to run (N = min[-max])\n"
@@ -1797,6 +1866,8 @@ static void _help(void)
 "      --time-min=minutes      minimum time limit (if distinct)\n"
 "      --uid=user_id           user ID to run job as (user root only)\n"
 "  -v, --verbose               verbose mode (multiple -v's increase verbosity)\n"
+"      --switch=max-switches{@max-time-to-wait}\n"
+"                              Optimum switches and max time to wait for optimum\n"
 "\n"
 "Constraint options:\n"
 "      --contiguous            demand a contiguous range of nodes\n"
diff --git a/src/salloc/opt.h b/src/salloc/opt.h
index a7904c180..119d9a57c 100644
--- a/src/salloc/opt.h
+++ b/src/salloc/opt.h
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -134,10 +134,10 @@ typedef struct salloc_options {
 	char *network;		/* --network=			*/
 
 	/* BLUEGENE SPECIFIC */
-	uint16_t geometry[SYSTEM_DIMENSIONS]; /* --geometry, -g	*/
+	uint16_t geometry[HIGHEST_DIMENSIONS]; /* --geometry, -g */
 	bool reboot;		/* --reboot			*/
 	bool no_rotate;		/* --no_rotate, -R		*/
-	uint16_t conn_type;	/* --conn-type 			*/
+	uint16_t conn_type[HIGHEST_DIMENSIONS];	/* --conn-type 	*/
 	char *blrtsimage;       /* --blrts-image BlrtsImage for block */
 	char *linuximage;       /* --linux-image LinuxImage for block */
 	char *mloaderimage;     /* --mloader-image mloaderImage for block */
@@ -155,6 +155,8 @@ typedef struct salloc_options {
 	char *reservation;	/* --reservation		*/
 	uint16_t wait_all_nodes;  /* --wait-nodes-ready=val	*/
 	char *wckey;            /* --wckey workload characterization key */
+	int req_switch;		/* Minimum number of switches	*/
+	int wait4switch;	/* Maximum time to wait for minimum switches */
 	char **spank_job_env;	/* SPANK controlled environment for job
 				 * Prolog and Epilog		*/
 	int spank_job_env_size;	/* size of spank_job_env	*/
diff --git a/src/salloc/salloc.c b/src/salloc/salloc.c
index 1112ed6d5..83e51f13b 100644
--- a/src/salloc/salloc.c
+++ b/src/salloc/salloc.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -42,6 +42,8 @@
 #  include "config.h"
 #endif
 
+#include <dirent.h>
+#include <fcntl.h>
 #include <pwd.h>
 #include <stdbool.h>
 #include <stdio.h>
@@ -53,9 +55,8 @@
 #include <time.h>
 #include <unistd.h>
 
-#include <slurm/slurm.h>
+#include "slurm/slurm.h"
 
-#include "src/common/basil_resv_conf.h"
 #include "src/common/env.h"
 #include "src/common/read_config.h"
 #include "src/common/slurm_rlimits_info.h"
@@ -70,18 +71,25 @@
 
 #ifdef HAVE_BG
 #include "src/common/node_select.h"
-#include "src/plugins/select/bluegene/plugin/bg_boot_time.h"
-#include "src/plugins/select/bluegene/wrap_rm_api.h"
-#endif
-
-#ifdef HAVE_CRAY
+#include "src/plugins/select/bluegene/bg_enums.h"
+#elif defined(HAVE_CRAY)
 #include "src/common/node_select.h"
+
+#ifdef HAVE_REAL_CRAY
+/*
+ * On Cray installations, the libjob headers are not automatically installed
+ * by default, while libjob.so always is, and kernels are > 2.6. Hence it is
+ * simpler to just duplicate the single declaration here.
+ */
+extern uint64_t job_getjid(pid_t pid);
+#endif
 #endif
 
 #ifndef __USE_XOPEN_EXTENDED
-extern pid_t getsid(pid_t pid);		/* missing from <unistd.h> */
+extern pid_t getpgid(pid_t pid);
 #endif
 
+#define HASH_RECS	100
 #define MAX_RETRIES	10
 #define POLL_SLEEP	3	/* retry interval in seconds  */
 
@@ -92,9 +100,11 @@ char *work_dir = NULL;
 static int is_interactive;
 
 enum possible_allocation_states allocation_state = NOT_GRANTED;
+pthread_cond_t  allocation_state_cond = PTHREAD_COND_INITIALIZER;
 pthread_mutex_t allocation_state_lock = PTHREAD_MUTEX_INITIALIZER;
 
 static bool exit_flag = false;
+static bool suspend_flag = false;
 static bool allocation_interrupted = false;
 static uint32_t pending_job_id = 0;
 static time_t last_timeout = 0;
@@ -105,6 +115,7 @@ static int  _fill_job_desc_from_opts(job_desc_msg_t *desc);
 static pid_t  _fork_command(char **command);
 static void _forward_signal(int signo);
 static void _job_complete_handler(srun_job_complete_msg_t *msg);
+static void _job_suspend_handler(suspend_msg_t *msg);
 static void _node_fail_handler(srun_node_fail_msg_t *msg);
 static void _pending_callback(uint32_t job_id);
 static void _ping_handler(srun_ping_msg_t *msg);
@@ -125,10 +136,6 @@ static int _blocks_dealloc(void);
 static int _wait_nodes_ready(resource_allocation_response_msg_t *alloc);
 #endif
 
-#ifdef HAVE_CRAY
-static int  _claim_reservation(resource_allocation_response_msg_t *alloc);
-#endif
-
 bool salloc_shutdown = false;
 /* Signals that are considered terminal before resource allocation. */
 int sig_array[] = {
@@ -144,6 +151,11 @@ static void _reset_input_mode (void)
 	int sig_block[] = { SIGTTOU, SIGTTIN, 0 };
 	xsignal_block (sig_block);
 	tcsetattr (STDIN_FILENO, TCSANOW, &saved_tty_attributes);
+	/* If salloc was run as interactive, with job control, reset the
+	 * foreground process group of the terminal to the process group of
+	 * the parent pid before exiting */
+	if (is_interactive)
+		tcsetpgrp(STDIN_FILENO, getpgid(getppid()));
 }
 
 int main(int argc, char *argv[])
@@ -252,7 +264,7 @@ int main(int argc, char *argv[])
 		}
 	}
 #else
-	} else if (!opt.no_shell) {
+	} else if ((!opt.no_shell) && (getpgrp() == tcgetpgrp(STDIN_FILENO))) {
 		is_interactive = true;
 	}
 #endif
@@ -280,6 +292,7 @@ int main(int argc, char *argv[])
 	callbacks.ping = _ping_handler;
 	callbacks.timeout = _timeout_handler;
 	callbacks.job_complete = _job_complete_handler;
+	callbacks.job_suspend = _job_suspend_handler;
 	callbacks.user_msg = _user_msg_handler;
 	callbacks.node_fail = _node_fail_handler;
 	/* create message thread to handle pings and such from slurmctld */
@@ -348,14 +361,6 @@ int main(int argc, char *argv[])
 				error("Something is wrong with the "
 				      "boot of the nodes.");
 			goto relinquish;
-		}	
-#endif
-#ifdef HAVE_CRAY
-		if (!_claim_reservation(alloc)) {
-			if(!allocation_interrupted)
-				error("Something is wrong with the ALPS "
-				      "resource reservation.");
-			goto relinquish;
 		}
 #endif
 	}
@@ -389,7 +394,7 @@ int main(int argc, char *argv[])
 		/* keep around for old scripts */
 		env_array_append_fmt(&env, "SLURM_NPROCS", "%d", opt.ntasks);
 	}
-	if (opt.cpus_per_task > 1) {
+	if (opt.cpus_set) {
 		env_array_append_fmt(&env, "SLURM_CPUS_PER_TASK", "%d",
 				     opt.cpus_per_task);
 	}
@@ -410,6 +415,7 @@ int main(int argc, char *argv[])
 	if (allocation_state == REVOKED) {
 		error("Allocation was revoked for job %u before command could "
 		      "be run", alloc->job_id);
+		pthread_cond_broadcast(&allocation_state_cond);
 		pthread_mutex_unlock(&allocation_state_lock);
 		if (slurm_complete_job(alloc->job_id, status) != 0) {
 			error("Unable to clean up allocation for job %u: %m",
@@ -418,6 +424,7 @@ int main(int argc, char *argv[])
 		return 1;
  	}
 	allocation_state = GRANTED;
+	pthread_cond_broadcast(&allocation_state_cond);
 	pthread_mutex_unlock(&allocation_state_lock);
 
 	/*  Ensure that salloc has initial terminal foreground control.  */
@@ -435,8 +442,13 @@ int main(int argc, char *argv[])
 
 		tcsetpgrp(STDIN_FILENO, pid);
 	}
-
+	pthread_mutex_lock(&allocation_state_lock);
+	if (suspend_flag)
+		pthread_cond_wait(&allocation_state_cond, &allocation_state_lock);
 	command_pid = _fork_command(command_argv);
+	pthread_cond_broadcast(&allocation_state_cond);
+	pthread_mutex_unlock(&allocation_state_lock);
+
 	/*
 	 * Wait for command to exit, OR for waitpid to be interrupted by a
 	 * signal.  Either way, we are going to release the allocation next.
@@ -449,12 +461,10 @@ int main(int argc, char *argv[])
 		/* NOTE: Do not process signals in separate pthread.
 		 * The signal will cause waitpid() to exit immediately. */
 		xsignal(SIGHUP,  _exit_on_signal);
-
 		/* Use WUNTRACED to treat stopped children like terminated ones */
 		do {
 			rc_pid = waitpid(command_pid, &status, WUNTRACED);
-		} while ((rc_pid == -1) && (!exit_flag));
-
+		} while (WIFSTOPPED(status) || ((rc_pid == -1) && (!exit_flag)));
 		if ((rc_pid == -1) && (errno != EINTR))
 			error("waitpid for %s failed: %m", command_argv[0]);
 	}
@@ -478,6 +488,7 @@ relinquish:
 		pthread_mutex_lock(&allocation_state_lock);
 		allocation_state = REVOKED;
 	}
+	pthread_cond_broadcast(&allocation_state_cond);
 	pthread_mutex_unlock(&allocation_state_lock);
 
 	slurm_free_resource_allocation_response_msg(alloc);
@@ -569,6 +580,26 @@ static void _set_submit_dir_env(void)
 /* Returns 0 on success, -1 on failure */
 static int _fill_job_desc_from_opts(job_desc_msg_t *desc)
 {
+	int i;
+#ifdef HAVE_REAL_CRAY
+	uint64_t pagg_id = job_getjid(getpid());
+	/*
+	 * Interactive sessions require pam_job.so in /etc/pam.d/common-session
+	 * since creating sgi_job containers requires root permissions. This is
+	 * the only exception where we allow the fallback of using the SID to
+	 * confirm the reservation (caught later, in do_basil_confirm).
+	 */
+	if (pagg_id == (uint64_t)-1) {
+		error("No SGI job container ID detected - please enable the "
+		      "Cray job service via /etc/init.d/job");
+	} else {
+		if (!desc->select_jobinfo)
+			desc->select_jobinfo = select_g_select_jobinfo_alloc();
+
+		select_g_select_jobinfo_set(desc->select_jobinfo,
+					    SELECT_JOBDATA_PAGG_ID, &pagg_id);
+	}
+#endif
 	desc->contiguous = opt.contiguous ? 1 : 0;
 	desc->features = opt.constraints;
 	desc->gres = opt.gres;
@@ -577,6 +608,10 @@ static int _fill_job_desc_from_opts(job_desc_msg_t *desc)
 	desc->name = xstrdup(opt.job_name);
 	desc->reservation = xstrdup(opt.reservation);
 	desc->wckey  = xstrdup(opt.wckey);
+	if (opt.req_switch >= 0)
+		desc->req_switch = opt.req_switch;
+	if (opt.wait4switch >= 0)
+		desc->wait4switch = opt.wait4switch;
 
 	desc->req_nodes = opt.nodelist;
 	desc->exc_nodes = opt.exc_nodes;
@@ -631,13 +666,15 @@ static int _fill_job_desc_from_opts(job_desc_msg_t *desc)
 		desc->priority     = 0;
 #ifdef HAVE_BG
 	if (opt.geometry[0] > 0) {
-		int i;
 		for (i=0; i<SYSTEM_DIMENSIONS; i++)
 			desc->geometry[i] = opt.geometry[i];
 	}
 #endif
-	if (opt.conn_type != (uint16_t)NO_VAL)
-		desc->conn_type = opt.conn_type;
+	for (i=0; i<HIGHEST_DIMENSIONS; i++) {
+		if (opt.conn_type[i] == (uint16_t)NO_VAL)
+			break;
+		desc->conn_type[i] = opt.conn_type[i];
+	}
 	if (opt.reboot)
 		desc->reboot = 1;
 	if (opt.no_rotate)
@@ -663,8 +700,10 @@ static int _fill_job_desc_from_opts(job_desc_msg_t *desc)
 	if (opt.overcommit) {
 		desc->min_cpus = opt.min_nodes;
 		desc->overcommit = opt.overcommit;
-	} else
+	} else if (opt.cpus_set)
 		desc->min_cpus = opt.ntasks * opt.cpus_per_task;
+	else
+		desc->min_cpus = opt.ntasks;
 	if (opt.ntasks_set)
 		desc->num_tasks = opt.ntasks;
 	if (opt.cpus_set)
@@ -802,6 +841,7 @@ static void _job_complete_handler(srun_job_complete_msg_t *comp)
 			}
 		}
 		allocation_state = REVOKED;
+		pthread_cond_broadcast(&allocation_state_cond);
 		pthread_mutex_unlock(&allocation_state_lock);
 		/*
 		 * Clean up child process: only if the forked process has not
@@ -823,16 +863,21 @@ static void _job_complete_handler(srun_job_complete_msg_t *comp)
 				if (tpgid != command_pid && tpgid != getpgrp())
 					killpg(tpgid, SIGHUP);
 			}
-#if defined(HAVE_CRAY)
-			signal = SIGTERM;
-#else
+
 			if (opt.kill_command_signal_set)
 				signal = opt.kill_command_signal;
+#ifdef SALLOC_KILL_CMD
+			else if (is_interactive)
+				signal = SIGHUP;
+			else
+				signal = SIGTERM;
 #endif
 			if (signal) {
 				 verbose("Sending signal %d to command \"%s\","
 					 " pid %d",
 					 signal, command_argv[0], command_pid);
+				if (suspend_flag)
+					_forward_signal(SIGCONT);
 				_forward_signal(signal);
 			}
 		}
@@ -842,6 +887,15 @@ static void _job_complete_handler(srun_job_complete_msg_t *comp)
 	}
 }
 
+static void _job_suspend_handler(suspend_msg_t *msg)
+{
+	if (msg->op == SUSPEND_JOB) {
+		verbose("job has been suspended");
+	} else if (msg->op == RESUME_JOB) {
+		verbose("job has been resumed");
+	}
+}
+
 /*
  * Job has been notified of it's approaching time limit.
  * Job will be killed shortly after timeout.
@@ -999,8 +1053,7 @@ static int _blocks_dealloc(void)
 		return -1;
 	}
 	for (i=0; i<new_bg_ptr->record_count; i++) {
-		if(new_bg_ptr->block_array[i].state
-		   == RM_PARTITION_DEALLOCATING) {
+		if(new_bg_ptr->block_array[i].state == BG_BLOCK_TERM) {
 			rc = 1;
 			break;
 		}
@@ -1071,22 +1124,3 @@ static int _wait_nodes_ready(resource_allocation_response_msg_t *alloc)
 	return is_ready;
 }
 #endif	/* HAVE_BG */
-
-#ifdef HAVE_CRAY
-/* returns 1 if job and nodes are ready for job to begin, 0 otherwise */
-static int _claim_reservation(resource_allocation_response_msg_t *alloc)
-{
-	int rc = 0;
-	uint32_t resv_id = 0;
-
-	select_g_select_jobinfo_get(alloc->select_jobinfo,
-				    SELECT_JOBDATA_RESV_ID,
-				    &resv_id);
-	if (!resv_id)
-		return rc;
-	if (basil_resv_conf(resv_id, alloc->job_id) == SLURM_SUCCESS)
-		rc = 1;
-	xfree(resv_id);
-	return rc;
-}
-#endif
diff --git a/src/salloc/salloc.h b/src/salloc/salloc.h
index 3267b9ba1..fe3b8bba8 100644
--- a/src/salloc/salloc.h
+++ b/src/salloc/salloc.h
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/sattach/Makefile.in b/src/sattach/Makefile.in
index a429f8eb7..4ba52db4a 100644
--- a/src/sattach/Makefile.in
+++ b/src/sattach/Makefile.in
@@ -64,6 +64,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -74,6 +75,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -120,7 +122,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -157,6 +162,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -214,6 +220,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -249,6 +256,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/sattach/attach.c b/src/sattach/attach.c
index 31f9f28d6..5ca53f944 100644
--- a/src/sattach/attach.c
+++ b/src/sattach/attach.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/sattach/opt.c b/src/sattach/opt.c
index 81999b3b0..806688411 100644
--- a/src/sattach/opt.c
+++ b/src/sattach/opt.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/sattach/opt.h b/src/sattach/opt.h
index c434c4249..2bd3e0978 100644
--- a/src/sattach/opt.h
+++ b/src/sattach/opt.h
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -38,7 +38,7 @@
 #include <sys/types.h>
 #include <unistd.h>
 
-#include <slurm/slurm.h>
+#include "slurm/slurm.h"
 
 #include "src/common/macros.h" /* true and false */
 #include "src/common/env.h"
diff --git a/src/sattach/sattach.c b/src/sattach/sattach.c
index accaa113b..697f424af 100644
--- a/src/sattach/sattach.c
+++ b/src/sattach/sattach.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -44,7 +44,7 @@
 #include <time.h>
 #include <unistd.h>
 
-#include <slurm/slurm.h>
+#include "slurm/slurm.h"
 
 #include "src/api/step_io.h"
 
@@ -107,9 +107,9 @@ static void _msg_thr_destroy(message_thread_state_t *mts);
 static void _handle_msg(void *arg, slurm_msg_t *msg);
 
 static struct io_operations message_socket_ops = {
-	readable:	&eio_message_socket_readable,
-	handle_read:	&eio_message_socket_accept,
-	handle_msg:     &_handle_msg
+	.readable = &eio_message_socket_readable,
+	.handle_read = &eio_message_socket_accept,
+	.handle_msg = &_handle_msg
 };
 
 static struct termios termdefaults;
@@ -124,6 +124,7 @@ int sattach(int argc, char *argv[])
 	slurm_cred_t *fake_cred;
 	message_thread_state_t *mts;
 	client_io_t *io;
+	char *hosts;
 
 	log_init(xbasename(argv[0]), logopt, 0, NULL);
 	_set_exit_code();
@@ -157,10 +158,12 @@ int sattach(int argc, char *argv[])
 			_nodeid_from_layout(layout, opt.fds.in.taskid);
 	}
 
+	if (layout->front_end)
+		hosts = layout->front_end;
+	else
+		hosts = layout->node_list;
 	fake_cred = _generate_fake_cred(opt.jobid, opt.stepid,
-					opt.uid, layout->node_list,
-					layout->node_cnt);
-
+					opt.uid, hosts, layout->node_cnt);
 	mts = _msg_thr_create(layout->node_cnt, layout->task_cnt);
 
 	io = client_io_handler_create(opt.fds, layout->task_cnt,
@@ -380,6 +383,7 @@ static int _attach_to_tasks(uint32_t jobid,
 	List nodes_resp = NULL;
 	int timeout;
 	reattach_tasks_request_msg_t reattach_msg;
+	char *hosts;
 
 	slurm_msg_t_init(&msg);
 
@@ -396,8 +400,11 @@ static int _attach_to_tasks(uint32_t jobid,
 	msg.msg_type = REQUEST_REATTACH_TASKS;
 	msg.data = &reattach_msg;
 
-	nodes_resp = slurm_send_recv_msgs(layout->node_list, &msg,
-					  timeout, false);
+	if (layout->front_end)
+		hosts = layout->front_end;
+	else
+		hosts = layout->node_list;
+	nodes_resp = slurm_send_recv_msgs(hosts, &msg, timeout, false);
 	if (nodes_resp == NULL) {
 		error("slurm_send_recv_msgs failed: %m");
 		return SLURM_ERROR;
diff --git a/src/sbatch/Makefile.in b/src/sbatch/Makefile.in
index 3ddd40a62..5ae3ad0cf 100644
--- a/src/sbatch/Makefile.in
+++ b/src/sbatch/Makefile.in
@@ -64,6 +64,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -74,6 +75,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -120,7 +122,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -157,6 +162,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -214,6 +220,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -249,6 +256,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/sbatch/mult_cluster.c b/src/sbatch/mult_cluster.c
index cd0dd3f7f..6f4f3eb46 100644
--- a/src/sbatch/mult_cluster.c
+++ b/src/sbatch/mult_cluster.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/sbatch/mult_cluster.h b/src/sbatch/mult_cluster.h
index 4c7208595..6ff21ce41 100644
--- a/src/sbatch/mult_cluster.h
+++ b/src/sbatch/mult_cluster.h
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/sbatch/opt.c b/src/sbatch/opt.c
index 5f48e2b63..d60ebe449 100644
--- a/src/sbatch/opt.c
+++ b/src/sbatch/opt.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -167,6 +167,7 @@
 #define LONG_OPT_GRES            0x14f
 #define LONG_OPT_WAIT_ALL_NODES  0x150
 #define LONG_OPT_EXPORT          0x151
+#define LONG_OPT_REQ_SWITCH      0x152
 
 /*---- global variables, defined in opt.h ----*/
 opt_t opt;
@@ -291,7 +292,7 @@ static void _opt_default()
 
 	opt.ntasks = 1;
 	opt.ntasks_set = false;
-	opt.cpus_per_task = 1;
+	opt.cpus_per_task = 0;
 	opt.cpus_set = false;
 	opt.min_nodes = 1;
 	opt.max_nodes = 0;
@@ -347,11 +348,12 @@ static void _opt_default()
 	opt.nodelist	    = NULL;
 	opt.exc_nodes	    = NULL;
 
-	for (i=0; i<HIGHEST_DIMENSIONS; i++)
+	for (i=0; i<HIGHEST_DIMENSIONS; i++) {
+		opt.conn_type[i]    = (uint16_t) NO_VAL;
 		opt.geometry[i]	    = (uint16_t) NO_VAL;
+	}
 	opt.reboot          = false;
 	opt.no_rotate	    = false;
-	opt.conn_type	    = (uint16_t) NO_VAL;
 
 	opt.euid	    = (uid_t) -1;
 	opt.egid	    = (gid_t) -1;
@@ -368,6 +370,8 @@ static void _opt_default()
 	opt.acctg_freq        = -1;
 	opt.reservation       = NULL;
 	opt.wckey             = NULL;
+	opt.req_switch        = -1;
+	opt.wait4switch       = -1;
 
 	opt.ckpt_interval = 0;
 	opt.ckpt_interval_str = NULL;
@@ -472,6 +476,8 @@ env_vars_t env_vars[] = {
   {"SBATCH_WCKEY",         OPT_STRING,     &opt.wckey,         NULL          },
   {"SBATCH_GET_USER_ENV",  OPT_GET_USER_ENV, NULL,             NULL          },
   {"SBATCH_EXPORT",        OPT_STRING,     &opt.export_env,    NULL          },
+  {"SBATCH_REQ_SWITCH",    OPT_INT,        &opt.req_switch,    NULL          },
+  {"SBATCH_WAIT4SWITCH",   OPT_INT,        &opt.wait4switch,   NULL          },
 
   {NULL, 0, NULL, NULL}
 };
@@ -575,7 +581,7 @@ _process_env_var(env_vars_t *e, const char *val)
 		break;
 
 	case OPT_CONN_TYPE:
-		opt.conn_type = verify_conn_type(val);
+		verify_conn_type(val, opt.conn_type);
 		break;
 
 	case OPT_NO_ROTATE:
@@ -632,7 +638,11 @@ _process_env_var(env_vars_t *e, const char *val)
 		break;
 	case OPT_CLUSTERS:
 		if (!(opt.clusters = slurmdb_get_info_cluster((char *)val))) {
-			error("'%s' invalid entry for --clusters", val);
+			error("'%s' can't be reached now, "
+			      "or it is an invalid entry for "
+			      "--cluster.  Use 'sacctmgr --list "
+			      "cluster' to see available clusters.",
+			      optarg);
 			exit(1);
 		}
 		break;
@@ -737,11 +747,13 @@ static struct option long_options[] = {
 	{"wait-all-nodes",required_argument, 0, LONG_OPT_WAIT_ALL_NODES},
 	{"wckey",         required_argument, 0, LONG_OPT_WCKEY},
 	{"wrap",          required_argument, 0, LONG_OPT_WRAP},
+	{"switches",      required_argument, 0, LONG_OPT_REQ_SWITCH},
 	{NULL,            0,                 0, 0}
 };
 
 static char *opt_string =
 	"+bA:B:c:C:d:D:e:F:g:hHi:IJ:kL:m:M:n:N:o:Op:P:QRst:uU:vVw:x:";
+char *pos_delimit;
 
 
 /*
@@ -1148,8 +1160,7 @@ static void _set_options(int argc, char **argv)
 			break;
 		case 'c':
 			opt.cpus_set = true;
-			opt.cpus_per_task =
-				_get_int(optarg, "cpus-per-task");
+			opt.cpus_per_task = _get_int(optarg, "cpus-per-task");
 			break;
 		case 'C':
 			xfree(opt.constraints);
@@ -1227,7 +1238,10 @@ static void _set_options(int argc, char **argv)
 				list_destroy(opt.clusters);
 			if (!(opt.clusters =
 			      slurmdb_get_info_cluster(optarg))) {
-				error("'%s' invalid entry for --clusters",
+				error("'%s' can't be reached now, "
+				      "or it is an invalid entry for "
+				      "--cluster.  Use 'sacctmgr --list "
+				      "cluster' to see available clusters.",
 				      optarg);
 				exit(1);
 			}
@@ -1411,7 +1425,7 @@ static void _set_options(int argc, char **argv)
 			}
 			break;
 		case LONG_OPT_CONNTYPE:
-			opt.conn_type = verify_conn_type(optarg);
+			verify_conn_type(optarg, opt.conn_type);
 			break;
 		case LONG_OPT_BEGIN:
 			opt.begin = parse_time(optarg, 0);
@@ -1617,6 +1631,15 @@ static void _set_options(int argc, char **argv)
 			xfree(opt.export_env);
 			opt.export_env = xstrdup(optarg);
 			break;
+		case LONG_OPT_REQ_SWITCH:
+			pos_delimit = strstr(optarg,"@");
+			if (pos_delimit != NULL) {
+				pos_delimit[0] = '\0';
+				pos_delimit++;
+				opt.wait4switch = time_str2mins(pos_delimit) * 60;
+			}
+			opt.req_switch = _get_int(optarg, "switches");
+			break;
 		default:
 			if (spank_process_option (opt_char, optarg) < 0) {
 				error("Unrecognized command line parameter %c",
@@ -1756,6 +1779,15 @@ static void _set_pbs_options(int argc, char **argv)
 				      "-%d and %d", NICE_OFFSET, NICE_OFFSET);
 				exit(error_exit);
 			}
+			if (opt.nice < 0) {
+				uid_t my_uid = getuid();
+				if ((my_uid != 0) &&
+				    (my_uid != slurm_get_slurm_user_id())) {
+					error("Nice value must be "
+					      "non-negative, value ignored");
+					opt.nice = 0;
+				}
+			}
 			break;
 		case 'q':
 			xfree(opt.partition);
@@ -1964,6 +1996,47 @@ static void _parse_pbs_resource_list(char *rl)
 			}
 
 			xfree(temp);
+#ifdef HAVE_CRAY
+		/*
+		 * NB: no "mppmem" here since it specifies per-PE memory units,
+		 *     whereas SLURM uses per-node and per-CPU memory units.
+		 */
+		} else if (!strncmp(rl + i, "mppdepth=", 9)) {
+			/* Cray: number of CPUs (threads) per processing element */
+			i += 9;
+			temp = _get_pbs_option_value(rl, &i);
+			if (temp) {
+				opt.cpus_per_task = _get_int(temp, "mppdepth");
+				opt.cpus_set	  = true;
+			}
+			xfree(temp);
+		} else if (!strncmp(rl + i, "mppnodes=", 9)) {
+			/* Cray `nodes' variant: hostlist without prefix */
+			i += 9;
+			temp = _get_pbs_option_value(rl, &i);
+			if (!temp) {
+				error("No value given for mppnodes");
+				exit(error_exit);
+			}
+			xfree(opt.nodelist);
+			opt.nodelist = temp;
+		} else if (!strncmp(rl + i, "mppnppn=", 8)) {
+			/* Cray: number of processing elements per node */
+			i += 8;
+			temp = _get_pbs_option_value(rl, &i);
+			if (temp)
+				opt.ntasks_per_node = _get_int(temp, "mppnppn");
+			xfree(temp);
+		} else if (!strncmp(rl + i, "mppwidth=", 9)) {
+			/* Cray: task width (number of processing elements) */
+			i += 9;
+			temp = _get_pbs_option_value(rl, &i);
+			if (temp) {
+				opt.ntasks     = _get_int(temp, "mppwidth");
+				opt.ntasks_set = true;
+			}
+			xfree(temp);
+#endif	/* HAVE_CRAY */
 		} else if(!strncmp(rl+i, "nice=", 5)) {
 			i+=5;
 			temp = _get_pbs_option_value(rl, &i);
@@ -1976,6 +2049,15 @@ static void _parse_pbs_resource_list(char *rl)
 				      "-%d and %d", NICE_OFFSET, NICE_OFFSET);
 				exit(error_exit);
 			}
+			if (opt.nice < 0) {
+				uid_t my_uid = getuid();
+				if ((my_uid != 0) &&
+				    (my_uid != slurm_get_slurm_user_id())) {
+					error("Nice value must be "
+					      "non-negative, value ignored");
+					opt.nice = 0;
+				}
+			}
 			xfree(temp);
 		} else if(!strncmp(rl+i, "nodes=", 6)) {
 			i+=6;
@@ -2052,7 +2134,7 @@ static bool _opt_verify(void)
 		opt.ntasks_set = 1;
 	}
 
-	if (opt.mincpus < opt.cpus_per_task)
+	if (opt.cpus_set && (opt.mincpus < opt.cpus_per_task))
 		opt.mincpus = opt.cpus_per_task;
 
 	if ((opt.job_name == NULL) && (opt.script_argc > 0))
@@ -2066,7 +2148,7 @@ static bool _opt_verify(void)
 		verified = false;
 	}
 
-	if (opt.cpus_per_task <= 0) {
+	if (opt.cpus_set && (opt.cpus_per_task <= 0)) {
 		error("invalid number of cpus per task (-c %d)",
 		      opt.cpus_per_task);
 		verified = false;
@@ -2143,22 +2225,27 @@ static bool _opt_verify(void)
 		}
 	}
 
+	if (opt.cpus_set &&
+	    setenvf(NULL, "SLURM_CPUS_PER_TASK", "%d", opt.cpus_per_task)) {
+		error("Can't set SLURM_CPUS_PER_TASK env variable");
+	}
+
 	_set_distribution(opt.distribution, &dist, &lllp_dist);
-	if(dist)
-		if (setenvf(NULL, "SLURM_DISTRIBUTION", "%s", dist)) {
-			error("Can't set SLURM_DISTRIBUTION env variable");
-		}
+	if (dist &&
+	    setenvf(NULL, "SLURM_DISTRIBUTION", "%s", dist)) {
+		error("Can't set SLURM_DISTRIBUTION env variable");
+	}
 
-	if(opt.distribution == SLURM_DIST_PLANE)
-		if (setenvf(NULL, "SLURM_DIST_PLANESIZE", "%d",
-			    opt.plane_size)) {
-			error("Can't set SLURM_DIST_PLANESIZE env variable");
-		}
+	if ((opt.distribution == SLURM_DIST_PLANE) &&
+	    setenvf(NULL, "SLURM_DIST_PLANESIZE", "%d", opt.plane_size)) {
+		error("Can't set SLURM_DIST_PLANESIZE env variable");
+	}
 
-	if(lllp_dist)
-		if (setenvf(NULL, "SLURM_DIST_LLLP", "%s", lllp_dist)) {
-			error("Can't set SLURM_DIST_LLLP env variable");
-		}
+	if (lllp_dist && setenvf(NULL, "SLURM_DIST_LLLP", "%s", lllp_dist)) {
+		error("Can't set SLURM_DIST_LLLP env variable");
+	}
+
+	
 
 	/* bound threads/cores from ntasks_cores/sockets */
 	if (opt.ntasks_per_core > 0) {
@@ -2567,8 +2654,9 @@ static void _fullpath(char **filename, const char *cwd)
 
 #define tf_(b) (b == true) ? "true" : "false"
 
-static void _opt_list()
+static void _opt_list(void)
 {
+	int i;
 	char *str;
 
 	info("defined options for program `%s'", opt.progname);
@@ -2580,8 +2668,8 @@ static void _opt_list()
 	info("cwd               : %s", opt.cwd);
 	info("ntasks            : %d %s", opt.ntasks,
 		opt.ntasks_set ? "(set)" : "(default)");
-	info("cpus_per_task     : %d %s", opt.cpus_per_task,
-		opt.cpus_set ? "(set)" : "(default)");
+	if (opt.cpus_set)
+		info("cpus_per_task     : %d", opt.cpus_per_task);
 	if (opt.max_nodes) {
 		info("nodes             : %d-%d",
 		     opt.min_nodes, opt.max_nodes);
@@ -2622,8 +2710,11 @@ static void _opt_list()
 	str = print_constraints();
 	info("constraints       : %s", str);
 	xfree(str);
-	if (opt.conn_type != (uint16_t) NO_VAL)
-		info("conn_type         : %u", opt.conn_type);
+	for (i = 0; i < HIGHEST_DIMENSIONS; i++) {
+		if (opt.conn_type[i] == (uint16_t) NO_VAL)
+			break;
+		info("conn_type[%d]    : %u", i, opt.conn_type[i]);
+	}
 	str = print_geometry(opt.geometry);
 	info("geometry          : %s", str);
 	xfree(str);
@@ -2669,6 +2760,8 @@ static void _opt_list()
 	info("plane_size        : %u", opt.plane_size);
 	info("propagate         : %s",
 	     opt.propagate == NULL ? "NONE" : opt.propagate);
+	info("switch            : %d", opt.req_switch);
+	info("wait-for-switch   : %d", opt.wait4switch);
 	str = print_commandline(opt.script_argc, opt.script_argv);
 	info("remote command    : `%s'", str);
 	xfree(str);
@@ -2702,6 +2795,7 @@ static void _usage(void)
 "              [--nodefile=file] [--nodelist=hosts] [--exclude=hosts]\n"
 "              [--network=type] [--mem-per-cpu=MB] [--qos=qos] [--gres=list]\n"
 "              [--cpu_bind=...] [--mem_bind=...] [--reservation=name]\n"
+"              [--switch=max-switches{@max-time-to-wait}]\n"
 "              [--export[=names]] executable [args...]\n");
 }
 
@@ -2740,7 +2834,7 @@ static void _help(void)
 "      --mail-user=user        who to send email notification for job state\n"
 "                              changes\n"
 "  -n, --ntasks=ntasks         number of tasks to run\n"
-"      --nice[=value]          decrease secheduling priority by value\n"
+"      --nice[=value]          decrease scheduling priority by value\n"
 "      --no-requeue            if set, do not permit the job to be requeued\n"
 "      --ntasks-per-node=n     number of tasks to invoke on each node\n"
 "  -N, --nodes=N               number of nodes on which to run (N = min[-max])\n"
@@ -2756,6 +2850,9 @@ static void _help(void)
 "  -s, --share                 share nodes with other jobs\n"
 "      --uid=user_id           user ID to run job as (user root only)\n"
 "  -v, --verbose               verbose mode (multiple -v's increase verbosity)\n"
+"      --wrap[=command string] wrap commmand string in a sh script and submit\n"
+"      --switch=max-switches{@max-time-to-wait}\n"
+"                              Optimum switches and max time to wait for optimum\n"
 "\n"
 "Constraint options:\n"
 "      --contiguous            demand a contiguous range of nodes\n"
diff --git a/src/sbatch/opt.h b/src/sbatch/opt.h
index f36eb4500..556cae705 100644
--- a/src/sbatch/opt.h
+++ b/src/sbatch/opt.h
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -147,7 +147,7 @@ typedef struct sbatch_options {
 	uint16_t geometry[HIGHEST_DIMENSIONS]; /* --geometry, -g	*/
 	bool reboot;		/* --reboot			*/
 	bool no_rotate;		/* --no_rotate, -R		*/
-	uint16_t conn_type;	/* --conn-type 			*/
+	uint16_t conn_type[HIGHEST_DIMENSIONS];	/* --conn-type 	*/
 	char *blrtsimage;       /* --blrts-image BlrtsImage for block */
 	char *linuximage;       /* --linux-image LinuxImage for block */
 	char *mloaderimage;     /* --mloader-image mloaderImage for block */
@@ -168,6 +168,8 @@ typedef struct sbatch_options {
  	int ckpt_interval;	/* --checkpoint (int minutes)   */
  	char *ckpt_interval_str;/* --checkpoint (string)        */
  	char *ckpt_dir;		/* --checkpoint-dir (string)    */
+	int req_switch;		/* Minimum number of switches   */
+	int wait4switch;	/* Maximum time to wait for minimum switches */
 	char **spank_job_env;	/* SPANK controlled environment for job
 				 * Prolog and Epilog		*/
 	int spank_job_env_size;	/* size of spank_job_env	*/
diff --git a/src/sbatch/sbatch.c b/src/sbatch/sbatch.c
index 4d8582340..e562f815e 100644
--- a/src/sbatch/sbatch.c
+++ b/src/sbatch/sbatch.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -50,7 +50,7 @@
 #include <sys/param.h>               /* MAXPATHLEN */
 #include <fcntl.h>
 
-#include <slurm/slurm.h>
+#include "slurm/slurm.h"
 
 #include "src/common/env.h"
 #include "src/common/plugstack.h"
@@ -66,6 +66,7 @@
 
 static void  _env_merge_filter(job_desc_msg_t *desc);
 static int   _fill_job_desc_from_opts(job_desc_msg_t *desc);
+static int   _check_cluster_specific_settings(job_desc_msg_t *desc);
 static void *_get_script_buffer(const char *filename, int *size);
 static char *_script_wrap(char *command_string);
 static void  _set_exit_code(void);
@@ -152,6 +153,9 @@ int main(int argc, char *argv[])
 	if (sbatch_set_first_avail_cluster(&desc) != SLURM_SUCCESS)
 		exit(error_exit);
 
+	if (_check_cluster_specific_settings(&desc) != SLURM_SUCCESS)
+		exit(error_exit);
+
 	while (slurm_submit_batch_job(&desc, &resp) < 0) {
 		static char *msg;
 
@@ -220,9 +224,35 @@ static void _env_merge_filter(job_desc_msg_t *desc)
 	xfree(tmp);
 }
 
+/* Returns SLURM_ERROR if settings are invalid for chosen cluster */
+static int _check_cluster_specific_settings(job_desc_msg_t *req)
+{
+	int rc = SLURM_SUCCESS;
+
+	if (is_cray_system()) {
+		/*
+		 * Fix options and inform user, but do not abort submission.
+		 */
+		if (req->shared && req->shared != (uint16_t)NO_VAL) {
+			info("--share is not (yet) supported on Cray.");
+			req->shared = false;
+		}
+		if (req->overcommit && req->overcommit != (uint8_t)NO_VAL) {
+			info("--overcommit is not supported on Cray.");
+			req->overcommit = false;
+		}
+		if (req->wait_all_nodes && req->wait_all_nodes != (uint16_t)NO_VAL) {
+			info("--wait-all-nodes is handled automatically on Cray.");
+			req->wait_all_nodes = false;
+		}
+	}
+	return rc;
+}
+
 /* Returns 0 on success, -1 on failure */
 static int _fill_job_desc_from_opts(job_desc_msg_t *desc)
 {
+		int i;
 	extern char **environ;
 
 	if (opt.jobid_set)
@@ -284,16 +314,18 @@ static int _fill_job_desc_from_opts(job_desc_msg_t *desc)
 	if (opt.hold)
 		desc->priority     = 0;
 
-	if ((int)opt.geometry[0] > 0) {
-		int i;
+	if (opt.geometry[0] != (uint16_t) NO_VAL) {
 		int dims = slurmdb_setup_cluster_dims();
 
 		for (i=0; i<dims; i++)
 			desc->geometry[i] = opt.geometry[i];
 	}
 
-	if (opt.conn_type != (uint16_t) NO_VAL)
-		desc->conn_type = opt.conn_type;
+	for (i=0; i<HIGHEST_DIMENSIONS; i++) {
+		if (opt.conn_type[i] == (uint16_t)NO_VAL)
+			break;
+		desc->conn_type[i] = opt.conn_type[i];
+	}
 	if (opt.reboot)
 		desc->reboot = 1;
 	if (opt.no_rotate)
@@ -319,8 +351,10 @@ static int _fill_job_desc_from_opts(job_desc_msg_t *desc)
 	if (opt.overcommit) {
 		desc->min_cpus = MAX(opt.min_nodes, 1);
 		desc->overcommit = opt.overcommit;
-	} else
+	} else if (opt.cpus_set)
 		desc->min_cpus = opt.ntasks * opt.cpus_per_task;
+	else
+		desc->min_cpus = opt.ntasks;
 	desc->max_cpus = desc->max_cpus;
 
 	if (opt.ntasks_set)
@@ -398,6 +432,11 @@ static int _fill_job_desc_from_opts(job_desc_msg_t *desc)
 		desc->spank_job_env      = opt.spank_job_env;
 		desc->spank_job_env_size = opt.spank_job_env_size;
 	}
+	if (opt.req_switch >= 0)
+		desc->req_switch = opt.req_switch;
+	if (opt.wait4switch >= 0)
+		desc->wait4switch = opt.wait4switch;
+
 
 	return 0;
 }
diff --git a/src/sbcast/Makefile.in b/src/sbcast/Makefile.in
index 2d15ad1e9..c7cfd7cfe 100644
--- a/src/sbcast/Makefile.in
+++ b/src/sbcast/Makefile.in
@@ -67,6 +67,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -77,6 +78,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -122,7 +124,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -159,6 +164,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -216,6 +222,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -251,6 +258,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/sbcast/agent.c b/src/sbcast/agent.c
index 0a9ce8af8..9c3864c0e 100644
--- a/src/sbcast/agent.c
+++ b/src/sbcast/agent.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -47,8 +47,8 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <unistd.h>
-#include <slurm/slurm_errno.h>
 
+#include "slurm/slurm_errno.h"
 #include "src/common/hostlist.h"
 #include "src/common/log.h"
 #include "src/common/macros.h"
diff --git a/src/sbcast/opts.c b/src/sbcast/opts.c
index 155c325ee..b2f14e9ea 100644
--- a/src/sbcast/opts.c
+++ b/src/sbcast/opts.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/sbcast/sbcast.c b/src/sbcast/sbcast.c
index fa70eeb0c..b53d45564 100644
--- a/src/sbcast/sbcast.c
+++ b/src/sbcast/sbcast.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -47,10 +47,10 @@
 #include <stdlib.h>
 #include <time.h>
 #include <unistd.h>
-#include <slurm/slurm_errno.h>
 #include <sys/types.h>
 #include <sys/stat.h>
 
+#include "slurm/slurm_errno.h"
 #include "src/common/forward.h"
 #include "src/common/hostlist.h"
 #include "src/common/log.h"
diff --git a/src/sbcast/sbcast.h b/src/sbcast/sbcast.h
index e327b903a..d7caa5966 100644
--- a/src/sbcast/sbcast.h
+++ b/src/sbcast/sbcast.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -44,9 +44,9 @@
 #  include "config.h"
 #endif
 
-#include <slurm/slurm.h>
-#include <src/common/macros.h>
-#include <src/common/slurm_protocol_defs.h>
+#include "slurm/slurm.h"
+#include "src/common/macros.h"
+#include "src/common/slurm_protocol_defs.h"
 
 struct sbcast_parameters {
 	uint32_t block_size;
diff --git a/src/scancel/Makefile.in b/src/scancel/Makefile.in
index 802dbfd07..69a145d44 100644
--- a/src/scancel/Makefile.in
+++ b/src/scancel/Makefile.in
@@ -66,6 +66,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -76,6 +77,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -121,7 +123,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -158,6 +163,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -215,6 +221,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -250,6 +257,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/scancel/opt.c b/src/scancel/opt.c
index b5177c79c..445e9f099 100644
--- a/src/scancel/opt.c
+++ b/src/scancel/opt.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -208,7 +208,11 @@ static void _opt_default()
 	opt.account	= NULL;
 	opt.batch	= false;
 	opt.clusters    = NULL;
+#ifdef HAVE_FRONT_END
+	opt.ctld	= true;
+#else
 	opt.ctld	= false;
+#endif
 	opt.interactive	= false;
 	opt.job_cnt	= 0;
 	opt.job_name	= NULL;
@@ -361,11 +365,14 @@ static void _opt_args(int argc, char **argv)
 			break;
 		case (int)'M':
 			opt.ctld = true;
-			if(opt.clusters)
+			if (opt.clusters)
 				list_destroy(opt.clusters);
-			if(!(opt.clusters =
-			     slurmdb_get_info_cluster(optarg))) {
-				error("'%s' invalid entry for --cluster",
+			opt.clusters = slurmdb_get_info_cluster(optarg);
+			if (!opt.clusters) {
+				error("'%s' can't be reached now, "
+				      "or it is an invalid entry for "
+				      "--cluster.  Use 'sacctmgr --list "
+				      "cluster' to see available clusters.",
 				      optarg);
 				exit(1);
 			}
diff --git a/src/scancel/scancel.c b/src/scancel/scancel.c
index 099e445a3..7e8920ff3 100644
--- a/src/scancel/scancel.c
+++ b/src/scancel/scancel.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -57,8 +57,9 @@
 #  endif
 #endif  /* HAVE_INTTYPES_H */
 
-#include <slurm/slurm.h>
+#include "slurm/slurm.h"
 
+#include "src/common/list.h"
 #include "src/common/log.h"
 #include "src/common/xstring.h"
 #include "src/common/xmalloc.h"
@@ -69,14 +70,16 @@
 #define MAX_THREADS 20
 
 
-static void _cancel_jobs (void);
+static void  _cancel_jobs (void);
 static void *_cancel_job_id (void *cancel_info);
 static void *_cancel_step_id (void *cancel_info);
 
 static int  _confirmation (int i, uint32_t step_id);
 static void _filter_job_records (void);
 static void _load_job_records (void);
-static int _verify_job_ids (void);
+static int  _multi_cluster(List clusters);
+static int  _proc_cluster(void);
+static int  _verify_job_ids (void);
 
 static job_info_msg_t * job_buffer_ptr = NULL;
 
@@ -107,9 +110,41 @@ main (int argc, char *argv[])
 		log_alter (log_opts, SYSLOG_FACILITY_DAEMON, NULL);
 	}
 
+	if (opt.clusters)
+		rc = _multi_cluster(opt.clusters);
+	else
+		rc = _proc_cluster();
+
+	exit (rc);
+}
+
+/* _multi_cluster - process job cancellation across a list of clusters */
+static int
+_multi_cluster(List clusters)
+{
+	ListIterator itr;
+	int rc = 0, rc2;
+
+	itr = list_iterator_create(clusters);
+	if (!itr)
+		fatal("list_iterator_create: malloc failure");
+	while ((working_cluster_rec = list_next(itr))) {
+		rc2 = _proc_cluster();
+		rc = MAX(rc, rc2);
+	}
+	list_iterator_destroy(itr);
+
+	return rc;
+}
+
+/* _proc_cluster - process job cancellation on a specific cluster */
+static int
+_proc_cluster(void)
+{
+	int rc;
+
 	_load_job_records();
 	rc = _verify_job_ids();
-
 	if ((opt.account) ||
 	    (opt.interactive) ||
 	    (opt.job_name) ||
@@ -120,14 +155,14 @@ main (int argc, char *argv[])
 	    (opt.state != JOB_END) ||
 	    (opt.user_name) ||
 	    (opt.wckey)) {
-		_filter_job_records ();
+		_filter_job_records();
 	}
 	_cancel_jobs ();
+	slurm_free_job_info_msg(job_buffer_ptr);
 
-	exit (rc);
+	return rc;
 }
 
-
 /* _load_job_records - load all job information for filtering and verification */
 static void
 _load_job_records (void)
@@ -455,7 +490,8 @@ _cancel_job_id (void *ci)
 		else
 			verbose("Signal %u to job %u", sig, job_id);
 
-		if ((sig == SIGKILL) || (!sig_set) || msg_to_ctld) {
+		if ((sig == SIGKILL) || (!sig_set) ||
+		    msg_to_ctld || opt.clusters) {
 			error_code = slurm_kill_job (job_id, sig,
 						     (uint16_t)opt.batch);
 		} else {
@@ -563,7 +599,6 @@ _confirmation (int i, uint32_t step_id)
 {
 	char in_line[128];
 	job_info_t *job_ptr = NULL;
-	char *line = NULL;
 
 	job_ptr = job_buffer_ptr->job_array ;
 	while (1) {
@@ -577,9 +612,8 @@ _confirmation (int i, uint32_t step_id)
 				job_ptr[i].partition);
 		}
 
-		/* we only set this here to avoid a warning.  We throw it away
-		   later. */
-		line = fgets (in_line, sizeof (in_line), stdin);
+		if (fgets(in_line, sizeof(in_line), stdin) == NULL)
+			continue;
 		if ((in_line[0] == 'y') || (in_line[0] == 'Y'))
 			return 1;
 		if ((in_line[0] == 'n') || (in_line[0] == 'N'))
diff --git a/src/scancel/scancel.h b/src/scancel/scancel.h
index 8211687bb..c192aa250 100644
--- a/src/scancel/scancel.h
+++ b/src/scancel/scancel.h
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/scontrol/Makefile.in b/src/scontrol/Makefile.in
index 03aae8fbf..eaab04ef8 100644
--- a/src/scontrol/Makefile.in
+++ b/src/scontrol/Makefile.in
@@ -64,6 +64,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -74,6 +75,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -124,7 +126,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -161,6 +166,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -218,6 +224,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -253,6 +260,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/scontrol/create_res.c b/src/scontrol/create_res.c
index e751dd6f2..124ff16f1 100644
--- a/src/scontrol/create_res.c
+++ b/src/scontrol/create_res.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -130,6 +130,13 @@ static uint32_t _parse_flags(const char *flagstr, const char *msg)
 				outflags |= RESERVE_FLAG_NO_WEEKLY;
 			else
 				outflags |= RESERVE_FLAG_WEEKLY;
+		} else if (strncasecmp(curr, "License_Only", MAX(taglen,1))
+			   == 0) {
+			curr += taglen;
+			if (flip)
+				outflags |= RESERVE_FLAG_NO_LIC_ONLY;
+			else
+				outflags |= RESERVE_FLAG_LIC_ONLY;
 		} else {
 			error("Error parsing flags %s.  %s", flagstr, msg);
 			return 0xffffffff;
diff --git a/src/scontrol/info_block.c b/src/scontrol/info_block.c
index 26432e1a5..8a15627c1 100644
--- a/src/scontrol/info_block.c
+++ b/src/scontrol/info_block.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/scontrol/info_job.c b/src/scontrol/info_job.c
index 7161510c3..97caab696 100644
--- a/src/scontrol/info_job.c
+++ b/src/scontrol/info_job.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -42,8 +42,7 @@
 
 #include "scontrol.h"
 #include "src/common/stepd_api.h"
-#include "src/plugins/select/bluegene/plugin/bg_boot_time.h"
-#include "src/plugins/select/bluegene/wrap_rm_api.h"
+#include "src/plugins/select/bluegene/bg_enums.h"
 
 #define POLL_SLEEP	3	/* retry interval in seconds  */
 
@@ -760,8 +759,7 @@ static int _blocks_dealloc(void)
 		return -1;
 	}
 	for (i=0; i<new_bg_ptr->record_count; i++) {
-		if(new_bg_ptr->block_array[i].state
-		   == RM_PARTITION_DEALLOCATING) {
+		if(new_bg_ptr->block_array[i].state == BG_BLOCK_TERM) {
 			rc = 1;
 			break;
 		}
diff --git a/src/scontrol/info_node.c b/src/scontrol/info_node.c
index 09b53ef6d..d36a0e872 100644
--- a/src/scontrol/info_node.c
+++ b/src/scontrol/info_node.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -159,29 +159,28 @@ scontrol_print_node_list (char *node_list)
 
 	if (node_list == NULL) {
 		scontrol_print_node (NULL, node_info_ptr);
-	}
-	else {
-		if ( (host_list = hostlist_create (node_list)) ) {
+	} else {
+		if ((host_list = hostlist_create (node_list))) {
 			while ((this_node_name = hostlist_shift (host_list))) {
-				scontrol_print_node (this_node_name, node_info_ptr);
-				free (this_node_name);
+				scontrol_print_node(this_node_name,
+						    node_info_ptr);
+				free(this_node_name);
 			}
 
-			hostlist_destroy (host_list);
-		}
-		else {
+			hostlist_destroy(host_list);
+		} else {
 			exit_code = 1;
 			if (quiet_flag != 1) {
-				if (errno == EINVAL)
-					fprintf (stderr,
-					         "unable to parse node list %s\n",
-					         node_list);
-				else if (errno == ERANGE)
-					fprintf (stderr,
-					         "too many nodes in supplied range %s\n",
-					         node_list);
-				else
-					perror ("error parsing node list");
+				if (errno == EINVAL) {
+					fprintf(stderr,
+					        "unable to parse node list %s\n",
+					        node_list);
+				 } else if (errno == ERANGE) {
+					fprintf(stderr,
+					        "too many nodes in supplied range %s\n",
+					        node_list);
+				} else
+					perror("error parsing node list");
 			}
 		}
 	}
@@ -240,3 +239,146 @@ extern void	scontrol_print_topo (char *node_list)
 		      "node named %s", node_list);
 	}
 }
+
+/*
+ * Load current front_end table information into *node_buffer_pptr
+ */
+extern int
+scontrol_load_front_end(front_end_info_msg_t ** front_end_buffer_pptr)
+{
+	int error_code;
+	front_end_info_msg_t *front_end_info_ptr = NULL;
+
+	if (old_front_end_info_ptr) {
+		error_code = slurm_load_front_end (
+				old_front_end_info_ptr->last_update,
+				&front_end_info_ptr);
+		if (error_code == SLURM_SUCCESS)
+			slurm_free_front_end_info_msg (old_front_end_info_ptr);
+		else if (slurm_get_errno () == SLURM_NO_CHANGE_IN_DATA) {
+			front_end_info_ptr = old_front_end_info_ptr;
+			error_code = SLURM_SUCCESS;
+			if (quiet_flag == -1) {
+				printf("slurm_load_front_end no change in "
+				       "data\n");
+			}
+		}
+	}
+	else
+		error_code = slurm_load_front_end((time_t) NULL,
+						  &front_end_info_ptr);
+
+	if (error_code == SLURM_SUCCESS) {
+		old_front_end_info_ptr = front_end_info_ptr;
+		*front_end_buffer_pptr = front_end_info_ptr;
+	}
+
+	return error_code;
+}
+
+/*
+ * scontrol_print_front_end - print the specified front_end node's information
+ * IN node_name - NULL to print all front_end node information
+ * IN node_ptr - pointer to front_end node table of information
+ * NOTE: call this only after executing load_front_end, called from
+ *	scontrol_print_front_end_list
+ * NOTE: To avoid linear searches, we remember the location of the
+ *	last name match
+ */
+extern void
+scontrol_print_front_end(char *node_name,
+			 front_end_info_msg_t  *front_end_buffer_ptr)
+{
+	int i, j, print_cnt = 0;
+	static int last_inx = 0;
+
+	for (j = 0; j < front_end_buffer_ptr->record_count; j++) {
+		if (node_name) {
+			i = (j + last_inx) % front_end_buffer_ptr->record_count;
+			if (!front_end_buffer_ptr->front_end_array[i].name ||
+			    strcmp(node_name, front_end_buffer_ptr->
+					      front_end_array[i].name))
+				continue;
+		} else if (front_end_buffer_ptr->front_end_array[j].name == NULL)
+			continue;
+		else
+			i = j;
+		print_cnt++;
+		slurm_print_front_end_table(stdout,
+					    &front_end_buffer_ptr->
+					    front_end_array[i],
+					    one_liner);
+
+		if (node_name) {
+			last_inx = i;
+			break;
+		}
+	}
+
+	if (print_cnt == 0) {
+		if (node_name) {
+			exit_code = 1;
+			if (quiet_flag != 1)
+				printf ("Node %s not found\n", node_name);
+		} else if (quiet_flag != 1)
+				printf ("No nodes in the system\n");
+	}
+}
+
+/*
+ * scontrol_print_front_end_list - print information about all front_end nodes
+ */
+extern void
+scontrol_print_front_end_list(char *node_list)
+{
+	front_end_info_msg_t *front_end_info_ptr = NULL;
+	int error_code;
+	hostlist_t host_list;
+	char *this_node_name;
+
+	error_code = scontrol_load_front_end(&front_end_info_ptr);
+	if (error_code) {
+		exit_code = 1;
+		if (quiet_flag != 1)
+			slurm_perror ("slurm_load_front_end error");
+		return;
+	}
+
+	if (quiet_flag == -1) {
+		char time_str[32];
+		slurm_make_time_str((time_t *)&front_end_info_ptr->last_update,
+			            time_str, sizeof(time_str));
+		printf ("last_update_time=%s, records=%d\n",
+			time_str, front_end_info_ptr->record_count);
+	}
+
+	if (node_list == NULL) {
+		scontrol_print_front_end(NULL, front_end_info_ptr);
+	} else {
+		if ((host_list = hostlist_create (node_list))) {
+			while ((this_node_name = hostlist_shift (host_list))) {
+				scontrol_print_front_end(this_node_name,
+							 front_end_info_ptr);
+				free(this_node_name);
+			}
+
+			hostlist_destroy(host_list);
+		} else {
+			exit_code = 1;
+			if (quiet_flag != 1) {
+				if (errno == EINVAL) {
+					fprintf(stderr,
+					        "unable to parse node list %s\n",
+					        node_list);
+				 } else if (errno == ERANGE) {
+					fprintf(stderr,
+					        "too many nodes in supplied range %s\n",
+					        node_list);
+				} else
+					perror("error parsing node list");
+			}
+		}
+	}
+
+	return;
+}
diff --git a/src/scontrol/info_part.c b/src/scontrol/info_part.c
index 880a82bba..0e8b8b3f8 100644
--- a/src/scontrol/info_part.c
+++ b/src/scontrol/info_part.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/scontrol/info_res.c b/src/scontrol/info_res.c
index aebd950f1..8555abec0 100644
--- a/src/scontrol/info_res.c
+++ b/src/scontrol/info_res.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/scontrol/scontrol.c b/src/scontrol/scontrol.c
index 6c79e6f70..3f207846d 100644
--- a/src/scontrol/scontrol.c
+++ b/src/scontrol/scontrol.c
@@ -10,7 +10,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -40,7 +40,7 @@
 \*****************************************************************************/
 
 #include "scontrol.h"
-#include "src/plugins/select/bluegene/wrap_rm_api.h"
+#include "src/plugins/select/bluegene/bg_enums.h"
 #include "src/common/proc_args.h"
 
 #define OPT_LONG_HIDE   0x102
@@ -55,8 +55,10 @@ int input_words;	/* number of words of input permitted */
 int one_liner;		/* one record per line if =1 */
 int quiet_flag;		/* quiet=1, verbose=-1, normal=0 */
 int verbosity;		/* count of "-v" options */
-uint32_t cluster_flags; /*what type of cluster are we talking to */
+uint32_t cluster_flags; /* what type of cluster are we talking to */
+
 block_info_msg_t *old_block_info_ptr = NULL;
+front_end_info_msg_t *old_front_end_info_ptr = NULL;
 job_info_msg_t *old_job_info_ptr = NULL;
 node_info_msg_t *old_node_info_ptr = NULL;
 partition_info_msg_t *old_part_info_ptr = NULL;
@@ -86,7 +88,7 @@ int
 main (int argc, char *argv[])
 {
 	int error_code = SLURM_SUCCESS, i, opt_char, input_field_count;
-	char **input_fields;
+	char **input_fields, *env_val;
 	log_options_t opts = LOG_OPTS_STDERR_ONLY ;
 
 	int option_index;
@@ -117,6 +119,17 @@ main (int argc, char *argv[])
 
 	if (getenv ("SCONTROL_ALL"))
 		all_flag= 1;
+	if ((env_val = getenv("SLURM_CLUSTERS"))) {
+		if (!(clusters = slurmdb_get_info_cluster(env_val))) {
+			error("'%s' can't be reached now, "
+			      "or it is an invalid entry for "
+			      "SLURM_CLUSTERS.  Use 'sacctmgr --list "
+			      "cluster' to see available clusters.",
+			      env_val);
+			exit(1);
+		}
+		working_cluster_rec = list_peek(clusters);
+	}
 
 	while((opt_char = getopt_long(argc, argv, "adhM:oQvV",
 			long_options, &option_index)) != -1) {
@@ -141,13 +154,16 @@ main (int argc, char *argv[])
 			detail_flag = 0;
 			break;
 		case (int)'M':
-			if(clusters) {
+			if (clusters) {
 				list_destroy(clusters);
 				clusters = NULL;
 				working_cluster_rec = NULL;
 			}
-			if(!(clusters = slurmdb_get_info_cluster(optarg))) {
-				error("'%s' invalid entry for --cluster",
+			if (!(clusters = slurmdb_get_info_cluster(optarg))) {
+				error("'%s' can't be reached now, "
+				      "or it is an invalid entry for "
+				      "--cluster.  Use 'sacctmgr --list "
+				      "cluster' to see available clusters.",
 				      optarg);
 				exit(1);
 			}
@@ -175,6 +191,8 @@ main (int argc, char *argv[])
 		}
 	}
 
+	if (clusters && (list_count(clusters) > 1))
+		fatal("Only one cluster can be used at a time with scontrol");
 	cluster_flags = slurmdb_setup_cluster_flags();
 
 	if (verbosity) {
@@ -205,7 +223,7 @@ main (int argc, char *argv[])
 			break;
 		error_code = _get_command (&input_field_count, input_fields);
 	}
-	if(clusters)
+	if (clusters)
 		list_destroy(clusters);
 	exit(exit_code);
 }
@@ -536,7 +554,7 @@ _process_command (int argc, char *argv[])
 {
 	int error_code = 0;
 	char *tag = argv[0];
-	int taglen = 0;
+	int tag_len = 0;
 
 	if (argc < 1) {
 		exit_code = 1;
@@ -544,14 +562,14 @@ _process_command (int argc, char *argv[])
 			fprintf(stderr, "no input");
 		return 0;
 	} else if(tag)
-		taglen = strlen(tag);
+		tag_len = strlen(tag);
 	else {
 		if (quiet_flag == -1)
 			fprintf(stderr, "input problem");
 		return 0;
 	}
 
-	if (strncasecmp (tag, "abort", MAX(taglen, 5)) == 0) {
+	if (strncasecmp (tag, "abort", MAX(tag_len, 5)) == 0) {
 		/* require full command name */
 		if (argc > 2) {
 			exit_code = 1;
@@ -566,9 +584,9 @@ _process_command (int argc, char *argv[])
 				slurm_perror ("slurm_shutdown error");
 		}
 	}
-	else if (strncasecmp (tag, "all", MAX(taglen, 2)) == 0)
+	else if (strncasecmp (tag, "all", MAX(tag_len, 2)) == 0)
 		all_flag = 1;
-	else if (strncasecmp (tag, "completing", MAX(taglen, 2)) == 0) {
+	else if (strncasecmp (tag, "completing", MAX(tag_len, 2)) == 0) {
 		if (argc > 1) {
 			exit_code = 1;
 			fprintf (stderr,
@@ -577,23 +595,32 @@ _process_command (int argc, char *argv[])
 		}
 		scontrol_print_completing();
 	}
-	else if (strncasecmp (tag, "cluster", MAX(taglen, 2)) == 0) {
-		if(clusters) {
+	else if (strncasecmp (tag, "cluster", MAX(tag_len, 2)) == 0) {
+		if (clusters) {
 			list_destroy(clusters);
 			clusters = NULL;
 			working_cluster_rec = NULL;
 		}
 		if (argc >= 2) {
-			if(!(clusters = slurmdb_get_info_cluster(argv[1]))) {
-				error("'%s' invalid entry for --cluster",
+			if (!(clusters = slurmdb_get_info_cluster(argv[1]))) {
+				error("'%s' can't be reached now, "
+				      "or it is an invalid entry for "
+				      "--cluster.  Use 'sacctmgr --list "
+				      "cluster' to see available clusters.",
 				      optarg);
 				exit(1);
 			}
 			working_cluster_rec = list_peek(clusters);
+			if (list_count(clusters) > 1) {
+				fatal("Only one cluster can be used at a time "
+				      "with scontrol");
+			}
 		}
 		cluster_flags = slurmdb_setup_cluster_flags();
 		slurm_free_block_info_msg(old_block_info_ptr);
 		old_block_info_ptr = NULL;
+		slurm_free_front_end_info_msg(old_front_end_info_ptr);
+		old_front_end_info_ptr = NULL;
 		slurm_free_job_info_msg(old_job_info_ptr);
 		old_job_info_ptr = NULL;
 		slurm_free_node_info_msg(old_node_info_ptr);
@@ -617,7 +644,7 @@ _process_command (int argc, char *argv[])
 		/* if(old_slurm_ctl_conf_ptr) */
 		/* 	old_slurm_ctl_conf_ptr->last_update = 0; */
 	}
-	else if (strncasecmp (tag, "create", MAX(taglen, 2)) == 0) {
+	else if (strncasecmp (tag, "create", MAX(tag_len, 2)) == 0) {
 		if (argc < 2) {
 			exit_code = 1;
 			fprintf (stderr, "too few arguments for %s keyword\n",
@@ -626,7 +653,7 @@ _process_command (int argc, char *argv[])
 		}
 		_create_it ((argc - 1), &argv[1]);
 	}
-	else if (strncasecmp (tag, "details", MAX(taglen, 1)) == 0) {
+	else if (strncasecmp (tag, "details", MAX(tag_len, 1)) == 0) {
 		if (argc > 1) {
 			exit_code = 1;
 			fprintf (stderr,
@@ -636,7 +663,7 @@ _process_command (int argc, char *argv[])
 		}
 		detail_flag = 1;
 	}
-	else if (strncasecmp (tag, "exit", MAX(taglen, 1)) == 0) {
+	else if (strncasecmp (tag, "exit", MAX(tag_len, 1)) == 0) {
 		if (argc > 1) {
 			exit_code = 1;
 			fprintf (stderr,
@@ -645,7 +672,7 @@ _process_command (int argc, char *argv[])
 		}
 		exit_flag = 1;
 	}
-	else if (strncasecmp (tag, "help", MAX(taglen, 2)) == 0) {
+	else if (strncasecmp (tag, "help", MAX(tag_len, 2)) == 0) {
 		if (argc > 1) {
 			exit_code = 1;
 			fprintf (stderr,
@@ -654,11 +681,11 @@ _process_command (int argc, char *argv[])
 		}
 		_usage ();
 	}
-	else if (strncasecmp (tag, "hide", MAX(taglen, 2)) == 0) {
+	else if (strncasecmp (tag, "hide", MAX(tag_len, 2)) == 0) {
 		all_flag = 0;
 		detail_flag = 0;
 	}
-	else if (strncasecmp (tag, "oneliner", MAX(taglen, 1)) == 0) {
+	else if (strncasecmp (tag, "oneliner", MAX(tag_len, 1)) == 0) {
 		if (argc > 1) {
 			exit_code = 1;
 			fprintf (stderr,
@@ -667,7 +694,7 @@ _process_command (int argc, char *argv[])
 		}
 		one_liner = 1;
 	}
-	else if (strncasecmp (tag, "pidinfo", MAX(taglen, 3)) == 0) {
+	else if (strncasecmp (tag, "pidinfo", MAX(tag_len, 3)) == 0) {
 		if (argc > 2) {
 			exit_code = 1;
 			fprintf (stderr,
@@ -681,7 +708,7 @@ _process_command (int argc, char *argv[])
 		} else
 			scontrol_pid_info ((pid_t) atol (argv[1]) );
 	}
-	else if (strncasecmp (tag, "ping", MAX(taglen, 3)) == 0) {
+	else if (strncasecmp (tag, "ping", MAX(tag_len, 3)) == 0) {
 		if (argc > 1) {
 			exit_code = 1;
 			fprintf (stderr,
@@ -691,7 +718,7 @@ _process_command (int argc, char *argv[])
 		_print_ping ();
 	}
 	else if ((strncasecmp (tag, "\\q", 2) == 0) ||
-		 (strncasecmp (tag, "quiet", MAX(taglen, 4)) == 0)) {
+		 (strncasecmp (tag, "quiet", MAX(tag_len, 4)) == 0)) {
 		if (argc > 1) {
 			exit_code = 1;
 			fprintf (stderr, "too many arguments for keyword:%s\n",
@@ -699,7 +726,7 @@ _process_command (int argc, char *argv[])
 		}
 		quiet_flag = 1;
 	}
-	else if (strncasecmp (tag, "quit", MAX(taglen, 4)) == 0) {
+	else if (strncasecmp (tag, "quit", MAX(tag_len, 4)) == 0) {
 		if (argc > 1) {
 			exit_code = 1;
 			fprintf (stderr,
@@ -708,7 +735,7 @@ _process_command (int argc, char *argv[])
 		}
 		exit_flag = 1;
 	}
-	else if (strncasecmp (tag, "reconfigure", MAX(taglen, 3)) == 0) {
+	else if (strncasecmp (tag, "reconfigure", MAX(tag_len, 3)) == 0) {
 		if (argc > 2) {
 			exit_code = 1;
 			fprintf (stderr, "too many arguments for keyword:%s\n",
@@ -721,7 +748,7 @@ _process_command (int argc, char *argv[])
 				slurm_perror ("slurm_reconfigure error");
 		}
 	}
-	else if (strncasecmp (tag, "checkpoint", MAX(taglen, 2)) == 0) {
+	else if (strncasecmp (tag, "checkpoint", MAX(tag_len, 2)) == 0) {
 		if (argc > 5) {
 			exit_code = 1;
 			if (quiet_flag != 1)
@@ -747,7 +774,7 @@ _process_command (int argc, char *argv[])
 			}
 		}
 	}
-	else if (strncasecmp (tag, "requeue", MAX(taglen, 3)) == 0) {
+	else if (strncasecmp (tag, "requeue", MAX(tag_len, 3)) == 0) {
 		if (argc > 2) {
 			exit_code = 1;
 			if (quiet_flag != 1)
@@ -773,7 +800,7 @@ _process_command (int argc, char *argv[])
 	else if ((strncasecmp (tag, "hold",  4) == 0) ||
 		 (strncasecmp (tag, "holdu", 5) == 0) ||
 		 (strncasecmp (tag, "uhold", 5) == 0) ||
-		 (strncasecmp (tag, "release", MAX(taglen, 3)) == 0)) {
+	         (strncasecmp (tag, "release", MAX(tag_len, 3)) == 0)) {
 		if (argc > 2) {
 			exit_code = 1;
 			if (quiet_flag != 1)
@@ -796,8 +823,8 @@ _process_command (int argc, char *argv[])
 			}
 		}
 	}
-	else if ((strncasecmp (tag, "suspend", MAX(taglen, 2)) == 0) ||
-		 (strncasecmp (tag, "resume", MAX(taglen, 3)) == 0)) {
+	else if ((strncasecmp (tag, "suspend", MAX(tag_len, 2)) == 0) ||
+	         (strncasecmp (tag, "resume", MAX(tag_len, 3)) == 0)) {
 		if (argc > 2) {
 			exit_code = 1;
 			if (quiet_flag != 1)
@@ -820,15 +847,17 @@ _process_command (int argc, char *argv[])
 			}
 		}
 	}
-	else if (strncasecmp (tag, "wait_job", MAX(taglen, 2)) == 0) {
-		if (argc > 2) {
+	else if (strncasecmp (tag, "wait_job", MAX(tag_len, 2)) == 0) {
+		if (cluster_flags & CLUSTER_FLAG_CRAYXT) {
+			fprintf(stderr,
+				"wait_job is handled automatically on Cray.\n");
+		} else if (argc > 2) {
 			exit_code = 1;
 			if (quiet_flag != 1)
 				fprintf(stderr,
 					"too many arguments for keyword:%s\n",
 					tag);
-		}
-		else if (argc < 2) {
+		} else if (argc < 2) {
 			exit_code = 1;
 			if (quiet_flag != 1)
 				fprintf(stderr,
@@ -840,7 +869,65 @@ _process_command (int argc, char *argv[])
 				exit_code = 1;
 		}
 	}
-	else if (strncasecmp (tag, "setdebug", MAX(taglen, 2)) == 0) {
+	else if (strncasecmp (tag, "setdebugflags", MAX(tag_len, 9)) == 0) {
+		if (argc > 2) {
+			exit_code = 1;
+			if (quiet_flag != 1)
+				fprintf(stderr,
+					"too many arguments for keyword:%s\n",
+					tag);
+		} else if (argc < 2) {
+			exit_code = 1;
+			if (quiet_flag != 1)
+				fprintf(stderr,
+					"too few arguments for keyword:%s\n",
+					tag);
+		} else {
+			int i, mode = 0;
+			uint32_t debug_flags_plus  = 0;
+			uint32_t debug_flags_minus = 0, flags;
+
+			for (i = 1; i < argc; i++) {
+				if (argv[i][0] == '+')
+					mode = 1;
+				else if (argv[i][0] == '-')
+					mode = -1;
+				else {
+					mode = 0;
+					break;
+				}
+				flags = debug_str2flags(&argv[i][1]);
+				if (flags == NO_VAL)
+					break;
+				if (mode == 1)
+					debug_flags_plus  |= flags;
+				else
+					debug_flags_minus |= flags;
+			}
+			if (i < argc) {
+				exit_code = 1;
+				if (quiet_flag != 1) {
+					fprintf(stderr, "invalid debug "
+						"flag: %s\n", argv[i]);
+				}
+				if ((quiet_flag != 1) &&  (mode == 0)) {
+					fprintf(stderr, "Usage: setdebugflags"
+						" [+|-]NAME\n");
+				}
+			} else {
+				error_code = slurm_set_debugflags(
+					debug_flags_plus, debug_flags_minus);
+				if (error_code) {
+					exit_code = 1;
+					if (quiet_flag != 1)
+						slurm_perror(
+							"slurm_set_debug_flags"
+							" error");
+				}
+			}
+		}
+	}
+	else if (strncasecmp (tag, "setdebug", MAX(tag_len, 2)) == 0) {
 		if (argc > 2) {
 			exit_code = 1;
 			if (quiet_flag != 1)
@@ -893,7 +980,7 @@ _process_command (int argc, char *argv[])
 			}
 		}
 	}
-	else if (strncasecmp (tag, "schedloglevel", MAX(taglen, 2)) == 0) {
+	else if (strncasecmp (tag, "schedloglevel", MAX(tag_len, 2)) == 0) {
 		if (argc > 2) {
 			exit_code = 1;
 			if (quiet_flag != 1)
@@ -943,10 +1030,10 @@ _process_command (int argc, char *argv[])
 			}
 		}
 	}
-	else if (strncasecmp (tag, "show", MAX(taglen, 3)) == 0) {
+	else if (strncasecmp (tag, "show", MAX(tag_len, 3)) == 0) {
 		_show_it (argc, argv);
 	}
-	else if (strncasecmp (tag, "takeover", MAX(taglen, 8)) == 0) {
+	else if (strncasecmp (tag, "takeover", MAX(tag_len, 8)) == 0) {
 		char *secondary = NULL;
 		slurm_ctl_conf_info_msg_t  *slurm_ctl_conf_ptr = NULL;
 
@@ -967,7 +1054,7 @@ _process_command (int argc, char *argv[])
 		}
 		xfree(secondary);
 	}
-	else if (strncasecmp (tag, "shutdown", MAX(taglen, 8)) == 0) {
+	else if (strncasecmp (tag, "shutdown", MAX(tag_len, 8)) == 0) {
 		/* require full command name */
 		uint16_t options = 0;
 		if (argc == 2) {
@@ -996,7 +1083,7 @@ _process_command (int argc, char *argv[])
 			}
 		}
 	}
-	else if (strncasecmp (tag, "update", MAX(taglen, 1)) == 0) {
+	else if (strncasecmp (tag, "update", MAX(tag_len, 1)) == 0) {
 		if (argc < 2) {
 			exit_code = 1;
 			fprintf (stderr, "too few arguments for %s keyword\n",
@@ -1005,7 +1092,7 @@ _process_command (int argc, char *argv[])
 		}
 		_update_it ((argc - 1), &argv[1]);
 	}
-	else if (strncasecmp (tag, "delete", MAX(taglen, 1)) == 0) {
+	else if (strncasecmp (tag, "delete", MAX(tag_len, 1)) == 0) {
 		if (argc < 2) {
 			exit_code = 1;
 			fprintf (stderr, "too few arguments for %s keyword\n",
@@ -1014,7 +1101,7 @@ _process_command (int argc, char *argv[])
 		}
 		_delete_it ((argc - 1), &argv[1]);
 	}
-	else if (strncasecmp (tag, "verbose", MAX(taglen, 4)) == 0) {
+	else if (strncasecmp (tag, "verbose", MAX(tag_len, 4)) == 0) {
 		if (argc > 1) {
 			exit_code = 1;
 			fprintf (stderr,
@@ -1023,7 +1110,7 @@ _process_command (int argc, char *argv[])
 		}
 		quiet_flag = -1;
 	}
-	else if (strncasecmp (tag, "version", MAX(taglen, 4)) == 0) {
+	else if (strncasecmp (tag, "version", MAX(tag_len, 4)) == 0) {
 		if (argc > 1) {
 			exit_code = 1;
 			fprintf (stderr,
@@ -1032,7 +1119,7 @@ _process_command (int argc, char *argv[])
 		}
 		_print_version();
 	}
-	else if (strncasecmp (tag, "listpids", MAX(taglen, 1)) == 0) {
+	else if (strncasecmp (tag, "listpids", MAX(tag_len, 1)) == 0) {
 		if (argc > 3) {
 			exit_code = 1;
 			fprintf (stderr,
@@ -1043,7 +1130,7 @@ _process_command (int argc, char *argv[])
 					    argc <= 2 ? NULL : argv[2]);
 		}
 	}
-	else if (strncasecmp (tag, "notify", MAX(taglen, 1)) == 0) {
+	else if (strncasecmp (tag, "notify", MAX(tag_len, 1)) == 0) {
 		if (argc < 3) {
 			exit_code = 1;
 			fprintf (stderr,
@@ -1078,18 +1165,18 @@ _create_it (int argc, char *argv[])
 	for (i=0; i<argc; i++) {
 		char *tag = argv[i];
 		char *val = strchr(argv[i], '=');
-		int taglen;
+		int tag_len;
 
 		if (val) {
-			taglen = val - argv[i];
+			tag_len = val - argv[i];
 			val++;
 		} else {
-			taglen = strlen(tag);
+			tag_len = strlen(tag);
 		}
-		if (!strncasecmp(tag, "ReservationName", MAX(taglen, 3))) {
+		if (!strncasecmp(tag, "ReservationName", MAX(tag_len, 3))) {
 			error_code = scontrol_create_res(argc, argv);
 			break;
-		} else if (!strncasecmp(tag, "PartitionName", MAX(taglen, 3))) {
+		} else if (!strncasecmp(tag, "PartitionName", MAX(tag_len, 3))) {
 			error_code = scontrol_create_part(argc, argv);
 			break;
 		}
@@ -1114,7 +1201,7 @@ static void
 _delete_it (int argc, char *argv[])
 {
 	char *tag = NULL, *val = NULL;
-	int taglen = 0;
+	int tag_len = 0;
 
 	if (argc != 1) {
 		error("Only one option follows delete.  %d given.", argc);
@@ -1125,7 +1212,7 @@ _delete_it (int argc, char *argv[])
 	tag = argv[0];
 	val = strchr(argv[0], '=');
 	if (val) {
-		taglen = val - argv[0];
+		tag_len = val - argv[0];
 		val++;
 	} else {
 		error("Proper format is 'delete Partition=p'"
@@ -1135,7 +1222,7 @@ _delete_it (int argc, char *argv[])
 	}
 
 	/* First identify the entity type to delete */
-	if (strncasecmp (tag, "PartitionName", MAX(taglen, 3)) == 0) {
+	if (strncasecmp (tag, "PartitionName", MAX(tag_len, 3)) == 0) {
 		delete_part_msg_t part_msg;
 		part_msg.name = val;
 		if (slurm_delete_partition(&part_msg)) {
@@ -1143,7 +1230,7 @@ _delete_it (int argc, char *argv[])
 			snprintf(errmsg, 64, "delete_partition %s", argv[0]);
 			slurm_perror(errmsg);
 		}
-	} else if (strncasecmp (tag, "ReservationName", MAX(taglen, 3)) == 0) {
+	} else if (strncasecmp (tag, "ReservationName", MAX(tag_len, 3)) == 0) {
 		reservation_name_msg_t   res_msg;
 		res_msg.name = val;
 		if (slurm_delete_reservation(&res_msg)) {
@@ -1151,12 +1238,12 @@ _delete_it (int argc, char *argv[])
 			snprintf(errmsg, 64, "delete_reservation %s", argv[0]);
 			slurm_perror(errmsg);
 		}
-	} else if (strncasecmp (tag, "BlockName", MAX(taglen, 3)) == 0) {
+	} else if (strncasecmp (tag, "BlockName", MAX(tag_len, 3)) == 0) {
 		if(cluster_flags & CLUSTER_FLAG_BG) {
 			update_block_msg_t   block_msg;
 			slurm_init_update_block_msg ( &block_msg );
 			block_msg.bg_block_id = val;
-			block_msg.state = RM_PARTITION_NAV;
+			block_msg.state = BG_BLOCK_NAV;
 			if (slurm_update_block(&block_msg)) {
 				char errmsg[64];
 				snprintf(errmsg, 64, "delete_block %s",
@@ -1184,7 +1271,7 @@ static void
 _show_it (int argc, char *argv[])
 {
 	char *tag = NULL, *val = NULL;
-	int taglen = 0;
+	int tag_len = 0;
 
 	if (argc > 3) {
 		exit_code = 1;
@@ -1203,10 +1290,10 @@ _show_it (int argc, char *argv[])
 	}
 
 	tag = argv[1];
-	taglen = strlen(tag);
+	tag_len = strlen(tag);
 	val = strchr(argv[1], '=');
 	if (val) {
-		taglen = val - argv[1];
+		tag_len = val - argv[1];
 		val++;
 	} else if (argc == 3) {
 		val = argv[2];
@@ -1214,11 +1301,16 @@ _show_it (int argc, char *argv[])
 		val = NULL;
 	}
 
-	if (strncasecmp (tag, "blocks", MAX(taglen, 1)) == 0) {
+	if (strncasecmp (tag, "aliases", MAX(tag_len, 1)) == 0) {
+		if (val)
+			_print_aliases (val);
+		else
+			_print_aliases (NULL);
+	} else if (strncasecmp (tag, "blocks", MAX(tag_len, 1)) == 0) {
 		scontrol_print_block (val);
-	} else if (strncasecmp (tag, "config", MAX(taglen, 1)) == 0) {
+	} else if (strncasecmp (tag, "config", MAX(tag_len, 1)) == 0) {
 		_print_config (val);
-	} else if (strncasecmp (tag, "daemons", MAX(taglen, 1)) == 0) {
+	} else if (strncasecmp (tag, "daemons", MAX(tag_len, 1)) == 0) {
 		if (val) {
 			exit_code = 1;
 			if (quiet_flag != 1)
@@ -1227,39 +1319,36 @@ _show_it (int argc, char *argv[])
 					argv[0]);
 		}
 		_print_daemons ();
-	} else if (strncasecmp (tag, "aliases", MAX(taglen, 1)) == 0) {
-		if (val)
-			_print_aliases (val);
-		else
-			_print_aliases (NULL);
-	} else if (strncasecmp (tag, "jobs", MAX(taglen, 1)) == 0 ||
-		   strncasecmp (tag, "jobid", MAX(taglen, 1)) == 0 ) {
-		scontrol_print_job (val);
-	} else if (strncasecmp (tag, "hostnames", MAX(taglen, 5)) == 0) {
+	} else if (strncasecmp (tag, "FrontendName",  MAX(tag_len, 1)) == 0) {
+		scontrol_print_front_end_list(val);
+	} else if (strncasecmp (tag, "hostnames", MAX(tag_len, 5)) == 0) {
 		if (val)
 			scontrol_print_hosts(val);
 		else
 			scontrol_print_hosts(getenv("SLURM_NODELIST"));
-	} else if (strncasecmp (tag, "hostlist", MAX(taglen, 5)) == 0) {
+	} else if (strncasecmp (tag, "hostlist", MAX(tag_len, 5)) == 0) {
 		if (!val) {
 			exit_code = 1;
 			fprintf(stderr, "invalid encode argument\n");
 			_usage();
 		} else if (scontrol_encode_hostlist(val))
 			exit_code = 1;
-	} else if (strncasecmp (tag, "nodes", MAX(taglen, 1)) == 0) {
+	} else if (strncasecmp (tag, "jobs", MAX(tag_len, 1)) == 0 ||
+		   strncasecmp (tag, "jobid", MAX(tag_len, 1)) == 0 ) {
+		scontrol_print_job (val);
+	} else if (strncasecmp (tag, "nodes", MAX(tag_len, 1)) == 0) {
 		scontrol_print_node_list (val);
-	} else if (strncasecmp (tag, "partitions", MAX(taglen, 1)) == 0 ||
-		   strncasecmp (tag, "partitionname", MAX(taglen, 1)) == 0) {
+	} else if (strncasecmp (tag, "partitions", MAX(tag_len, 1)) == 0 ||
+		   strncasecmp (tag, "partitionname", MAX(tag_len, 1)) == 0) {
 		scontrol_print_part (val);
-	} else if (strncasecmp (tag, "reservations", MAX(taglen, 1)) == 0 ||
-		   strncasecmp (tag, "reservationname", MAX(taglen, 1)) == 0) {
+	} else if (strncasecmp (tag, "reservations", MAX(tag_len, 1)) == 0 ||
+		   strncasecmp (tag, "reservationname", MAX(tag_len, 1)) == 0) {
 		scontrol_print_res (val);
-	} else if (strncasecmp (tag, "slurmd", MAX(taglen, 2)) == 0) {
+	} else if (strncasecmp (tag, "slurmd", MAX(tag_len, 2)) == 0) {
 		_print_slurmd (val);
-	} else if (strncasecmp (tag, "steps", MAX(taglen, 2)) == 0) {
+	} else if (strncasecmp (tag, "steps", MAX(tag_len, 2)) == 0) {
 		scontrol_print_step (val);
-	} else if (strncasecmp (tag, "topology", MAX(taglen, 1)) == 0) {
+	} else if (strncasecmp (tag, "topology", MAX(tag_len, 1)) == 0) {
 		scontrol_print_topo (val);
 	} else {
 		exit_code = 1;
@@ -1283,39 +1372,43 @@ _update_it (int argc, char *argv[])
 {
 	char *val = NULL;
 	int i, error_code = SLURM_SUCCESS;
-	int nodetag=0, partag=0, jobtag=0;
-	int blocktag=0, subtag=0, restag=0;
-	int debugtag=0, steptag=0;
+	int node_tag = 0, part_tag = 0, job_tag = 0;
+	int block_tag = 0, sub_tag = 0, res_tag = 0;
+	int debug_tag = 0, step_tag = 0, front_end_tag = 0;
 
 	/* First identify the entity to update */
 	for (i=0; i<argc; i++) {
 		char *tag = argv[i];
-		int taglen = 0;
+		int tag_len = 0;
 		val = strchr(argv[i], '=');
 		if (!val)
 			continue;
-		taglen = val - argv[i];
+		tag_len = val - argv[i];
 		val++;
 
-		if (!strncasecmp(tag, "NodeName", MAX(taglen, 3))) {
-			nodetag=1;
-		} else if (!strncasecmp(tag, "PartitionName", MAX(taglen, 3))) {
-			partag=1;
-		} else if (!strncasecmp(tag, "JobId", MAX(taglen, 3))) {
-			jobtag=1;
-		} else if (!strncasecmp(tag, "StepId", MAX(taglen, 4))) {
-			steptag=1;
-		} else if (!strncasecmp(tag, "BlockName", MAX(taglen, 3))) {
-			blocktag=1;
-		} else if (!strncasecmp(tag, "SubBPName", MAX(taglen, 3))) {
-			subtag=1;
+		if (!strncasecmp(tag, "NodeName", MAX(tag_len, 3))) {
+			node_tag = 1;
+		} else if (!strncasecmp(tag, "PartitionName",
+					MAX(tag_len, 3))) {
+			part_tag = 1;
+		} else if (!strncasecmp(tag, "JobId", MAX(tag_len, 3))) {
+			job_tag = 1;
+		} else if (!strncasecmp(tag, "StepId", MAX(tag_len, 4))) {
+			step_tag = 1;
+		} else if (!strncasecmp(tag, "BlockName", MAX(tag_len, 3))) {
+			block_tag = 1;
+		} else if (!strncasecmp(tag, "SubBPName", MAX(tag_len, 3))) {
+			sub_tag = 1;
+		} else if (!strncasecmp(tag, "FrontendName",
+					MAX(tag_len, 2))) {
+			front_end_tag = 1;
 		} else if (!strncasecmp(tag, "ReservationName",
-					MAX(taglen, 3))) {
-			restag=1;
+					MAX(tag_len, 3))) {
+			res_tag = 1;
 		} else if (!strncasecmp(tag, "SlurmctldDebug",
-					MAX(taglen, 2))) {
-			debugtag=1;
-		}
+					MAX(tag_len, 2))) {
+			debug_tag = 1;
+		}
 	}
 
 	/* The order of tests matters here.  An update job request can include
@@ -1325,21 +1418,23 @@ _update_it (int argc, char *argv[])
 	 * partition tag.  The order of the rest doesn't matter because there
 	 * aren't any other duplicate tags.  */
 
-	if (jobtag)
+	if (job_tag)
 		error_code = scontrol_update_job (argc, argv);
-	else if (steptag)
+	else if (step_tag)
 		error_code = scontrol_update_step (argc, argv);
-	else if (restag)
+	else if (res_tag)
 		error_code = scontrol_update_res (argc, argv);
-	else if (nodetag)
+	else if (node_tag)
 		error_code = scontrol_update_node (argc, argv);
-	else if (partag)
+	else if (front_end_tag)
+		error_code = scontrol_update_front_end (argc, argv);
+	else if (part_tag)
 		error_code = scontrol_update_part (argc, argv);
-	else if (blocktag)
+	else if (block_tag)
 		error_code = _update_bluegene_block (argc, argv);
-	else if (subtag)
+	else if (sub_tag)
 		error_code = _update_bluegene_subbp (argc, argv);
-	else if (debugtag)
+	else if (debug_tag)
 		error_code = _update_slurmctld_debug(val);
 	else {
 		exit_code = 1;
@@ -1384,10 +1479,10 @@ _update_bluegene_block (int argc, char *argv[])
 	for (i=0; i<argc; i++) {
 		char *tag = argv[i];
 		char *val = strchr(argv[i], '=');
-		int taglen = 0, vallen = 0;
+		int tag_len = 0, vallen = 0;
 
 		if (val) {
-			taglen = val - argv[i];
+			tag_len = val - argv[i];
 			val++;
 			vallen = strlen(val);
 		} else {
@@ -1398,19 +1493,19 @@ _update_bluegene_block (int argc, char *argv[])
 			return 0;
 		}
 
-		if (!strncasecmp(tag, "BlockName", MAX(taglen, 2))) {
+		if (!strncasecmp(tag, "BlockName", MAX(tag_len, 2))) {
 			block_msg.bg_block_id = val;
-		} else if (!strncasecmp(tag, "State", MAX(taglen, 2))) {
+		} else if (!strncasecmp(tag, "State", MAX(tag_len, 2))) {
 			if (!strncasecmp(val, "ERROR", MAX(vallen, 1)))
-				block_msg.state = RM_PARTITION_ERROR;
+				block_msg.state = BG_BLOCK_ERROR_FLAG;
 			else if (!strncasecmp(val, "FREE", MAX(vallen, 1)))
-				block_msg.state = RM_PARTITION_FREE;
+				block_msg.state = BG_BLOCK_FREE;
 			else if (!strncasecmp(val, "RECREATE", MAX(vallen, 3)))
-				block_msg.state = RM_PARTITION_CONFIGURING;
+				block_msg.state = BG_BLOCK_BOOTING;
 			else if (!strncasecmp(val, "REMOVE", MAX(vallen, 3)))
-				block_msg.state = RM_PARTITION_NAV;
+				block_msg.state = BG_BLOCK_NAV;
 			else if (!strncasecmp(val, "RESUME", MAX(vallen, 3)))
-				block_msg.state = RM_PARTITION_DEALLOCATING;
+				block_msg.state = BG_BLOCK_TERM;
 			else {
 				exit_code = 1;
 				fprintf (stderr, "Invalid input: %s\n",
@@ -1435,7 +1530,7 @@ _update_bluegene_block (int argc, char *argv[])
 		return 0;
 	} else if (block_msg.state == (uint16_t)NO_VAL) {
 		error("You didn't give me a state to set %s to "
-		      "(i.e. FREE, ERROR).", block_msg.nodes);
+		      "(i.e. FREE, ERROR).", block_msg.mp_str);
 		return 0;
 	}
 
@@ -1471,10 +1566,10 @@ _update_bluegene_subbp (int argc, char *argv[])
 	for (i=0; i<argc; i++) {
 		char *tag = argv[i];
 		char *val = strchr(argv[i], '=');
-		int taglen = 0, vallen = 0;
+		int tag_len = 0, vallen = 0;
 
 		if (val) {
-			taglen = val - argv[i];
+			tag_len = val - argv[i];
 			val++;
 			vallen = strlen(val);
 		} else {
@@ -1484,13 +1579,13 @@ _update_bluegene_subbp (int argc, char *argv[])
 			return 0;
 		}
 
-		if (!strncasecmp(tag, "SubBPName", MAX(taglen, 2)))
-			block_msg.nodes = val;
-		else if (!strncasecmp(tag, "State", MAX(taglen, 2))) {
+		if (!strncasecmp(tag, "SubBPName", MAX(tag_len, 2)))
+			block_msg.mp_str = val;
+		else if (!strncasecmp(tag, "State", MAX(tag_len, 2))) {
 			if (!strncasecmp(val, "ERROR", MAX(vallen, 1)))
-				block_msg.state = RM_PARTITION_ERROR;
+				block_msg.state = BG_BLOCK_ERROR_FLAG;
 			else if (!strncasecmp(val, "FREE", MAX(vallen, 1)))
-				block_msg.state = RM_PARTITION_FREE;
+				block_msg.state = BG_BLOCK_FREE;
 			else {
 				exit_code = 1;
 				fprintf (stderr, "Invalid input: %s\n",
@@ -1508,12 +1603,12 @@ _update_bluegene_subbp (int argc, char *argv[])
 		}
 	}
 
-	if(!block_msg.nodes) {
+	if(!block_msg.mp_str) {
 		error("You didn't supply an ionode list.");
 		return 0;
 	} else if (block_msg.state == (uint16_t)NO_VAL) {
 		error("You didn't give me a state to set %s to "
-		      "(i.e. FREE, ERROR).", block_msg.nodes);
+		      "(i.e. FREE, ERROR).", block_msg.mp_str);
 		return 0;
 	}
 
@@ -1609,6 +1704,7 @@ scontrol [<OPTION>] [<COMMAND>]                                            \n\
      requeue <job_id>         re-queue a batch job                         \n\
      resume <job_id>          resume previously suspended job (see suspend)\n\
      setdebug <level>         set slurmctld debug level                    \n\
+     setdebugflags [+|-]<flag>  add or remove slurmctld DebugFlags         \n\
      schedloglevel <slevel>   set scheduler log level                      \n\
      show <ENTITY> [<ID>]     display state of identified entity, default  \n\
 			      is all records.                              \n\
@@ -1625,9 +1721,9 @@ scontrol [<OPTION>] [<COMMAND>]                                            \n\
 			      are booted and usable                        \n\
      !!                       Repeat the last command entered.             \n\
 									   \n\
-  <ENTITY> may be \"aliases\", \"config\", \"daemons\", \"job\",           \n\
-       \"node\", \"partition\", \"reservation\", \"hostlist\",             \n\
-       \"hostnames\", \"slurmd\", \"topology\", or \"step\"                \n\
+  <ENTITY> may be \"aliases\", \"config\", \"daemons\", \"frontend\",      \n\
+       \"hostlist\", \"hostnames\", \"job\", \"node\", \"partition\",      \n\
+       \"reservation\", \"slurmd\", \"step\", or \"topology\"              \n\
        (also for BlueGene only: \"block\" or \"subbp\").                   \n\
 									   \n\
   <ID> may be a configuration parameter name, job id, node name, partition \n\
diff --git a/src/scontrol/scontrol.h b/src/scontrol/scontrol.h
index 10c7f55d4..2d9de5d84 100644
--- a/src/scontrol/scontrol.h
+++ b/src/scontrol/scontrol.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -78,7 +78,7 @@
 #  endif
 #endif  /* HAVE_INTTYPES_H */
 
-#include <slurm/slurm.h>
+#include "slurm/slurm.h"
 
 #include "src/common/hostlist.h"
 #include "src/common/log.h"
@@ -103,9 +103,10 @@ extern int exit_flag;	/* program to terminate if =1 */
 extern int input_words;	/* number of words of input permitted */
 extern int one_liner;	/* one record per line if =1 */
 extern int quiet_flag;	/* quiet=1, verbose=-1, normal=0 */
-extern uint32_t cluster_flags; /*what type of cluster are we talking to */
+extern uint32_t cluster_flags; /* what type of cluster are we talking to */
 
 extern block_info_msg_t *old_block_info_ptr;
+extern front_end_info_msg_t *old_front_end_info_ptr;
 extern job_info_msg_t *old_job_info_ptr;
 extern node_info_msg_t *old_node_info_ptr;
 extern partition_info_msg_t *old_part_info_ptr;
@@ -123,6 +124,8 @@ extern int	scontrol_job_notify(int argc, char *argv[]);
 extern int	scontrol_job_ready(char *job_id_str);
 extern void	scontrol_list_pids(const char *jobid_str,
 				   const char *node_name);
+extern int	scontrol_load_front_end(front_end_info_msg_t **
+					front_end_buffer_pptr);
 extern int 	scontrol_load_jobs (job_info_msg_t ** job_buffer_pptr);
 extern int 	scontrol_load_nodes (node_info_msg_t ** node_buffer_pptr,
 				     uint16_t show_flags);
@@ -133,6 +136,10 @@ extern void	scontrol_pid_info(pid_t job_pid);
 extern void	scontrol_print_completing (void);
 extern void	scontrol_print_completing_job(job_info_t *job_ptr,
 					      node_info_msg_t *node_info_msg);
+extern void	scontrol_print_front_end_list(char *node_list);
+extern void	scontrol_print_front_end(char *node_name,
+					 front_end_info_msg_t  *
+					 front_end_buffer_ptr);
 extern void	scontrol_print_job (char * job_id_str);
 extern void	scontrol_print_hosts (char * node_list);
 extern void	scontrol_print_node (char *node_name,
@@ -145,6 +152,7 @@ extern void	scontrol_print_step (char *job_step_id_str);
 extern void	scontrol_print_topo (char *node_list);
 extern int	scontrol_requeue(char *job_step_id_str);
 extern int	scontrol_suspend(char *op, char *job_id_str);
+extern int	scontrol_update_front_end (int argc, char *argv[]);
 extern int	scontrol_update_job (int argc, char *argv[]);
 extern int	scontrol_update_node (int argc, char *argv[]);
 extern int	scontrol_update_part (int argc, char *argv[]);
diff --git a/src/scontrol/update_job.c b/src/scontrol/update_job.c
index b4c1094c3..68b684f7d 100644
--- a/src/scontrol/update_job.c
+++ b/src/scontrol/update_job.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -61,7 +61,7 @@ extern int
 scontrol_checkpoint(char *op, char *job_step_id_str, int argc, char *argv[])
 {
 	int rc = SLURM_SUCCESS;
-	uint32_t job_id = 0, step_id = 0, step_id_set = 0;
+	uint32_t job_id = 0, step_id = 0;
 	char *next_str;
 	uint32_t ckpt_errno;
 	char *ckpt_strerror = NULL;
@@ -74,7 +74,6 @@ scontrol_checkpoint(char *op, char *job_step_id_str, int argc, char *argv[])
 		if (next_str[0] == '.') {
 			step_id = (uint32_t) strtol (&next_str[1], &next_str,
 						     10);
-			step_id_set = 1;
 		} else
 			step_id = NO_VAL;
 		if (next_str[0] != '\0') {
@@ -200,6 +199,31 @@ _parse_restart_args(int argc, char **argv, uint16_t *stick, char **image_dir)
 	return 0;
 }
 
+/* Return the current time limit of the specified job_id or NO_VAL if the
+ * information is not available */
+static uint32_t _get_job_time(uint32_t job_id)
+{
+	uint32_t time_limit = NO_VAL;
+	int i, rc;
+	job_info_msg_t *resp;
+
+	rc = slurm_load_job(&resp, job_id, SHOW_ALL);
+	if (rc == SLURM_SUCCESS) {
+		for (i = 0; i < resp->record_count; i++) {
+			if (resp->job_array[i].job_id != job_id)
+				continue;	/* should not happen */
+			time_limit = resp->job_array[i].time_limit;
+			break;
+		}
+		slurm_free_job_info_msg(resp);
+	} else {
+		error("Could not load state information for job %u: %m",
+		      job_id);
+	}
+
+	return time_limit;
+}
+
 /*
  * scontrol_hold - perform some job hold/release operation
  * IN op - suspend/resume operation
@@ -217,6 +241,9 @@ scontrol_hold(char *op, char *job_id_str)
 
 	slurm_init_job_desc_msg (&job_msg);
 
+	/* set current user, needed e.g., for AllowGroups checks */
+	job_msg.user_id = getuid();
+
 	if (job_id_str) {
 		job_msg.job_id = (uint32_t) strtol(job_id_str, &next_str, 10);
 		if ((job_msg.job_id == 0) || (next_str[0] != '\0')) {
@@ -337,6 +364,9 @@ scontrol_update_job (int argc, char *argv[])
 
 	slurm_init_job_desc_msg (&job_msg);
 
+	/* set current user, needed e.g., for AllowGroups checks */
+	job_msg.user_id = getuid();
+
 	for (i=0; i<argc; i++) {
 		tag = argv[i];
 		val = strchr(argv[i], '=');
@@ -366,12 +396,39 @@ scontrol_update_job (int argc, char *argv[])
 			update_cnt++;
 		}
 		else if (strncasecmp(tag, "TimeLimit", MAX(taglen, 5)) == 0) {
-			int time_limit = time_str2mins(val);
+			bool incr, decr;
+			uint32_t job_current_time, time_limit;
+
+			incr = (val[0] == '+');
+			decr = (val[0] == '-');
+			if (incr || decr)
+				val++;
+			time_limit = time_str2mins(val);
 			if ((time_limit < 0) && (time_limit != INFINITE)) {
 				error("Invalid TimeLimit value");
 				exit_code = 1;
 				return 0;
 			}
+			if (incr || decr) {
+				job_current_time = _get_job_time(job_msg.
+								 job_id);
+				if (job_current_time == NO_VAL) {
+					exit_code = 1;
+					return 0;
+				}
+				if (incr) {
+					time_limit += job_current_time;
+				} else if (time_limit > job_current_time) {
+					error("TimeLimit decrement larger than"
+					      " current time limit (%u > %u)",
+					      time_limit, job_current_time);
+					exit_code = 1;
+					return 0;
+				} else {
+					time_limit = job_current_time -
+						     time_limit;
+				}
+			}
 			job_msg.time_limit = time_limit;
 			update_cnt++;
 		}
@@ -432,14 +489,22 @@ scontrol_update_job (int argc, char *argv[])
 		/* ReqNodes was replaced by NumNodes in SLURM version 2.1 */
 		else if ((strncasecmp(tag, "ReqNodes", MAX(taglen, 8)) == 0) ||
 		         (strncasecmp(tag, "NumNodes", MAX(taglen, 8)) == 0)) {
-			int rc = get_resource_arg_range(
-				val,
-				"requested node count",
-				(int *)&job_msg.min_nodes,
-				(int *)&job_msg.max_nodes,
-				false);
-			if(!rc)
-				return rc;
+			int min_nodes, max_nodes, rc;
+			if (strcmp(val, "0") == 0) {
+				job_msg.min_nodes = 0;
+			} else if (strcasecmp(val, "ALL") == 0) {
+				job_msg.min_nodes = INFINITE;
+			} else {
+				min_nodes = (int) job_msg.min_nodes;
+				max_nodes = (int) job_msg.max_nodes;
+				rc = get_resource_arg_range(
+						val, "requested node count",
+						&min_nodes, &max_nodes, false);
+				if (!rc)
+					return rc;
+				job_msg.min_nodes = (uint32_t) min_nodes;
+				job_msg.max_nodes = (uint32_t) max_nodes;
+			}
 			update_size = true;
 			update_cnt++;
 		}
@@ -508,6 +573,22 @@ scontrol_update_job (int argc, char *argv[])
 			job_msg.wckey = val;
 			update_cnt++;
 		}
+		else if (strncasecmp(tag, "Switches", MAX(taglen, 5)) == 0) {
+			char *sep_char;
+			job_msg.req_switch =
+				(uint32_t) strtol(val, &sep_char, 10);
+			update_cnt++;
+			if (sep_char && sep_char[0] == '@') {
+				job_msg.wait4switch = time_str2mins(sep_char+1)
+						      * 60;
+			}
+		}
+		else if (strncasecmp(tag, "wait-for-switch", MAX(taglen, 5))
+			 == 0) {
+			job_msg.wait4switch =
+				(uint32_t) strtol(val, (char **) NULL, 10);
+			update_cnt++;
+		}
 		else if (strncasecmp(tag, "Shared", MAX(taglen, 2)) == 0) {
 			if (strncasecmp(val, "YES", MAX(vallen, 1)) == 0)
 				job_msg.shared = 1;
@@ -615,8 +696,8 @@ scontrol_update_job (int argc, char *argv[])
 			update_cnt++;
 		}
 		else if (strncasecmp(tag, "Conn-Type", MAX(taglen, 2)) == 0) {
-			job_msg.conn_type = verify_conn_type(val);
-			if(job_msg.conn_type != (uint16_t)NO_VAL)
+			verify_conn_type(val, job_msg.conn_type);
+			if(job_msg.conn_type[0] != (uint16_t)NO_VAL)
 				update_cnt++;
 		}
 		else if (strncasecmp(tag, "Licenses", MAX(taglen, 1)) == 0) {
@@ -625,8 +706,8 @@ scontrol_update_job (int argc, char *argv[])
 		}
 		else if (!strncasecmp(tag, "EligibleTime", MAX(taglen, 2)) ||
 			 !strncasecmp(tag, "StartTime",    MAX(taglen, 2))) {
-			if((job_msg.begin_time = parse_time(val, 0))) {
-				if(job_msg.begin_time < time(NULL))
+			if ((job_msg.begin_time = parse_time(val, 0))) {
+				if (job_msg.begin_time < time(NULL))
 					job_msg.begin_time = time(NULL);
 				update_cnt++;
 			}
@@ -652,11 +733,11 @@ scontrol_update_job (int argc, char *argv[])
 
 	if (slurm_update_job(&job_msg))
 		return slurm_get_errno ();
-	else {
-		if (update_size)
-			_update_job_size(job_msg.job_id);
-		return 0;
-	}
+
+	if (update_size)
+		_update_job_size(job_msg.job_id);
+
+	return SLURM_SUCCESS;
 }
 
 /*
diff --git a/src/scontrol/update_node.c b/src/scontrol/update_node.c
index a09ce158a..0879b67a7 100644
--- a/src/scontrol/update_node.c
+++ b/src/scontrol/update_node.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -57,30 +57,30 @@ scontrol_update_node (int argc, char *argv[])
 	update_node_msg_t node_msg;
 	char *reason_str = NULL;
 	char *tag, *val;
-	int taglen, vallen;
+	int tag_len, val_len;
 
 	slurm_init_update_node_msg(&node_msg);
 	for (i=0; i<argc; i++) {
 		tag = argv[i];
 		val = strchr(argv[i], '=');
 		if (val) {
-			taglen = val - argv[i];
+			tag_len = val - argv[i];
 			val++;
-			vallen = strlen(val);
+			val_len = strlen(val);
 		} else {
 			exit_code = 1;
 			error("Invalid input: %s  Request aborted", argv[i]);
 			return -1;
 		}
-		if (strncasecmp(tag, "NodeName", MAX(taglen, 1)) == 0)
+		if (strncasecmp(tag, "NodeName", MAX(tag_len, 1)) == 0)
 			node_msg.node_names = val;
-		else if (strncasecmp(tag, "Features", MAX(taglen, 1)) == 0) {
+		else if (strncasecmp(tag, "Features", MAX(tag_len, 1)) == 0) {
 			node_msg.features = val;
 			update_cnt++;
-		} else if (strncasecmp(tag, "Gres", MAX(taglen, 1)) == 0) {
+		} else if (strncasecmp(tag, "Gres", MAX(tag_len, 1)) == 0) {
 			node_msg.gres = val;
 			update_cnt++;
-		} else if (strncasecmp(tag, "Weight", MAX(taglen,1)) == 0) {
+		} else if (strncasecmp(tag, "Weight", MAX(tag_len,1)) == 0) {
 			/* Logic borrowed from function _handle_uint32 */
 			char *endptr;
 			unsigned long num;
@@ -115,7 +115,7 @@ scontrol_update_node (int argc, char *argv[])
 			}
 			node_msg.weight = num;
 			update_cnt++;
-		} else if (strncasecmp(tag, "Reason", MAX(taglen, 1)) == 0) {
+		} else if (strncasecmp(tag, "Reason", MAX(tag_len, 1)) == 0) {
 			int len = strlen(val);
 			reason_str = xmalloc(len+1);
 			if (*val == '"')
@@ -128,36 +128,44 @@ scontrol_update_node (int argc, char *argv[])
 				reason_str[len] = '\0';
 
 			node_msg.reason = reason_str;
-			if (getlogin() == NULL ||
-			    uid_from_string(getlogin(),
-					    &node_msg.reason_uid) < 0) {
+			if ((getlogin() == NULL) ||
+			    (uid_from_string(getlogin(),
+					     &node_msg.reason_uid) < 0)) {
 				node_msg.reason_uid = getuid();
 			}
 			update_cnt++;
 		}
-		else if (strncasecmp(tag, "State", MAX(taglen, 1)) == 0) {
+		else if (strncasecmp(tag, "State", MAX(tag_len, 1)) == 0) {
+			if (cluster_flags & CLUSTER_FLAG_CRAYXT) {
+				fprintf (stderr, "%s can not be changed through"
+					 " SLURM. Use native Cray tools such as"
+					 " xtprocadmin(8)\n", argv[i]);
+				fprintf (stderr, "Request aborted\n");
+				exit_code = 1;
+				goto done;
+			}
 			if (strncasecmp(val, "NoResp",
-				        MAX(vallen, 3)) == 0) {
+				        MAX(val_len, 3)) == 0) {
 				node_msg.node_state = NODE_STATE_NO_RESPOND;
 				update_cnt++;
 			} else if (strncasecmp(val, "DRAIN",
-				   MAX(vallen, 3)) == 0) {
+				   MAX(val_len, 3)) == 0) {
 				node_msg.node_state = NODE_STATE_DRAIN;
 				update_cnt++;
 			} else if (strncasecmp(val, "FAIL",
-				   MAX(vallen, 3)) == 0) {
+				   MAX(val_len, 3)) == 0) {
 				node_msg.node_state = NODE_STATE_FAIL;
 				update_cnt++;
 			} else if (strncasecmp(val, "RESUME",
-				   MAX(vallen, 3)) == 0) {
+				   MAX(val_len, 3)) == 0) {
 				node_msg.node_state = NODE_RESUME;
 				update_cnt++;
 			} else if (strncasecmp(val, "POWER_DOWN",
-				   MAX(vallen, 7)) == 0) {
+				   MAX(val_len, 7)) == 0) {
 				node_msg.node_state = NODE_STATE_POWER_SAVE;
 				update_cnt++;
 			} else if (strncasecmp(val, "POWER_UP",
-				   MAX(vallen, 7)) == 0) {
+				   MAX(val_len, 7)) == 0) {
 				node_msg.node_state = NODE_STATE_POWER_UP;
 				update_cnt++;
 			} else {
@@ -165,7 +173,7 @@ scontrol_update_node (int argc, char *argv[])
 				for (j = 0; j < NODE_STATE_END; j++) {
 					if (strncasecmp (node_state_string(j),
 							 val,
-							 MAX(vallen, 3)) == 0){
+							 MAX(val_len, 3)) == 0){
 						state_val = (uint16_t) j;
 						break;
 					}
@@ -225,3 +233,117 @@ done:	xfree(reason_str);
 	} else
 		return 0;
 }
+
+/*
+ * scontrol_update_front_end - update the slurm front_end node configuration
+ *	per the supplied arguments
+ * IN argc - count of arguments
+ * IN argv - list of arguments
+ * RET 0 if no slurm error, errno otherwise. parsing error prints
+ *			error message and returns 0
+ */
+extern int
+scontrol_update_front_end (int argc, char *argv[])
+{
+	int i, rc = 0, update_cnt = 0;
+	update_front_end_msg_t front_end_msg;
+	char *reason_str = NULL;
+	char *tag, *val;
+	int tag_len, val_len;
+
+	slurm_init_update_front_end_msg(&front_end_msg);
+	for (i=0; i<argc; i++) {
+		tag = argv[i];
+		val = strchr(argv[i], '=');
+		if (val) {
+			tag_len = val - argv[i];
+			val++;
+			val_len = strlen(val);
+		} else {
+			exit_code = 1;
+			error("Invalid input: %s  Request aborted", argv[i]);
+			return -1;
+		}
+		if (strncasecmp(tag, "FrontendName", MAX(tag_len, 1)) == 0)
+			front_end_msg.name = val;
+		else if (strncasecmp(tag, "Reason", MAX(tag_len, 1)) == 0) {
+			int len = strlen(val);
+			reason_str = xmalloc(len+1);
+			if (*val == '"')
+				strcpy(reason_str, val+1);
+			else
+				strcpy(reason_str, val);
+
+			len = strlen(reason_str) - 1;
+			if ((len >= 0) && (reason_str[len] == '"'))
+				reason_str[len] = '\0';
+
+			front_end_msg.reason = reason_str;
+			if ((getlogin() == NULL) ||
+			    (uid_from_string(getlogin(),
+					     &front_end_msg.reason_uid) < 0)) {
+				front_end_msg.reason_uid = getuid();
+			}
+			update_cnt++;
+		}
+		else if (strncasecmp(tag, "State", MAX(tag_len, 1)) == 0) {
+			if (strncasecmp(val, "DRAIN",
+				   MAX(val_len, 3)) == 0) {
+				front_end_msg.node_state = NODE_STATE_DRAIN;
+				update_cnt++;
+			} else if (strncasecmp(val, "DOWN",
+				   MAX(val_len, 3)) == 0) {
+				front_end_msg.node_state = NODE_STATE_DOWN;
+				update_cnt++;
+			} else if (strncasecmp(val, "RESUME",
+				   MAX(val_len, 3)) == 0) {
+				front_end_msg.node_state = NODE_RESUME;
+				update_cnt++;
+			} else {
+				exit_code = 1;
+				fprintf(stderr,
+					 "Invalid input: %s\n"
+					 "Request aborted\n"
+					 "Valid states are: DRAIN RESUME\n",
+					 argv[i]);
+				goto done;
+			}
+		} else {
+			exit_code = 1;
+			fprintf(stderr, "Update of this parameter is not "
+				"supported: %s\n", argv[i]);
+			fprintf(stderr, "Request aborted\n");
+			goto done;
+		}
+	}
+
+	if ((front_end_msg.node_state == NODE_STATE_DOWN) &&
+	    ((front_end_msg.reason == NULL) ||
+	     (strlen(front_end_msg.reason) == 0))) {
+		fprintf (stderr, "You must specify a reason when DOWNING a "
+			"frontend node\nRequest aborted\n");
+		goto done;
+	}
+	if ((front_end_msg.node_state == NODE_STATE_DRAIN) &&
+	    ((front_end_msg.reason == NULL) ||
+	     (strlen(front_end_msg.reason) == 0))) {
+		fprintf (stderr, "You must specify a reason when DRAINING a "
+			"frontend node\nRequest aborted\n");
+		goto done;
+	}
+
+	if (update_cnt == 0) {
+		exit_code = 1;
+		fprintf(stderr, "No changes specified\n");
+		return 0;
+	}
+
+	rc = slurm_update_front_end(&front_end_msg);
+
+done:	xfree(reason_str);
+	if (rc) {
+		exit_code = 1;
+		return slurm_get_errno ();
+	} else
+		return 0;
+}
diff --git a/src/scontrol/update_part.c b/src/scontrol/update_part.c
index e356a9dae..88188ae6c 100644
--- a/src/scontrol/update_part.c
+++ b/src/scontrol/update_part.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -264,6 +264,36 @@ scontrol_parse_part_options (int argc, char *argv[], int *update_cnt_ptr,
 			part_msg_ptr->alternate = val;
 			(*update_cnt_ptr)++;
 		}
+		else if (strncasecmp(tag, "GraceTime", MAX(taglen, 5)) == 0) {
+			part_msg_ptr->grace_time = slurm_atoul(val);
+			(*update_cnt_ptr)++;
+		}
+		else if (strncasecmp(tag, "DefMemPerCPU",
+				     MAX(taglen, 10)) == 0) {
+			part_msg_ptr->def_mem_per_cpu = (uint32_t) strtol(val,
+							(char **) NULL, 10);
+			part_msg_ptr->def_mem_per_cpu |= MEM_PER_CPU;
+			(*update_cnt_ptr)++;
+		}
+		else if (strncasecmp(tag, "DefMemPerNode",
+				     MAX(taglen, 10)) == 0) {
+			part_msg_ptr->def_mem_per_cpu = (uint32_t) strtol(val,
+							(char **) NULL, 10);
+			(*update_cnt_ptr)++;
+		}
+		else if (strncasecmp(tag, "MaxMemPerCPU",
+				     MAX(taglen, 10)) == 0) {
+			part_msg_ptr->max_mem_per_cpu = (uint32_t) strtol(val,
+							(char **) NULL, 10);
+			part_msg_ptr->max_mem_per_cpu |= MEM_PER_CPU;
+			(*update_cnt_ptr)++;
+		}
+		else if (strncasecmp(tag, "MaxMemPerNode",
+				     MAX(taglen, 10)) == 0) {
+			part_msg_ptr->max_mem_per_cpu = (uint32_t) strtol(val,
+							(char **) NULL, 10);
+			(*update_cnt_ptr)++;
+		}
 		else {
 			exit_code = 1;
 			error("Update of this parameter is not "
diff --git a/src/scontrol/update_step.c b/src/scontrol/update_step.c
index 3b845bea5..82c8f884a 100644
--- a/src/scontrol/update_step.c
+++ b/src/scontrol/update_step.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -40,6 +40,32 @@
 #include "scontrol.h"
 #include "src/common/proc_args.h"
 
+/* Return the current time limit of the specified job/step_id or NO_VAL if the
+ * information is not available */
+static uint32_t _get_step_time(uint32_t job_id, uint32_t step_id)
+{
+	uint32_t time_limit = NO_VAL;
+	int i, rc;
+	job_step_info_response_msg_t *resp;
+
+	rc = slurm_get_job_steps((time_t) 0, job_id, step_id, &resp, SHOW_ALL);
+	if (rc == SLURM_SUCCESS) {
+		for (i = 0; i < resp->job_step_count; i++) {
+			if ((resp->job_steps[i].job_id != job_id) ||
+			    (resp->job_steps[i].step_id != step_id))
+				continue;	/* should not happen */
+			time_limit = resp->job_steps[i].time_limit;
+			break;
+		}
+		slurm_free_job_step_info_response_msg(resp);
+	} else {
+		error("Could not load state information for step %u.%u: %m",
+		      job_id, step_id);
+	}
+
+	return time_limit;
+}
+
 /*
  * scontrol_update_step - update the slurm step configuration per the supplied
  *	arguments
@@ -52,7 +78,7 @@ extern int scontrol_update_step (int argc, char *argv[])
 {
 	int i, update_cnt = 0;
 	char *tag, *val;
-	int taglen, vallen;
+	int taglen;
 	step_update_request_msg_t step_msg;
 
 	slurm_init_update_step_msg (&step_msg);
@@ -63,7 +89,6 @@ extern int scontrol_update_step (int argc, char *argv[])
 		if (val) {
 			taglen = val - argv[i];
 			val++;
-			vallen = strlen(val);
 		} else {
 			exit_code = 1;
 			fprintf (stderr, "Invalid input: %s\n", argv[i]);
@@ -86,13 +111,41 @@ extern int scontrol_update_step (int argc, char *argv[])
 			} /* else apply to all steps of this job_id */
 		}
 		else if (strncasecmp(tag, "TimeLimit", MAX(taglen, 2)) == 0) {
-			int new_limit = time_str2mins(val);
-			if ((new_limit < 0) && (new_limit != INFINITE)) {
+			bool incr, decr;
+			uint32_t step_current_time, time_limit;
+
+			incr = (val[0] == '+');
+			decr = (val[0] == '-');
+			if (incr || decr)
+				val++;
+			time_limit = time_str2mins(val);
+			if ((time_limit < 0) && (time_limit != INFINITE)) {
 				error("Invalid TimeLimit value");
 				exit_code = 1;
 				return 0;
 			}
-			step_msg.time_limit = new_limit;
+			if (incr || decr) {
+				step_current_time = _get_step_time(
+							step_msg.job_id,
+							step_msg.step_id);
+				if (step_current_time == NO_VAL) {
+					exit_code = 1;
+					return 0;
+				}
+				if (incr) {
+					time_limit += step_current_time;
+				} else if (time_limit > step_current_time) {
+					error("TimeLimit decrement larger than"
+					      " current time limit (%u > %u)",
+					      time_limit, step_current_time);
+					exit_code = 1;
+					return 0;
+				} else {
+					time_limit = step_current_time -
+						     time_limit;
+				}
+			}
+			step_msg.time_limit = time_limit;
 			update_cnt++;
 		}
 		else {
diff --git a/src/sinfo/Makefile.in b/src/sinfo/Makefile.in
index 8f9b7bef5..78ece18c1 100644
--- a/src/sinfo/Makefile.in
+++ b/src/sinfo/Makefile.in
@@ -67,6 +67,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -77,6 +78,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -123,7 +125,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -160,6 +165,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -217,6 +223,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -252,6 +259,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/sinfo/opts.c b/src/sinfo/opts.c
index 13edb8df1..b24810efc 100644
--- a/src/sinfo/opts.c
+++ b/src/sinfo/opts.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -126,7 +126,10 @@ extern void parse_command_line(int argc, char *argv[])
 		params.sort = xstrdup(env_val);
 	if ( ( env_val = getenv("SLURM_CLUSTERS") ) ) {
 		if (!(params.clusters = slurmdb_get_info_cluster(env_val))) {
-			error("'%s' invalid entry for SLURM_CLUSTERS",
+			error("'%s' can't be reached now, "
+			      "or it is an invalid entry for "
+			      "SLURM_CLUSTERS.  Use 'sacctmgr --list "
+			      "cluster' to see available clusters.",
 			      env_val);
 			exit(1);
 		}
@@ -181,7 +184,10 @@ extern void parse_command_line(int argc, char *argv[])
 				list_destroy(params.clusters);
 			if (!(params.clusters =
 			      slurmdb_get_info_cluster(optarg))) {
-				error("'%s' invalid entry for --cluster",
+				error("'%s' can't be reached now, "
+				      "or it is an invalid entry for "
+				      "--cluster.  Use 'sacctmgr --list "
+				      "cluster' to see available clusters.",
 				      optarg);
 				exit(1);
 			}
@@ -559,6 +565,7 @@ _parse_format( char* format )
 					right_justify,
 					suffix );
 		} else if (field[0] == 'H') {
+			params.match_flags.reason_timestamp_flag = true;
 			format_add_timestamp( params.format_list,
 					      field_size,
 					      right_justify,
@@ -587,11 +594,23 @@ _parse_format( char* format )
 					field_size,
 					right_justify,
 					suffix );
+		} else if (field[0] == 'n') {
+			params.match_flags.hostnames_flag = true;
+			format_add_node_hostnames( params.format_list,
+					field_size,
+					right_justify,
+					suffix );
 		} else if (field[0] == 'N') {
 			format_add_node_list( params.format_list,
 					field_size,
 					right_justify,
 					suffix );
+		} else if (field[0] == 'o') {
+			params.match_flags.node_addr_flag = true;
+			format_add_node_address( params.format_list,
+					field_size,
+					right_justify,
+					suffix );
 		} else if (field[0] == 'p') {
 			params.match_flags.priority_flag = true;
 			format_add_priority( params.format_list,
@@ -640,11 +659,13 @@ _parse_format( char* format )
 					       right_justify,
 					       suffix );
 		} else if (field[0] == 'u') {
+			params.match_flags.reason_user_flag = true;
 			format_add_user( params.format_list,
 					field_size,
 					right_justify,
 					suffix );
 		} else if (field[0] == 'U') {
+			params.match_flags.reason_user_flag = true;
 			format_add_user_long( params.format_list,
 					      field_size,
 					      right_justify,
@@ -805,6 +826,10 @@ void _print_options( void )
 			"true" : "false");
 	printf("reason_flag     = %s\n", params.match_flags.reason_flag ?
 			"true" : "false");
+	printf("reason_timestamp_flag = %s\n",
+			params.match_flags.reason_timestamp_flag ?  "true" : "false");
+	printf("reason_user_flag = %s\n",
+			params.match_flags.reason_user_flag ?  "true" : "false");
 	printf("root_flag       = %s\n", params.match_flags.root_flag ?
 			"true" : "false");
 	printf("share_flag      = %s\n", params.match_flags.share_flag ?
diff --git a/src/sinfo/print.c b/src/sinfo/print.c
index 9a58f7343..0902cc333 100644
--- a/src/sinfo/print.c
+++ b/src/sinfo/print.c
@@ -10,7 +10,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -550,6 +550,25 @@ int _print_memory(sinfo_data_t * sinfo_data, int width,
 	return SLURM_SUCCESS;
 }
 
+int _print_node_address(sinfo_data_t * sinfo_data, int width,
+			bool right_justify, char *suffix)
+{
+	if (sinfo_data) {
+		char *tmp = NULL;
+		tmp = hostlist_ranged_string_xmalloc(
+				sinfo_data->node_addr);
+		_print_str(tmp, width, right_justify, true);
+		xfree(tmp);
+	} else {
+		char *title = "NODE_ADDR";
+		_print_str(title, width, right_justify, false);
+	}
+
+	if (suffix)
+		printf("%s", suffix);
+	return SLURM_SUCCESS;
+}
+
 int _print_node_list(sinfo_data_t * sinfo_data, int width,
 			bool right_justify, char *suffix)
 {
@@ -575,6 +594,28 @@ int _print_node_list(sinfo_data_t * sinfo_data, int width,
 	return SLURM_SUCCESS;
 }
 
+int _print_node_hostnames(sinfo_data_t * sinfo_data, int width,
+			  bool right_justify, char *suffix)
+{
+	if (params.node_field_flag)
+		width = params.node_field_size;
+
+	if (sinfo_data) {
+		char *tmp = NULL;
+		tmp = hostlist_ranged_string_xmalloc(
+				sinfo_data->hostnames);
+		_print_str(tmp, width, right_justify, true);
+		xfree(tmp);
+	} else {
+		char *title = "HOSTNAMES";
+		_print_str(title, width, right_justify, false);
+	}
+
+	if (suffix)
+		printf("%s", suffix);
+	return SLURM_SUCCESS;
+}
+
 int _print_nodes_t(sinfo_data_t * sinfo_data, int width,
 		   bool right_justify, char *suffix)
 {
diff --git a/src/sinfo/print.h b/src/sinfo/print.h
index 4513545e3..118a1e064 100644
--- a/src/sinfo/print.h
+++ b/src/sinfo/print.h
@@ -3,13 +3,13 @@
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Copyright (C) 2008-2010 Lawrence Livermore National Security.
- *  Portions Copyright (C) 2010 SchedMD <http://www.schedmd.com>.
+ *  Portions Copyright (C) 2010-2011 SchedMD <http://www.schedmd.com>.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Joey Ekstrom <ekstrom1@llnl.gov>
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -41,7 +41,7 @@
 #ifndef _SINFO_PRINT_H_
 #define _SINFO_PRINT_H_
 
-#include <slurm/slurm.h>
+#include "slurm/slurm.h"
 
 #include "src/common/list.h"
 #include "src/sinfo/sinfo.h"
@@ -93,6 +93,10 @@ int  print_sinfo_list(List sinfo_list);
 	format_add_function(list,wid,right,suffix,_print_gres)
 #define format_add_memory(list,wid,right,suffix) \
 	format_add_function(list,wid,right,suffix,_print_memory)
+#define format_add_node_address(list,wid,right,suffix) \
+	format_add_function(list,wid,right,suffix,_print_node_address)
+#define format_add_node_hostnames(list,wid,right,suffix) \
+	format_add_function(list,wid,right,suffix,_print_node_hostnames)
 #define format_add_node_list(list,wid,right,suffix) \
 	format_add_function(list,wid,right,suffix,_print_node_list)
 #define format_add_nodes(list,wid,right,suffix) \
@@ -166,6 +170,10 @@ int _print_gres(sinfo_data_t * sinfo_data, int width,
 			bool right_justify, char *suffix);
 int _print_memory(sinfo_data_t * sinfo_data, int width,
 			bool right_justify, char *suffix);
+int _print_node_hostnames(sinfo_data_t * sinfo_data, int width,
+			  bool right_justify, char *suffix);
+int _print_node_address(sinfo_data_t * sinfo_data, int width,
+			bool right_justify, char *suffix);
 int _print_node_list(sinfo_data_t * sinfo_data, int width,
 			bool right_justify, char *suffix);
 int _print_nodes_t(sinfo_data_t * sinfo_data, int width,
diff --git a/src/sinfo/sinfo.c b/src/sinfo/sinfo.c
index c11972b3f..336946538 100644
--- a/src/sinfo/sinfo.c
+++ b/src/sinfo/sinfo.c
@@ -3,13 +3,13 @@
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Copyright (C) 2008-2010 Lawrence Livermore National Security.
- *  Portions Copyright (C) 2010 SchedMD <http://www.schedmd.com>.
+ *  Portions Copyright (C) 2010-2011 SchedMD <http://www.schedmd.com>.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Joey Ekstrom <ekstrom1@llnl.gov>, Morris Jette <jette1@llnl.gov>
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -48,9 +48,6 @@
 #include "src/sinfo/sinfo.h"
 #include "src/sinfo/print.h"
 
-#include "src/plugins/select/bluegene/wrap_rm_api.h"
-#include "src/plugins/select/bluegene/plugin/bluegene.h"
-
 /********************
  * Global Variables *
  ********************/
@@ -72,9 +69,9 @@ static bool _filter_out(node_info_t *node_ptr);
 static int  _get_info(bool clear_old);
 static void _sinfo_list_delete(void *data);
 static bool _match_node_data(sinfo_data_t *sinfo_ptr,
-                             node_info_t *node_ptr);
+			     node_info_t *node_ptr);
 static bool _match_part_data(sinfo_data_t *sinfo_ptr,
-                             partition_info_t* part_ptr);
+			     partition_info_t* part_ptr);
 static int  _multi_cluster(List clusters);
 static int  _query_server(partition_info_msg_t ** part_pptr,
 			  node_info_msg_t ** node_pptr,
@@ -182,21 +179,23 @@ static int _bg_report(block_info_msg_t *block_ptr)
 	}
 
 	if (!params.no_header)
-		printf("BG_BLOCK         NODES        OWNER    STATE    CONNECTION USE\n");
+		printf("BG_BLOCK         MIDPLANES       OWNER    STATE    CONNECTION USE\n");
 /*                      1234567890123456 123456789012 12345678 12345678 1234567890 12345+ */
 /*                      RMP_22Apr1544018 bg[123x456]  name     READY    TORUS      COPROCESSOR */
 
 	for (i=0; i<block_ptr->record_count; i++) {
-		printf("%-16.16s %-12.12s %-8.8s %-8.8s %-10.10s %s\n",
+		char *conn_str = conn_type_string_full(
+			block_ptr->block_array[i].conn_type);
+		printf("%-16.16s %-15.15s %-8.8s %-8.8s %-10.10s %s\n",
 		       block_ptr->block_array[i].bg_block_id,
-		       block_ptr->block_array[i].nodes,
+		       block_ptr->block_array[i].mp_str,
 		       block_ptr->block_array[i].owner_name,
 		       bg_block_state_string(
 			       block_ptr->block_array[i].state),
-		       conn_type_string(
-			       block_ptr->block_array[i].conn_type),
+		       conn_str,
 		       node_use_string(
 			       block_ptr->block_array[i].node_use));
+		xfree(conn_str);
 	}
 
 	return SLURM_SUCCESS;
@@ -207,7 +206,7 @@ static int _bg_report(block_info_msg_t *block_ptr)
  * part_pptr IN/OUT - partition information message
  * node_pptr IN/OUT - node information message
  * block_pptr IN/OUT - BlueGene block data
- * clear_old IN - If set, then always replace old data, needed when going 
+ * clear_old IN - If set, then always replace old data, needed when going
  *		  between clusters.
  * RET zero or error code
  */
@@ -502,6 +501,10 @@ static void _sort_hostlist(List sinfo_list)
 
 static bool _match_node_data(sinfo_data_t *sinfo_ptr, node_info_t *node_ptr)
 {
+	if (params.match_flags.hostnames_flag ||
+	    params.match_flags.node_addr_flag)
+		return false;
+
 	if (sinfo_ptr->nodes &&
 	    params.match_flags.features_flag &&
 	    (_strcmp(node_ptr->features, sinfo_ptr->features)))
@@ -517,6 +520,17 @@ static bool _match_node_data(sinfo_data_t *sinfo_ptr, node_info_t *node_ptr)
 	    (_strcmp(node_ptr->reason, sinfo_ptr->reason)))
 		return false;
 
+	if (sinfo_ptr->nodes &&
+	    params.match_flags.reason_timestamp_flag &&
+	    (node_ptr->reason_time != sinfo_ptr->reason_time))
+		return false;
+
+	if (sinfo_ptr->nodes &&
+	    params.match_flags.reason_user_flag &&
+	    node_ptr->reason_uid != sinfo_ptr->reason_uid) {
+		return false;
+	}
+
 	if (params.match_flags.state_flag) {
 		char *state1, *state2;
 		state1 = node_state_string(node_ptr->node_state);
@@ -562,7 +576,7 @@ static bool _match_node_data(sinfo_data_t *sinfo_ptr, node_info_t *node_ptr)
 }
 
 static bool _match_part_data(sinfo_data_t *sinfo_ptr,
-                             partition_info_t* part_ptr)
+			     partition_info_t* part_ptr)
 {
 	if (part_ptr == sinfo_ptr->part_info) /* identical partition */
 		return true;
@@ -575,7 +589,7 @@ static bool _match_part_data(sinfo_data_t *sinfo_ptr,
 
 	if (params.match_flags.groups_flag &&
 	    (_strcmp(part_ptr->allow_groups,
-	             sinfo_ptr->part_info->allow_groups)))
+		     sinfo_ptr->part_info->allow_groups)))
 		return false;
 
 	if (params.match_flags.job_size_flag &&
@@ -692,7 +706,9 @@ static void _update_sinfo(sinfo_data_t *sinfo_ptr, node_info_t *node_ptr,
 			sinfo_ptr->max_weight = node_ptr->weight;
 	}
 
-	hostlist_push(sinfo_ptr->nodes, node_ptr->name);
+	hostlist_push(sinfo_ptr->nodes,     node_ptr->name);
+	hostlist_push(sinfo_ptr->node_addr, node_ptr->node_addr);
+	hostlist_push(sinfo_ptr->hostnames, node_ptr->node_hostname);
 
 	total_cpus = node_ptr->cpus;
 	total_nodes = node_scaling;
@@ -898,7 +914,9 @@ static sinfo_data_t *_create_sinfo(partition_info_t* part_ptr,
 
 	sinfo_ptr->part_info = part_ptr;
 	sinfo_ptr->part_inx = part_inx;
-	sinfo_ptr->nodes = hostlist_create("");
+	sinfo_ptr->nodes     = hostlist_create("");
+	sinfo_ptr->node_addr = hostlist_create("");
+	sinfo_ptr->hostnames = hostlist_create("");
 
 	if (node_ptr)
 		_update_sinfo(sinfo_ptr, node_ptr, node_scaling);
@@ -911,6 +929,8 @@ static void _sinfo_list_delete(void *data)
 	sinfo_data_t *sinfo_ptr = data;
 
 	hostlist_destroy(sinfo_ptr->nodes);
+	hostlist_destroy(sinfo_ptr->node_addr);
+	hostlist_destroy(sinfo_ptr->hostnames);
 	xfree(sinfo_ptr);
 }
 
@@ -925,4 +945,3 @@ static int _strcmp(char *data1, char *data2)
 		data2 = null_str;
 	return strcmp(data1, data2);
 }
-
diff --git a/src/sinfo/sinfo.h b/src/sinfo/sinfo.h
index 06844306e..9b5a88998 100644
--- a/src/sinfo/sinfo.h
+++ b/src/sinfo/sinfo.h
@@ -1,7 +1,5 @@
 /****************************************************************************\
  *  sinfo.h - definitions used for sinfo data functions
- *
- *  $Id$
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Copyright (C) 2008-2010 Lawrence Livermore National Security.
@@ -11,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -62,7 +60,7 @@
 #  endif
 #endif  /* HAVE_INTTYPES_H */
 
-#include <slurm/slurm.h>
+#include "slurm/slurm.h"
 
 #include "src/common/hostlist.h"
 #include "src/common/list.h"
@@ -107,6 +105,8 @@ typedef struct {
 	time_t reason_time;
 	uint32_t reason_uid;
 
+	hostlist_t hostnames;
+	hostlist_t node_addr;
 	hostlist_t nodes;
 	hostlist_t ionodes;
 
@@ -129,10 +129,12 @@ struct sinfo_match_flags {
 	bool features_flag;
 	bool groups_flag;
 	bool gres_flag;
+	bool hostnames_flag;
 	bool job_size_flag;
 	bool default_time_flag;
 	bool max_time_flag;
 	bool memory_flag;
+	bool node_addr_flag;
 	bool partition_flag;
 	bool preempt_mode_flag;
 	bool priority_flag;
@@ -141,6 +143,8 @@ struct sinfo_match_flags {
 	bool share_flag;
 	bool state_flag;
 	bool weight_flag;
+	bool reason_timestamp_flag;
+	bool reason_user_flag;
 };
 
 /* Input parameters */
diff --git a/src/sinfo/sort.c b/src/sinfo/sort.c
index 24c8eac2b..231570b19 100644
--- a/src/sinfo/sort.c
+++ b/src/sinfo/sort.c
@@ -3,14 +3,14 @@
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Copyright (C) 2008-2010 Lawrence Livermore National Security.
- *  Portions Copyright (C) 2010 SchedMD <http://www.schedmd.com>.
+ *  Portions Copyright (C) 2010-2011 SchedMD <http://www.schedmd.com>.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Joey Ekstrom <ekstrom1@llnl.gov>,
  *             Morris Jette <jette1@llnl.gov>, et. al.
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -59,10 +59,12 @@ static int _sort_by_threads(void *void1, void *void2);
 static int _sort_by_disk(void *void1, void *void2);
 static int _sort_by_features(void *void1, void *void2);
 static int _sort_by_groups(void *void1, void *void2);
+static int _sort_by_hostnames(void *void1, void *void2);
 static int _sort_by_job_size(void *void1, void *void2);
 static int _sort_by_max_time(void *void1, void *void2);
 static int _sort_by_memory(void *void1, void *void2);
 static int _sort_by_node_list(void *void1, void *void2);
+static int _sort_by_node_addr(void *void1, void *void2);
 static int _sort_by_nodes_ai(void *void1, void *void2);
 static int _sort_by_nodes(void *void1, void *void2);
 static int _sort_by_partition(void *void1, void *void2);
@@ -127,8 +129,12 @@ void sort_sinfo_list(List sinfo_list)
 				list_sort(sinfo_list, _sort_by_memory);
 		else if (params.sort[i] == 'M')
 				list_sort(sinfo_list, _sort_by_preempt_mode);
+		else if (params.sort[i] == 'n')
+				list_sort(sinfo_list, _sort_by_hostnames);
 		else if (params.sort[i] == 'N')
 				list_sort(sinfo_list, _sort_by_node_list);
+		else if (params.sort[i] == 'o')
+				list_sort(sinfo_list, _sort_by_node_addr);
 		else if (params.sort[i] == 'p')
 				list_sort(sinfo_list, _sort_by_priority);
 		else if (params.sort[i] == 'P')
@@ -301,6 +307,116 @@ static int _sort_by_groups(void *void1, void *void2)
 	return diff;
 }
 
+static int _sort_by_node_addr(void *void1, void *void2)
+{
+	int diff = 0;
+	sinfo_data_t *sinfo1 = (sinfo_data_t *) void1;
+	sinfo_data_t *sinfo2 = (sinfo_data_t *) void2;
+	char *val1, *val2;
+#if	PURE_ALPHA_SORT == 0
+	int inx;
+#endif
+
+	val1 = hostlist_shift(sinfo1->node_addr);
+	if (val1) {
+		hostlist_push_host(sinfo1->node_addr, val1);
+		hostlist_sort(sinfo1->node_addr);
+	} else
+		val1 = "";
+
+	val2 = hostlist_shift(sinfo2->node_addr);
+	if (val2) {
+		hostlist_push_host(sinfo2->node_addr, val2);
+		hostlist_sort(sinfo2->node_addr);
+	} else
+		val2 = "";
+
+#if	PURE_ALPHA_SORT
+	diff = strcmp(val1, val2);
+#else
+	for (inx=0; ; inx++) {
+		if (val1[inx] == val2[inx]) {
+			if (val1[inx] == '\0')
+				break;
+			continue;
+		}
+		if ((isdigit((int)val1[inx])) &&
+		    (isdigit((int)val2[inx]))) {
+			int num1, num2;
+			num1 = atoi(val1+inx);
+			num2 = atoi(val2+inx);
+			diff = num1 - num2;
+		} else
+			diff = strcmp(val1, val2);
+		break;
+	}
+#endif
+	if (strlen(val1))
+		free(val1);
+	if (strlen(val2))
+		free(val2);
+
+	if (reverse_order)
+		diff = -diff;
+
+	return diff;
+}
+
+static int _sort_by_hostnames(void *void1, void *void2)
+{
+	int diff = 0;
+	sinfo_data_t *sinfo1 = (sinfo_data_t *) void1;
+	sinfo_data_t *sinfo2 = (sinfo_data_t *) void2;
+	char *val1, *val2;
+#if	PURE_ALPHA_SORT == 0
+	int inx;
+#endif
+
+	val1 = hostlist_shift(sinfo1->hostnames);
+	if (val1) {
+		hostlist_push_host(sinfo1->hostnames, val1);
+		hostlist_sort(sinfo1->hostnames);
+	} else
+		val1 = "";
+
+	val2 = hostlist_shift(sinfo2->hostnames);
+	if (val2) {
+		hostlist_push_host(sinfo2->hostnames, val2);
+		hostlist_sort(sinfo2->hostnames);
+	} else
+		val2 = "";
+
+#if	PURE_ALPHA_SORT
+	diff = strcmp(val1, val2);
+#else
+	for (inx=0; ; inx++) {
+		if (val1[inx] == val2[inx]) {
+			if (val1[inx] == '\0')
+				break;
+			continue;
+		}
+		if ((isdigit((int)val1[inx])) &&
+		    (isdigit((int)val2[inx]))) {
+			int num1, num2;
+			num1 = atoi(val1+inx);
+			num2 = atoi(val2+inx);
+			diff = num1 - num2;
+		} else
+			diff = strcmp(val1, val2);
+		break;
+	}
+#endif
+	if (strlen(val1))
+		free(val1);
+	if (strlen(val2))
+		free(val2);
+
+	if (reverse_order)
+		diff = -diff;
+
+	return diff;
+}
+
 static int _sort_by_job_size(void *void1, void *void2)
 {
 	int diff;
diff --git a/src/slurmctld/Makefile.am b/src/slurmctld/Makefile.am
index 79a6375eb..20cacbe29 100644
--- a/src/slurmctld/Makefile.am
+++ b/src/slurmctld/Makefile.am
@@ -6,22 +6,18 @@ CLEANFILES = core.*
 
 INCLUDES = -I$(top_srcdir) $(BG_INCLUDES)
 
-sbin_PROGRAMS = slurmctld
-
-slurmctld_LDADD = 					\
-	$(top_builddir)/src/common/libdaemonize.la  \
-	$(top_builddir)/src/api/libslurm.o -ldl
-
-
-slurmctld_SOURCES = 	\
+# noinst_LTLIBRARIES = libslurmctld.la
+# libslurmctld_la_LDFLAGS  = $(LIB_LDFLAGS) -module --export-dynamic
+# libslurmctld_la_SOURCES =
+slurmctld_SOURCES =     \
 	acct_policy.c	\
 	acct_policy.h	\
 	agent.c  	\
 	agent.h		\
 	backup.c	\
-	basil_interface.c \
-	basil_interface.h \
 	controller.c 	\
+	front_end.c	\
+	front_end.h	\
 	gang.c		\
 	gang.h		\
 	groups.c	\
@@ -63,8 +59,13 @@ slurmctld_SOURCES = 	\
 	trigger_mgr.c	\
 	trigger_mgr.h
 
-slurmctld_LDFLAGS = -export-dynamic $(CMD_LDFLAGS)
 
+sbin_PROGRAMS = slurmctld
+
+slurmctld_LDADD = 				    \
+	$(top_builddir)/src/common/libdaemonize.la  \
+	$(top_builddir)/src/api/libslurm.o -ldl
+slurmctld_LDFLAGS = -export-dynamic $(CMD_LDFLAGS)
 
 force:
 $(slurmctld_LDADD) : force
diff --git a/src/slurmctld/Makefile.in b/src/slurmctld/Makefile.in
index 75d9d02c7..e47fcd5e6 100644
--- a/src/slurmctld/Makefile.in
+++ b/src/slurmctld/Makefile.in
@@ -65,6 +65,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -75,6 +76,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -86,9 +88,9 @@ CONFIG_CLEAN_VPATH_FILES =
 am__installdirs = "$(DESTDIR)$(sbindir)"
 PROGRAMS = $(sbin_PROGRAMS)
 am_slurmctld_OBJECTS = acct_policy.$(OBJEXT) agent.$(OBJEXT) \
-	backup.$(OBJEXT) basil_interface.$(OBJEXT) \
-	controller.$(OBJEXT) gang.$(OBJEXT) groups.$(OBJEXT) \
-	job_mgr.$(OBJEXT) job_scheduler.$(OBJEXT) job_submit.$(OBJEXT) \
+	backup.$(OBJEXT) controller.$(OBJEXT) front_end.$(OBJEXT) \
+	gang.$(OBJEXT) groups.$(OBJEXT) job_mgr.$(OBJEXT) \
+	job_scheduler.$(OBJEXT) job_submit.$(OBJEXT) \
 	licenses.$(OBJEXT) locks.$(OBJEXT) node_mgr.$(OBJEXT) \
 	node_scheduler.$(OBJEXT) partition_mgr.$(OBJEXT) \
 	ping_nodes.$(OBJEXT) port_mgr.$(OBJEXT) power_save.$(OBJEXT) \
@@ -130,7 +132,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -167,6 +172,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -224,6 +230,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -259,6 +266,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
@@ -314,19 +322,19 @@ top_srcdir = @top_srcdir@
 AUTOMAKE_OPTIONS = foreign
 CLEANFILES = core.*
 INCLUDES = -I$(top_srcdir) $(BG_INCLUDES)
-slurmctld_LDADD = \
-	$(top_builddir)/src/common/libdaemonize.la  \
-	$(top_builddir)/src/api/libslurm.o -ldl
 
+# noinst_LTLIBRARIES = libslurmctld.la
+# libslurmctld_la_LDFLAGS  = $(LIB_LDFLAGS) -module --export-dynamic
+# libslurmctld_la_SOURCES =
 slurmctld_SOURCES = \
 	acct_policy.c	\
 	acct_policy.h	\
 	agent.c  	\
 	agent.h		\
 	backup.c	\
-	basil_interface.c \
-	basil_interface.h \
 	controller.c 	\
+	front_end.c	\
+	front_end.h	\
 	gang.c		\
 	gang.h		\
 	groups.c	\
@@ -368,6 +376,10 @@ slurmctld_SOURCES = \
 	trigger_mgr.c	\
 	trigger_mgr.h
 
+slurmctld_LDADD = \
+	$(top_builddir)/src/common/libdaemonize.la  \
+	$(top_builddir)/src/api/libslurm.o -ldl
+
 slurmctld_LDFLAGS = -export-dynamic $(CMD_LDFLAGS)
 all: all-am
 
@@ -459,8 +471,8 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/acct_policy.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/agent.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/backup.Po@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/basil_interface.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/controller.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/front_end.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/gang.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/groups.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/job_mgr.Po@am__quote@
diff --git a/src/slurmctld/acct_policy.c b/src/slurmctld/acct_policy.c
index 60beb3353..2bd018516 100644
--- a/src/slurmctld/acct_policy.c
+++ b/src/slurmctld/acct_policy.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -40,7 +40,7 @@
 #  include "config.h"
 #endif
 
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm_errno.h"
 
 #include "src/common/assoc_mgr.h"
 #include "src/common/slurm_accounting_storage.h"
@@ -57,6 +57,24 @@ enum {
 	ACCT_POLICY_JOB_FINI
 };
 
+static slurmdb_used_limits_t *_get_used_limits_for_user(
+	List user_limit_list, uint32_t user_id)
+{
+	slurmdb_used_limits_t *used_limits = NULL;
+	ListIterator itr = NULL;
+
+	if (!user_limit_list)
+		return NULL;
+
+	itr = list_iterator_create(user_limit_list);
+	while ((used_limits = list_next(itr))) {
+		if (used_limits->uid == user_id)
+			break;
+	}
+	list_iterator_destroy(itr);
+
+	return used_limits;
+}
 static void _cancel_job(struct job_record *job_ptr)
 {
 	time_t now = time(NULL);
@@ -71,6 +89,21 @@ static void _cancel_job(struct job_record *job_ptr)
 	delete_job_details(job_ptr);
 }
 
+static uint64_t _get_unused_cpu_run_secs(struct job_record *job_ptr)
+{
+	uint64_t unused_cpu_run_secs = 0;
+	uint64_t time_limit_secs = (uint64_t)job_ptr->time_limit * 60;
+
+	/* No unused cpu_run_secs if job ran past its time limit */
+	if (job_ptr->end_time >= job_ptr->start_time + time_limit_secs) {
+		return 0;
+	}
+
+	unused_cpu_run_secs = job_ptr->total_cpus *
+		(job_ptr->start_time + time_limit_secs - job_ptr->end_time);
+	return unused_cpu_run_secs;
+}
+
 static bool _valid_job_assoc(struct job_record *job_ptr)
 {
 	slurmdb_association_rec_t assoc_rec, *assoc_ptr;
@@ -107,28 +140,32 @@ static void _adjust_limit_usage(int type, struct job_record *job_ptr)
 	slurmdb_association_rec_t *assoc_ptr = NULL;
 	assoc_mgr_lock_t locks = { WRITE_LOCK, NO_LOCK,
 				   WRITE_LOCK, NO_LOCK, NO_LOCK };
+	uint64_t unused_cpu_run_secs = 0;
+	uint64_t used_cpu_run_secs = 0;
 
 	if (!(accounting_enforce & ACCOUNTING_ENFORCE_LIMITS)
 	    || !_valid_job_assoc(job_ptr))
 		return;
 
+	if (type == ACCT_POLICY_JOB_FINI)
+		unused_cpu_run_secs = _get_unused_cpu_run_secs(job_ptr);
+	else if (type == ACCT_POLICY_JOB_BEGIN)
+		used_cpu_run_secs = (uint64_t)job_ptr->total_cpus
+			* (uint64_t)job_ptr->time_limit * 60;
+
 	assoc_mgr_lock(&locks);
 	if (job_ptr->qos_ptr && (accounting_enforce & ACCOUNTING_ENFORCE_QOS)) {
-		ListIterator itr = NULL;
 		slurmdb_qos_rec_t *qos_ptr = NULL;
 		slurmdb_used_limits_t *used_limits = NULL;
 
 		qos_ptr = (slurmdb_qos_rec_t *)job_ptr->qos_ptr;
-		if(!qos_ptr->usage->user_limit_list)
+		if (!qos_ptr->usage->user_limit_list)
 			qos_ptr->usage->user_limit_list =
 				list_create(slurmdb_destroy_used_limits);
-		itr = list_iterator_create(qos_ptr->usage->user_limit_list);
-		while((used_limits = list_next(itr))) {
-			if(used_limits->uid == job_ptr->user_id)
-				break;
-		}
-		list_iterator_destroy(itr);
-		if(!used_limits) {
+		used_limits = _get_used_limits_for_user(
+			qos_ptr->usage->user_limit_list,
+			job_ptr->user_id);
+		if (!used_limits) {
 			used_limits = xmalloc(sizeof(slurmdb_used_limits_t));
 			used_limits->uid = job_ptr->user_id;
 			list_append(qos_ptr->usage->user_limit_list,
@@ -159,35 +196,73 @@ static void _adjust_limit_usage(int type, struct job_record *job_ptr)
 			qos_ptr->usage->grp_used_jobs++;
 			qos_ptr->usage->grp_used_cpus += job_ptr->total_cpus;
 			qos_ptr->usage->grp_used_nodes += job_ptr->node_cnt;
+			qos_ptr->usage->grp_used_cpu_run_secs +=
+				used_cpu_run_secs;
 			used_limits->jobs++;
+			used_limits->cpus += job_ptr->total_cpus;
+			used_limits->nodes += job_ptr->node_cnt;
 			break;
 		case ACCT_POLICY_JOB_FINI:
-			if(qos_ptr->usage->grp_used_jobs)
-				qos_ptr->usage->grp_used_jobs--;
-			else
+
+			qos_ptr->usage->grp_used_jobs--;
+			if ((int32_t)qos_ptr->usage->grp_used_jobs < 0) {
+				qos_ptr->usage->grp_used_jobs = 0;
 				debug2("acct_policy_job_fini: used_jobs "
 				       "underflow for qos %s", qos_ptr->name);
+			}
 
 			qos_ptr->usage->grp_used_cpus -= job_ptr->total_cpus;
-			if((int32_t)qos_ptr->usage->grp_used_cpus < 0) {
+			if ((int32_t)qos_ptr->usage->grp_used_cpus < 0) {
 				qos_ptr->usage->grp_used_cpus = 0;
 				debug2("acct_policy_job_fini: grp_used_cpus "
 				       "underflow for qos %s", qos_ptr->name);
 			}
 
 			qos_ptr->usage->grp_used_nodes -= job_ptr->node_cnt;
-			if((int32_t)qos_ptr->usage->grp_used_nodes < 0) {
+			if ((int32_t)qos_ptr->usage->grp_used_nodes < 0) {
 				qos_ptr->usage->grp_used_nodes = 0;
 				debug2("acct_policy_job_fini: grp_used_nodes "
 				       "underflow for qos %s", qos_ptr->name);
 			}
 
-			if(used_limits->jobs)
-				used_limits->jobs--;
-			else
+			/* If the job finished early remove the extra
+			   time now. */
+			if (unused_cpu_run_secs >
+			    qos_ptr->usage->grp_used_cpu_run_secs) {
+				qos_ptr->usage->grp_used_cpu_run_secs = 0;
+				info("acct_policy_job_fini: "
+				       "grp_used_cpu_run_secs "
+				       "underflow for qos %s", qos_ptr->name);
+			} else
+				qos_ptr->usage->grp_used_cpu_run_secs -=
+					unused_cpu_run_secs;
+
+			used_limits->cpus -= job_ptr->total_cpus;
+			if ((int32_t)used_limits->cpus < 0) {
+				used_limits->cpus = 0;
+				debug2("acct_policy_job_fini: "
+				       "used_limits->cpus "
+				       "underflow for qos %s user %d",
+				       qos_ptr->name, used_limits->uid);
+			}
+
+			used_limits->jobs--;
+			if ((int32_t)used_limits->jobs < 0) {
+				used_limits->jobs = 0;
 				debug2("acct_policy_job_fini: used_jobs "
 				       "underflow for qos %s user %d",
 				       qos_ptr->name, used_limits->uid);
+			}
+
+			used_limits->nodes -= job_ptr->node_cnt;
+			if ((int32_t)used_limits->nodes < 0) {
+				used_limits->nodes = 0;
+				debug2("acct_policy_job_fini: "
+				       "used_limits->nodes"
+				       "underflow for qos %s user %d",
+				       qos_ptr->name, used_limits->uid);
+			}
+
 			break;
 		default:
 			error("acct_policy: qos unknown type %d", type);
@@ -214,6 +289,12 @@ static void _adjust_limit_usage(int type, struct job_record *job_ptr)
 			assoc_ptr->usage->used_jobs++;
 			assoc_ptr->usage->grp_used_cpus += job_ptr->total_cpus;
 			assoc_ptr->usage->grp_used_nodes += job_ptr->node_cnt;
+			assoc_ptr->usage->grp_used_cpu_run_secs +=
+				used_cpu_run_secs;
+			debug4("acct_policy_job_begin: after adding job %i, "
+			       "assoc %s grp_used_cpu_run_secs is %"PRIu64"",
+			       job_ptr->job_id, assoc_ptr->acct,
+			       assoc_ptr->usage->grp_used_cpu_run_secs);
 			break;
 		case ACCT_POLICY_JOB_FINI:
 			if (assoc_ptr->usage->used_jobs)
@@ -238,6 +319,28 @@ static void _adjust_limit_usage(int type, struct job_record *job_ptr)
 				       "underflow for account %s",
 				       assoc_ptr->acct);
 			}
+
+			/* If the job finished early remove the extra
+			   time now. */
+			if (unused_cpu_run_secs >
+			    assoc_ptr->usage->grp_used_cpu_run_secs) {
+				assoc_ptr->usage->grp_used_cpu_run_secs = 0;
+				debug2("acct_policy_job_fini: "
+				       "grp_used_cpu_run_secs "
+				       "underflow for account %s",
+				       assoc_ptr->acct);
+			} else {
+				assoc_ptr->usage->grp_used_cpu_run_secs -=
+					unused_cpu_run_secs;
+				debug4("acct_policy_job_fini: job %u. "
+				       "Removed %"PRIu64" unused seconds "
+				       "from assoc %s "
+				       "grp_used_cpu_run_secs = %"PRIu64"",
+				       job_ptr->job_id, unused_cpu_run_secs,
+				       assoc_ptr->acct,
+				       assoc_ptr->usage->grp_used_cpu_run_secs);
+			}
+
 			break;
 		default:
 			error("acct_policy: association unknown type %d", type);
@@ -299,6 +402,8 @@ extern bool acct_policy_validate(job_desc_msg_t *job_desc,
 	int parent = 0;
 	char *user_name = NULL;
 	bool rc = true;
+	uint32_t qos_max_cpus_limit = INFINITE;
+	uint32_t qos_max_nodes_limit = INFINITE;
 	assoc_mgr_lock_t locks = { READ_LOCK, NO_LOCK,
 				   READ_LOCK, NO_LOCK, NO_LOCK };
 
@@ -315,13 +420,29 @@ extern bool acct_policy_validate(job_desc_msg_t *job_desc,
 
 	assoc_mgr_lock(&locks);
 	if (qos_ptr) {
+		slurmdb_used_limits_t *used_limits = NULL;
+
 		/* for validation we don't need to look at
 		 * qos_ptr->grp_cpu_mins.
 		 */
+		qos_max_cpus_limit =
+			MIN(qos_ptr->grp_cpus, qos_ptr->max_cpus_pu);
 		if (((*limit_set_max_cpus) == ADMIN_SET_LIMIT)
-		    || (qos_ptr->grp_cpus == INFINITE)
+		    || (qos_max_cpus_limit == INFINITE)
 		    || (update_call && (job_desc->max_cpus == NO_VAL))) {
 			/* no need to check/set */
+		} else if ((job_desc->min_cpus != NO_VAL)
+			   && (job_desc->min_cpus > qos_ptr->max_cpus_pu)) {
+			info("job submit for user %s(%u): "
+			     "min cpu request %u exceeds "
+			     "per-user max cpu limit %u for qos '%s'",
+			     user_name,
+			     job_desc->user_id,
+			     job_desc->min_cpus,
+			     qos_ptr->max_cpus_pu,
+			     qos_ptr->name);
+			rc = false;
+			goto end_it;
 		} else if ((job_desc->min_cpus != NO_VAL)
 			   && (job_desc->min_cpus > qos_ptr->grp_cpus)) {
 			info("job submit for user %s(%u): "
@@ -336,30 +457,44 @@ extern bool acct_policy_validate(job_desc_msg_t *job_desc,
 			goto end_it;
 		} else if ((job_desc->max_cpus == NO_VAL)
 			   || ((*limit_set_max_cpus)
-			       && (job_desc->max_cpus > qos_ptr->grp_cpus))) {
-			job_desc->max_cpus = qos_ptr->grp_cpus;
+			       && (job_desc->max_cpus > qos_max_cpus_limit))) {
+			job_desc->max_cpus = qos_max_cpus_limit;
 			(*limit_set_max_cpus) = 1;
-		} else if (job_desc->max_cpus > qos_ptr->grp_cpus) {
+		} else if (job_desc->max_cpus > qos_max_cpus_limit) {
 			info("job submit for user %s(%u): "
 			     "max cpu changed %u -> %u because "
 			     "of qos limit",
 			     user_name,
 			     job_desc->user_id,
 			     job_desc->max_cpus,
-			     qos_ptr->grp_cpus);
+			     qos_max_cpus_limit);
 			if (job_desc->max_cpus == NO_VAL)
 				(*limit_set_max_cpus) = 1;
-			job_desc->max_cpus = qos_ptr->grp_cpus;
+			job_desc->max_cpus = qos_max_cpus_limit;
 		}
 
 		/* for validation we don't need to look at
 		 * qos_ptr->grp_jobs.
 		 */
 
+		qos_max_nodes_limit =
+			MIN(qos_ptr->grp_nodes, qos_ptr->max_nodes_pu);
 		if (((*limit_set_max_nodes) == ADMIN_SET_LIMIT)
-		    || (qos_ptr->grp_nodes == INFINITE)
+		    || (qos_max_nodes_limit == INFINITE)
 		    || (update_call && (job_desc->max_nodes == NO_VAL))) {
 			/* no need to check/set */
+		} else if ((job_desc->min_nodes != NO_VAL)
+			   && (job_desc->min_nodes > qos_ptr->max_nodes_pu)) {
+			info("job submit for user %s(%u): "
+			     "min node request %u exceeds "
+			     "per-user max node limit %u for qos '%s'",
+			     user_name,
+			     job_desc->user_id,
+			     job_desc->min_nodes,
+			     qos_ptr->max_nodes_pu,
+			     qos_ptr->name);
+			rc = false;
+			goto end_it;
 		} else if ((job_desc->min_nodes != NO_VAL)
 			   && (job_desc->min_nodes > qos_ptr->grp_nodes)) {
 			info("job submit for user %s(%u): "
@@ -374,20 +509,21 @@ extern bool acct_policy_validate(job_desc_msg_t *job_desc,
 			goto end_it;
 		} else if ((job_desc->max_nodes == 0)
 			   || ((*limit_set_max_nodes)
-			       && (job_desc->max_nodes > qos_ptr->grp_nodes))) {
-			job_desc->max_nodes = qos_ptr->grp_nodes;
+			       && (job_desc->max_nodes
+				   > qos_max_nodes_limit))) {
+			job_desc->max_nodes = qos_max_nodes_limit;
 			(*limit_set_max_nodes) = 1;
-		} else if (job_desc->max_nodes > qos_ptr->grp_nodes) {
+		} else if (job_desc->max_nodes > qos_max_nodes_limit) {
 			info("job submit for user %s(%u): "
 			     "max node changed %u -> %u because "
 			     "of qos limit",
 			     user_name,
 			     job_desc->user_id,
 			     job_desc->max_nodes,
-			     qos_ptr->grp_nodes);
+			     qos_max_nodes_limit);
 			if (job_desc->max_nodes == NO_VAL)
 				(*limit_set_max_nodes) = 1;
-			job_desc->max_nodes = qos_ptr->grp_nodes;
+			job_desc->max_nodes = qos_max_nodes_limit;
 		}
 
 		if ((qos_ptr->grp_submit_jobs != INFINITE) &&
@@ -488,17 +624,10 @@ extern bool acct_policy_validate(job_desc_msg_t *job_desc,
 		}
 
 		if (qos_ptr->max_submit_jobs_pu != INFINITE) {
-			slurmdb_used_limits_t *used_limits = NULL;
-			if (qos_ptr->usage->user_limit_list) {
-				ListIterator itr = list_iterator_create(
-					qos_ptr->usage->user_limit_list);
-				while((used_limits = list_next(itr))) {
-					if (used_limits->uid
-					    == job_desc->user_id)
-						break;
-				}
-				list_iterator_destroy(itr);
-			}
+			if (!used_limits)
+				used_limits = _get_used_limits_for_user(
+					qos_ptr->usage->user_limit_list,
+					job_desc->user_id);
 			if (used_limits && (used_limits->submit_jobs
 					    >= qos_ptr->max_submit_jobs_pu)) {
 				info("job submit for user %s(%u): "
@@ -796,6 +925,7 @@ extern bool acct_policy_job_runnable(struct job_record *job_ptr)
 	uint32_t time_limit;
 	uint64_t cpu_time_limit;
 	uint64_t job_cpu_time_limit;
+	uint64_t cpu_run_mins;
 	bool rc = true;
 	uint64_t usage_mins;
 	uint32_t wall_mins;
@@ -820,11 +950,13 @@ extern bool acct_policy_job_runnable(struct job_record *job_ptr)
 		return true;
 
 	/* clear old state reason */
-        if ((job_ptr->state_reason == WAIT_ASSOC_JOB_LIMIT) ||
+	if ((job_ptr->state_reason == WAIT_ASSOC_JOB_LIMIT) ||
 	    (job_ptr->state_reason == WAIT_ASSOC_RESOURCE_LIMIT) ||
-	    (job_ptr->state_reason == WAIT_ASSOC_TIME_LIMIT))
-                job_ptr->state_reason = WAIT_NO_REASON;
-
+	    (job_ptr->state_reason == WAIT_ASSOC_TIME_LIMIT) ||
+	    (job_ptr->state_reason == WAIT_QOS_JOB_LIMIT) ||
+	    (job_ptr->state_reason == WAIT_QOS_RESOURCE_LIMIT) ||
+	    (job_ptr->state_reason == WAIT_QOS_TIME_LIMIT))
+		job_ptr->state_reason = WAIT_NO_REASON;
 
 	job_cpu_time_limit = (uint64_t)job_ptr->time_limit
 		* (uint64_t)job_ptr->details->min_cpus;
@@ -832,12 +964,18 @@ extern bool acct_policy_job_runnable(struct job_record *job_ptr)
 	assoc_mgr_lock(&locks);
 	qos_ptr = job_ptr->qos_ptr;
 	if(qos_ptr) {
+		slurmdb_used_limits_t *used_limits = NULL;
 		usage_mins = (uint64_t)(qos_ptr->usage->usage_raw / 60.0);
 		wall_mins = qos_ptr->usage->grp_used_wall / 60;
+		cpu_run_mins = qos_ptr->usage->grp_used_cpu_run_secs / 60;
 
+		/*
+		 * If the QOS has a GrpCPU limit set and the current usage
+		 * of the QOS exceeds that limit then hold the job
+		 */
 		if ((qos_ptr->grp_cpu_mins != (uint64_t)INFINITE)
 		    && (usage_mins >= qos_ptr->grp_cpu_mins)) {
-			job_ptr->state_reason = WAIT_ASSOC_JOB_LIMIT;
+			job_ptr->state_reason = WAIT_QOS_JOB_LIMIT;
 			xfree(job_ptr->state_desc);
 			debug2("Job %u being held, "
 			       "the job is at or exceeds QOS %s's "
@@ -851,6 +989,11 @@ extern bool acct_policy_job_runnable(struct job_record *job_ptr)
 			goto end_it;
 		}
 
+		/* If the JOB's cpu limit wasn't administratively set and the
+		 * QOS has a GrpCPU limit, cancel the job if its minimum
+		 * cpu requirement has exceeded the limit for all CPUs
+		 * usable by the QOS
+		 */
 		if ((job_ptr->limit_set_min_cpus != ADMIN_SET_LIMIT)
 		    && qos_ptr->grp_cpus != INFINITE) {
 			if (job_ptr->details->min_cpus > qos_ptr->grp_cpus) {
@@ -870,7 +1013,7 @@ extern bool acct_policy_job_runnable(struct job_record *job_ptr)
 			if ((qos_ptr->usage->grp_used_cpus +
 			     job_ptr->details->min_cpus) > qos_ptr->grp_cpus) {
 				job_ptr->state_reason =
-					WAIT_ASSOC_RESOURCE_LIMIT;
+					WAIT_QOS_RESOURCE_LIMIT;
 				xfree(job_ptr->state_desc);
 				debug2("job %u being held, "
 				       "the job is at or exceeds "
@@ -889,7 +1032,7 @@ extern bool acct_policy_job_runnable(struct job_record *job_ptr)
 
 		if ((qos_ptr->grp_jobs != INFINITE) &&
 		    (qos_ptr->usage->grp_used_jobs >= qos_ptr->grp_jobs)) {
-			job_ptr->state_reason = WAIT_ASSOC_JOB_LIMIT;
+			job_ptr->state_reason = WAIT_QOS_JOB_LIMIT;
 			xfree(job_ptr->state_desc);
 			debug2("job %u being held, "
 			       "the job is at or exceeds "
@@ -902,6 +1045,28 @@ extern bool acct_policy_job_runnable(struct job_record *job_ptr)
 			goto end_it;
 		}
 
+		if (qos_ptr->grp_cpu_run_mins != INFINITE) {
+			if (cpu_run_mins + job_cpu_time_limit >
+			    qos_ptr->grp_cpu_run_mins) {
+				job_ptr->state_reason =
+					WAIT_ASSOC_RESOURCE_LIMIT;
+				xfree(job_ptr->state_desc);
+				debug2("job %u being held, "
+				       "qos %s is at or exceeds "
+				       "group max running cpu minutes "
+				       "limit %"PRIu64" with already "
+				       "used %"PRIu64" + requested %"PRIu64" "
+				       "for qos '%s'",
+				       job_ptr->job_id, qos_ptr->name,
+				       qos_ptr->grp_cpu_run_mins,
+				       cpu_run_mins,
+				       job_cpu_time_limit,
+				       qos_ptr->name);
+				rc = false;
+				goto end_it;
+			}
+		}
+
 		if ((job_ptr->limit_set_min_nodes != ADMIN_SET_LIMIT)
 		    && qos_ptr->grp_nodes != INFINITE) {
 			if (job_ptr->details->min_nodes > qos_ptr->grp_nodes) {
@@ -922,7 +1087,7 @@ extern bool acct_policy_job_runnable(struct job_record *job_ptr)
 			     job_ptr->details->min_nodes) >
 			    qos_ptr->grp_nodes) {
 				job_ptr->state_reason =
-					WAIT_ASSOC_RESOURCE_LIMIT;
+					WAIT_QOS_RESOURCE_LIMIT;
 				xfree(job_ptr->state_desc);
 				debug2("job %u being held, "
 				       "the job is at or exceeds "
@@ -943,7 +1108,7 @@ extern bool acct_policy_job_runnable(struct job_record *job_ptr)
 
 		if ((qos_ptr->grp_wall != INFINITE)
 		    && (wall_mins >= qos_ptr->grp_wall)) {
-			job_ptr->state_reason = WAIT_ASSOC_JOB_LIMIT;
+			job_ptr->state_reason = WAIT_QOS_JOB_LIMIT;
 			xfree(job_ptr->state_desc);
 			debug2("job %u being held, "
 			       "the job is at or exceeds "
@@ -962,7 +1127,7 @@ extern bool acct_policy_job_runnable(struct job_record *job_ptr)
 			    (job_cpu_time_limit > cpu_time_limit)) {
 				info("job %u being cancelled, "
 				     "cpu time limit %"PRIu64" exceeds "
-				     "qos max per job %"PRIu64"",
+				     "qos max per-job %"PRIu64"",
 				     job_ptr->job_id,
 				     job_cpu_time_limit,
 				     cpu_time_limit);
@@ -978,7 +1143,7 @@ extern bool acct_policy_job_runnable(struct job_record *job_ptr)
 			    qos_ptr->max_cpus_pj) {
 				info("job %u being cancelled, "
 				     "min cpu limit %u exceeds "
-				     "qos max %u",
+				     "qos per-job max %u",
 				     job_ptr->job_id,
 				     job_ptr->details->min_cpus,
 				     qos_ptr->max_cpus_pj);
@@ -988,22 +1153,61 @@ extern bool acct_policy_job_runnable(struct job_record *job_ptr)
 			}
 		}
 
-		if (qos_ptr->max_jobs_pu != INFINITE) {
-			slurmdb_used_limits_t *used_limits = NULL;
-			if(qos_ptr->usage->user_limit_list) {
-				ListIterator itr = list_iterator_create(
-					qos_ptr->usage->user_limit_list);
-				while((used_limits = list_next(itr))) {
-					if(used_limits->uid == job_ptr->user_id)
-						break;
-				}
-				list_iterator_destroy(itr);
+		if ((job_ptr->limit_set_min_cpus != ADMIN_SET_LIMIT) &&
+		    (qos_ptr->max_cpus_pu != INFINITE)) {
+			/* Cancel the job if it exceeds the per-user
+			 * CPU limit for the given QOS
+			 */
+			if(job_ptr->details->min_cpus >
+			   qos_ptr->max_cpus_pu) {
+				info("job %u being cancelled, "
+				     "min cpu limit %u exceeds "
+				     "qos per-user max %u",
+				     job_ptr->job_id,
+				     job_ptr->details->min_cpus,
+				     qos_ptr->max_cpus_pu);
+				cancel_job = 1;
+				rc = false;
+				goto end_it;
 			}
-			if(used_limits && (used_limits->jobs
-					   >= qos_ptr->max_jobs_pu)) {
+			/* Hold the job if the user has exceeded
+			 * the QOS per-user CPU limit with their
+			 * current usage */
+			if (!used_limits)
+				used_limits = _get_used_limits_for_user(
+					qos_ptr->usage->user_limit_list,
+					job_ptr->user_id);
+			if (used_limits && (used_limits->cpus
+					    >= qos_ptr->max_cpus_pu)) {
+				job_ptr->state_reason =
+					WAIT_QOS_RESOURCE_LIMIT;
 				debug2("job %u being held, "
 				       "the job is at or exceeds "
-				       "max jobs limit %u with %u for QOS %s",
+				       "max cpus per-user limit "
+				       "%u with %u for QOS %s",
+				       job_ptr->job_id,
+				       qos_ptr->max_cpus_pu,
+				       used_limits->cpus, qos_ptr->name);
+				rc = false;
+				goto end_it;
+			}
+		}
+
+
+		if (qos_ptr->max_jobs_pu != INFINITE) {
+			if (!used_limits)
+				used_limits = _get_used_limits_for_user(
+					qos_ptr->usage->user_limit_list,
+					job_ptr->user_id);
+
+			if (used_limits && (used_limits->jobs
+					    >= qos_ptr->max_jobs_pu)) {
+				job_ptr->state_reason =
+					WAIT_QOS_RESOURCE_LIMIT;
+				debug2("job %u being held, "
+				       "the job is at or exceeds "
+				       "max jobs per-user limit "
+				       "%u with %u for QOS %s",
 				       job_ptr->job_id,
 				       qos_ptr->max_jobs_pu,
 				       used_limits->jobs, qos_ptr->name);
@@ -1028,6 +1232,49 @@ extern bool acct_policy_job_runnable(struct job_record *job_ptr)
 			}
 		}
 
+		if ((job_ptr->limit_set_min_nodes != ADMIN_SET_LIMIT) &&
+		    (qos_ptr->max_nodes_pu != INFINITE)) {
+			/* Cancel the job if it exceeds the per-user
+			 * node limit for the given QOS
+			 */
+			if (job_ptr->details->min_nodes >
+			    qos_ptr->max_nodes_pu) {
+				info("job %u being cancelled, "
+				     "min node per-user limit %u exceeds "
+				     "qos max %u",
+				     job_ptr->job_id,
+				     job_ptr->details->min_nodes,
+				     qos_ptr->max_nodes_pu);
+				cancel_job = 1;
+				rc = false;
+				goto end_it;
+			}
+
+			/*
+			 * Hold the job if the user has exceeded
+			 * the QOS per-user node limit with their
+			 * current usage
+			 */
+			if (!used_limits)
+				used_limits = _get_used_limits_for_user(
+					qos_ptr->usage->user_limit_list,
+					job_ptr->user_id);
+			if (used_limits && (used_limits->nodes
+					    >= qos_ptr->max_nodes_pu)) {
+				job_ptr->state_reason =
+					WAIT_QOS_RESOURCE_LIMIT;
+				debug2("job %u being held, "
+				       "the job is at or exceeds "
+				       "max nodes per-user "
+				       "limit %u with %u for QOS %s",
+				       job_ptr->job_id,
+				       qos_ptr->max_nodes_pu,
+				       used_limits->nodes, qos_ptr->name);
+				rc = false;
+				goto end_it;
+			}
+		}
+
 		/* we don't need to check submit_jobs_pu here */
 
 		/* if the qos limits have changed since job
@@ -1054,6 +1301,8 @@ extern bool acct_policy_job_runnable(struct job_record *job_ptr)
 	while(assoc_ptr) {
 		usage_mins = (uint64_t)(assoc_ptr->usage->usage_raw / 60.0);
 		wall_mins = assoc_ptr->usage->grp_used_wall / 60;
+		cpu_run_mins = assoc_ptr->usage->grp_used_cpu_run_secs / 60;
+
 #if _DEBUG
 		info("acct_job_limits: %u of %u",
 		     assoc_ptr->usage->used_jobs, assoc_ptr->max_jobs);
@@ -1132,6 +1381,30 @@ extern bool acct_policy_job_runnable(struct job_record *job_ptr)
 			goto end_it;
 		}
 
+		if ((!qos_ptr ||
+		     (qos_ptr && qos_ptr->grp_cpu_run_mins == INFINITE))
+		    && (assoc_ptr->grp_cpu_run_mins != INFINITE)) {
+			if (cpu_run_mins + job_cpu_time_limit >
+			    assoc_ptr->grp_cpu_run_mins) {
+				job_ptr->state_reason =
+					WAIT_ASSOC_RESOURCE_LIMIT;
+				xfree(job_ptr->state_desc);
+				debug2("job %u being held, "
+				       "assoc %u is at or exceeds "
+				       "group max running cpu minutes "
+				       "limit %"PRIu64" with already "
+				       "used %"PRIu64" + requested %"PRIu64" "
+				       "for account %s",
+				       job_ptr->job_id, assoc_ptr->id,
+				       assoc_ptr->grp_cpu_run_mins,
+				       cpu_run_mins,
+				       job_cpu_time_limit,
+				       assoc_ptr->acct);
+				rc = false;
+				goto end_it;
+			}
+		}
+
 		if ((job_ptr->limit_set_min_nodes != ADMIN_SET_LIMIT)
 		    && (!qos_ptr ||
 			(qos_ptr && qos_ptr->grp_nodes == INFINITE))
@@ -1454,10 +1727,13 @@ extern bool acct_policy_node_usable(struct job_record *job_ptr,
 		return true;
 
 	/* clear old state reason */
-        if ((job_ptr->state_reason == WAIT_ASSOC_JOB_LIMIT) ||
+	if ((job_ptr->state_reason == WAIT_ASSOC_JOB_LIMIT) ||
 	    (job_ptr->state_reason == WAIT_ASSOC_RESOURCE_LIMIT) ||
-	    (job_ptr->state_reason == WAIT_ASSOC_TIME_LIMIT))
-                job_ptr->state_reason = WAIT_NO_REASON;
+	    (job_ptr->state_reason == WAIT_ASSOC_TIME_LIMIT) ||
+	    (job_ptr->state_reason == WAIT_QOS_JOB_LIMIT) ||
+	    (job_ptr->state_reason == WAIT_QOS_RESOURCE_LIMIT) ||
+	    (job_ptr->state_reason == WAIT_QOS_TIME_LIMIT))
+		job_ptr->state_reason = WAIT_NO_REASON;
 
 
 	assoc_mgr_lock(&locks);
diff --git a/src/slurmctld/acct_policy.h b/src/slurmctld/acct_policy.h
index 3a1d9993c..e339d0f74 100644
--- a/src/slurmctld/acct_policy.h
+++ b/src/slurmctld/acct_policy.h
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/slurmctld/agent.c b/src/slurmctld/agent.c
index 2c4d94f24..dbfb6b804 100644
--- a/src/slurmctld/agent.c
+++ b/src/slurmctld/agent.c
@@ -10,7 +10,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -207,7 +207,7 @@ static bool wiki2_sched_test = false;
  *	across a set of nodes. Use agent_queue_request() if immediate
  *	execution is not essential.
  * IN pointer to agent_arg_t, which is xfree'd (including hostlist,
- *	and msg_args) upon completion if AGENT_IS_THREAD is set
+ *	and msg_args) upon completion
  * RET always NULL (function format just for use as pthread)
  */
 void *agent(void *args)
@@ -330,9 +330,7 @@ void *agent(void *args)
 	slurm_mutex_unlock(&agent_info_ptr->thread_mutex);
 
       cleanup:
-#if AGENT_IS_THREAD
 	_purge_agent_args(agent_arg_ptr);
-#endif
 
 	if (agent_info_ptr) {
 		xfree(agent_info_ptr->thread_struct);
@@ -377,7 +375,7 @@ static int _valid_agent_arg(agent_arg_t *agent_arg_ptr)
 
 static agent_info_t *_make_agent_info(agent_arg_t *agent_arg_ptr)
 {
-	int i = 0, j=0;
+	int i = 0, j = 0;
 	agent_info_t *agent_info_ptr = NULL;
 	thd_t *thread_ptr = NULL;
 	int *span = NULL;
@@ -404,14 +402,20 @@ static agent_info_t *_make_agent_info(agent_arg_t *agent_arg_ptr)
 	    (agent_arg_ptr->msg_type != SRUN_EXEC)		&&
 	    (agent_arg_ptr->msg_type != SRUN_TIMEOUT)		&&
 	    (agent_arg_ptr->msg_type != SRUN_NODE_FAIL)		&&
+	    (agent_arg_ptr->msg_type != SRUN_REQUEST_SUSPEND)	&&
 	    (agent_arg_ptr->msg_type != SRUN_USER_MSG)		&&
 	    (agent_arg_ptr->msg_type != SRUN_STEP_MISSING)	&&
 	    (agent_arg_ptr->msg_type != SRUN_JOB_COMPLETE)) {
+#ifdef HAVE_FRONT_END
+		span = set_span(agent_arg_ptr->node_count,
+				agent_arg_ptr->node_count);
+#else
 		/* Sending message to a possibly large number of slurmd.
 		 * Push all message forwarding to slurmd in order to
 		 * offload as much work from slurmctld as possible. */
-		agent_info_ptr->get_reply = true;
 		span = set_span(agent_arg_ptr->node_count, 1);
+#endif
+		agent_info_ptr->get_reply = true;
 	} else {
 		/* Message is going to one node (for srun) or we want
 		 * it to get processed ASAP (SHUTDOWN or RECONFIGURE).
@@ -436,9 +440,9 @@ static agent_info_t *_make_agent_info(agent_arg_t *agent_arg_ptr)
 		}
 		free(name);
 		i++;
-		for(j = 0; j < span[thr_count]; j++) {
+		for (j = 0; j < span[thr_count]; j++) {
 			name = hostlist_shift(agent_arg_ptr->hostlist);
-			if(!name)
+			if (!name)
 				break;
 			hostlist_push(hl, name);
 			free(name);
@@ -449,7 +453,8 @@ static agent_info_t *_make_agent_info(agent_arg_t *agent_arg_ptr)
 			hostlist_ranged_string_xmalloc(hl);
 		hostlist_destroy(hl);
 #if 0
-		info("sending to nodes %s", thread_ptr[thr_count].nodelist);
+		info("sending msg_type %u to nodes %s",
+		     agent_arg_ptr->msg_type, thread_ptr[thr_count].nodelist);
 #endif
 		thr_count++;
 	}
@@ -593,7 +598,6 @@ static void *_wdog(void *args)
 
 static void _notify_slurmctld_jobs(agent_info_t *agent_ptr)
 {
-#if AGENT_IS_THREAD
 	/* Locks: Write job */
 	slurmctld_lock_t job_write_lock =
 	    { NO_LOCK, WRITE_LOCK, NO_LOCK, NO_LOCK };
@@ -631,9 +635,6 @@ static void _notify_slurmctld_jobs(agent_info_t *agent_ptr)
 	}
 
 	unlock_slurmctld(job_write_lock);
-#else
-	fatal("Code development needed here if agent is not thread");
-#endif
 }
 
 static void _notify_slurmctld_nodes(agent_info_t *agent_ptr,
@@ -643,18 +644,14 @@ static void _notify_slurmctld_nodes(agent_info_t *agent_ptr,
 	ret_data_info_t *ret_data_info = NULL;
 	state_t state;
 	int is_ret_list = 1;
-
-#if AGENT_IS_THREAD
 	/* Locks: Read config, write job, write node */
 	slurmctld_lock_t node_write_lock =
 	    { READ_LOCK, WRITE_LOCK, WRITE_LOCK, NO_LOCK };
-#endif
 	thd_t *thread_ptr = agent_ptr->thread_struct;
 	int i;
 
 	/* Notify slurmctld of non-responding nodes */
 	if (no_resp_cnt) {
-#if AGENT_IS_THREAD
 		/* Update node table data for non-responding nodes */
 		lock_slurmctld(node_write_lock);
 		if (agent_ptr->msg_type == REQUEST_BATCH_JOB_LAUNCH) {
@@ -665,18 +662,15 @@ static void _notify_slurmctld_nodes(agent_info_t *agent_ptr,
 			job_complete(job_id, 0, true, false, 0);
 		}
 		unlock_slurmctld(node_write_lock);
-#else
-		fatal("Code development needed here if agent is not thread");
-#endif
 	}
 	if (retry_cnt && agent_ptr->retry)
 		_queue_agent_retry(agent_ptr, retry_cnt);
 
 	/* Update last_response on responding nodes */
-#if AGENT_IS_THREAD
 	lock_slurmctld(node_write_lock);
 	for (i = 0; i < agent_ptr->thread_count; i++) {
-		if(!thread_ptr[i].ret_list) {
+		char *down_msg, *node_names;
+		if (!thread_ptr[i].ret_list) {
 			state = thread_ptr[i].state;
 			is_ret_list = 0;
 			goto switch_on_state;
@@ -684,52 +678,52 @@ static void _notify_slurmctld_nodes(agent_info_t *agent_ptr,
 		is_ret_list = 1;
 
 		itr = list_iterator_create(thread_ptr[i].ret_list);
-		while((ret_data_info = list_next(itr))) {
+		while ((ret_data_info = list_next(itr))) {
 			state = ret_data_info->err;
 		switch_on_state:
 			switch(state) {
 			case DSH_NO_RESP:
-				if(!is_ret_list) {
+				if (!is_ret_list) {
 					node_not_resp(thread_ptr[i].nodelist,
 						      thread_ptr[i].
 						      start_time);
-					break;
+				} else {
+					node_not_resp(ret_data_info->node_name,
+						      thread_ptr[i].start_time);
 				}
-
-				node_not_resp(ret_data_info->node_name,
-					      thread_ptr[i].start_time);
 				break;
 			case DSH_FAILED:
-#ifdef HAVE_BG
-				error("Prolog/epilog failure");
+				if (is_ret_list)
+					node_names = ret_data_info->node_name;
+				else
+					node_names = thread_ptr[i].nodelist;
+#ifdef HAVE_FRONT_END
+				down_msg = "";
 #else
-				if(!is_ret_list) {
-					set_node_down(thread_ptr[i].nodelist,
-						      "Prolog/epilog failure");
-					break;
-				}
-				set_node_down(ret_data_info->node_name,
-					      "Prolog/epilog failure");
+				set_node_down(node_names,
+					      "Prolog/Epilog failure");
+				down_msg = ", set to state DOWN";
 #endif
+				error("Prolog/Epilog failure on nodes %s%s",
+				      node_names, down_msg);
 				break;
 			case DSH_DONE:
-				if(!is_ret_list) {
+				if (!is_ret_list)
 					node_did_resp(thread_ptr[i].nodelist);
-					break;
-				}
-				node_did_resp(ret_data_info->node_name);
+				else
+					node_did_resp(ret_data_info->node_name);
 				break;
 			default:
-				if(!is_ret_list) {
+				if (!is_ret_list) {
 					error("unknown state returned for %s",
 					      thread_ptr[i].nodelist);
-					break;
+				} else {
+					error("unknown state returned for %s",
+					      ret_data_info->node_name);
 				}
-				error("unknown state returned for %s",
-				      ret_data_info->node_name);
 				break;
 			}
-			if(!is_ret_list)
+			if (!is_ret_list)
 				goto finished;
 		}
 		list_iterator_destroy(itr);
@@ -748,9 +742,6 @@ finished:	;
 	    (agent_ptr->msg_type == REQUEST_HEALTH_CHECK) ||
 	    (agent_ptr->msg_type == REQUEST_NODE_REGISTRATION_STATUS))
 		ping_end();
-#else
-	fatal("Code development needed here if agent is not thread");
-#endif
 }
 
 /* Report a communications error for specified node
@@ -758,9 +749,8 @@ finished:	;
 static inline int _comm_err(char *node_name, slurm_msg_type_t msg_type)
 {
 	int rc = 1;
-#if AGENT_IS_THREAD
+
 	if ((rc = is_node_resp (node_name)))
-#endif
 		verbose("agent/is_node_resp: node:%s rpc:%d : %m",
 			node_name, msg_type);
 	return rc;
@@ -812,14 +802,11 @@ static void *_thread_per_group_rpc(void *args)
 	List ret_list = NULL;
 	ListIterator itr;
 	ret_data_info_t *ret_data_info = NULL;
-	int found = 0;
 	int sig_array[2] = {SIGUSR1, 0};
-
-#if AGENT_IS_THREAD
 	/* Locks: Write job, write node */
 	slurmctld_lock_t job_write_lock = {
 		NO_LOCK, WRITE_LOCK, WRITE_LOCK, NO_LOCK };
-#endif
+
 	xassert(args != NULL);
 	xsignal(SIGUSR1, _sig_handler);
 	xsignal_unblock(sig_array);
@@ -895,12 +882,10 @@ static void *_thread_per_group_rpc(void *args)
 	}
 
 	//info("got %d messages back", list_count(ret_list));
-	found = 0;
 	itr = list_iterator_create(ret_list);
 	while ((ret_data_info = list_next(itr)) != NULL) {
 		rc = slurm_get_return_code(ret_data_info->type,
 					   ret_data_info->data);
-#if AGENT_IS_THREAD
 		/* SPECIAL CASE: Mark node as IDLE if job already
 		   complete */
 		if (is_kill_msg &&
@@ -934,8 +919,6 @@ static void *_thread_per_group_rpc(void *args)
 			unlock_slurmctld(job_write_lock);
 			continue;
 		}
-#endif
-
 
 		if (((msg_type == REQUEST_SIGNAL_TASKS) ||
 		     (msg_type == REQUEST_TERMINATE_TASKS)) &&
@@ -946,8 +929,8 @@ static void *_thread_per_group_rpc(void *args)
 
 		switch (rc) {
 		case SLURM_SUCCESS:
-			/*debug3("agent processed RPC to node %s",
-			  ret_data_info->node_name); */
+			/* debug("agent processed RPC to node %s", */
+			/*       ret_data_info->node_name); */
 			thread_state = DSH_DONE;
 			break;
 		case SLURM_UNKNOWN_FORWARD_ADDR:
@@ -965,9 +948,6 @@ static void *_thread_per_group_rpc(void *args)
 			thread_state = DSH_FAILED;
 			break;
 		case ESLURMD_PROLOG_FAILED:
-			error("Prolog failure on host %s, "
-			      "setting DOWN",
-			      ret_data_info->node_name);
 			thread_state = DSH_FAILED;
 			break;
 		case ESLURM_INVALID_JOB_ID:
@@ -1537,10 +1517,8 @@ extern void mail_job_info (struct job_record *job_ptr, uint16_t mail_type)
  */
 static int _batch_launch_defer(queued_request_t *queued_req_ptr)
 {
-	char *hostname;
 	agent_arg_t *agent_arg_ptr;
 	batch_job_launch_msg_t *launch_msg_ptr;
-	struct node_record *node_ptr;
 	time_t now = time(NULL);
 	struct job_record  *job_ptr;
 	int delay_time, nodes_ready = 0;
@@ -1567,6 +1545,12 @@ static int _batch_launch_defer(queued_request_t *queued_req_ptr)
 	if (job_ptr->wait_all_nodes) {
 		(void) job_node_ready(launch_msg_ptr->job_id, &nodes_ready);
 	} else {
+#ifdef HAVE_FRONT_END
+		nodes_ready = 1;
+#else
+		struct node_record *node_ptr;
+		char *hostname;
+
 		hostname = hostlist_deranged_string_xmalloc(
 					agent_arg_ptr->hostlist);
 		node_ptr = find_node_record(hostname);
@@ -1582,6 +1566,7 @@ static int _batch_launch_defer(queued_request_t *queued_req_ptr)
 		    !IS_NODE_NO_RESPOND(node_ptr)) {
 			nodes_ready = 1;
 		}
+#endif
 	}
 
 	delay_time = difftime(now, job_ptr->start_time);
diff --git a/src/slurmctld/agent.h b/src/slurmctld/agent.h
index a186791f6..d663196e3 100644
--- a/src/slurmctld/agent.h
+++ b/src/slurmctld/agent.h
@@ -10,7 +10,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -44,8 +44,6 @@
 
 #include "src/slurmctld/slurmctld.h"
 
-#define AGENT_IS_THREAD  	 1	/* set if agent itself a thread of
-					 * slurmctld, 0 for function call */
 #define AGENT_THREAD_COUNT	10	/* maximum active threads per agent */
 #define COMMAND_TIMEOUT 	30	/* command requeue or error, seconds */
 #define MAX_AGENT_CNT		(MAX_SERVER_THREADS / (AGENT_THREAD_COUNT + 2))
diff --git a/src/slurmctld/backup.c b/src/slurmctld/backup.c
index 0bef3fc94..aa1a86cfa 100644
--- a/src/slurmctld/backup.c
+++ b/src/slurmctld/backup.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -53,7 +53,7 @@
 #include <sys/resource.h>
 #include <sys/stat.h>
 
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm_errno.h"
 
 #include "src/common/daemonize.h"
 #include "src/common/log.h"
@@ -179,15 +179,15 @@ void run_backup(void)
 		}
 	}
 
-	/* Since pidfile is created as user root (its owner is
-	 *   changed to SlurmUser) SlurmUser may not be able to
-	 *   remove it, so this is not necessarily an error.
-	 * No longer need slurmctld_conf lock after above join. */
-	if (unlink(slurmctld_conf.slurmctld_pidfile) < 0)
-		verbose("Unable to remove pidfile '%s': %m",
-			slurmctld_conf.slurmctld_pidfile);
-
 	if (slurmctld_config.shutdown_time != 0) {
+		/* Since pidfile is created as user root (its owner is
+		 *   changed to SlurmUser) SlurmUser may not be able to
+		 *   remove it, so this is not necessarily an error.
+		 * No longer need slurmctld_conf lock after above join. */
+		if (unlink(slurmctld_conf.slurmctld_pidfile) < 0)
+			verbose("Unable to remove pidfile '%s': %m",
+				slurmctld_conf.slurmctld_pidfile);
+
 		info("BackupController terminating");
 		pthread_join(slurmctld_config.thread_id_sig, NULL);
 		log_fini();
@@ -533,9 +533,10 @@ static void _trigger_slurmctld_event(uint32_t trig_type)
 	ti.res_type = TRIGGER_RES_TYPE_SLURMCTLD;
 	ti.trig_type = trig_type;
 	if (slurm_pull_trigger(&ti)) {
-		error("error from _trigger_slurmctld_event in backup.c");
+		error("_trigger_slurmctld_event %u failure in backup.c: %m",
+		      trig_type);
 		return;
 	}
-	verbose("trigger pulled for SLURMCTLD event successful");
+	verbose("trigger pulled for SLURMCTLD event %u successful", trig_type);
 	return;
 }
diff --git a/src/slurmctld/basil_interface.c b/src/slurmctld/basil_interface.c
deleted file mode 100644
index ec558fae6..000000000
--- a/src/slurmctld/basil_interface.c
+++ /dev/null
@@ -1,184 +0,0 @@
-/*****************************************************************************\
- *  basil_interface.c - slurmctld interface to BASIL, Cray's Batch Application
- *	Scheduler Interface Layer (BASIL). In order to support development,
- *	these functions will provide basic BASIL-like functionality even
- *	without a BASIL command being present.
- *****************************************************************************
- *  Copyright (C) 2009 Lawrence Livermore National Security.
- *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
- *  Written by Morris Jette <jette1@llnl.gov>
- *  CODE-OCEC-09-009. All rights reserved.
- *
- *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
- *  Please also read the included file: DISCLAIMER.
- *
- *  SLURM is free software; you can redistribute it and/or modify it under
- *  the terms of the GNU General Public License as published by the Free
- *  Software Foundation; either version 2 of the License, or (at your option)
- *  any later version.
- *
- *  In addition, as a special exception, the copyright holders give permission
- *  to link the code of portions of this program with the OpenSSL library under
- *  certain conditions as described in each individual source file, and
- *  distribute linked combinations including the two. You must obey the GNU
- *  General Public License in all respects for all of the code used other than
- *  OpenSSL. If you modify file(s) with this exception, you may extend this
- *  exception to your version of the file(s), but you are not obligated to do
- *  so. If you do not wish to do so, delete this exception statement from your
- *  version.  If you delete this exception statement from all source files in
- *  the program, then also delete it here.
- *
- *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
- *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
- *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
- *  details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with SLURM; if not, write to the Free Software Foundation, Inc.,
- *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
-\*****************************************************************************/
-
-/* FIXME: Document, ALPS must be started before SLURM */
-/* FIXME: Document BASIL_RESERVATION_ID env var */
-
-#if HAVE_CONFIG_H
-#  include "config.h"
-#endif	/* HAVE_CONFIG_H */
-
-#include <slurm/slurm_errno.h>
-#include <stdlib.h>
-#include <string.h>
-
-#include "src/common/log.h"
-#include "src/common/node_select.h"
-#include "src/common/xmalloc.h"
-#include "src/common/xstring.h"
-#include "src/slurmctld/basil_interface.h"
-#include "src/slurmctld/slurmctld.h"
-
-#define BASIL_DEBUG 1
-
-#ifdef HAVE_CRAY
-
-/* Make sure that each SLURM node has a BASIL node ID */
-static void _validate_basil_node_id(void)
-{
-	int i;
-	struct node_record *node_ptr = node_record_table_ptr;
-
-	for (i = 0;  i < node_record_count; i++, node_ptr++) {
-		if (node_ptr->basil_node_id != NO_VAL)
-			continue;
-		if (IS_NODE_DOWN(node_ptr))
-			continue;
-
-		error("Node %s has no basil node_id", node_ptr->name);
-		last_node_update = time(NULL);
-		set_node_down(node_ptr->name, "No BASIL node_id");
-	}
-}
-#endif	/* HAVE_CRAY */
-
-/*
- * basil_query - Query BASIL for node and reservation state.
- * Execute once at slurmctld startup and periodically thereafter.
- * RET 0 or error code
- */
-extern int basil_query(void)
-{
-	int error_code = SLURM_SUCCESS;
-#ifdef HAVE_CRAY
-	struct node_record *node_ptr;
-	int i;
-	static bool first_run = true;
-
-	/*
-	 * Issue the BASIL INVENTORY QUERY
-	 * FIXME: Still to be done,
-	 *        return SLURM_ERROR on failure
-	 */
-	debug("basil query initiated");
-
-	if (first_run) {
-		/* Set basil_node_id to NO_VAL since the default value
-		 * of zero is a valid BASIL node ID */
-		node_ptr = node_record_table_ptr;
-		for (i = 0; i < node_record_count; i++, node_ptr++)
-			node_ptr->basil_node_id = NO_VAL;
-		first_run = false;
-	}
-
-	/* Validate configuration for each node that BASIL reports: TBD */
-	_validate_basil_node_id();
-
-	/*
-	 * Confirm that each BASIL reservation is still valid,
-	 * iterate through each current ALPS reservation,
-	 * purge vestigial reservations.
-	 * FIXME: still to be done
-	 */
-#endif	/* HAVE_CRAY */
-
-	return error_code;
-}
-
-/*
- * basil_reserve - create a BASIL reservation.
- * IN job_ptr - pointer to job which has just been allocated resources
- * RET 0 or error code, job will abort or be requeued on failure
- */
-extern int basil_reserve(struct job_record *job_ptr)
-{
-	int error_code = SLURM_SUCCESS;
-#ifdef HAVE_CRAY
-	uint32_t reservation_id;
-
-	/*
-	 * Issue the BASIL RESERVE request
-	 * FIXME: still to be done, return SLURM_ERROR on error.
-	 */
-	select_g_select_jobinfo_set(job_ptr->select_jobinfo,
-				    SELECT_JOBDATA_RESV_ID, &reservation_id);
-	debug("basil reservation made job_id=%u resv_id=%u",
-	      job_ptr->job_id, reservation_id);
-#endif	/* HAVE_CRAY */
-	return error_code;
-}
-
-/*
- * basil_release - release a BASIL reservation by job.
- * IN job_ptr - pointer to job which has just been deallocated resources
- * RET 0 or error code
- */
-extern int basil_release(struct job_record *job_ptr)
-{
-	int error_code = SLURM_SUCCESS;
-#ifdef HAVE_CRAY
-	uint32_t reservation_id = 0;
-	select_g_select_jobinfo_get(job_ptr->select_jobinfo,
-				    SELECT_JOBDATA_RESV_ID, &reservation_id);
-	if (reservation_id)
-		error_code = basil_release_id(reservation_id);
-
-#endif	/* HAVE_CRAY */
-	return error_code;
-}
-
-/*
- * basil_release_id - release a BASIL reservation by ID.
- * IN reservation_id - ID of reservation to release
- * RET 0 or error code
- */
-extern int basil_release_id(uint32_t reservation_id)
-{
-	int error_code = SLURM_SUCCESS;
-#ifdef HAVE_CRAY
-	/*
-	 * Issue the BASIL RELEASE request
-	 * FIXME: still to be done, return SLURM_ERROR on error.
-	 */
-	debug("basil release of reservation %d complete", reservation_id);
-#endif	/* HAVE_CRAY */
-	return error_code;
-}
diff --git a/src/slurmctld/basil_interface.h b/src/slurmctld/basil_interface.h
deleted file mode 100644
index fa296d43f..000000000
--- a/src/slurmctld/basil_interface.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/*****************************************************************************\
- *  basil_interface.h - slurmctld interface to BASIL, Cray's Batch Application
- *	Scheduler Interface Layer (BASIL)
- *****************************************************************************
- *  Copyright (C) 2009 Lawrence Livermore National Security.
- *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
- *  Written by Morris Jette <jette1@llnl.gov>
- *  CODE-OCEC-09-009. All rights reserved.
- *
- *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
- *  Please also read the included file: DISCLAIMER.
- *
- *  SLURM is free software; you can redistribute it and/or modify it under
- *  the terms of the GNU General Public License as published by the Free
- *  Software Foundation; either version 2 of the License, or (at your option)
- *  any later version.
- *
- *  In addition, as a special exception, the copyright holders give permission
- *  to link the code of portions of this program with the OpenSSL library under
- *  certain conditions as described in each individual source file, and
- *  distribute linked combinations including the two. You must obey the GNU
- *  General Public License in all respects for all of the code used other than
- *  OpenSSL. If you modify file(s) with this exception, you may extend this
- *  exception to your version of the file(s), but you are not obligated to do
- *  so. If you do not wish to do so, delete this exception statement from your
- *  version.  If you delete this exception statement from all source files in
- *  the program, then also delete it here.
- *
- *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
- *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
- *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
- *  details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with SLURM; if not, write to the Free Software Foundation, Inc.,
- *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
-\*****************************************************************************/
-
-#ifndef _HAVE_BASIL_INTERFACE_H
-#define _HAVE_BASIL_INTERFACE_H
-
-#include "src/slurmctld/slurmctld.h"
-
-/*
- * basil_query - Query BASIL for node and reservation state.
- * Execute once at slurmctld startup and periodically thereafter.
- * RET 0 or error code
- */
-extern int basil_query(void);
-
-/*
- * basil_reserve - create a BASIL reservation.
- * IN job_ptr - pointer to job which has just been allocated resources
- * RET 0 or error code
- */
-extern int basil_reserve(struct job_record *job_ptr);
-
-/*
- * basil_release - release a BASIL reservation by job.
- * IN job_ptr - pointer to job which has just been deallocated resources
- * RET 0 or error code
- */
-extern int basil_release(struct job_record *job_ptr);
-
-/*
- * basil_release_id - release a BASIL reservation by ID.
- * IN reservation_id - ID of reservation to release
- * RET 0 or error code
- */
-extern int basil_release_id(uint32_t reservation_id);
-
-#endif	/* !_HAVE_BASIL_INTERFACE_H */
diff --git a/src/slurmctld/controller.c b/src/slurmctld/controller.c
index a6390e143..2bb9c79ec 100644
--- a/src/slurmctld/controller.c
+++ b/src/slurmctld/controller.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -58,7 +58,7 @@
 #include <sys/resource.h>
 #include <sys/stat.h>
 
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm_errno.h"
 
 #include "src/common/assoc_mgr.h"
 #include "src/common/checkpoint.h"
@@ -80,13 +80,14 @@
 #include "src/common/slurm_priority.h"
 #include "src/common/slurm_protocol_api.h"
 #include "src/common/switch.h"
+#include "src/common/timers.h"
 #include "src/common/uid.h"
 #include "src/common/xsignal.h"
 #include "src/common/xstring.h"
 
 #include "src/slurmctld/acct_policy.h"
 #include "src/slurmctld/agent.h"
-#include "src/slurmctld/basil_interface.h"
+#include "src/slurmctld/front_end.h"
 #include "src/slurmctld/job_scheduler.h"
 #include "src/slurmctld/job_submit.h"
 #include "src/slurmctld/licenses.h"
@@ -124,8 +125,8 @@
 /**************************************************************************\
  * To test for memory leaks, set MEMORY_LEAK_DEBUG to 1 using
  * "configure --enable-memory-leak-debug" then execute
- * > valgrind --tool=memcheck --leak-check=yes --num-callers=6 \
- *    --leak-resolution=med slurmctld -D
+ * $ valgrind --tool=memcheck --leak-check=yes --num-callers=8 \
+ *   --leak-resolution=med ./slurmctld -Dc >valg.ctld.out 2>&1
  *
  * Then exercise the slurmctld functionality before executing
  * > scontrol shutdown
@@ -352,14 +353,6 @@ int main(int argc, char *argv[])
 		      slurmctld_conf.accounting_storage_type);
 	}
 
-	callbacks.acct_full   = trigger_primary_ctld_acct_full;
-	callbacks.dbd_fail    = trigger_primary_dbd_fail;
-	callbacks.dbd_resumed = trigger_primary_dbd_res_op;
-	callbacks.db_fail     = trigger_primary_db_fail;
-	callbacks.db_resumed  = trigger_primary_db_res_op;
-	acct_db_conn = acct_storage_g_get_connection(&callbacks, 0, false,
-						     slurmctld_cluster_name);
-
 	memset(&assoc_init_arg, 0, sizeof(assoc_init_args_t));
 	assoc_init_arg.enforce = accounting_enforce;
 	assoc_init_arg.update_resvs = update_assocs_in_resvs;
@@ -373,7 +366,14 @@ int main(int argc, char *argv[])
 	if (slurmctld_conf.track_wckey)
 		assoc_init_arg.cache_level |= ASSOC_MGR_CACHE_WCKEY;
 
-	if (assoc_mgr_init(acct_db_conn, &assoc_init_arg)) {
+	callbacks.acct_full   = trigger_primary_ctld_acct_full;
+	callbacks.dbd_fail    = trigger_primary_dbd_fail;
+	callbacks.dbd_resumed = trigger_primary_dbd_res_op;
+	callbacks.db_fail     = trigger_primary_db_fail;
+	callbacks.db_resumed  = trigger_primary_db_res_op;
+	acct_db_conn = acct_storage_g_get_connection(&callbacks, 0, false,
+						     slurmctld_cluster_name);
+	if (assoc_mgr_init(acct_db_conn, &assoc_init_arg, errno)) {
 		if (accounting_enforce & ACCOUNTING_ENFORCE_ASSOCS)
 			error("Association database appears down, "
 			      "reading from state file.");
@@ -498,7 +498,7 @@ int main(int argc, char *argv[])
 			 * we call this since we are setting up static
 			 * variables inside the function sending a
 			 * NULL will just use those set before. */
-			if (assoc_mgr_init(acct_db_conn, NULL) &&
+			if (assoc_mgr_init(acct_db_conn, NULL, errno) &&
 			    (accounting_enforce & ACCOUNTING_ENFORCE_ASSOCS) &&
 			    !running_cache) {
 				trigger_primary_dbd_fail();
@@ -585,7 +585,7 @@ int main(int argc, char *argv[])
 
 		if (running_cache) {
 			/* break out and end the association cache
-			 * thread since we are shuting down, no reason
+			 * thread since we are shutting down, no reason
 			 * to wait for current info from the database */
 			slurm_mutex_lock(&assoc_cache_mutex);
 			running_cache = (uint16_t)NO_VAL;
@@ -647,6 +647,7 @@ int main(int argc, char *argv[])
 	job_fini();
 	part_fini();	/* part_fini() must preceed node_fini() */
 	node_fini();
+	purge_front_end_state();
 	resv_fini();
 	trigger_fini();
 	dir_name = slurm_get_state_save_location();
@@ -852,7 +853,6 @@ static void _sig_handler(int signal)
 static void *_slurmctld_rpc_mgr(void *no_data)
 {
 	slurm_fd_t newsockfd;
-	slurm_fd_t maxsockfd;
 	slurm_fd_t *sockfd;	/* our set of socket file descriptors */
 	slurm_addr_t cli_addr, srv_addr;
 	uint16_t port;
@@ -895,7 +895,6 @@ static void *_slurmctld_rpc_mgr(void *no_data)
 	/* initialize ports for RPCs */
 	lock_slurmctld(config_read_lock);
 	nports = slurmctld_conf.slurmctld_port_count;
-	maxsockfd = slurmctld_conf.slurmctld_port + nports;
 	sockfd = xmalloc(sizeof(slurm_fd_t) * nports);
 	for (i=0; i<nports; i++) {
 		sockfd[i] = slurm_init_msg_engine_addrname_port(
@@ -995,7 +994,10 @@ static void *_service_connection(void *arg)
 	slurm_msg_t *msg = xmalloc(sizeof(slurm_msg_t));
 
 	slurm_msg_t_init(msg);
-
+	/*
+	 * slurm_receive_msg sets msg connection fd to accepted fd. This allows
+	 * possibility for slurmctld_req() to close accepted connection.
+	 */
 	if(slurm_receive_msg(conn->newsockfd, msg, 0) != 0) {
 		error("slurm_receive_msg: %m");
 		/* close should only be called when the socket implementation
@@ -1006,9 +1008,6 @@ static void *_service_connection(void *arg)
 		goto cleanup;
 	}
 
-	/* set msg connection fd to accepted fd. This allows
-	 *  possibility for slurmd_req () to close accepted connection
-	 */
 	if(errno != SLURM_SUCCESS) {
 		if (errno == SLURM_PROTOCOL_VERSION_ERROR) {
 			slurm_send_rc_msg(msg, SLURM_PROTOCOL_VERSION_ERROR);
@@ -1121,7 +1120,7 @@ static int _accounting_cluster_ready()
 	*/
 	total_node_bitmap = bit_alloc(node_record_count);
 	bit_nset(total_node_bitmap, 0, node_record_count-1);
-	cluster_nodes = bitmap2node_name(total_node_bitmap);
+	cluster_nodes = bitmap2node_name_sortable(total_node_bitmap, 0);
 	FREE_NULL_BITMAP(total_node_bitmap);
 	unlock_slurmctld(node_read_lock);
 
@@ -1403,9 +1402,6 @@ static void *_slurmctld_background(void *no_data)
 			last_health_check_time = now;
 			lock_slurmctld(node_write_lock);
 			run_health_check();
-#ifdef HAVE_CRAY
-			basil_query();
-#endif
 			unlock_slurmctld(node_write_lock);
 		}
 		if (((difftime(now, last_ping_node_time) >= ping_interval) ||
@@ -1549,6 +1545,7 @@ extern void save_all_state(void)
 	char *save_loc;
 
 	/* Each of these functions lock their own databases */
+	schedule_front_end_save();
 	schedule_job_save();
 	schedule_node_save();
 	schedule_part_save();
@@ -2121,12 +2118,18 @@ static int _ping_backup_controller(void)
 	slurmctld_lock_t config_read_lock = {
 		READ_LOCK, NO_LOCK, NO_LOCK, NO_LOCK };
 
+	lock_slurmctld(config_read_lock);
+	if (!slurmctld_conf.backup_addr) {
+		debug4("No backup slurmctld to ping");
+		unlock_slurmctld(config_read_lock);
+		return SLURM_SUCCESS;
+	}
+
 	/*
 	 *  Set address of controller to ping
 	 */
-	slurm_msg_t_init(&req);
-	lock_slurmctld(config_read_lock);
 	debug3("pinging backup slurmctld at %s", slurmctld_conf.backup_addr);
+	slurm_msg_t_init(&req);
 	slurm_set_addr(&req.address, slurmctld_conf.slurmctld_port,
 		       slurmctld_conf.backup_addr);
 	unlock_slurmctld(config_read_lock);
diff --git a/src/slurmctld/front_end.c b/src/slurmctld/front_end.c
new file mode 100644
index 000000000..df558342f
--- /dev/null
+++ b/src/slurmctld/front_end.c
@@ -0,0 +1,823 @@
+/*****************************************************************************\
+ *  front_end.c - Define front end node functions.
+ *****************************************************************************
+ *  Copyright (C) 2010 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Morris Jette <jette1@llnl.gov>
+ *  CODE-OCEC-09-009. All rights reserved.
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#include <fcntl.h>
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "slurm/slurm.h"
+#include "src/common/list.h"
+#include "src/common/log.h"
+#include "src/common/node_conf.h"
+#include "src/common/read_config.h"
+#include "src/common/slurm_protocol_defs.h"
+#include "src/common/xstring.h"
+#include "src/slurmctld/front_end.h"
+#include "src/slurmctld/locks.h"
+#include "src/slurmctld/slurmctld.h"
+#include "src/slurmctld/state_save.h"
+#include "src/slurmctld/trigger_mgr.h"
+
+/* Change FRONT_END_STATE_VERSION value when changing the state save format */
+#define FRONT_END_STATE_VERSION      "VER001"
+#define FRONT_END_2_2_STATE_VERSION  "VER001"	/* SLURM version 2.2 */
+
+front_end_record_t *front_end_nodes = NULL;
+uint16_t front_end_node_cnt = 0;
+time_t last_front_end_update = (time_t) 0;
+
+#ifdef HAVE_FRONT_END
+/*
+ * _dump_front_end_state - dump state of a specific front_end node to a buffer
+ * IN front_end_ptr - pointer to node for which information is requested
+ * IN/OUT buffer - location to store data, pointers automatically advanced
+ */
+static void _dump_front_end_state(front_end_record_t *front_end_ptr,
+				  Buf buffer)
+{
+	packstr  (front_end_ptr->name, buffer);
+	pack16   (front_end_ptr->node_state, buffer);
+	packstr  (front_end_ptr->reason, buffer);
+	pack_time(front_end_ptr->reason_time, buffer);
+	pack32   (front_end_ptr->reason_uid, buffer);
+}
+
+
+/*
+ * Open the front_end node state save file, or backup if necessary.
+ * state_file IN - the name of the state save file used
+ * RET the file description to read from or error code
+ */
+static int _open_front_end_state_file(char **state_file)
+{
+	int state_fd;
+	struct stat stat_buf;
+
+	*state_file = xstrdup(slurmctld_conf.state_save_location);
+	xstrcat(*state_file, "/front_end_state");
+	state_fd = open(*state_file, O_RDONLY);
+	if (state_fd < 0) {
+		error("Could not open front_end state file %s: %m",
+		      *state_file);
+	} else if (fstat(state_fd, &stat_buf) < 0) {
+		error("Could not stat front_end state file %s: %m",
+		      *state_file);
+		(void) close(state_fd);
+	} else if (stat_buf.st_size < 10) {
+		error("Front_end state file %s too small", *state_file);
+		(void) close(state_fd);
+	} else 	/* Success */
+		return state_fd;
+
+	error("NOTE: Trying backup front_end_state save file. Information may "
+	      "be lost!");
+	xstrcat(*state_file, ".old");
+	state_fd = open(*state_file, O_RDONLY);
+	return state_fd;
+}
+
+/*
+ * _pack_front_end - dump all configuration information about a specific
+ *	front_end node in machine independent form (for network transmission)
+ * IN dump_front_end_ptr - pointer to front_end node for which information is
+ *	requested
+ * IN/OUT buffer - buffer where data is placed, pointers automatically updated
+ * IN protocol_version - slurm protocol version of client
+ * NOTE: if you make any changes here be sure to make the corresponding
+ *	changes to load_front_end_config in api/node_info.c
+ */
+static void _pack_front_end(struct front_end_record *dump_front_end_ptr,
+			    Buf buffer, uint16_t protocol_version)
+{
+	if (protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
+		pack_time(dump_front_end_ptr->boot_time, buffer);
+		packstr(dump_front_end_ptr->name, buffer);
+		pack16(dump_front_end_ptr->node_state, buffer);
+
+		packstr(dump_front_end_ptr->reason, buffer);
+		pack_time(dump_front_end_ptr->reason_time, buffer);
+		pack32(dump_front_end_ptr->reason_uid, buffer);
+
+		pack_time(dump_front_end_ptr->slurmd_start_time, buffer);
+	} else {
+		error("_pack_front_end: Unsupported slurm version %u",
+		      protocol_version);
+	}
+}
+#endif
+
+/*
+ * assign_front_end - assign a front end node for starting a job
+ * RET pointer to the front end node to use or NULL if none available
+ */
+extern front_end_record_t *assign_front_end(void)
+{
+#ifdef HAVE_FRONT_END
+	static int last_assigned = -1;
+	front_end_record_t *front_end_ptr;
+	uint16_t state_flags;
+	int i;
+
+	for (i = 0; i < front_end_node_cnt; i++) {
+		last_assigned = (last_assigned + 1) % front_end_node_cnt;
+		front_end_ptr = front_end_nodes + last_assigned;
+		if (IS_NODE_DOWN(front_end_ptr) ||
+		    IS_NODE_DRAIN(front_end_ptr) ||
+		    IS_NODE_NO_RESPOND(front_end_ptr))
+			continue;
+		state_flags = front_end_nodes[last_assigned].node_state &
+			      NODE_STATE_FLAGS;
+		front_end_nodes[last_assigned].node_state =
+				NODE_STATE_ALLOCATED | state_flags;
+		front_end_nodes[last_assigned].job_cnt_run++;
+		return front_end_ptr;
+	}
+	fatal("assign_front_end: no available front end nodes found");
+#endif
+	return NULL;
+}
+
+/*
+ * avail_front_end - test if any front end nodes are available for starting job
+ */
+extern bool avail_front_end(void)
+{
+#ifdef HAVE_FRONT_END
+	front_end_record_t *front_end_ptr;
+	int i;
+
+	for (i = 0, front_end_ptr = front_end_nodes;
+	     i < front_end_node_cnt; i++, front_end_ptr++) {
+		if (IS_NODE_DOWN(front_end_ptr)  ||
+		    IS_NODE_DRAIN(front_end_ptr) ||
+		    IS_NODE_NO_RESPOND(front_end_ptr))
+			continue;
+		return true;
+	}
+	return false;
+#else
+	return true;
+#endif
+}
+
+/*
+ * Update front end node state
+ * update_front_end_msg_ptr IN change specification
+ * RET SLURM_SUCCESS or error code
+ */
+extern int update_front_end(update_front_end_msg_t *msg_ptr)
+{
+#ifdef HAVE_FRONT_END
+	char  *this_node_name = NULL;
+	hostlist_t host_list;
+	front_end_record_t *front_end_ptr;
+	int i, rc = SLURM_SUCCESS;
+	time_t now = time(NULL);
+
+	if ((host_list = hostlist_create(msg_ptr->name)) == NULL) {
+		error("hostlist_create error on %s: %m", msg_ptr->name);
+		return ESLURM_INVALID_NODE_NAME;
+	}
+
+	last_front_end_update = now;
+	while ((this_node_name = hostlist_shift(host_list))) {
+		for (i = 0, front_end_ptr = front_end_nodes;
+		     i < front_end_node_cnt; i++, front_end_ptr++) {
+			xassert(front_end_ptr->magic == FRONT_END_MAGIC);
+			if (strcmp(this_node_name, front_end_ptr->name))
+				continue;
+			if (msg_ptr->node_state == (uint16_t) NO_VAL) {
+				;	/* No change in node state */
+			} else if (msg_ptr->node_state == NODE_RESUME) {
+				front_end_ptr->node_state = NODE_STATE_IDLE;
+				xfree(front_end_ptr->reason);
+				front_end_ptr->reason_time = 0;
+				front_end_ptr->reason_uid = 0;
+			} else if (msg_ptr->node_state == NODE_STATE_DRAIN) {
+				front_end_ptr->node_state |= NODE_STATE_DRAIN;
+				if (msg_ptr->reason) {
+					xfree(front_end_ptr->reason);
+					front_end_ptr->reason =
+						xstrdup(msg_ptr->reason);
+					front_end_ptr->reason_time = now;
+					front_end_ptr->reason_uid =
+						msg_ptr->reason_uid;
+				}
+			} else if (msg_ptr->node_state == NODE_STATE_DOWN) {
+				set_front_end_down(front_end_ptr,
+						   msg_ptr->reason);
+			}
+			if (msg_ptr->node_state != (uint16_t) NO_VAL) {
+				info("update_front_end: set state of %s to %s",
+				     this_node_name,
+				     node_state_string(front_end_ptr->
+						       node_state));
+			}
+			break;
+		}
+		if (i >= front_end_node_cnt) {
+			info("update_front_end: could not find front end: %s",
+			     this_node_name);
+			rc = ESLURM_INVALID_NODE_NAME;
+		}
+		free(this_node_name);
+	}
+	hostlist_destroy(host_list);     
+
+	return rc;
+#else
+	return ESLURM_INVALID_NODE_NAME;
+#endif
+}
+
+/*
+ * find_front_end_record - find a record for front_endnode with specified name
+ * input: name - name of the desired front_end node
+ * output: return pointer to front_end node record or NULL if not found
+ */
+extern front_end_record_t *find_front_end_record(char *name)
+{
+#ifdef HAVE_FRONT_END
+	front_end_record_t *front_end_ptr;
+	int i;
+
+	for (i = 0, front_end_ptr = front_end_nodes;
+	     i < front_end_node_cnt; i++, front_end_ptr++) {
+		xassert(front_end_ptr->magic == FRONT_END_MAGIC);
+		if (strcmp(front_end_ptr->name, name) == 0)
+			return front_end_ptr;
+	}
+#endif
+	return NULL;
+}
+
+/*
+ * log_front_end_state - log all front end node state
+ */
+extern void log_front_end_state(void)
+{
+#ifdef HAVE_FRONT_END
+	front_end_record_t *front_end_ptr;
+	int i;
+
+	for (i = 0, front_end_ptr = front_end_nodes;
+	     i < front_end_node_cnt; i++, front_end_ptr++) {
+		xassert(front_end_ptr->magic == FRONT_END_MAGIC);
+		info("FrontendName=%s FrontendAddr=%s Port=%u State=%s "
+		     "Reason=%s JobCntRun=%u JobCntComp=%u",
+		     front_end_ptr->name, front_end_ptr->comm_name,
+		     front_end_ptr->port,
+		     node_state_string(front_end_ptr->node_state),
+		     front_end_ptr->reason, front_end_ptr->job_cnt_run,
+		     front_end_ptr->job_cnt_comp);
+	}
+#endif
+}
+
+/*
+ * purge_front_end_state - purge all front end node state
+ */
+extern void purge_front_end_state(void)
+{
+#ifdef HAVE_FRONT_END
+	front_end_record_t *front_end_ptr;
+	int i;
+
+	for (i = 0, front_end_ptr = front_end_nodes;
+	     i < front_end_node_cnt; i++, front_end_ptr++) {
+		xassert(front_end_ptr->magic == FRONT_END_MAGIC);
+		xfree(front_end_ptr->comm_name);
+		xfree(front_end_ptr->name);
+		xfree(front_end_ptr->reason);
+	}
+	xfree(front_end_nodes);
+	front_end_node_cnt = 0;
+#endif
+}
+
+/*
+ * restore_front_end_state - restore frontend node state
+ * IN recover - replace job, node and/or partition data with latest
+ *              available information depending upon value
+ *              0 = use no saved state information, rebuild everything from
+ *		    slurm.conf contents
+ *              1 = recover saved job and trigger state,
+ *                  node DOWN/DRAIN/FAIL state and reason information
+ *              2 = recover all saved state
+ */
+extern void restore_front_end_state(int recover)
+{
+#ifdef HAVE_FRONT_END
+	slurm_conf_frontend_t *slurm_conf_fe_ptr;
+	ListIterator iter;
+	uint16_t state_base, state_flags, tree_width;
+	int i;
+
+	last_front_end_update = time(NULL);
+	if (recover == 0)
+		purge_front_end_state();
+	if (front_end_list == NULL)
+		return;		/* No front ends in slurm.conf */
+
+	iter = list_iterator_create(front_end_list);
+	if (iter == NULL)
+		fatal("list_iterator_create: malloc failure");
+	while ((slurm_conf_fe_ptr = (slurm_conf_frontend_t *)
+				    list_next(iter))) {
+		if (slurm_conf_fe_ptr->frontends == NULL)
+			fatal("FrontendName is NULL");
+		for (i = 0; i < front_end_node_cnt; i++) {
+			if (strcmp(front_end_nodes[i].name,
+				   slurm_conf_fe_ptr->frontends) == 0)
+				break;
+		}
+		if (i >= front_end_node_cnt) {
+			front_end_node_cnt++;
+			xrealloc(front_end_nodes,
+				 sizeof(front_end_record_t) *
+				 front_end_node_cnt);
+			front_end_nodes[i].name =
+				xstrdup(slurm_conf_fe_ptr->frontends);
+			front_end_nodes[i].magic = FRONT_END_MAGIC;
+		}
+		xfree(front_end_nodes[i].comm_name);
+		if (slurm_conf_fe_ptr->addresses) {
+			front_end_nodes[i].comm_name =
+				xstrdup(slurm_conf_fe_ptr->addresses);
+		} else {
+			front_end_nodes[i].comm_name =
+				xstrdup(front_end_nodes[i].name);
+		}
+		state_base  = front_end_nodes[i].node_state & NODE_STATE_BASE;
+		state_flags = front_end_nodes[i].node_state & NODE_STATE_FLAGS;
+		if ((state_base == 0) || (state_base == NODE_STATE_UNKNOWN)) {
+			front_end_nodes[i].node_state =
+				slurm_conf_fe_ptr->node_state | state_flags;
+		}
+		if ((front_end_nodes[i].reason == NULL) &&
+		    (slurm_conf_fe_ptr->reason != NULL)) {
+			front_end_nodes[i].reason =
+				xstrdup(slurm_conf_fe_ptr->reason);
+		}
+		if (slurm_conf_fe_ptr->port)
+			front_end_nodes[i].port = slurm_conf_fe_ptr->port;
+		else
+			front_end_nodes[i].port = slurmctld_conf.slurmd_port;
+		slurm_set_addr(&front_end_nodes[i].slurm_addr,
+			       front_end_nodes[i].port,
+			       front_end_nodes[i].comm_name);
+	}
+	list_iterator_destroy(iter);
+	if (front_end_node_cnt == 0)
+		fatal("No front end nodes defined");
+	tree_width = slurm_get_tree_width();
+	if (front_end_node_cnt > tree_width) {
+		fatal("front_end_node_cnt > tree_width (%u > %u)",
+		      front_end_node_cnt, tree_width);
+	}
+	if (slurm_get_debug_flags() & DEBUG_FLAG_FRONT_END)
+		log_front_end_state();
+#endif
+}
+
+/*
+ * pack_all_front_end - dump all front_end node information for all nodes
+ *	in machine independent form (for network transmission)
+ * OUT buffer_ptr - pointer to the stored data
+ * OUT buffer_size - set to size of the buffer in bytes
+ * IN protocol_version - slurm protocol version of client
+ * NOTE: the caller must xfree the buffer at *buffer_ptr
+ * NOTE: READ lock_slurmctld config before entry
+ */
+extern void pack_all_front_end(char **buffer_ptr, int *buffer_size, uid_t uid,
+			       uint16_t protocol_version)
+{
+	time_t now = time(NULL);
+	uint32_t nodes_packed = 0;
+	Buf buffer;
+#ifdef HAVE_FRONT_END
+	uint32_t tmp_offset;
+	front_end_record_t *front_end_ptr;
+	int i;
+
+	buffer_ptr[0] = NULL;
+	*buffer_size = 0;
+
+	buffer = init_buf(BUF_SIZE * 2);
+	nodes_packed = 0;
+
+	if (protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
+		/* write header: count and time */
+		pack32(nodes_packed, buffer);
+		pack_time(now, buffer);
+
+		/* write records */
+		for (i = 0, front_end_ptr = front_end_nodes;
+		     i < front_end_node_cnt; i++, front_end_ptr++) {
+			xassert(front_end_ptr->magic == FRONT_END_MAGIC);
+			_pack_front_end(front_end_ptr, buffer,
+					protocol_version);
+			nodes_packed++;
+		}
+	} else {
+		error("pack_all_front_end: Unsupported slurm version %u",
+		      protocol_version);
+	}
+
+	tmp_offset = get_buf_offset (buffer);
+	set_buf_offset(buffer, 0);
+	pack32(nodes_packed, buffer);
+	set_buf_offset(buffer, tmp_offset);
+
+	*buffer_size = get_buf_offset(buffer);
+	buffer_ptr[0] = xfer_buf_data(buffer);
+#else
+	buffer_ptr[0] = NULL;
+	*buffer_size = 0;
+	buffer = init_buf(64);
+	pack32(nodes_packed, buffer);
+	pack_time(now, buffer);
+	*buffer_size = get_buf_offset(buffer);
+	buffer_ptr[0] = xfer_buf_data(buffer);
+#endif
+}
+
+/* dump_all_front_end_state - save the state of all front_end nodes to file */
+extern int dump_all_front_end_state(void)
+{
+#ifdef HAVE_FRONT_END
+	/* Save high-water mark to avoid buffer growth with copies */
+	static int high_buffer_size = (1024 * 1024);
+	int error_code = 0, i, log_fd;
+	char *old_file, *new_file, *reg_file;
+	front_end_record_t *front_end_ptr;
+	/* Locks: Read config and node */
+	slurmctld_lock_t node_read_lock = { READ_LOCK, NO_LOCK, READ_LOCK,
+					    NO_LOCK };
+	Buf buffer = init_buf(high_buffer_size);
+	DEF_TIMERS;
+
+	START_TIMER;
+	/* write header: version, time */
+	packstr(FRONT_END_STATE_VERSION, buffer);
+	pack_time(time(NULL), buffer);
+
+	/* write node records to buffer */
+	lock_slurmctld (node_read_lock);
+
+	for (i = 0, front_end_ptr = front_end_nodes;
+	     i < front_end_node_cnt; i++, front_end_ptr++) {
+		xassert(front_end_ptr->magic == FRONT_END_MAGIC);
+		_dump_front_end_state(front_end_ptr, buffer);
+	}
+
+	old_file = xstrdup (slurmctld_conf.state_save_location);
+	xstrcat (old_file, "/front_end_state.old");
+	reg_file = xstrdup (slurmctld_conf.state_save_location);
+	xstrcat (reg_file, "/front_end_state");
+	new_file = xstrdup (slurmctld_conf.state_save_location);
+	xstrcat (new_file, "/front_end_state.new");
+	unlock_slurmctld (node_read_lock);
+
+	/* write the buffer to file */
+	lock_state_files();
+	log_fd = creat (new_file, 0600);
+	if (log_fd < 0) {
+		error ("Can't save state, error creating file %s %m", new_file);
+		error_code = errno;
+	} else {
+		int pos = 0, nwrite = get_buf_offset(buffer), amount, rc;
+		char *data = (char *)get_buf_data(buffer);
+		high_buffer_size = MAX(nwrite, high_buffer_size);
+		while (nwrite > 0) {
+			amount = write(log_fd, &data[pos], nwrite);
+			if ((amount < 0) && (errno != EINTR)) {
+				error("Error writing file %s, %m", new_file);
+				error_code = errno;
+				break;
+			}
+			nwrite -= amount;
+			pos    += amount;
+		}
+
+		rc = fsync_and_close(log_fd, "front_end");
+		if (rc && !error_code)
+			error_code = rc;
+	}
+	if (error_code)
+		(void) unlink (new_file);
+	else {	/* file shuffle */
+		(void) unlink (old_file);
+		if (link(reg_file, old_file))
+			debug4("unable to create link for %s -> %s: %m",
+			       reg_file, old_file);
+		(void) unlink (reg_file);
+		if (link(new_file, reg_file))
+			debug4("unable to create link for %s -> %s: %m",
+			       new_file, reg_file);
+		(void) unlink (new_file);
+	}
+	xfree (old_file);
+	xfree (reg_file);
+	xfree (new_file);
+	unlock_state_files ();
+
+	free_buf (buffer);
+	END_TIMER2("dump_all_front_end_state");
+	return error_code;
+#else
+	return SLURM_SUCCESS;
+#endif
+}
+
+/*
+ * load_all_front_end_state - Load the front_end node state from file, recover
+ *	on slurmctld restart. Execute this after loading the configuration
+ *	file data. Data goes into common storage.
+ * IN state_only - if true, overwrite only front_end node state and reason
+ *	Use this to overwrite the "UNKNOWN state typically used in slurm.conf
+ * RET 0 or error code
+ * NOTE: READ lock_slurmctld config before entry
+ */
+extern int load_all_front_end_state(bool state_only)
+{
+#ifdef HAVE_FRONT_END
+	char *node_name = NULL, *reason = NULL, *data = NULL, *state_file;
+	int data_allocated, data_read = 0, error_code = 0, node_cnt = 0;
+	uint16_t node_state;
+	uint32_t data_size = 0, name_len;
+	uint32_t reason_uid = NO_VAL;
+	time_t reason_time = 0;
+	front_end_record_t *front_end_ptr;
+	int state_fd;
+	time_t time_stamp;
+	Buf buffer;
+	char *ver_str = NULL;
+	uint16_t protocol_version = (uint16_t) NO_VAL;
+
+	/* read the file */
+	lock_state_files ();
+	state_fd = _open_front_end_state_file(&state_file);
+	if (state_fd < 0) {
+		info ("No node state file (%s) to recover", state_file);
+		error_code = ENOENT;
+	} else {
+		data_allocated = BUF_SIZE;
+		data = xmalloc(data_allocated);
+		while (1) {
+			data_read = read(state_fd, &data[data_size], BUF_SIZE);
+			if (data_read < 0) {
+				if (errno == EINTR)
+					continue;
+				else {
+					error ("Read error on %s: %m",
+						state_file);
+					break;
+				}
+			} else if (data_read == 0)     /* eof */
+				break;
+			data_size      += data_read;
+			data_allocated += data_read;
+			xrealloc(data, data_allocated);
+		}
+		close (state_fd);
+	}
+	xfree (state_file);
+	unlock_state_files ();
+
+	buffer = create_buf (data, data_size);
+
+	safe_unpackstr_xmalloc( &ver_str, &name_len, buffer);
+	debug3("Version string in front_end_state header is %s", ver_str);
+	if (ver_str) {
+		if (!strcmp(ver_str, FRONT_END_STATE_VERSION)) {
+			protocol_version = SLURM_PROTOCOL_VERSION;
+		}
+	}
+
+	if (protocol_version == (uint16_t) NO_VAL) {
+		error("*****************************************************");
+		error("Can not recover front_end state, version incompatible");
+		error("*****************************************************");
+		xfree(ver_str);
+		free_buf(buffer);
+		return EFAULT;
+	}
+	xfree(ver_str);
+
+	safe_unpack_time(&time_stamp, buffer);
+
+	while (remaining_buf (buffer) > 0) {
+		uint16_t base_state;
+		if (protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
+			safe_unpackstr_xmalloc (&node_name, &name_len, buffer);
+			safe_unpack16 (&node_state,  buffer);
+			safe_unpackstr_xmalloc (&reason,    &name_len, buffer);
+			safe_unpack_time (&reason_time, buffer);
+			safe_unpack32 (&reason_uid,  buffer);
+			base_state = node_state & NODE_STATE_BASE;
+		} else
+			goto unpack_error;
+
+		/* validity test as possible */
+
+		/* find record and perform update */
+		front_end_ptr = find_front_end_record(node_name);
+		if (front_end_ptr == NULL) {
+			error("Front_end node %s has vanished from "
+			      "configuration", node_name);
+		} else if (state_only) {
+			uint16_t orig_flags;
+			orig_flags = front_end_ptr->node_state &
+				     NODE_STATE_FLAGS;
+			node_cnt++;
+			if (IS_NODE_UNKNOWN(front_end_ptr)) {
+				if (base_state == NODE_STATE_DOWN) {
+					orig_flags &= (~NODE_STATE_COMPLETING);
+					front_end_ptr->node_state =
+						NODE_STATE_DOWN | orig_flags;
+				}
+				if (node_state & NODE_STATE_DRAIN) {
+					 front_end_ptr->node_state |=
+						 NODE_STATE_DRAIN;
+				}
+				if (node_state & NODE_STATE_FAIL) {
+					front_end_ptr->node_state |=
+						NODE_STATE_FAIL;
+				}
+			}
+			if (front_end_ptr->reason == NULL) {
+				front_end_ptr->reason = reason;
+				reason = NULL;	/* Nothing to free */
+				front_end_ptr->reason_time = reason_time;
+				front_end_ptr->reason_uid = reason_uid;
+			}
+		} else {
+			node_cnt++;
+			front_end_ptr->node_state = node_state;
+			xfree(front_end_ptr->reason);
+			front_end_ptr->reason	= reason;
+			reason			= NULL;	/* Nothing to free */
+			front_end_ptr->reason_time	= reason_time;
+			front_end_ptr->reason_uid	= reason_uid;
+			front_end_ptr->last_response	= (time_t) 0;
+		}
+
+		xfree(node_name);
+		xfree(reason);
+	}
+
+fini:	info("Recovered state of %d front_end nodes", node_cnt);
+	free_buf (buffer);
+	return error_code;
+
+unpack_error:
+	error("Incomplete front_end node data checkpoint file");
+	error_code = EFAULT;
+	xfree (node_name);
+	xfree(reason);
+	goto fini;
+#else
+	return 0;
+#endif
+}
+
+/*
+ * set_front_end_down - make the specified front end node's state DOWN and
+ *	kill jobs as needed; the reason and its timestamp are also recorded
+ * IN front_end_ptr - pointer to the front end node to set DOWN
+ * IN reason - why the node is DOWN (copied via xstrdup; caller keeps it)
+ */
+extern void set_front_end_down (front_end_record_t *front_end_ptr,
+				char *reason)
+{
+#ifdef HAVE_FRONT_END
+	time_t now = time(NULL);
+	uint16_t state_flags = front_end_ptr->node_state & NODE_STATE_FLAGS;
+	/* DOWN supersedes COMPLETING; preserve the remaining state flags */
+	state_flags &= (~NODE_STATE_COMPLETING);
+	front_end_ptr->node_state = NODE_STATE_DOWN | state_flags;
+	trigger_front_end_down(front_end_ptr);
+	(void) kill_job_by_front_end_name(front_end_ptr->name);
+	if ((front_end_ptr->reason == NULL) ||
+	    (strncmp(front_end_ptr->reason, "Not responding", 14) == 0)) {
+		xfree(front_end_ptr->reason);
+		front_end_ptr->reason = xstrdup(reason);
+		front_end_ptr->reason_time = now;
+		front_end_ptr->reason_uid = slurm_get_slurm_user_id();
+	}
+	last_front_end_update = now;
+#endif
+}
+
+/*
+ * sync_front_end_state - synchronize job pointers and front-end node state
+ */
+extern void sync_front_end_state(void)
+{
+#ifdef HAVE_FRONT_END
+	ListIterator job_iterator;
+	struct job_record *job_ptr;
+	front_end_record_t *front_end_ptr;
+	uint16_t state_flags;
+	int i;
+	/* Pass 1: clear the per-node running/completing job counters */
+	for (i = 0, front_end_ptr = front_end_nodes;
+	     i < front_end_node_cnt; i++, front_end_ptr++) {
+		front_end_ptr->job_cnt_comp = 0;
+		front_end_ptr->job_cnt_run  = 0;
+	}
+	/* Pass 2: link each job to its front end node and recount jobs */
+	job_iterator = list_iterator_create(job_list);
+	while ((job_ptr = (struct job_record *) list_next(job_iterator))) {
+		if (job_ptr->batch_host) {
+			job_ptr->front_end_ptr =
+				find_front_end_record(job_ptr->batch_host);
+			if ((job_ptr->front_end_ptr == NULL) &&
+			    IS_JOB_RUNNING(job_ptr)) {
+				error("front end node %s has vanished, "
+				      "killing job %u",
+				      job_ptr->batch_host, job_ptr->job_id);
+				job_ptr->job_state = JOB_NODE_FAIL |
+						     JOB_COMPLETING;
+			} else if (job_ptr->front_end_ptr == NULL) {
+				info("front end node %s has vanished",
+				     job_ptr->batch_host);
+			} else if (IS_JOB_COMPLETING(job_ptr)) {
+				job_ptr->front_end_ptr->job_cnt_comp++;
+			} else if (IS_JOB_RUNNING(job_ptr)) {
+				job_ptr->front_end_ptr->job_cnt_run++;
+			}
+		} else {
+			job_ptr->front_end_ptr = NULL;
+		}
+	}
+	list_iterator_destroy(job_iterator);
+	/* Pass 3: derive each node's state from the recomputed counters */
+	for (i = 0, front_end_ptr = front_end_nodes;
+	     i < front_end_node_cnt; i++, front_end_ptr++) {
+		if (IS_NODE_IDLE(front_end_ptr) &&
+		    (front_end_ptr->job_cnt_run != 0)) {
+			state_flags = front_end_ptr->node_state &
+				      NODE_STATE_FLAGS;
+			front_end_ptr->node_state = NODE_STATE_ALLOCATED |
+						    state_flags;
+		}
+		if (IS_NODE_ALLOCATED(front_end_ptr) &&
+		    (front_end_ptr->job_cnt_run == 0)) {
+			state_flags = front_end_ptr->node_state &
+				      NODE_STATE_FLAGS;
+			front_end_ptr->node_state = NODE_STATE_IDLE |
+						    state_flags;
+		}
+		if (IS_NODE_COMPLETING(front_end_ptr) &&
+		    (front_end_ptr->job_cnt_comp == 0)) {
+			front_end_ptr->node_state &= (~NODE_STATE_COMPLETING);
+		}
+		if (!IS_NODE_COMPLETING(front_end_ptr) &&
+		    (front_end_ptr->job_cnt_comp != 0)) {
+			front_end_ptr->node_state |= NODE_STATE_COMPLETING;
+		}
+	}
+	/* Log the resulting front end node states if debugging is enabled */
+	if (slurm_get_debug_flags() & DEBUG_FLAG_FRONT_END)
+		log_front_end_state();
+#endif
+}
diff --git a/src/slurmctld/front_end.h b/src/slurmctld/front_end.h
new file mode 100644
index 000000000..bee023a85
--- /dev/null
+++ b/src/slurmctld/front_end.h
@@ -0,0 +1,131 @@
+/*****************************************************************************\
+ *  front_end.h - Define front end node functions.
+ *****************************************************************************
+ *  Copyright (C) 2010 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Morris Jette <jette1@llnl.gov>
+ *  CODE-OCEC-09-009. All rights reserved.
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifndef __SLURM_FRONT_END_H__
+#define __SLURM_FRONT_END_H__
+
+#include "src/slurmctld/slurmctld.h"
+
+/*
+ * assign_front_end - assign a front end node for starting a job
+ * RET pointer to the front end node to use or NULL if none available
+ */
+extern front_end_record_t *assign_front_end(void);
+
+/*
+ * avail_front_end - test if any front end nodes are available for starting job
+ */
+extern bool avail_front_end(void);
+
+/* dump_all_front_end_state - save the state of all front_end nodes to file */
+extern int dump_all_front_end_state(void);
+
+/*
+ * find_front_end_record - find a record for front_end node with specified name
+ * IN name - name of the desired front_end node
+ * RET pointer to front_end node record or NULL if not found
+ */
+extern front_end_record_t *find_front_end_record(char *name);
+
+/*
+ * load_all_front_end_state - Load the front_end node state from file, recover
+ *	on slurmctld restart. Execute this after loading the configuration
+ *	file data. Data goes into common storage.
+ * IN state_only - if true, overwrite only front_end node state and reason
+ *	Use this to overwrite the "UNKNOWN" state typically used in slurm.conf
+ * RET 0 or error code
+ * NOTE: READ lock_slurmctld config before entry
+ */
+extern int load_all_front_end_state(bool state_only);
+
+/*
+ * log_front_end_state - log all front end node state
+ */
+extern void log_front_end_state(void);
+
+/*
+ * pack_all_front_end - dump all front_end node information for all nodes
+ *	in machine independent form (for network transmission)
+ * OUT buffer_ptr - pointer to the stored data
+ * OUT buffer_size - set to size of the buffer in bytes
+ * IN protocol_version - slurm protocol version of client
+ * NOTE: the caller must xfree the buffer at *buffer_ptr
+ * NOTE: READ lock_slurmctld config before entry
+ */
+extern void pack_all_front_end(char **buffer_ptr, int *buffer_size, uid_t uid,
+			       uint16_t protocol_version);
+
+/*
+ * purge_front_end_state - purge all front end node state
+ */
+extern void purge_front_end_state(void);
+
+/*
+ * restore_front_end_state - restore front end node state
+ * IN recover - replace job, node and/or partition data with latest
+ *              available information depending upon value
+ *              0 = use no saved state information, rebuild everything from
+ *		    slurm.conf contents
+ *              1 = recover saved job and trigger state,
+ *                  node DOWN/DRAIN/FAIL state and reason information
+ *              2 = recover all saved state
+ */
+extern void restore_front_end_state(int recover);
+
+/*
+ * set_front_end_down - make the specified front end node's state DOWN and
+ *	kill jobs as needed
+ * IN front_end_ptr - pointer to the front end node
+ * IN reason - why the node is DOWN
+ */
+extern void set_front_end_down (front_end_record_t *front_end_ptr,
+				char *reason);
+
+/*
+ * sync_front_end_state - synchronize job pointers and front-end node state
+ */
+extern void sync_front_end_state(void);
+
+/*
+ * Update front end node state
+ * IN update_front_end_msg_ptr - change specification
+ * RET SLURM_SUCCESS or error code
+ */
+extern int update_front_end(update_front_end_msg_t *update_front_end_msg_ptr);
+
+#endif /*__SLURM_FRONT_END_H__*/
diff --git a/src/slurmctld/gang.c b/src/slurmctld/gang.c
index a9678799a..c9c934b3a 100644
--- a/src/slurmctld/gang.c
+++ b/src/slurmctld/gang.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -439,12 +439,11 @@ static int _job_fits_in_active_row(struct job_record *job_ptr,
  * each used socket to avoid activating another job on the same socket */
 static void _fill_sockets(bitstr_t *job_nodemap, struct gs_part *p_ptr)
 {
-	uint32_t c, i, size;
+	uint32_t c, i;
 	int n, first_bit, last_bit;
 
 	if (!job_nodemap || !p_ptr || !p_ptr->active_resmap)
 		return;
-	size      = bit_size(job_nodemap);
 	first_bit = bit_ffs(job_nodemap);
 	last_bit  = bit_fls(job_nodemap);
 	if ((first_bit < 0) || (last_bit < 0))
@@ -623,10 +622,10 @@ static void _preempt_job_dequeue(void)
 		preempt_mode = slurm_job_preempt_mode(job_ptr);
 
 		if (preempt_mode == PREEMPT_MODE_SUSPEND) {
-			if((rc = _suspend_job(job_id)) == ESLURM_DISABLED)
+			if ((rc = _suspend_job(job_id)) == ESLURM_DISABLED)
 				rc = SLURM_SUCCESS;
 		} else if (preempt_mode == PREEMPT_MODE_CANCEL) {
-			rc = job_signal(job_ptr->job_id, SIGKILL, 0, 0);
+			rc = job_signal(job_ptr->job_id, SIGKILL, 0, 0, true);
 			if (rc == SLURM_SUCCESS) {
 				info("preempted job %u has been killed",
 				     job_ptr->job_id);
@@ -656,7 +655,7 @@ static void _preempt_job_dequeue(void)
 			   job_ptr->batch_flag && job_ptr->details &&
 			   (job_ptr->details->requeue > 0)) {
 			rc = job_requeue(0, job_ptr->job_id, -1,
-					 (uint16_t)NO_VAL);
+					 (uint16_t)NO_VAL, true);
 			if (rc == SLURM_SUCCESS) {
 				info("preempted job %u has been requeued",
 				     job_ptr->job_id);
@@ -667,7 +666,7 @@ static void _preempt_job_dequeue(void)
 		}
 		
 		if (rc != SLURM_SUCCESS) {
-			rc = job_signal(job_ptr->job_id, SIGKILL, 0, 0);
+			rc = job_signal(job_ptr->job_id, SIGKILL, 0, 0, true);
 			if (rc == SLURM_SUCCESS)
 				info("preempted job %u had to be killed",
 				     job_ptr->job_id);
diff --git a/src/slurmctld/gang.h b/src/slurmctld/gang.h
index dcea1eb70..f013f6d93 100644
--- a/src/slurmctld/gang.h
+++ b/src/slurmctld/gang.h
@@ -6,7 +6,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -39,7 +39,8 @@
 #define __SCHED_GANG_H
 
 #include <stdio.h>
-#include <slurm/slurm_errno.h>
+
+#include "slurm/slurm_errno.h"
 
 #include "src/common/plugin.h"
 #include "src/common/log.h"
diff --git a/src/slurmctld/groups.c b/src/slurmctld/groups.c
index c0ad117d6..c3e2cc284 100644
--- a/src/slurmctld/groups.c
+++ b/src/slurmctld/groups.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -77,7 +77,7 @@ struct group_cache_rec {
 };
 
 /*
- * get_group_members - indentify the users in a given group name
+ * get_group_members - identify the users in a given group name
  * IN group_name - a single group name
  * RET a zero terminated list of its UIDs or NULL on error
  * NOTE: User root has implicitly access to every group
@@ -93,7 +93,7 @@ extern uid_t *get_group_members(char *group_name)
 	int i, j, uid_cnt;
 #ifdef HAVE_AIX
 	FILE *fp = NULL;
-#elif defined (__APPLE__)
+#elif defined (__APPLE__) || defined (__CYGWIN__)
 #else
 	char pw_buffer[PW_BUF_SIZE];
 	struct passwd pw;
@@ -121,7 +121,7 @@ extern uid_t *get_group_members(char *group_name)
 	setgrent_r(&fp);
 	while (!getgrent_r(&grp, grp_buffer, PW_BUF_SIZE, &fp)) {
 		grp_result = &grp;
-#elif defined (__APPLE__)
+#elif defined (__APPLE__) || defined (__CYGWIN__)
 	setgrent();
 	while ((grp_result = getgrent()) != NULL) {
 #else
@@ -139,15 +139,12 @@ extern uid_t *get_group_members(char *group_name)
 		        for (i=0; grp_result->gr_mem[i]; i++) {
 				if (uid_from_string(grp_result->gr_mem[i],
 						    &my_uid) < 0) {
-				        error("Could not find user %s in "
-					      "configured group %s",
-					      grp_result->gr_mem[i],
-					      group_name);
+					/* Group member without valid login */
 					continue;
 				}
 				if (my_uid == 0)
 					continue;
-				if (j >= uid_cnt) {
+				if (j+1 >= uid_cnt) {
 					uid_cnt += 100;
 					xrealloc(group_uids, 
 						 (sizeof(uid_t) * uid_cnt));
@@ -166,7 +163,7 @@ extern uid_t *get_group_members(char *group_name)
 	setpwent();
 #if defined (__sun)
 	while ((pwd_result = getpwent_r(&pw, pw_buffer, PW_BUF_SIZE)) != NULL) {
-#elif defined (__APPLE__)
+#elif defined (__APPLE__) || defined (__CYGWIN__)
 	while ((pwd_result = getpwent()) != NULL) {
 #else
 	while (!getpwent_r(&pw, pw_buffer, PW_BUF_SIZE, &pwd_result)) {
@@ -174,7 +171,7 @@ extern uid_t *get_group_members(char *group_name)
 #endif
  		if (pwd_result->pw_gid != my_gid)
 			continue;
-		if (j >= uid_cnt) {
+		if (j+1 >= uid_cnt) {
 			uid_cnt += 100;
 			xrealloc(group_uids, (sizeof(uid_t) * uid_cnt));
 		}
@@ -186,7 +183,7 @@ extern uid_t *get_group_members(char *group_name)
 	endpwent();
 #endif
 
-	_put_group_cache(group_name, group_uids, uid_cnt);
+	_put_group_cache(group_name, group_uids, j);
 	_log_group_members(group_name, group_uids);
 	return group_uids;
 }
@@ -257,11 +254,11 @@ static void _put_group_cache(char *group_name, void *group_uids, int uid_cnt)
 			fatal("list_create: malloc failure:");
 	}
 
-	sz = sizeof(uid_t) * (uid_cnt + 1);
+	sz = sizeof(uid_t) * (uid_cnt);
 	cache_rec = xmalloc(sizeof(struct group_cache_rec));
 	cache_rec->group_name = xstrdup(group_name);
 	cache_rec->uid_cnt    = uid_cnt;
-	cache_rec->group_uids = (uid_t *) xmalloc(sz);
+	cache_rec->group_uids = (uid_t *) xmalloc(sizeof(uid_t) + sz);
 	if (uid_cnt > 0)
 		memcpy(cache_rec->group_uids, group_uids, sz);
 	list_append(group_cache_list, cache_rec);
diff --git a/src/slurmctld/groups.h b/src/slurmctld/groups.h
index 71677cd8e..914fe525c 100644
--- a/src/slurmctld/groups.h
+++ b/src/slurmctld/groups.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -47,7 +47,7 @@
 extern void clear_group_cache(void);
 
 /*
- * get_group_members - indentify the users in a given group name
+ * get_group_members - identify the users in a given group name
  * IN group_name - a single group name
  * RET a zero terminated list of its UIDs or NULL on error
  * NOTE: User root has implicitly access to every group
diff --git a/src/slurmctld/job_mgr.c b/src/slurmctld/job_mgr.c
index 4b5833b4a..b7ef4edbb 100644
--- a/src/slurmctld/job_mgr.c
+++ b/src/slurmctld/job_mgr.c
@@ -11,7 +11,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -58,7 +58,7 @@
 #include <sys/types.h>
 #include <unistd.h>
 
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm_errno.h"
 
 #include "src/common/assoc_mgr.h"
 #include "src/common/bitstring.h"
@@ -72,11 +72,13 @@
 #include "src/common/slurm_priority.h"
 #include "src/common/slurm_protocol_pack.h"
 #include "src/common/switch.h"
+#include "src/common/timers.h"
 #include "src/common/xassert.h"
 #include "src/common/xstring.h"
 
 #include "src/slurmctld/acct_policy.h"
 #include "src/slurmctld/agent.h"
+#include "src/slurmctld/front_end.h"
 #include "src/slurmctld/job_scheduler.h"
 #include "src/slurmctld/job_submit.h"
 #include "src/slurmctld/licenses.h"
@@ -99,7 +101,8 @@
 #define JOB_HASH_INX(_job_id)	(_job_id % hash_table_size)
 
 /* Change JOB_STATE_VERSION value when changing the state save format */
-#define JOB_STATE_VERSION      "VER010"
+#define JOB_STATE_VERSION      "VER011"
+#define JOB_2_3_STATE_VERSION  "VER011"		/* SLURM version 2.3 */
 #define JOB_2_2_STATE_VERSION  "VER010"		/* SLURM version 2.2 */
 #define JOB_2_1_STATE_VERSION  "VER009"		/* SLURM version 2.1 */
 
@@ -112,7 +115,8 @@ List   job_list = NULL;		/* job_record list */
 time_t last_job_update;		/* time of last update to job records */
 
 /* Local variables */
-static uint32_t maximum_prio = TOP_PRIORITY;
+static uint32_t highest_prio = 0;
+static uint32_t lowest_prio  = TOP_PRIORITY;
 static int      hash_table_size = 0;
 static int      job_count = 0;		/* job's in the system */
 static uint32_t job_id_sequence = 0;	/* first job_id to assign new job */
@@ -153,6 +157,7 @@ static int  _list_find_job_old(void *job_entry, void *key);
 static int  _load_job_details(struct job_record *job_ptr, Buf buffer,
 			      uint16_t protocol_version);
 static int  _load_job_state(Buf buffer,	uint16_t protocol_version);
+static uint32_t _max_switch_wait(uint32_t input_wait);
 static void _notify_srun_missing_step(struct job_record *job_ptr, int node_inx,
 				      time_t now, time_t node_boot_time);
 static int  _open_job_state_file(char **state_file);
@@ -173,17 +178,18 @@ static char *_read_job_ckpt_file(char *ckpt_file, int *size_ptr);
 static void _remove_defunct_batch_dirs(List batch_dirs);
 static int  _reset_detail_bitmaps(struct job_record *job_ptr);
 static void _reset_step_bitmaps(struct job_record *job_ptr);
-static int  _resume_job_nodes(struct job_record *job_ptr, bool clear_prio);
+static int  _resume_job_nodes(struct job_record *job_ptr, bool indf_susp);
+static void _send_job_kill(struct job_record *job_ptr);
 static void _set_job_id(struct job_record *job_ptr);
 static void _set_job_prio(struct job_record *job_ptr);
 static void _signal_batch_job(struct job_record *job_ptr, uint16_t signal);
 static void _signal_job(struct job_record *job_ptr, int signal);
 static void _suspend_job(struct job_record *job_ptr, uint16_t op);
-static int  _suspend_job_nodes(struct job_record *job_ptr, bool clear_prio);
+static int  _suspend_job_nodes(struct job_record *job_ptr, bool indf_susp);
 static bool _top_priority(struct job_record *job_ptr);
 static int  _validate_job_create_req(job_desc_msg_t * job_desc);
 static int  _validate_job_desc(job_desc_msg_t * job_desc_msg, int allocate,
-			       uid_t submit_uid);
+			       uid_t submit_uid, struct part_record *part_ptr);
 static void _validate_job_files(List batch_dirs);
 static int  _write_data_to_file(char *file_name, char *data);
 static int  _write_data_array_to_file(char *file_name, char **data,
@@ -230,7 +236,6 @@ struct job_record *create_job_record(int *error_code)
 	detail_ptr->submit_time = time(NULL);
 	job_ptr->requid = -1; /* force to -1 for sacct to know this
 			       * hasn't been set yet  */
-
 	if (list_append(job_list, job_ptr) == 0)
 		fatal("list_append memory allocation failure");
 
@@ -263,6 +268,7 @@ void delete_job_details(struct job_record *job_entry)
 	if (job_entry->details->depend_list)
 		list_destroy(job_entry->details->depend_list);
 	xfree(job_entry->details->dependency);
+	xfree(job_entry->details->orig_dependency);
 	for (i=0; i<job_entry->details->env_cnt; i++)
 		xfree(job_entry->details->env_sup[i]);
 	xfree(job_entry->details->env_sup);
@@ -310,6 +316,34 @@ static void _delete_job_desc_files(uint32_t job_id)
 	xfree(dir_name);
 }
 
+static uint32_t _max_switch_wait(uint32_t input_wait)
+{
+	static time_t sched_update = 0;
+	static uint32_t max_wait = 60;
+	char *sched_params, *tmp_ptr;
+	int i;
+
+	if (sched_update != slurmctld_conf.last_update) {
+		sched_params = slurm_get_sched_params();
+		if (sched_params &&
+		    (tmp_ptr = strstr(sched_params, "max_switch_wait="))) {
+		/*                                   0123456789012345 */
+			i = atoi(tmp_ptr + 16);
+			if (i < 0) {
+				error("ignoring SchedulerParameters: "
+				      "max_switch_wait of %d", i);
+			} else {
+				      max_wait = i;
+			}
+		}
+		xfree(sched_params);
+	}
+
+	if (max_wait > input_wait)
+		return input_wait;
+	return max_wait;
+}
+
 static slurmdb_qos_rec_t *_determine_and_validate_qos(
 	slurmdb_association_rec_t *assoc_ptr,
 	slurmdb_qos_rec_t *qos_rec,
@@ -332,8 +366,14 @@ static slurmdb_qos_rec_t *_determine_and_validate_qos(
 			else if(bit_set_count(assoc_ptr->usage->valid_qos) == 1)
 				qos_rec->id =
 					bit_ffs(assoc_ptr->usage->valid_qos);
+			else if (assoc_mgr_root_assoc
+				 && assoc_mgr_root_assoc->def_qos_id)
+				qos_rec->id = assoc_mgr_root_assoc->def_qos_id;
 			else
 				qos_rec->name = "normal";
+		else if (assoc_mgr_root_assoc
+			 && assoc_mgr_root_assoc->def_qos_id)
+				qos_rec->id = assoc_mgr_root_assoc->def_qos_id;
 		else
 			qos_rec->name = "normal";
 	}
@@ -574,10 +614,12 @@ extern int load_all_job_state(void)
 	buffer = create_buf(data, data_size);
 	safe_unpackstr_xmalloc(&ver_str, &ver_str_len, buffer);
 	debug3("Version string in job_state header is %s", ver_str);
-	if(ver_str) {
-		if(!strcmp(ver_str, JOB_STATE_VERSION)) {
+	if (ver_str) {
+		if (!strcmp(ver_str, JOB_STATE_VERSION)) {
 			protocol_version = SLURM_PROTOCOL_VERSION;
-		} else if(!strcmp(ver_str, JOB_2_1_STATE_VERSION)) {
+		} else if (!strcmp(ver_str, JOB_2_2_STATE_VERSION)) {
+			protocol_version = SLURM_2_2_PROTOCOL_VERSION;
+		} else if (!strcmp(ver_str, JOB_2_1_STATE_VERSION)) {
 			protocol_version = SLURM_2_1_PROTOCOL_VERSION;
 		}
 	}
@@ -594,6 +636,7 @@ extern int load_all_job_state(void)
 
 	safe_unpack_time(&buf_time, buffer);
 	safe_unpack32( &saved_job_id, buffer);
+	job_id_sequence = MAX(saved_job_id, job_id_sequence);
 	debug3("Job id in job_state header is %u", saved_job_id);
 
 	while (remaining_buf(buffer) > 0) {
@@ -602,8 +645,6 @@ extern int load_all_job_state(void)
 			goto unpack_error;
 		job_cnt++;
 	}
-
-	job_id_sequence = MAX(saved_job_id, job_id_sequence);
 	debug3("Set job_id_sequence to %u", job_id_sequence);
 
 	free_buf(buffer);
@@ -681,7 +722,6 @@ extern int load_last_job_id( void )
 		return EFAULT;
 	}
 	xfree(ver_str);
-	debug3("Version string in job_state header is %s", ver_str);
 
 	safe_unpack_time(&buf_time, buffer);
 	safe_unpack32( &job_id_sequence, buffer);
@@ -725,11 +765,13 @@ static void _dump_job_state(struct job_record *dump_job_ptr, Buf buffer)
 	pack32(dump_job_ptr->exit_code, buffer);
 	pack32(dump_job_ptr->derived_ec, buffer);
 	pack32(dump_job_ptr->db_index, buffer);
-	pack32(dump_job_ptr->assoc_id, buffer);
 	pack32(dump_job_ptr->resv_id, buffer);
 	pack32(dump_job_ptr->next_step_id, buffer);
 	pack32(dump_job_ptr->qos_id, buffer);
+	pack32(dump_job_ptr->req_switch, buffer);
+	pack32(dump_job_ptr->wait4switch, buffer);
 
+	pack_time(dump_job_ptr->preempt_time, buffer);
 	pack_time(dump_job_ptr->start_time, buffer);
 	pack_time(dump_job_ptr->end_time, buffer);
 	pack_time(dump_job_ptr->suspend_time, buffer);
@@ -779,6 +821,7 @@ static void _dump_job_state(struct job_record *dump_job_ptr, Buf buffer)
 	packstr(dump_job_ptr->licenses, buffer);
 	packstr(dump_job_ptr->mail_user, buffer);
 	packstr(dump_job_ptr->resv_name, buffer);
+	packstr(dump_job_ptr->batch_host, buffer);
 
 	select_g_select_jobinfo_pack(dump_job_ptr->select_jobinfo,
 				     buffer, SLURM_PROTOCOL_VERSION);
@@ -822,7 +865,9 @@ static int _load_job_state(Buf buffer, uint16_t protocol_version)
 	uint32_t exit_code, assoc_id, db_index, name_len, time_min;
 	uint32_t next_step_id, total_cpus, total_nodes = 0, cpu_cnt;
 	uint32_t resv_id, spank_job_env_size = 0, qos_id, derived_ec = 0;
+	uint32_t req_switch = 0, wait4switch = 0;
 	time_t start_time, end_time, suspend_time, pre_sus_time, tot_sus_time;
+	time_t preempt_time = 0;
 	time_t resize_time = 0, now = time(NULL);
 	uint16_t job_state, details, batch_flag, step_flag;
 	uint16_t kill_on_node_fail, direct_set_prio;
@@ -836,7 +881,7 @@ static int _load_job_state(Buf buffer, uint16_t protocol_version)
 	char *account = NULL, *network = NULL, *mail_user = NULL;
 	char *comment = NULL, *nodes_completing = NULL, *alloc_node = NULL;
 	char *licenses = NULL, *state_desc = NULL, *wckey = NULL;
-	char *resv_name = NULL, *gres = NULL;
+	char *resv_name = NULL, *gres = NULL, *batch_host = NULL;
 	char **spank_job_env = (char **) NULL;
 	List gres_list = NULL, part_ptr_list = NULL;
 	struct job_record *job_ptr = NULL;
@@ -849,7 +894,157 @@ static int _load_job_state(Buf buffer, uint16_t protocol_version)
 	slurmdb_qos_rec_t qos_rec;
 	bool job_finished = false;
 
-	if(protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_2_3_PROTOCOL_VERSION) {
+		safe_unpack32(&assoc_id, buffer);
+		safe_unpack32(&job_id, buffer);
+
+		/* validity test as possible */
+		if (job_id == 0) {
+			verbose("Invalid job_id %u", job_id);
+			goto unpack_error;
+		}
+
+		job_ptr = find_job_record(job_id);
+		if (job_ptr == NULL) {
+			job_ptr = create_job_record(&error_code);
+			if (error_code) {
+				error("Create job entry failed for job_id %u",
+				      job_id);
+				goto unpack_error;
+			}
+			job_ptr->job_id = job_id;
+			_add_job_hash(job_ptr);
+		}
+
+		safe_unpack32(&user_id, buffer);
+		safe_unpack32(&group_id, buffer);
+		safe_unpack32(&time_limit, buffer);
+		safe_unpack32(&time_min, buffer);
+		safe_unpack32(&priority, buffer);
+		safe_unpack32(&alloc_sid, buffer);
+		safe_unpack32(&total_cpus, buffer);
+		safe_unpack32(&total_nodes, buffer);
+		safe_unpack32(&cpu_cnt, buffer);
+		safe_unpack32(&exit_code, buffer);
+		safe_unpack32(&derived_ec, buffer);
+		safe_unpack32(&db_index, buffer);
+		safe_unpack32(&resv_id, buffer);
+		safe_unpack32(&next_step_id, buffer);
+		safe_unpack32(&qos_id, buffer);
+		safe_unpack32(&req_switch, buffer);
+		safe_unpack32(&wait4switch, buffer);
+
+		safe_unpack_time(&preempt_time, buffer);
+		safe_unpack_time(&start_time, buffer);
+		safe_unpack_time(&end_time, buffer);
+		safe_unpack_time(&suspend_time, buffer);
+		safe_unpack_time(&pre_sus_time, buffer);
+		safe_unpack_time(&resize_time, buffer);
+		safe_unpack_time(&tot_sus_time, buffer);
+
+		safe_unpack16(&direct_set_prio, buffer);
+		safe_unpack16(&job_state, buffer);
+		safe_unpack16(&kill_on_node_fail, buffer);
+		safe_unpack16(&batch_flag, buffer);
+		safe_unpack16(&mail_type, buffer);
+		safe_unpack16(&state_reason, buffer);
+		safe_unpack16(&restart_cnt, buffer);
+		safe_unpack16(&resv_flags, buffer);
+		safe_unpack16(&wait_all_nodes, buffer);
+		safe_unpack16(&warn_signal, buffer);
+		safe_unpack16(&warn_time, buffer);
+		safe_unpack16(&limit_set_max_cpus, buffer);
+		safe_unpack16(&limit_set_max_nodes, buffer);
+		safe_unpack16(&limit_set_min_cpus, buffer);
+		safe_unpack16(&limit_set_min_nodes, buffer);
+		safe_unpack16(&limit_set_time, buffer);
+
+		safe_unpackstr_xmalloc(&state_desc, &name_len, buffer);
+		safe_unpackstr_xmalloc(&resp_host, &name_len, buffer);
+
+		safe_unpack16(&alloc_resp_port, buffer);
+		safe_unpack16(&other_port, buffer);
+
+		if (job_state & JOB_COMPLETING) {
+			safe_unpackstr_xmalloc(&nodes_completing,
+					       &name_len, buffer);
+		}
+		safe_unpackstr_xmalloc(&nodes, &name_len, buffer);
+		safe_unpackstr_xmalloc(&partition, &name_len, buffer);
+		if (partition == NULL) {
+			error("No partition for job %u", job_id);
+			goto unpack_error;
+		}
+		part_ptr = find_part_record (partition);
+		if (part_ptr == NULL) {
+			part_ptr_list = get_part_list(partition);
+			if (part_ptr_list)
+				part_ptr = list_peek(part_ptr_list);
+		}
+		if (part_ptr == NULL) {
+			verbose("Invalid partition (%s) for job_id %u",
+				partition, job_id);
+			/* not fatal error, partition could have been removed,
+			 * reset_job_bitmaps() will clean-up this job */
+		}
+
+		safe_unpackstr_xmalloc(&name, &name_len, buffer);
+		safe_unpackstr_xmalloc(&wckey, &name_len, buffer);
+		safe_unpackstr_xmalloc(&alloc_node, &name_len, buffer);
+		safe_unpackstr_xmalloc(&account, &name_len, buffer);
+		safe_unpackstr_xmalloc(&comment, &name_len, buffer);
+		safe_unpackstr_xmalloc(&gres, &name_len, buffer);
+		safe_unpackstr_xmalloc(&network, &name_len, buffer);
+		safe_unpackstr_xmalloc(&licenses, &name_len, buffer);
+		safe_unpackstr_xmalloc(&mail_user, &name_len, buffer);
+		safe_unpackstr_xmalloc(&resv_name, &name_len, buffer);
+		safe_unpackstr_xmalloc(&batch_host, &name_len, buffer);
+
+		if (select_g_select_jobinfo_unpack(&select_jobinfo, buffer,
+						   protocol_version))
+			goto unpack_error;
+		if (unpack_job_resources(&job_resources, buffer,
+					 protocol_version))
+			goto unpack_error;
+
+		safe_unpack16(&ckpt_interval, buffer);
+		if (checkpoint_alloc_jobinfo(&check_job) ||
+		    checkpoint_unpack_jobinfo(check_job, buffer,
+					      protocol_version))
+			goto unpack_error;
+
+		safe_unpackstr_array(&spank_job_env, &spank_job_env_size,
+				     buffer);
+
+		if (gres_plugin_job_state_unpack(&gres_list, buffer, job_id,
+						 protocol_version) !=
+		    SLURM_SUCCESS)
+			goto unpack_error;
+		gres_plugin_job_state_log(gres_list, job_id);
+
+		safe_unpack16(&details, buffer);
+		if ((details == DETAILS_FLAG) &&
+		    (_load_job_details(job_ptr, buffer, protocol_version))) {
+			job_ptr->job_state = JOB_FAILED;
+			job_ptr->exit_code = 1;
+			job_ptr->state_reason = FAIL_SYSTEM;
+			xfree(job_ptr->state_desc);
+			job_ptr->end_time = now;
+			goto unpack_error;
+		}
+		safe_unpack16(&step_flag, buffer);
+
+		while (step_flag == STEP_FLAG) {
+			/* No need to put these into accounting if they
+			 * haven't been since all information will be
+			 * put in when the job is finished.
+			 */
+			if ((error_code = load_step_state(job_ptr, buffer,
+							  protocol_version)))
+				goto unpack_error;
+			safe_unpack16(&step_flag, buffer);
+		}
+	} else if (protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
 		safe_unpack32(&assoc_id, buffer);
 		safe_unpack32(&job_id, buffer);
 
@@ -996,7 +1191,7 @@ static int _load_job_state(Buf buffer, uint16_t protocol_version)
 				goto unpack_error;
 			safe_unpack16(&step_flag, buffer);
 		}
-	} else if(protocol_version >= SLURM_2_1_PROTOCOL_VERSION) {
+	} else if (protocol_version >= SLURM_2_1_PROTOCOL_VERSION) {
 		uint16_t kill_on_step_done;
 		uint32_t min_cpus;
 
@@ -1155,8 +1350,10 @@ static int _load_job_state(Buf buffer, uint16_t protocol_version)
 		goto unpack_error;
 	}
 
-	if ((maximum_prio >= priority) && (priority > 1))
-		maximum_prio = priority;
+	if (priority > 1) {
+		highest_prio = MAX(highest_prio, priority);
+		lowest_prio  = MIN(lowest_prio,  priority);
+	}
 	if (job_id_sequence <= job_id)
 		job_id_sequence = job_id + 1;
 
@@ -1171,6 +1368,9 @@ static int _load_job_state(Buf buffer, uint16_t protocol_version)
 	job_ptr->alloc_sid    = alloc_sid;
 	job_ptr->assoc_id     = assoc_id;
 	job_ptr->batch_flag   = batch_flag;
+	xfree(job_ptr->batch_host);
+	job_ptr->batch_host   = batch_host;
+	batch_host            = NULL;  /* reused, nothing left to free */
 	xfree(job_ptr->comment);
 	job_ptr->comment      = comment;
 	comment               = NULL;  /* reused, nothing left to free */
@@ -1248,6 +1448,7 @@ static int _load_job_state(Buf buffer, uint16_t protocol_version)
 	job_ptr->total_nodes  = total_nodes;
 	job_ptr->cpu_cnt      = cpu_cnt;
 	job_ptr->tot_sus_time = tot_sus_time;
+	job_ptr->preempt_time = preempt_time;
 	job_ptr->user_id      = user_id;
 	job_ptr->wait_all_nodes = wait_all_nodes;
 	job_ptr->warn_signal  = warn_signal;
@@ -1257,6 +1458,13 @@ static int _load_job_state(Buf buffer, uint16_t protocol_version)
 	job_ptr->limit_set_min_cpus  = limit_set_max_cpus;
 	job_ptr->limit_set_min_nodes = limit_set_min_nodes;
 	job_ptr->limit_set_time      = limit_set_time;
+	job_ptr->req_switch      = req_switch;
+	job_ptr->wait4switch     = wait4switch;
+	/* This needs to always to initialized to "true".  The select
+	   plugin will deal with it every time it goes through the
+	   logic if req_switch or wait4switch are set.
+	*/
+	job_ptr->best_switch     = true;
 
 	memset(&assoc_rec, 0, sizeof(slurmdb_association_rec_t));
 
@@ -1336,6 +1544,7 @@ unpack_error:
 	error("Incomplete job record");
 	xfree(alloc_node);
 	xfree(account);
+	xfree(batch_host);
 	xfree(comment);
 	xfree(gres);
 	xfree(resp_host);
@@ -1405,6 +1614,7 @@ void _dump_job_details(struct job_details *detail_ptr, Buf buffer)
 	packstr(detail_ptr->exc_nodes,  buffer);
 	packstr(detail_ptr->features,   buffer);
 	packstr(detail_ptr->dependency, buffer);
+	packstr(detail_ptr->orig_dependency, buffer);
 
 	packstr(detail_ptr->std_err,       buffer);
 	packstr(detail_ptr->std_in,        buffer);
@@ -1424,7 +1634,7 @@ static int _load_job_details(struct job_record *job_ptr, Buf buffer,
 			     uint16_t protocol_version)
 {
 	char *req_nodes = NULL, *exc_nodes = NULL, *features = NULL;
-	char *cpu_bind, *dependency = NULL, *mem_bind;
+	char *cpu_bind, *dependency = NULL, *orig_dependency = NULL, *mem_bind;
 	char *err = NULL, *in = NULL, *out = NULL, *work_dir = NULL;
 	char *ckpt_dir = NULL, *restart_dir = NULL;
 	char **argv = (char **) NULL, **env_sup = (char **) NULL;
@@ -1441,7 +1651,7 @@ static int _load_job_details(struct job_record *job_ptr, Buf buffer,
 	multi_core_data_t *mc_ptr;
 
 	/* unpack the job's details from the buffer */
-	if(protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_2_3_PROTOCOL_VERSION) {
 		safe_unpack32(&min_cpus, buffer);
 		safe_unpack32(&max_cpus, buffer);
 		safe_unpack32(&min_nodes, buffer);
@@ -1477,6 +1687,7 @@ static int _load_job_details(struct job_record *job_ptr, Buf buffer,
 		safe_unpackstr_xmalloc(&exc_nodes,  &name_len, buffer);
 		safe_unpackstr_xmalloc(&features,   &name_len, buffer);
 		safe_unpackstr_xmalloc(&dependency, &name_len, buffer);
+		safe_unpackstr_xmalloc(&orig_dependency, &name_len, buffer);
 
 		safe_unpackstr_xmalloc(&err, &name_len, buffer);
 		safe_unpackstr_xmalloc(&in,  &name_len, buffer);
@@ -1489,7 +1700,9 @@ static int _load_job_details(struct job_record *job_ptr, Buf buffer,
 			goto unpack_error;
 		safe_unpackstr_array(&argv, &argc, buffer);
 		safe_unpackstr_array(&env_sup, &env_cnt, buffer);
-	} else if(protocol_version >= SLURM_2_1_PROTOCOL_VERSION) {
+	} else if (protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
+		safe_unpack32(&min_cpus, buffer);
+		safe_unpack32(&max_cpus, buffer);
 		safe_unpack32(&min_nodes, buffer);
 		safe_unpack32(&max_nodes, buffer);
 		safe_unpack32(&num_tasks, buffer);
@@ -1523,6 +1736,54 @@ static int _load_job_details(struct job_record *job_ptr, Buf buffer,
 		safe_unpackstr_xmalloc(&exc_nodes,  &name_len, buffer);
 		safe_unpackstr_xmalloc(&features,   &name_len, buffer);
 		safe_unpackstr_xmalloc(&dependency, &name_len, buffer);
+		orig_dependency = xstrdup(dependency);
+
+		safe_unpackstr_xmalloc(&err, &name_len, buffer);
+		safe_unpackstr_xmalloc(&in,  &name_len, buffer);
+		safe_unpackstr_xmalloc(&out, &name_len, buffer);
+		safe_unpackstr_xmalloc(&work_dir, &name_len, buffer);
+		safe_unpackstr_xmalloc(&ckpt_dir, &name_len, buffer);
+		safe_unpackstr_xmalloc(&restart_dir, &name_len, buffer);
+
+		if (unpack_multi_core_data(&mc_ptr, buffer, protocol_version))
+			goto unpack_error;
+		safe_unpackstr_array(&argv, &argc, buffer);
+		safe_unpackstr_array(&env_sup, &env_cnt, buffer);
+	} else if (protocol_version >= SLURM_2_1_PROTOCOL_VERSION) {
+		safe_unpack32(&min_nodes, buffer);
+		safe_unpack32(&max_nodes, buffer);
+		safe_unpack32(&num_tasks, buffer);
+
+		safe_unpack16(&acctg_freq, buffer);
+		safe_unpack16(&contiguous, buffer);
+		safe_unpack16(&cpus_per_task, buffer);
+		safe_unpack16(&nice, buffer);
+		safe_unpack16(&ntasks_per_node, buffer);
+		safe_unpack16(&requeue, buffer);
+		safe_unpack16(&shared, buffer);
+		safe_unpack16(&task_dist, buffer);
+
+		safe_unpackstr_xmalloc(&cpu_bind, &name_len, buffer);
+		safe_unpack16(&cpu_bind_type, buffer);
+		safe_unpackstr_xmalloc(&mem_bind, &name_len, buffer);
+		safe_unpack16(&mem_bind_type, buffer);
+		safe_unpack16(&plane_size, buffer);
+
+		safe_unpack8(&open_mode, buffer);
+		safe_unpack8(&overcommit, buffer);
+		safe_unpack8(&prolog_running, buffer);
+
+		safe_unpack32(&pn_min_cpus, buffer);
+		safe_unpack32(&pn_min_memory, buffer);
+		safe_unpack32(&pn_min_tmp_disk, buffer);
+		safe_unpack_time(&begin_time, buffer);
+		safe_unpack_time(&submit_time, buffer);
+
+		safe_unpackstr_xmalloc(&req_nodes,  &name_len, buffer);
+		safe_unpackstr_xmalloc(&exc_nodes,  &name_len, buffer);
+		safe_unpackstr_xmalloc(&features,   &name_len, buffer);
+		safe_unpackstr_xmalloc(&dependency, &name_len, buffer);
+		orig_dependency = xstrdup(dependency);
 
 		safe_unpackstr_xmalloc(&err, &name_len, buffer);
 		safe_unpackstr_xmalloc(&in,  &name_len, buffer);
@@ -1560,6 +1821,7 @@ static int _load_job_details(struct job_record *job_ptr, Buf buffer,
 	xfree(job_ptr->details->argv);
 	xfree(job_ptr->details->cpu_bind);
 	xfree(job_ptr->details->dependency);
+	xfree(job_ptr->details->orig_dependency);
 	xfree(job_ptr->details->std_err);
 	for (i=0; i<job_ptr->details->env_cnt; i++)
 		xfree(job_ptr->details->env_sup[i]);
@@ -1584,6 +1846,7 @@ static int _load_job_details(struct job_record *job_ptr, Buf buffer,
 	job_ptr->details->cpu_bind_type = cpu_bind_type;
 	job_ptr->details->cpus_per_task = cpus_per_task;
 	job_ptr->details->dependency = dependency;
+	job_ptr->details->orig_dependency = orig_dependency;
 	job_ptr->details->env_cnt = env_cnt;
 	job_ptr->details->env_sup = env_sup;
 	job_ptr->details->std_err = err;
@@ -1626,6 +1889,7 @@ unpack_error:
 	xfree(argv);
 	xfree(cpu_bind);
 	xfree(dependency);
+	xfree(orig_dependency);
 /*	for (i=0; i<env_cnt; i++)
 	xfree(env_sup[i]);  Don't trust this on unpack error */
 	xfree(env_sup);
@@ -1792,7 +2056,8 @@ extern int kill_job_by_part_name(char *part_name)
 			} else
 				job_ptr->end_time = now;
 			if (!pending)
-				deallocate_nodes(job_ptr, false, suspended);
+				deallocate_nodes(job_ptr, false, suspended,
+						 false);
 			job_completion_logger(job_ptr, false);
 		} else if (pending) {
 			job_count++;
@@ -1810,10 +2075,235 @@ extern int kill_job_by_part_name(char *part_name)
 	list_iterator_destroy(job_iterator);
 
 	if (job_count)
-		last_job_update = time(NULL);
+		last_job_update = now;
 	return job_count;
 }
 
+/*
+ * kill_job_by_front_end_name - Given a front end node name, deallocate
+ *	resource for its jobs and kill them.
+ * IN node_name - name of a front end node
+ * RET number of jobs associated with this front end node
+ * NOTE: Patterned after kill_running_job_by_node_name()
+ */
+extern int kill_job_by_front_end_name(char *node_name)
+{
+#ifdef HAVE_FRONT_END
+	ListIterator job_iterator;
+	struct job_record  *job_ptr;
+	struct node_record *node_ptr;
+	time_t now = time(NULL);
+	int i, job_count = 0;
+
+	if (node_name == NULL)
+		fatal("kill_job_by_front_end_name: node_name is NULL");
+
+	job_iterator = list_iterator_create(job_list);
+	while ((job_ptr = (struct job_record *) list_next(job_iterator))) {
+		bool suspended = false;
+
+		if (!IS_JOB_RUNNING(job_ptr) && !IS_JOB_SUSPENDED(job_ptr) &&
+		    !IS_JOB_COMPLETING(job_ptr))
+			continue;
+		if ((job_ptr->batch_host == NULL) ||
+		    strcmp(job_ptr->batch_host, node_name))
+			continue;	/* no match on node name */
+
+		if (IS_JOB_SUSPENDED(job_ptr)) {
+			enum job_states suspend_job_state = job_ptr->job_state;
+			/* we can't have it as suspended when we call the
+			 * accounting stuff.
+			 */
+			job_ptr->job_state = JOB_CANCELLED;
+			jobacct_storage_g_job_suspend(acct_db_conn, job_ptr);
+			job_ptr->job_state = suspend_job_state;
+			suspended = true;
+		}
+		if (IS_JOB_COMPLETING(job_ptr)) {
+			job_count++;
+			while ((i = bit_ffs(job_ptr->node_bitmap_cg)) >= 0) {
+				bit_clear(job_ptr->node_bitmap_cg, i);
+				job_update_cpu_cnt(job_ptr, i);
+				if (job_ptr->node_cnt)
+					(job_ptr->node_cnt)--;
+				else {
+					error("node_cnt underflow on JobId=%u",
+				   	      job_ptr->job_id);
+				}
+				if (job_ptr->node_cnt == 0) {
+					job_ptr->job_state &= (~JOB_COMPLETING);
+					delete_step_records(job_ptr);
+					slurm_sched_schedule();
+				}
+				node_ptr = &node_record_table_ptr[i];
+				if (node_ptr->comp_job_cnt)
+					(node_ptr->comp_job_cnt)--;
+				else {
+					error("Node %s comp_job_cnt underflow, "
+					      "JobId=%u",
+					      node_ptr->name, job_ptr->job_id);
+				}
+			}
+		} else if (IS_JOB_RUNNING(job_ptr) || suspended) {
+			job_count++;
+			if (job_ptr->batch_flag && job_ptr->details &&
+				   (job_ptr->details->requeue > 0)) {
+				char requeue_msg[128];
+
+				srun_node_fail(job_ptr->job_id, node_name);
+
+				info("requeue job %u due to failure of node %s",
+				     job_ptr->job_id, node_name);
+				_set_job_prio(job_ptr);
+				snprintf(requeue_msg, sizeof(requeue_msg),
+					 "Job requeued due to failure "
+					 "of node %s",
+					 node_name);
+				slurm_sched_requeue(job_ptr, requeue_msg);
+				job_ptr->time_last_active  = now;
+				if (suspended) {
+					job_ptr->end_time =
+						job_ptr->suspend_time;
+					job_ptr->tot_sus_time +=
+						difftime(now,
+							 job_ptr->
+							 suspend_time);
+				} else
+					job_ptr->end_time = now;
+
+				/* We want this job to look like it
+				 * was terminated in the accounting logs.
+				 * Set a new submit time so the restarted
+				 * job looks like a new job. */
+				job_ptr->job_state  = JOB_NODE_FAIL;
+				build_cg_bitmap(job_ptr);
+				deallocate_nodes(job_ptr, false, suspended,
+						 false);
+				job_completion_logger(job_ptr, true);
+				job_ptr->db_index = 0;
+				job_ptr->job_state = JOB_PENDING;
+				if (job_ptr->node_cnt)
+					job_ptr->job_state |= JOB_COMPLETING;
+				job_ptr->details->submit_time = now;
+
+				/* restart from periodic checkpoint */
+				if (job_ptr->ckpt_interval &&
+				    job_ptr->ckpt_time &&
+				    job_ptr->details->ckpt_dir) {
+					xfree(job_ptr->details->restart_dir);
+					job_ptr->details->restart_dir =
+						xstrdup (job_ptr->details->
+							 ckpt_dir);
+					xstrfmtcat(job_ptr->details->
+						   restart_dir,
+						   "/%u", job_ptr->job_id);
+				}
+				job_ptr->restart_cnt++;
+				/* Since the job completion logger
+				 * removes the submit we need to add it
+				 * again. */
+				acct_policy_add_job_submit(job_ptr);
+			} else {
+				info("Killing job_id %u on failed node %s",
+				     job_ptr->job_id, node_name);
+				srun_node_fail(job_ptr->job_id, node_name);
+				job_ptr->job_state = JOB_NODE_FAIL |
+						     JOB_COMPLETING;
+				build_cg_bitmap(job_ptr);
+				job_ptr->exit_code = MAX(job_ptr->exit_code, 1);
+				job_ptr->state_reason = FAIL_DOWN_NODE;
+				xfree(job_ptr->state_desc);
+				if (suspended) {
+					job_ptr->end_time =
+						job_ptr->suspend_time;
+					job_ptr->tot_sus_time +=
+						difftime(now,
+							 job_ptr->suspend_time);
+				} else
+					job_ptr->end_time = now;
+				deallocate_nodes(job_ptr, false, suspended,
+						 false);
+				job_completion_logger(job_ptr, false);
+			}
+		}
+	}
+	list_iterator_destroy(job_iterator);
+
+	if (job_count)
+		last_job_update = now;
+	return job_count;
+#else
+	return 0;
+#endif
+}
+
+/*
+ * partition_in_use - determine whether a partition is in use by a RUNNING
+ *	PENDING or SUSPENDED job
+ * IN part_name - name of a partition
+ * RET true if the partition is in use, else false
+ */
+extern bool partition_in_use(char *part_name)
+{
+	ListIterator job_iterator;
+	struct job_record *job_ptr;
+	struct part_record *part_ptr;
+
+	part_ptr = find_part_record (part_name);
+	if (part_ptr == NULL)	/* No such partition */
+		return false;
+
+	job_iterator = list_iterator_create(job_list);
+	if (job_iterator == NULL)
+		fatal("list_iterator_create: malloc failure");
+	while ((job_ptr = (struct job_record *) list_next(job_iterator))) {
+		if (job_ptr->part_ptr == part_ptr) {
+			if (!IS_JOB_FINISHED(job_ptr)) {
+				list_iterator_destroy(job_iterator);
+				return true;
+			}
+		}
+	}
+	list_iterator_destroy(job_iterator);
+	return false;
+}
+
+/*
+ * allocated_session_in_use - check if an interactive session is already running
+ * IN new_alloc - allocation (alloc_node:alloc_sid) to test for
+ * Returns true if an interactive session of the same node:sid already is in use
+ * by a RUNNING, PENDING, or SUSPENDED job. Provides its own locking.
+ */
+extern bool allocated_session_in_use(job_desc_msg_t *new_alloc)
+{
+	ListIterator job_iter;
+	struct job_record *job_ptr;
+	/* Locks: Read job */
+	slurmctld_lock_t job_read_lock = {
+		NO_LOCK, READ_LOCK, NO_LOCK, NO_LOCK };
+
+	if ((new_alloc->script != NULL) || (new_alloc->alloc_node == NULL))
+		return false;
+
+	lock_slurmctld(job_read_lock);
+	job_iter = list_iterator_create(job_list);
+	if (job_iter == NULL)
+		fatal("list_iterator_create: malloc failure");
+
+	while ((job_ptr = (struct job_record *)list_next(job_iter))) {
+		if (job_ptr->batch_flag || IS_JOB_FINISHED(job_ptr))
+			continue;
+		if (job_ptr->alloc_node &&
+		    (strcmp(job_ptr->alloc_node, new_alloc->alloc_node) == 0) &&
+		    (job_ptr->alloc_sid == new_alloc->alloc_sid))
+			break;
+	}
+	list_iterator_destroy(job_iter);
+	unlock_slurmctld(job_read_lock);
+
+	return job_ptr != NULL;
+}
+
 /*
  * kill_running_job_by_node_name - Given a node name, deallocate RUNNING
  *	or COMPLETING jobs from the node or kill them
@@ -1835,6 +2325,8 @@ extern int kill_running_job_by_node_name(char *node_name)
 	bit_position = node_ptr - node_record_table_ptr;
 
 	job_iterator = list_iterator_create(job_list);
+	if (job_iterator == NULL)
+		fatal("list_iterator_create: malloc failure");
 	while ((job_ptr = (struct job_record *) list_next(job_iterator))) {
 		bool suspended = false;
 		if ((job_ptr->node_bitmap == NULL) ||
@@ -1859,20 +2351,22 @@ extern int kill_running_job_by_node_name(char *node_name)
 			job_update_cpu_cnt(job_ptr, bit_position);
 			if (job_ptr->node_cnt)
 				(job_ptr->node_cnt)--;
-			else
+			else {
 				error("node_cnt underflow on JobId=%u",
 			   	      job_ptr->job_id);
+			}
 			if (job_ptr->node_cnt == 0) {
 				job_ptr->job_state &= (~JOB_COMPLETING);
-				delete_step_records(job_ptr, 0);
+				delete_step_records(job_ptr);
 				slurm_sched_schedule();
 			}
 			if (node_ptr->comp_job_cnt)
 				(node_ptr->comp_job_cnt)--;
-			else
+			else {
 				error("Node %s comp_job_cnt underflow, "
 				      "JobId=%u",
 				      node_ptr->name, job_ptr->job_id);
+			}
 		} else if (IS_JOB_RUNNING(job_ptr) || suspended) {
 			job_count++;
 			if ((job_ptr->details) &&
@@ -1916,7 +2410,8 @@ extern int kill_running_job_by_node_name(char *node_name)
 				 * job looks like a new job. */
 				job_ptr->job_state  = JOB_NODE_FAIL;
 				build_cg_bitmap(job_ptr);
-				deallocate_nodes(job_ptr, false, suspended);
+				deallocate_nodes(job_ptr, false, suspended,
+						 false);
 				job_completion_logger(job_ptr, true);
 				job_ptr->db_index = 0;
 				job_ptr->job_state = JOB_PENDING;
@@ -1958,8 +2453,9 @@ extern int kill_running_job_by_node_name(char *node_name)
 						difftime(now,
 							 job_ptr->suspend_time);
 				} else
-					job_ptr->end_time = time(NULL);
-				deallocate_nodes(job_ptr, false, suspended);
+					job_ptr->end_time = now;
+				deallocate_nodes(job_ptr, false, suspended,
+						 false);
 				job_completion_logger(job_ptr, false);
 			}
 		}
@@ -2141,9 +2637,9 @@ void dump_job_desc(job_desc_msg_t * job_specs)
 	debug3("   resp_host=%s alloc_resp_port=%u  other_port=%u",
 	       job_specs->resp_host,
 	       job_specs->alloc_resp_port, job_specs->other_port);
-	debug3("   dependency=%s account=%s comment=%s",
+	debug3("   dependency=%s account=%s qos=%s comment=%s",
 	       job_specs->dependency, job_specs->account,
-	       job_specs->comment);
+	       job_specs->qos, job_specs->comment);
 
 	num_tasks = (job_specs->num_tasks != (uint16_t) NO_VAL) ?
 		(long) job_specs->num_tasks : -1L;
@@ -2293,8 +2789,6 @@ extern int job_allocate(job_desc_msg_t * job_specs, int immediate,
 	 */
 	if (job_ptr->priority == NO_VAL)
 		_set_job_prio(job_ptr);
-	else if (job_ptr->priority == 0)
-		job_ptr->state_reason = WAIT_HELD_USER;
 
 	if (license_job_test(job_ptr, time(NULL)) != SLURM_SUCCESS)
 		independent = false;
@@ -2361,7 +2855,12 @@ extern int job_allocate(job_desc_msg_t * job_specs, int immediate,
 	no_alloc = test_only || too_fragmented ||
 		(!top_prio) || (!independent);
 
-	error_code = select_nodes(job_ptr, no_alloc, NULL);
+	if (!no_alloc && !avail_front_end()) {
+		debug("sched: schedule() returning, no front end nodes are "
+		       "available");
+		error_code = ESLURM_NODES_BUSY;
+	} else
+		error_code = select_nodes(job_ptr, no_alloc, NULL);
 
 	if (!test_only) {
 		last_job_update = now;
@@ -2466,7 +2965,7 @@ extern int job_fail(uint32_t job_id)
 		job_ptr->exit_code = 1;
 		job_ptr->state_reason = FAIL_LAUNCH;
 		xfree(job_ptr->state_desc);
-		deallocate_nodes(job_ptr, false, suspended);
+		deallocate_nodes(job_ptr, false, suspended, false);
 		job_completion_logger(job_ptr, false);
 		return SLURM_SUCCESS;
 	}
@@ -2483,15 +2982,15 @@ extern int job_fail(uint32_t job_id)
  * IN signal - signal to send, SIGKILL == cancel the job
  * IN batch_flag - signal batch shell only if set
  * IN uid - uid of requesting user
+ * IN preempt - true if job being preempted
  * RET 0 on success, otherwise ESLURM error code
- * global: job_list - pointer global job list
- *	last_job_update - time of last job table update
  */
 extern int job_signal(uint32_t job_id, uint16_t signal, uint16_t batch_flag,
-		      uid_t uid)
+		      uid_t uid, bool preempt)
 {
 	struct job_record *job_ptr;
 	time_t now = time(NULL);
+	uint16_t job_term_state;
 
 	/* Jobs submitted using Moab command should be cancelled using
 	 * Moab command for accurate job records */
@@ -2531,8 +3030,11 @@ extern int job_signal(uint32_t job_id, uint16_t signal, uint16_t batch_flag,
 	if (IS_JOB_FINISHED(job_ptr))
 		return ESLURM_ALREADY_DONE;
 
+	/* let node select plugin do any state-dependent signalling actions */
+	select_g_job_signal(job_ptr, signal);
+
 	/* save user ID of the one who requested the job be cancelled */
-	if(signal == SIGKILL)
+	if (signal == SIGKILL)
 		job_ptr->requid = uid;
 	if (IS_JOB_PENDING(job_ptr) && IS_JOB_COMPLETING(job_ptr) &&
 	    (signal == SIGKILL)) {
@@ -2556,14 +3058,18 @@ extern int job_signal(uint32_t job_id, uint16_t signal, uint16_t batch_flag,
 		return SLURM_SUCCESS;
 	}
 
+	if (preempt)
+		job_term_state = JOB_PREEMPTED;
+	else
+		job_term_state = JOB_CANCELLED;
 	if (IS_JOB_SUSPENDED(job_ptr) &&  (signal == SIGKILL)) {
 		last_job_update         = now;
 		job_ptr->end_time       = job_ptr->suspend_time;
 		job_ptr->tot_sus_time  += difftime(now, job_ptr->suspend_time);
-		job_ptr->job_state      = JOB_CANCELLED | JOB_COMPLETING;
+		job_ptr->job_state      = job_term_state | JOB_COMPLETING;
 		build_cg_bitmap(job_ptr);
 		jobacct_storage_g_job_suspend(acct_db_conn, job_ptr);
-		deallocate_nodes(job_ptr, false, true);
+		deallocate_nodes(job_ptr, false, true, preempt);
 		job_completion_logger(job_ptr, false);
 		verbose("job_signal %u of suspended job %u successful",
 			signal, job_id);
@@ -2576,9 +3082,9 @@ extern int job_signal(uint32_t job_id, uint16_t signal, uint16_t batch_flag,
 			job_ptr->time_last_active	= now;
 			job_ptr->end_time		= now;
 			last_job_update			= now;
-			job_ptr->job_state = JOB_CANCELLED | JOB_COMPLETING;
+			job_ptr->job_state = job_term_state | JOB_COMPLETING;
 			build_cg_bitmap(job_ptr);
-			deallocate_nodes(job_ptr, false, false);
+			deallocate_nodes(job_ptr, false, false, preempt);
 			job_completion_logger(job_ptr, false);
 		} else if (batch_flag) {
 			if (job_ptr->batch_flag)
@@ -2606,10 +3112,11 @@ _signal_batch_job(struct job_record *job_ptr, uint16_t signal)
 	agent_arg_t *agent_args = NULL;
 
 	xassert(job_ptr);
+	xassert(job_ptr->batch_host);
 	i = bit_ffs(job_ptr->node_bitmap);
 	if (i < 0) {
 		error("_signal_batch_job JobId=%u lacks assigned nodes",
-		  job_ptr->job_id);
+		      job_ptr->job_id);
 		return;
 	}
 
@@ -2617,8 +3124,7 @@ _signal_batch_job(struct job_record *job_ptr, uint16_t signal)
 	agent_args->msg_type	= REQUEST_SIGNAL_TASKS;
 	agent_args->retry	= 1;
 	agent_args->node_count  = 1;
-	agent_args->hostlist	=
-		hostlist_create(node_record_table_ptr[i].name);
+	agent_args->hostlist	= hostlist_create(job_ptr->batch_host);
 	kill_tasks_msg = xmalloc(sizeof(kill_tasks_msg_t));
 	kill_tasks_msg->job_id      = job_ptr->job_id;
 	kill_tasks_msg->job_step_id = NO_VAL;
@@ -2671,6 +3177,8 @@ extern int job_complete(uint32_t job_id, uid_t uid, bool requeue,
 
 	if (IS_JOB_RUNNING(job_ptr))
 		job_comp_flag = JOB_COMPLETING;
+	else if (IS_JOB_PENDING(job_ptr))
+		job_ptr->start_time = now;
 
 	if ((job_return_code == NO_VAL) &&
 	    (IS_JOB_RUNNING(job_ptr) || IS_JOB_PENDING(job_ptr))) {
@@ -2715,13 +3223,11 @@ extern int job_complete(uint32_t job_id, uid_t uid, bool requeue,
 		job_ptr->batch_flag++;	/* only one retry */
 		job_ptr->restart_cnt++;
 		job_ptr->job_state = JOB_PENDING | job_comp_flag;
-		/* Since the job completion logger
-		   removes the submit we need to add it
-		   again.
-		*/
+		/* Since the job completion logger removes the job submit
+		 * information, we need to add it again. */
 		acct_policy_add_job_submit(job_ptr);
 
-		info("Non-responding node, requeue JobId=%u", job_ptr->job_id);
+		info("Requeue JobId=%u due to node failure", job_ptr->job_id);
 	} else if (IS_JOB_PENDING(job_ptr) && job_ptr->details &&
 		   job_ptr->batch_flag) {
 		/* Possible failure mode with DOWN node and job requeue.
@@ -2766,7 +3272,7 @@ extern int job_complete(uint32_t job_id, uid_t uid, bool requeue,
 	last_job_update = now;
 	if (job_comp_flag) {	/* job was running */
 		build_cg_bitmap(job_ptr);
-		deallocate_nodes(job_ptr, false, suspended);
+		deallocate_nodes(job_ptr, false, suspended, false);
 	}
 	info("sched: job_complete for JobId=%u successful", job_id);
 	return SLURM_SUCCESS;
@@ -3051,6 +3557,84 @@ fini:	FREE_NULL_LIST(part_ptr_list);
 	return rc;
 }
 
+/*
+ * job_limits_check - check the limits specified for the job.
+ * IN job_ptr - pointer to job table entry.
+ * RET WAIT_NO_REASON on success, fail status otherwise.
+ */
+extern int job_limits_check(struct job_record **job_pptr)
+{
+	struct job_details *detail_ptr;
+	enum job_state_reason fail_reason;
+	struct part_record *part_ptr = NULL;
+	struct job_record *job_ptr = NULL;
+	slurmdb_qos_rec_t  *qos_ptr;
+	slurmdb_association_rec_t *assoc_ptr;
+
+
+	job_ptr = *job_pptr;
+	detail_ptr = job_ptr->details;
+	part_ptr = job_ptr->part_ptr;
+	qos_ptr = job_ptr->qos_ptr;
+	assoc_ptr = job_ptr->assoc_ptr;
+
+	fail_reason = WAIT_NO_REASON;
+	if ((detail_ptr->min_nodes > part_ptr->max_nodes) &&
+	    (!qos_ptr || (qos_ptr && !(qos_ptr->flags
+				       & QOS_FLAG_PART_MAX_NODE)))) {
+		info("Job %u requested too many nodes (%u) of "
+		     "partition %s(MaxNodes %u)",
+		     job_ptr->job_id, detail_ptr->min_nodes,
+		     part_ptr->name, part_ptr->max_nodes);
+		fail_reason = WAIT_PART_NODE_LIMIT;
+	} else if ((detail_ptr->max_nodes != 0) &&  /* no max_nodes for job */
+		   ((detail_ptr->max_nodes < part_ptr->min_nodes) &&
+		    (!qos_ptr || (qos_ptr && !(qos_ptr->flags &
+					       QOS_FLAG_PART_MIN_NODE))))) {
+		info("Job %u requested too few nodes (%u) of "
+		     "partition %s(MinNodes %u)",
+		     job_ptr->job_id, detail_ptr->max_nodes,
+		     part_ptr->name, part_ptr->min_nodes);
+		fail_reason = WAIT_PART_NODE_LIMIT;
+	} else if (part_ptr->state_up == PARTITION_DOWN) {
+		info("Job %u requested down partition %s",
+		     job_ptr->job_id, part_ptr->name);
+		fail_reason = WAIT_PART_DOWN;
+	} else if (part_ptr->state_up == PARTITION_INACTIVE) {
+		info("Job %u requested inactive partition %s",
+		     job_ptr->job_id, part_ptr->name);
+		fail_reason = WAIT_PART_INACTIVE;
+	} else if ((job_ptr->time_limit != NO_VAL) &&
+		   ((job_ptr->time_limit > part_ptr->max_time) &&
+		    (!qos_ptr || (qos_ptr && !(qos_ptr->flags &
+					       QOS_FLAG_PART_TIME_LIMIT))))) {
+		info("Job %u exceeds partition time limit", job_ptr->job_id);
+		fail_reason = WAIT_PART_TIME_LIMIT;
+	} else if (qos_ptr && assoc_ptr &&
+		   (qos_ptr->flags & QOS_FLAG_ENFORCE_USAGE_THRES) &&
+		   (!fuzzy_equal(qos_ptr->usage_thres, NO_VAL))) {
+		if (!job_ptr->prio_factors)
+			job_ptr->prio_factors =
+				xmalloc(sizeof(priority_factors_object_t));
+
+		if (!job_ptr->prio_factors->priority_fs) {
+			if (fuzzy_equal(assoc_ptr->usage->usage_efctv, NO_VAL))
+				priority_g_set_assoc_usage(assoc_ptr);
+			job_ptr->prio_factors->priority_fs =
+				priority_g_calc_fs_factor(
+					assoc_ptr->usage->usage_efctv,
+					(long double)assoc_ptr->usage->
+					shares_norm);
+		}
+		if (job_ptr->prio_factors->priority_fs < qos_ptr->usage_thres) {
+			info("Job %u exceeds usage threahold", job_ptr->job_id);
+			fail_reason = WAIT_QOS_THRES;
+		}
+	}
+
+	return (fail_reason);
+}
+
 /*
  * _job_create - create a job table record for the supplied specifications.
  *	This performs only basic tests for request validity (access to
@@ -3069,7 +3653,6 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 		       struct job_record **job_pptr, uid_t submit_uid)
 {
 	int error_code = SLURM_SUCCESS, i, qos_error;
-	struct job_details *detail_ptr;
 	enum job_state_reason fail_reason;
 	struct part_record *part_ptr = NULL;
 	List part_ptr_list = NULL;
@@ -3079,24 +3662,44 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 	List license_list = NULL;
 	bool valid;
 	slurmdb_qos_rec_t qos_rec, *qos_ptr;
+	uint32_t user_submit_priority;
 	uint16_t limit_set_max_cpus = 0;
 	uint16_t limit_set_max_nodes = 0;
 	uint16_t limit_set_min_cpus = 0;
 	uint16_t limit_set_min_nodes = 0;
 	uint16_t limit_set_time = 0;
+	static uint32_t node_scaling = 1;
+	static uint32_t cpus_per_mp = 1;
 
 #ifdef HAVE_BG
 	uint16_t geo[SYSTEM_DIMENSIONS];
 	uint16_t reboot;
 	uint16_t rotate;
-	uint16_t conn_type;
-	static uint32_t cpus_per_bp = 0;
+	uint16_t conn_type[SYSTEM_DIMENSIONS];
+	static bool sub_mp_system = 0;
 
-	if (!cpus_per_bp)
-		select_g_alter_node_cnt(SELECT_GET_BP_CPU_CNT, &cpus_per_bp);
+	if (node_scaling == 1) {
+		select_g_alter_node_cnt(SELECT_GET_NODE_SCALING,
+					&node_scaling);
+		select_g_alter_node_cnt(SELECT_GET_MP_CPU_CNT,
+					&cpus_per_mp);
+		if (node_scaling < 512)
+			sub_mp_system = 1;
+	}
 #endif
 
 	*job_pptr = (struct job_record *) NULL;
+	/*
+	 * Check user permission for negative 'nice' and non-0 priority values
+	 * (both restricted to SlurmUser) before running the job_submit plugin.
+	 */
+	if ((submit_uid != 0) && (submit_uid != slurmctld_conf.slurm_user_id)) {
+		if (job_desc->priority != 0)
+			job_desc->priority = NO_VAL;
+		if (job_desc->nice < NICE_OFFSET)
+			job_desc->nice = NICE_OFFSET;
+	}
+	user_submit_priority = job_desc->priority;
 
 	error_code = job_submit_plugin_submit(job_desc, (uint32_t) submit_uid);
 	if (error_code != SLURM_SUCCESS)
@@ -3112,15 +3715,22 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 		}
 		if (job_desc->contiguous)
 			bit_fill_gaps(req_bitmap);
-
 		i = bit_set_count(req_bitmap);
 		if (i > job_desc->min_nodes)
-			job_desc->min_nodes = i;
+			job_desc->min_nodes = i * node_scaling;
 		if (i > job_desc->min_cpus)
-			job_desc->min_cpus = i;
+			job_desc->min_cpus = i * cpus_per_mp;
 		if (job_desc->max_nodes &&
-		    (job_desc->min_nodes > job_desc->max_nodes))
+		    (job_desc->min_nodes > job_desc->max_nodes)) {
+#if 0
+			info("_job_create: max node count less than required "
+			     "hostlist size for user %u", job_desc->user_id);
 			job_desc->max_nodes = job_desc->min_nodes;
+#else
+			error_code = ESLURM_INVALID_NODE_COUNT;
+			goto cleanup_fail;
+#endif
+		}
 	}
 
 	error_code = _valid_job_part(job_desc, submit_uid, req_bitmap,
@@ -3133,7 +3743,8 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 	xstrtolower(job_desc->account);
 	xstrtolower(job_desc->wckey);
 
-	if ((error_code = _validate_job_desc(job_desc, allocate, submit_uid))) {
+	if ((error_code = _validate_job_desc(job_desc, allocate, submit_uid,
+					     part_ptr))) {
 		error_code = error_code;
 		goto cleanup_fail;
 	}
@@ -3275,22 +3886,32 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 	}
 	select_g_select_jobinfo_get(job_desc->select_jobinfo,
 				    SELECT_JOBDATA_CONN_TYPE, &conn_type);
-	if (conn_type == (uint16_t) NO_VAL) {
-		conn_type = (uint16_t) SELECT_NAV;
+	if (conn_type[0] == (uint16_t) NO_VAL) {
+		conn_type[0] = (uint16_t) SELECT_NAV;
 		select_g_select_jobinfo_set(job_desc->select_jobinfo,
 					    SELECT_JOBDATA_CONN_TYPE,
 					    &conn_type);
-	} else if(((conn_type >= SELECT_SMALL)
-		   && (job_desc->min_cpus >= cpus_per_bp))
-		  || (((conn_type == SELECT_TORUS)|| (conn_type == SELECT_MESH))
-		      && (job_desc->min_cpus < cpus_per_bp))) {
+	} else if(((conn_type[0] >= SELECT_SMALL)
+		   && ((job_desc->min_cpus >= cpus_per_mp) && !sub_mp_system))
+		  || (!sub_mp_system
+		      && ((conn_type[0] == SELECT_TORUS)
+			  || (conn_type[0] == SELECT_MESH))
+		      && (job_desc->min_cpus < cpus_per_mp))) {
 		/* check to make sure we have a valid conn_type with
 		 * the cpu count */
 		info("Job's cpu count at %u makes our conn_type "
 		     "of '%s' invalid.",
-		     job_desc->min_cpus, conn_type_string(conn_type));
+		     job_desc->min_cpus, conn_type_string(conn_type[0]));
 		error_code = ESLURM_INVALID_NODE_COUNT;
 		goto cleanup_fail;
+	} else if ((conn_type[0] == SELECT_TORUS)
+		   || (conn_type[0] == SELECT_MESH)) {
+		int dim;
+		for (dim=1; dim<SYSTEM_DIMENSIONS; dim++)
+			conn_type[dim] = conn_type[0];
+		select_g_select_jobinfo_set(job_desc->select_jobinfo,
+					    SELECT_JOBDATA_CONN_TYPE,
+					    &conn_type);
 	}
 #endif
 
@@ -3344,21 +3965,37 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 	job_ptr->qos_ptr = (void *) qos_ptr;
 	job_ptr->qos_id = qos_rec.id;
 
-	/* already confirmed submit_uid==0 */
-	/* If the priority isn't given we will figure it out later
-	 * after we see if the job is eligible or not. So we want
-	 * NO_VAL if not set. */
+	/*
+	 * Permission for altering priority was confirmed above. The job_submit
+	 * plugin may have set the priority directly or put the job on hold. If
+	 * the priority is not given, we will figure it out later after we see
+	 * if the job is eligible or not. So we want NO_VAL if not set.
+	 */
 	job_ptr->priority = job_desc->priority;
+	if (job_ptr->priority == 0) {
+		if (user_submit_priority == 0)
+			job_ptr->state_reason = WAIT_HELD_USER;
+		else
+			job_ptr->state_reason = WAIT_HELD;
+	} else if (job_ptr->priority != NO_VAL) {
+		job_ptr->direct_set_prio = true;
+	}
 
 	error_code = update_job_dependency(job_ptr, job_desc->dependency);
 	if (error_code != SLURM_SUCCESS)
 		goto cleanup_fail;
+	job_ptr->details->orig_dependency = xstrdup(job_ptr->details->
+						    dependency);
 
 	if (build_feature_list(job_ptr)) {
 		error_code = ESLURM_INVALID_FEATURE;
 		goto cleanup_fail;
 	}
-	if (gres_plugin_job_state_validate(job_ptr->gres, &job_ptr->gres_list)){
+	/* NOTE: If this job is being used to expand another job, this job's
+	 * gres_list has already been filled in with a copy of gres_list job
+	 * to be expanded by update_job_dependency() */
+	if ((job_ptr->details->expanding_jobid == 0) &&
+	    gres_plugin_job_state_validate(job_ptr->gres, &job_ptr->gres_list)){
 		error_code = ESLURM_INVALID_GRES;
 		goto cleanup_fail;
 	}
@@ -3381,62 +4018,15 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 	job_ptr->license_list = license_list;
 	license_list = NULL;
 
-	/* Insure that requested partition is valid right now,
-	 * otherwise leave job queued and provide warning code */
-	detail_ptr = job_ptr->details;
-	fail_reason= WAIT_NO_REASON;
-	if ((job_desc->min_nodes > part_ptr->max_nodes) &&
-	    (qos_ptr && !(qos_ptr->flags & QOS_FLAG_PART_MAX_NODE))) {
-		info("Job %u requested too many nodes (%u) of "
-		     "partition %s(MaxNodes %u)",
-		     job_ptr->job_id, job_desc->min_nodes,
-		     part_ptr->name, part_ptr->max_nodes);
-		fail_reason = WAIT_PART_NODE_LIMIT;
-	} else if ((job_desc->max_nodes != 0) &&    /* no max_nodes for job */
-		   ((job_desc->max_nodes < part_ptr->min_nodes) &&
-		    (qos_ptr && !(qos_ptr->flags & QOS_FLAG_PART_MIN_NODE)))) {
-		info("Job %u requested too few nodes (%u) of "
-		     "partition %s(MinNodes %u)",
-		     job_ptr->job_id, job_desc->max_nodes,
-		     part_ptr->name, part_ptr->min_nodes);
-		fail_reason = WAIT_PART_NODE_LIMIT;
-	} else if (part_ptr->state_up == PARTITION_DOWN) {
-		info("Job %u requested down partition %s",
-		     job_ptr->job_id, part_ptr->name);
-		fail_reason = WAIT_PART_DOWN;
-	} else if (part_ptr->state_up == PARTITION_INACTIVE) {
-		info("Job %u requested inactive partition %s",
-		     job_ptr->job_id, part_ptr->name);
-		fail_reason = WAIT_PART_INACTIVE;
-	} else if ((job_ptr->time_limit != NO_VAL) &&
-		   ((job_ptr->time_limit > part_ptr->max_time) &&
-		    (qos_ptr &&
-		     !(qos_ptr->flags & QOS_FLAG_PART_TIME_LIMIT)))) {
-		info("Job %u exceeds partition time limit", job_ptr->job_id);
-		fail_reason = WAIT_PART_TIME_LIMIT;
-	} else if (qos_ptr && assoc_ptr &&
-		   (qos_ptr->flags & QOS_FLAG_ENFORCE_USAGE_THRES) &&
-		   (qos_ptr->usage_thres != (double)NO_VAL)) {
-		if (!job_ptr->prio_factors)
-			job_ptr->prio_factors =
-				xmalloc(sizeof(priority_factors_object_t));
-
-		if (!job_ptr->prio_factors->priority_fs) {
-			if (assoc_ptr->usage->usage_efctv
-			    == (long double)NO_VAL)
-				priority_g_set_assoc_usage(assoc_ptr);
-			job_ptr->prio_factors->priority_fs =
-				priority_g_calc_fs_factor(
-					assoc_ptr->usage->usage_efctv,
-					(long double)assoc_ptr->usage->
-					shares_norm);
-		}
-		if (job_ptr->prio_factors->priority_fs < qos_ptr->usage_thres) {
-			info("Job %u exceeds usage threahold", job_ptr->job_id);
-			fail_reason = WAIT_QOS_THRES;
-		}
-	}
-
+	if (job_desc->req_switch != NO_VAL) /* Max # of switches */
+		job_ptr->req_switch = job_desc->req_switch;
+	if (job_desc->wait4switch != NO_VAL)
+		job_ptr->wait4switch = _max_switch_wait(job_desc->wait4switch);
+	job_ptr->best_switch = true;
+
+	/* Insure that requested partition is valid right now,
+	 * otherwise leave job queued and provide warning code */
+	fail_reason = job_limits_check(&job_ptr);
 	if (fail_reason != WAIT_NO_REASON) {
 		if (fail_reason == WAIT_QOS_THRES)
 			error_code = ESLURM_QOS_THRES;
@@ -3675,15 +4265,19 @@ char **get_job_env(struct job_record *job_ptr, uint32_t * env_size)
  */
 char *get_job_script(struct job_record *job_ptr)
 {
-	char job_dir[30], *file_name, *script = NULL;
+	char *script = NULL;
 
-	file_name = slurm_get_state_save_location();
-	sprintf(job_dir, "/job.%d/script", job_ptr->job_id);
-	xstrcat(file_name, job_dir);
+	if (job_ptr->batch_flag) {
+		char *file_name = slurm_get_state_save_location();
+		char job_dir[30];
 
-	_read_data_from_file(file_name, &script);
+		sprintf(job_dir, "/job.%d/script", job_ptr->job_id);
+		xstrcat(file_name, job_dir);
 
-	xfree(file_name);
+		_read_data_from_file(file_name, &script);
+
+		xfree(file_name);
+	}
 	return script;
 }
 
@@ -3939,8 +4533,8 @@ _copy_job_desc_to_job_record(job_desc_msg_t * job_desc,
 						    &wckey_ptr)) {
 				if (accounting_enforce &
 				    ACCOUNTING_ENFORCE_WCKEYS) {
-					error("_job_create: invalid wckey '%s' "
-					      "for user %u.",
+					error("_copy_job_desc_to_job_record: "
+					      "invalid wckey '%s' for user %u.",
 					      wckey_rec.name,
 					      job_desc->user_id);
 					return ESLURM_INVALID_WCKEY;
@@ -3948,8 +4542,8 @@ _copy_job_desc_to_job_record(job_desc_msg_t * job_desc,
 			}
 		} else if (accounting_enforce & ACCOUNTING_ENFORCE_WCKEYS) {
 			/* This should never happen */
-			info("_job_create: no wckey was given for "
-			     "job submit.");
+			info("_copy_job_desc_to_job_record: no wckey was given "
+			     "for job submit.");
 			return ESLURM_INVALID_WCKEY;
 		}
 	}
@@ -4135,13 +4729,19 @@ static char *_copy_nodelist_no_dup(char *node_list)
 	return buf;
 }
 
-static bool _valid_pn_min_mem(job_desc_msg_t * job_desc_msg)
+static bool _valid_pn_min_mem(job_desc_msg_t * job_desc_msg,
+			      struct part_record *part_ptr)
 {
 	uint32_t job_mem_limit = job_desc_msg->pn_min_memory;
-	uint32_t sys_mem_limit = slurmctld_conf.max_mem_per_cpu;
+	uint32_t sys_mem_limit;
 	uint16_t cpus_per_node, ratio;
 
-	if (sys_mem_limit == 0)
+	if (part_ptr && part_ptr->max_mem_per_cpu)
+		sys_mem_limit = part_ptr->max_mem_per_cpu;
+	else
+		sys_mem_limit = slurmctld_conf.max_mem_per_cpu;
+
+	if ((sys_mem_limit == 0) || (sys_mem_limit == MEM_PER_CPU))
 		return true;
 
 	if ((job_mem_limit & MEM_PER_CPU) && (sys_mem_limit & MEM_PER_CPU)) {
@@ -4167,7 +4767,7 @@ static bool _valid_pn_min_mem(job_desc_msg_t * job_desc_msg)
 		return false;
 	}
 
-	/* Our size is per CPU and limit per node or vise-versa.
+	/* Our size is per CPU and limit per node or vice-versa.
 	 * CPU count my vary by node, but we don't have a good
 	 * way to identify specific nodes for the job at this
 	 * point, so just pick the first node as a basis for enforcing
@@ -4296,7 +4896,8 @@ void job_time_limit(void)
 				debug("Warning signal %u to job %u ",
 				      job_ptr->warn_signal, job_ptr->job_id);
 				(void) job_signal(job_ptr->job_id,
-						  job_ptr->warn_signal, 0, 0);
+						  job_ptr->warn_signal, 0, 0,
+						  false);
 				job_ptr->warn_signal = 0;
 				job_ptr->warn_time = 0;
 			}
@@ -4502,10 +5103,10 @@ static void _job_timed_out(struct job_record *job_ptr)
 		job_ptr->job_state          = JOB_TIMEOUT | JOB_COMPLETING;
 		build_cg_bitmap(job_ptr);
 		job_ptr->exit_code = MAX(job_ptr->exit_code, 1);
-		deallocate_nodes(job_ptr, true, false);
+		deallocate_nodes(job_ptr, true, false, false);
 		job_completion_logger(job_ptr, false);
 	} else
-		job_signal(job_ptr->job_id, SIGKILL, 0, 0);
+		job_signal(job_ptr->job_id, SIGKILL, 0, 0, false);
 	return;
 }
 
@@ -4516,7 +5117,7 @@ static void _job_timed_out(struct job_record *job_ptr)
  * IN submit_uid - who request originated
  */
 static int _validate_job_desc(job_desc_msg_t * job_desc_msg, int allocate,
-			      uid_t submit_uid)
+			      uid_t submit_uid, struct part_record *part_ptr)
 {
 	if ((job_desc_msg->min_cpus  == NO_VAL) &&
 	    (job_desc_msg->min_nodes == NO_VAL) &&
@@ -4576,18 +5177,17 @@ static int _validate_job_desc(job_desc_msg_t * job_desc_msg, int allocate,
 
 	if (job_desc_msg->nice == (uint16_t) NO_VAL)
 		job_desc_msg->nice = NICE_OFFSET;
-	if ((submit_uid != 0) &&  /* only root or SlurmUser can set job prio */
-	    (submit_uid != slurmctld_conf.slurm_user_id)) {
-		if (job_desc_msg->priority != 0)
-			job_desc_msg->priority = NO_VAL;
-		if (job_desc_msg->nice < NICE_OFFSET)
-			job_desc_msg->nice = NICE_OFFSET;
-	}
 
 	if (job_desc_msg->pn_min_memory == NO_VAL) {
 		/* Default memory limit is DefMemPerCPU (if set) or no limit */
-		job_desc_msg->pn_min_memory = slurmctld_conf.def_mem_per_cpu;
-	} else if (!_valid_pn_min_mem(job_desc_msg))
+		if (part_ptr && part_ptr->def_mem_per_cpu) {
+			job_desc_msg->pn_min_memory =
+					part_ptr->def_mem_per_cpu;
+		} else {
+			job_desc_msg->pn_min_memory =
+					slurmctld_conf.def_mem_per_cpu;
+		}
+	} else if (!_valid_pn_min_mem(job_desc_msg, part_ptr))
 		return ESLURM_INVALID_TASK_MEMORY;
 
 	if (job_desc_msg->min_nodes == NO_VAL)
@@ -4635,6 +5235,7 @@ static void _list_delete_job(void *job_entry)
 	delete_job_details(job_ptr);
 	xfree(job_ptr->account);
 	xfree(job_ptr->alloc_node);
+	xfree(job_ptr->batch_host);
 	xfree(job_ptr->comment);
 	xfree(job_ptr->gres);
 	FREE_NULL_LIST(job_ptr->gres_list);
@@ -4662,7 +5263,7 @@ static void _list_delete_job(void *job_entry)
 	xfree(job_ptr->spank_job_env);
 	xfree(job_ptr->state_desc);
 	if (job_ptr->step_list) {
-		delete_step_records(job_ptr, 0);
+		delete_step_records(job_ptr);
 		list_destroy(job_ptr->step_list);
 	}
 	xfree(job_ptr->wckey);
@@ -4783,7 +5384,7 @@ extern void pack_all_jobs(char **buffer_ptr, int *buffer_size,
 		    (! IS_JOB_COMPLETING(job_ptr)) && IS_JOB_FINISHED(job_ptr))
 			continue;	/* job ready for purging, don't dump */
 
-		pack_job(job_ptr, show_flags, buffer, protocol_version);
+		pack_job(job_ptr, show_flags, buffer, protocol_version, uid);
 		jobs_packed++;
 	}
 	part_filter_clear();
@@ -4844,7 +5445,7 @@ extern int pack_one_job(char **buffer_ptr, int *buffer_size,
 	buffer = init_buf(BUF_SIZE);
 	pack32(jobs_packed, buffer);
 	pack_time(time(NULL), buffer);
-	pack_job(job_ptr, show_flags, buffer, protocol_version);
+	pack_job(job_ptr, show_flags, buffer, protocol_version, uid);
 
 	*buffer_size = get_buf_offset(buffer);
 	buffer_ptr[0] = xfer_buf_data(buffer);
@@ -4858,11 +5459,12 @@ extern int pack_one_job(char **buffer_ptr, int *buffer_size,
  * IN show_flags - job filtering options
  * IN/OUT buffer - buffer in which data is placed, pointers automatically
  *	updated
+ * IN uid - user requesting the data
  * NOTE: change _unpack_job_info_members() in common/slurm_protocol_pack.c
  *	  whenever the data format changes
  */
 void pack_job(struct job_record *dump_job_ptr, uint16_t show_flags, Buf buffer,
-	      uint16_t protocol_version)
+	      uint16_t protocol_version, uid_t uid)
 {
 	struct job_details *detail_ptr;
 	time_t begin_time = 0;
@@ -4870,7 +5472,7 @@ void pack_job(struct job_record *dump_job_ptr, uint16_t show_flags, Buf buffer,
 	assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK,
 				   READ_LOCK, NO_LOCK, NO_LOCK };
 
-	if(protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_2_3_PROTOCOL_VERSION) {
 		pack32(dump_job_ptr->assoc_id, buffer);
 		pack32(dump_job_ptr->job_id, buffer);
 		pack32(dump_job_ptr->user_id, buffer);
@@ -4911,6 +5513,7 @@ void pack_job(struct job_record *dump_job_ptr, uint16_t show_flags, Buf buffer,
 		pack_time(dump_job_ptr->suspend_time, buffer);
 		pack_time(dump_job_ptr->pre_sus_time, buffer);
 		pack_time(dump_job_ptr->resize_time, buffer);
+		pack_time(dump_job_ptr->preempt_time, buffer);
 		pack32(dump_job_ptr->priority, buffer);
 
 		/* Only send the allocated nodelist since we are only sending
@@ -4932,11 +5535,22 @@ void pack_job(struct job_record *dump_job_ptr, uint16_t show_flags, Buf buffer,
 		packstr(dump_job_ptr->network, buffer);
 		packstr(dump_job_ptr->comment, buffer);
 		packstr(dump_job_ptr->gres, buffer);
+		packstr(dump_job_ptr->batch_host, buffer);
+		if (!IS_JOB_COMPLETED(dump_job_ptr) &&
+		    (show_flags & SHOW_DETAIL) &&
+		    ((dump_job_ptr->user_id == (uint32_t) uid) ||
+		     validate_slurm_user(uid))) {
+			char *batch_script = get_job_script(dump_job_ptr);
+			packstr(batch_script, buffer);
+			xfree(batch_script);
+		} else {
+			packnull(buffer);
+		}
 
 		assoc_mgr_lock(&locks);
 		if (assoc_mgr_qos_list) {
 			packstr(slurmdb_qos_str(assoc_mgr_qos_list,
-					        dump_job_ptr->qos_id), buffer);
+						dump_job_ptr->qos_id), buffer);
 		} else
 			packnull(buffer);
 		assoc_mgr_unlock(&locks);
@@ -4958,6 +5572,9 @@ void pack_job(struct job_record *dump_job_ptr, uint16_t show_flags, Buf buffer,
 
 		packstr(dump_job_ptr->name, buffer);
 		packstr(dump_job_ptr->wckey, buffer);
+		pack32(dump_job_ptr->req_switch, buffer);
+		pack32(dump_job_ptr->wait4switch, buffer);
+		
 		packstr(dump_job_ptr->alloc_node, buffer);
 		if (!IS_JOB_COMPLETING(dump_job_ptr))
 			pack_bit_fmt(dump_job_ptr->node_bitmap, buffer);
@@ -4980,7 +5597,117 @@ void pack_job(struct job_record *dump_job_ptr, uint16_t show_flags, Buf buffer,
 		else
 			_pack_pending_job_details(NULL, buffer,
 						  protocol_version);
-	} else if(protocol_version >= SLURM_2_1_PROTOCOL_VERSION) {
+	} else if (protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
+		pack32(dump_job_ptr->assoc_id, buffer);
+		pack32(dump_job_ptr->job_id, buffer);
+		pack32(dump_job_ptr->user_id, buffer);
+		pack32(dump_job_ptr->group_id, buffer);
+
+		pack16(dump_job_ptr->job_state,    buffer);
+		pack16(dump_job_ptr->batch_flag,   buffer);
+		pack16(dump_job_ptr->state_reason, buffer);
+		pack16(dump_job_ptr->restart_cnt,  buffer);
+		pack16(show_flags,  buffer);
+
+		pack32(dump_job_ptr->alloc_sid, buffer);
+		if ((dump_job_ptr->time_limit == NO_VAL)
+		    && dump_job_ptr->part_ptr)
+			pack32(dump_job_ptr->part_ptr->max_time, buffer);
+		else
+			pack32(dump_job_ptr->time_limit, buffer);
+		pack32(dump_job_ptr->time_min, buffer);
+
+		if (dump_job_ptr->details) {
+			pack16(dump_job_ptr->details->nice,  buffer);
+			pack_time(dump_job_ptr->details->submit_time, buffer);
+			/* Earliest possible begin time */
+			begin_time = dump_job_ptr->details->begin_time;
+		} else {
+			pack16(0, buffer);
+			pack_time((time_t) 0, buffer);
+		}
+
+		pack_time(begin_time, buffer);
+		/* Actual or expected start time */
+		if((dump_job_ptr->start_time) || (begin_time <= time(NULL)))
+			pack_time(dump_job_ptr->start_time, buffer);
+		else	/* earliest start time in the future */
+			pack_time(begin_time, buffer);
+
+		pack_time(dump_job_ptr->end_time, buffer);
+		pack_time(dump_job_ptr->suspend_time, buffer);
+		pack_time(dump_job_ptr->pre_sus_time, buffer);
+		pack_time(dump_job_ptr->resize_time, buffer);
+		pack32(dump_job_ptr->priority, buffer);
+
+		/* Only send the allocated nodelist since we are only sending
+		 * the number of cpus and nodes that are currently allocated. */
+		if (!IS_JOB_COMPLETING(dump_job_ptr))
+			packstr(dump_job_ptr->nodes, buffer);
+		else {
+			nodelist =
+				bitmap2node_name(dump_job_ptr->node_bitmap_cg);
+			packstr(nodelist, buffer);
+			xfree(nodelist);
+		}
+
+		if (!IS_JOB_PENDING(dump_job_ptr) && dump_job_ptr->part_ptr)
+			packstr(dump_job_ptr->part_ptr->name, buffer);
+		else
+			packstr(dump_job_ptr->partition, buffer);
+		packstr(dump_job_ptr->account, buffer);
+		packstr(dump_job_ptr->network, buffer);
+		packstr(dump_job_ptr->comment, buffer);
+		packstr(dump_job_ptr->gres, buffer);
+
+		assoc_mgr_lock(&locks);
+		if (assoc_mgr_qos_list) {
+			packstr(slurmdb_qos_str(assoc_mgr_qos_list,
+						dump_job_ptr->qos_id), buffer);
+		} else
+			packnull(buffer);
+		assoc_mgr_unlock(&locks);
+
+		packstr(dump_job_ptr->licenses, buffer);
+		packstr(dump_job_ptr->state_desc, buffer);
+		packstr(dump_job_ptr->resv_name, buffer);
+
+		pack32(dump_job_ptr->exit_code, buffer);
+		pack32(dump_job_ptr->derived_ec, buffer);
+
+		if (show_flags & SHOW_DETAIL) {
+			pack_job_resources(dump_job_ptr->job_resrcs, buffer,
+					   protocol_version);
+		} else {
+			uint32_t empty = NO_VAL;
+			pack32(empty, buffer);
+		}
+
+		packstr(dump_job_ptr->name, buffer);
+		packstr(dump_job_ptr->wckey, buffer);
+		packstr(dump_job_ptr->alloc_node, buffer);
+		if (!IS_JOB_COMPLETING(dump_job_ptr))
+			pack_bit_fmt(dump_job_ptr->node_bitmap, buffer);
+		else
+			pack_bit_fmt(dump_job_ptr->node_bitmap_cg, buffer);
+
+		select_g_select_jobinfo_pack(dump_job_ptr->select_jobinfo,
+					     buffer, protocol_version);
+
+		detail_ptr = dump_job_ptr->details;
+		/* A few details are always dumped here */
+		_pack_default_job_details(dump_job_ptr, buffer,
+					  protocol_version);
+
+		/* other job details are only dumped until the job starts
+		 * running (at which time they become meaningless) */
+		if (detail_ptr)
+			_pack_pending_job_details(detail_ptr, buffer,
+						  protocol_version);
+		else
+			_pack_pending_job_details(NULL, buffer,
+						  protocol_version);
+	} else if (protocol_version >= SLURM_2_1_PROTOCOL_VERSION) {
 		pack32(dump_job_ptr->assoc_id, buffer);
 		pack32(dump_job_ptr->job_id, buffer);
 		pack32(dump_job_ptr->user_id, buffer);
@@ -5268,6 +5995,7 @@ void purge_old_job(void)
 			job_ptr->end_time	= now;
 			job_completion_logger(job_ptr, false);
 			last_job_update		= now;
+			srun_allocate_abort(job_ptr);
 		}
 	}
 	list_iterator_destroy(job_iterator);
@@ -5505,7 +6233,7 @@ extern uint32_t get_next_job_id(void)
 
 	job_id_sequence = MAX(job_id_sequence, slurmctld_conf.first_job_id);
 	next_id = job_id_sequence + 1;
-	if (next_id >= MIN_NOALLOC_JOBID)
+	if (next_id >= slurmctld_conf.max_job_id)
 		next_id = slurmctld_conf.first_job_id;
 	return next_id;
 }
@@ -5516,6 +6244,7 @@ extern uint32_t get_next_job_id(void)
  */
 static void _set_job_id(struct job_record *job_ptr)
 {
+	int i;
 	uint32_t new_id;
 
 	job_id_sequence = MAX(job_id_sequence, slurmctld_conf.first_job_id);
@@ -5527,15 +6256,18 @@ static void _set_job_id(struct job_record *job_ptr)
 		fatal("_set_job_id: partition not set");
 
 	/* Insure no conflict in job id if we roll over 32 bits */
-	while (1) {
-		if (++job_id_sequence >= MIN_NOALLOC_JOBID)
+	for (i = 0; i < 1000; i++) {
+		if (++job_id_sequence >= slurmctld_conf.max_job_id)
 			job_id_sequence = slurmctld_conf.first_job_id;
 		new_id = job_id_sequence;
 		if (find_job_record(new_id) == NULL) {
 			job_ptr->job_id = new_id;
-			break;
+			return;
 		}
 	}
+	fatal("We have exhausted our supply of valid job id values."
+	      "FirstJobId=%u MaxJobId=%u", slurmctld_conf.first_job_id,
+	      slurmctld_conf.max_job_id);
 }
 
 
@@ -5550,20 +6282,41 @@ static void _set_job_prio(struct job_record *job_ptr)
 	xassert (job_ptr->magic == JOB_MAGIC);
 	if (IS_JOB_FINISHED(job_ptr))
 		return;
-	job_ptr->priority = slurm_sched_initial_priority(maximum_prio,
+	job_ptr->priority = slurm_sched_initial_priority(lowest_prio,
 							 job_ptr);
 	if ((job_ptr->priority <= 1) ||
 	    (job_ptr->direct_set_prio) ||
 	    (job_ptr->details && (job_ptr->details->nice != NICE_OFFSET)))
 		return;
 
-	maximum_prio = MIN(job_ptr->priority, maximum_prio);
+	lowest_prio = MIN(job_ptr->priority, lowest_prio);
+}
+
+/* After recovering job state, if using priority/basic then we increment the
+ * priorities of all jobs to avoid decrementing the base down to zero */
+extern void sync_job_priorities(void)
+{
+	ListIterator job_iterator;
+	struct job_record *job_ptr;
+	uint32_t prio_boost = 0;
+
+	if ((highest_prio != 0) && (highest_prio < TOP_PRIORITY))
+		prio_boost = TOP_PRIORITY - highest_prio;
+	if (strcmp(slurmctld_conf.priority_type, "priority/basic") ||
+	    (prio_boost < 1000000))
+		return;
+
+	job_iterator = list_iterator_create(job_list);
+	while ((job_ptr = (struct job_record *) list_next(job_iterator)))
+		job_ptr->priority += prio_boost;
+	list_iterator_destroy(job_iterator);
+	lowest_prio += prio_boost;
 }
 
 
 /* After a node is returned to service, reset the priority of jobs
  * which may have been held due to that node being unavailable */
-void reset_job_priority(void)
+extern void reset_job_priority(void)
 {
 	ListIterator job_iterator;
 	struct job_record *job_ptr;
@@ -5681,6 +6434,33 @@ static bool _top_priority(struct job_record *job_ptr)
 	return top;
 }
 
+static void _merge_job_licenses(struct job_record *shrink_job_ptr,
+				struct job_record *expand_job_ptr)
+{
+	xassert(shrink_job_ptr);
+	xassert(expand_job_ptr);
+
+	if (!shrink_job_ptr->licenses)		/* No licenses to add */
+		return;
+
+	if (!expand_job_ptr->licenses) {	/* Just transfer licenses */
+		expand_job_ptr->licenses = shrink_job_ptr->licenses;
+		shrink_job_ptr->licenses = NULL;
+		FREE_NULL_LIST(expand_job_ptr->license_list);
+		expand_job_ptr->license_list = shrink_job_ptr->license_list;
+		shrink_job_ptr->license_list = NULL;
+		return;
+	}
+
+	/* Merge the license information into expanding job */
+	xstrcat(expand_job_ptr->licenses, ",");
+	xstrcat(expand_job_ptr->licenses, shrink_job_ptr->licenses);
+	xfree(shrink_job_ptr->licenses);
+	FREE_NULL_LIST(expand_job_ptr->license_list);
+	FREE_NULL_LIST(shrink_job_ptr->license_list);
+	license_job_merge(expand_job_ptr);
+	return;
+}
 
 /*
  * update_job - update a job's parameters per the supplied specifications
@@ -5693,6 +6473,7 @@ static bool _top_priority(struct job_record *job_ptr)
 int update_job(job_desc_msg_t * job_specs, uid_t uid)
 {
 	int error_code = SLURM_SUCCESS;
+	enum job_state_reason fail_reason;
 	bool authorized = false;
 	uint32_t save_min_nodes = 0, save_max_nodes = 0;
 	uint32_t save_min_cpus = 0, save_max_cpus = 0;
@@ -5715,11 +6496,11 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid)
 	uint16_t rotate = (uint16_t) NO_VAL;
 	uint16_t geometry[SYSTEM_DIMENSIONS] = {(uint16_t) NO_VAL};
 	char *image = NULL;
-	static uint32_t cpus_per_bp = 0;
+	static uint32_t cpus_per_mp = 0;
 	static uint16_t cpus_per_node = 0;
 
-	if (!cpus_per_bp)
-		select_g_alter_node_cnt(SELECT_GET_BP_CPU_CNT, &cpus_per_bp);
+	if (!cpus_per_mp)
+		select_g_alter_node_cnt(SELECT_GET_MP_CPU_CNT, &cpus_per_mp);
 	if (!cpus_per_node)
 		select_g_alter_node_cnt(SELECT_GET_NODE_CPU_CNT,
 					&cpus_per_node);
@@ -5781,6 +6562,140 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid)
 	if (error_code != SLURM_SUCCESS)
 		goto fini;
 
+	if (job_specs->exc_nodes) {
+		if ((!IS_JOB_PENDING(job_ptr)) || (detail_ptr == NULL))
+			error_code = ESLURM_DISABLED;
+		else if (job_specs->exc_nodes[0] == '\0') {
+			xfree(detail_ptr->exc_nodes);
+			FREE_NULL_BITMAP(detail_ptr->exc_node_bitmap);
+		} else {
+			if (node_name2bitmap(job_specs->exc_nodes, false,
+					     &exc_bitmap)) {
+				error("sched: Invalid node list for "
+				      "job_update: %s",job_specs->exc_nodes);
+				FREE_NULL_BITMAP(exc_bitmap);
+				error_code = ESLURM_INVALID_NODE_NAME;
+			}
+			if (exc_bitmap) {
+				xfree(detail_ptr->exc_nodes);
+				detail_ptr->exc_nodes =
+					job_specs->exc_nodes;
+				FREE_NULL_BITMAP(detail_ptr->exc_node_bitmap);
+				detail_ptr->exc_node_bitmap = exc_bitmap;
+				info("sched: update_job: setting exc_nodes to "
+				     "%s for job_id %u", job_specs->exc_nodes,
+				     job_specs->job_id);
+				job_specs->exc_nodes = NULL;
+			}
+		}
+	}
+	if (error_code != SLURM_SUCCESS)
+		goto fini;
+
+#ifndef HAVE_BG
+	if (job_specs->req_nodes &&
+	    (IS_JOB_RUNNING(job_ptr) || IS_JOB_SUSPENDED(job_ptr))) {
+		/* Use req_nodes to change the nodes associated with a running
+		 * for lack of other field in the job request to use */
+		if ((job_specs->req_nodes[0] == '\0') ||
+		    node_name2bitmap(job_specs->req_nodes,false, &req_bitmap) ||
+		    !bit_super_set(req_bitmap, job_ptr->node_bitmap) ||
+		    job_ptr->details->expanding_jobid) {
+			info("sched: Invalid node list (%s) for job %u update",
+			     job_specs->req_nodes, job_specs->job_id);
+			error_code = ESLURM_INVALID_NODE_NAME;
+			goto fini;
+		} else if (req_bitmap) {
+			int i, i_first, i_last;
+			struct node_record *node_ptr;
+			info("sched: update_job: setting nodes to %s for "
+			     "job_id %u",
+			     job_specs->req_nodes, job_specs->job_id);
+			job_pre_resize_acctg(job_ptr);
+			i_first = bit_ffs(job_ptr->node_bitmap);
+			i_last  = bit_fls(job_ptr->node_bitmap);
+			for (i=i_first; i<=i_last; i++) {
+				if (bit_test(req_bitmap, i) ||
+				    !bit_test(job_ptr->node_bitmap, i))
+					continue;
+				node_ptr = node_record_table_ptr + i;
+				kill_step_on_node(job_ptr, node_ptr, false);
+				excise_node_from_job(job_ptr, node_ptr);
+			}
+			job_post_resize_acctg(job_ptr);
+			/* Since job_post_resize_acctg will restart
+			 * things, don't do it again. */
+			update_accounting = false;
+		} else {
+			update_accounting = true;
+		}
+		FREE_NULL_BITMAP(req_bitmap);
+		xfree(job_specs->req_nodes);
+	}
+#endif
+
+	if (job_specs->req_nodes) {
+		if ((!IS_JOB_PENDING(job_ptr)) || (detail_ptr == NULL))
+			error_code = ESLURM_DISABLED;
+		else if (job_specs->req_nodes[0] == '\0') {
+			xfree(detail_ptr->req_nodes);
+			FREE_NULL_BITMAP(detail_ptr->req_node_bitmap);
+			xfree(detail_ptr->req_node_layout);
+		} else {
+			if (node_name2bitmap(job_specs->req_nodes, false,
+					     &req_bitmap)) {
+				info("sched: Invalid node list for "
+				     "job_update: %s", job_specs->req_nodes);
+				FREE_NULL_BITMAP(req_bitmap);
+				error_code = ESLURM_INVALID_NODE_NAME;
+			}
+			if (req_bitmap) {
+				xfree(detail_ptr->req_nodes);
+				detail_ptr->req_nodes =
+					job_specs->req_nodes;
+				FREE_NULL_BITMAP(detail_ptr->req_node_bitmap);
+				xfree(detail_ptr->req_node_layout);
+				detail_ptr->req_node_bitmap = req_bitmap;
+				info("sched: update_job: setting req_nodes to "
+				     "%s for job_id %u", job_specs->req_nodes,
+				     job_specs->job_id);
+				job_specs->req_nodes = NULL;
+			}
+		}
+	}
+	if (error_code != SLURM_SUCCESS)
+		goto fini;
+
+	if (job_specs->min_nodes == INFINITE) {
+		/* Used by scontrol just to get current configuration info */
+		job_specs->min_nodes = NO_VAL;
+	}
+#if defined(HAVE_BG) || defined(HAVE_CRAY)
+	if ((job_specs->min_nodes != NO_VAL) &&
+	    (IS_JOB_RUNNING(job_ptr) || IS_JOB_SUSPENDED(job_ptr))) {
+#else
+	if ((job_specs->min_nodes != NO_VAL) &&
+	    (job_specs->min_nodes > job_ptr->node_cnt) &&
+	    !select_g_job_expand_allow() &&
+	    (IS_JOB_RUNNING(job_ptr) || IS_JOB_SUSPENDED(job_ptr))) {
+#endif
+		info("Change of size for job %u not supported",
+		     job_specs->job_id);
+		error_code = ESLURM_NOT_SUPPORTED;
+		goto fini;
+	}
+
+	if (job_specs->req_switch != NO_VAL) {
+		job_ptr->req_switch = job_specs->req_switch;
+		info("Change of switches to %u job %u",
+		     job_specs->req_switch, job_specs->job_id);
+	}
+	if (job_specs->wait4switch != NO_VAL) {
+		job_ptr->wait4switch = _max_switch_wait(job_specs->wait4switch);
+		info("Change of switch wait to %u secs job %u",
+		     job_ptr->wait4switch, job_specs->job_id);
+	}
+
 	if (job_specs->partition) {
 		List part_ptr_list = NULL;
 
@@ -5844,7 +6759,7 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid)
 			goto fini;
 	}
 
-	/* Always do this last just incase the assoc_ptr changed */
+	/* Always do this last just in case the assoc_ptr changed */
 	if (job_specs->comment && wiki_sched && !validate_slurm_user(uid)) {
 		/* User must use Moab command to change job comment */
 		error("Attempt to change comment for job %u",
@@ -5879,7 +6794,11 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid)
 				}
 			}
 		}
-	} else if (job_specs->qos) {
+	}
+	if (error_code != SLURM_SUCCESS)
+		goto fini;
+
+	if (job_specs->qos) {
 		slurmdb_qos_rec_t qos_rec;
 		if (!IS_JOB_PENDING(job_ptr))
 			error_code = ESLURM_DISABLED;
@@ -5901,7 +6820,6 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid)
 	if (error_code != SLURM_SUCCESS)
 		goto fini;
 
-
 	if (!authorized && (accounting_enforce & ACCOUNTING_ENFORCE_LIMITS)) {
 		if (!acct_policy_validate(job_specs, job_ptr->part_ptr,
 					  job_ptr->assoc_ptr, job_ptr->qos_ptr,
@@ -6086,7 +7004,7 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid)
 	/* Reset min and max node counts as needed, insure consistency */
 	if (job_specs->min_nodes != NO_VAL) {
 		if (IS_JOB_RUNNING(job_ptr) || IS_JOB_SUSPENDED(job_ptr))
-			;	/* shrink running job, handle later */
+			;	/* shrink running job, processed later */
 		else if ((!IS_JOB_PENDING(job_ptr)) || (detail_ptr == NULL))
 			error_code = ESLURM_DISABLED;
 		else if (job_specs->min_nodes < 1) {
@@ -6094,6 +7012,7 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid)
 			     job_specs->job_id);
 			error_code = ESLURM_INVALID_NODE_COUNT;
 		} else {
+			/* Resize of pending job */
 			save_min_nodes = detail_ptr->min_nodes;
 			detail_ptr->min_nodes = job_specs->min_nodes;
 		}
@@ -6142,7 +7061,7 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid)
 	}
 
 	if (job_specs->time_limit != NO_VAL) {
-		if (IS_JOB_FINISHED(job_ptr))
+		if (IS_JOB_FINISHED(job_ptr) || job_ptr->preempt_time)
 			error_code = ESLURM_DISABLED;
 		else if (job_ptr->time_limit == job_specs->time_limit) {
 			debug("sched: update_job: new time limit identical to "
@@ -6277,15 +7196,27 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid)
 			error_code = ESLURM_DISABLED;
 		else if (job_ptr->priority == job_specs->priority) {
 			debug("update_job: setting priority to current value");
-			if ((job_ptr->priority == 0) && authorized) {
+			if ((job_ptr->priority == 0) &&
+			    (job_ptr->user_id != uid) && authorized) {
+				/* Authorized user can change from user hold
+				 * to admin hold or admin hold to user hold */
 				if (job_specs->alloc_sid == ALLOC_SID_USER_HOLD)
 					job_ptr->state_reason = WAIT_HELD_USER;
 				else
 					job_ptr->state_reason = WAIT_HELD;
 			}
+		} else if ((job_ptr->priority == 0) &&
+			   (job_ptr->state_reason == WAIT_HELD_USER)) {
+			job_ptr->direct_set_prio = 0;
+			_set_job_prio(job_ptr);
+			info("sched: update_job: releasing user hold "
+			     "for job_id %u", job_specs->job_id);
+			job_ptr->state_reason = WAIT_NO_REASON;
+			xfree(job_ptr->state_desc);
 		} else if (authorized ||
 			 (job_ptr->priority > job_specs->priority)) {
-			job_ptr->details->nice = NICE_OFFSET;
+			if (job_specs->priority != 0)
+				job_ptr->details->nice = NICE_OFFSET;
 			if (job_specs->priority == INFINITE) {
 				job_ptr->direct_set_prio = 0;
 				_set_job_prio(job_ptr);
@@ -6298,26 +7229,18 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid)
 			     job_specs->job_id);
 			update_accounting = true;
 			if (job_ptr->priority == 0) {
-				if (authorized &&
-				    (job_specs->alloc_sid !=
+				if ((job_ptr->user_id == uid) ||
+				    (job_specs->alloc_sid ==
 				     ALLOC_SID_USER_HOLD)) {
-					job_ptr->state_reason = WAIT_HELD;
-				} else
 					job_ptr->state_reason = WAIT_HELD_USER;
+				} else 
+					job_ptr->state_reason = WAIT_HELD;
 				xfree(job_ptr->state_desc);
 			} else if ((job_ptr->state_reason == WAIT_HELD) ||
 				   (job_ptr->state_reason == WAIT_HELD_USER)) {
 				job_ptr->state_reason = WAIT_NO_REASON;
 				xfree(job_ptr->state_desc);
 			}
-		} else if ((job_ptr->priority == 0) &&
-			   (job_ptr->state_reason == WAIT_HELD_USER)) {
-			job_ptr->direct_set_prio = 0;
-			_set_job_prio(job_ptr);
-			info("sched: update_job: releasing user hold "
-			     "for job_id %u", job_specs->job_id);
-			job_ptr->state_reason = WAIT_NO_REASON;
-			xfree(job_ptr->state_desc);
 		} else {
 			error("sched: Attempt to increase priority for job %u",
 			      job_specs->job_id);
@@ -6508,7 +7431,8 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid)
 
 	if (job_specs->gres) {
 		List tmp_gres_list = NULL;
-		if ((!IS_JOB_PENDING(job_ptr)) || (detail_ptr == NULL)) {
+		if ((!IS_JOB_PENDING(job_ptr)) || (detail_ptr == NULL) ||
+		    (detail_ptr->expanding_jobid != 0)) {
 			error_code = ESLURM_DISABLED;
 		} else if (job_specs->gres[0] == '\0') {
 			info("sched: update_job: cleared gres for job %u",
@@ -6565,116 +7489,72 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid)
 	if (error_code != SLURM_SUCCESS)
 		goto fini;
 
-	if (job_specs->exc_nodes) {
-		if ((!IS_JOB_PENDING(job_ptr)) || (detail_ptr == NULL))
-			error_code = ESLURM_DISABLED;
-		else if (job_specs->exc_nodes[0] == '\0') {
-			xfree(detail_ptr->exc_nodes);
-			FREE_NULL_BITMAP(detail_ptr->exc_node_bitmap);
-		} else {
-			if (node_name2bitmap(job_specs->exc_nodes, false,
-					     &exc_bitmap)) {
-				error("sched: Invalid node list for "
-				      "job_update: %s",job_specs->exc_nodes);
-				FREE_NULL_BITMAP(exc_bitmap);
-				error_code = ESLURM_INVALID_NODE_NAME;
-			}
-			if (exc_bitmap) {
-				xfree(detail_ptr->exc_nodes);
-				detail_ptr->exc_nodes =
-					job_specs->exc_nodes;
-				FREE_NULL_BITMAP(detail_ptr->exc_node_bitmap);
-				detail_ptr->exc_node_bitmap = exc_bitmap;
-				info("sched: update_job: setting exc_nodes to "
-				     "%s for job_id %u", job_specs->exc_nodes,
-				     job_specs->job_id);
-				job_specs->exc_nodes = NULL;
-			}
-		}
-	}
-	if (error_code != SLURM_SUCCESS)
-		goto fini;
-
-#ifndef HAVE_BG
-	if (job_specs->req_nodes &&
+	if ((job_specs->min_nodes != NO_VAL) &&
 	    (IS_JOB_RUNNING(job_ptr) || IS_JOB_SUSPENDED(job_ptr))) {
 		/* Use req_nodes to change the nodes associated with a running
 		 * for lack of other field in the job request to use */
-		if ((job_specs->req_nodes[0] == '\0') ||
-		    node_name2bitmap(job_specs->req_nodes,false, &req_bitmap) ||
-		    !bit_super_set(req_bitmap, job_ptr->node_bitmap)) {
-			info("sched: Invalid node list (%s) for job %u update",
-			     job_specs->req_nodes, job_specs->job_id);
-			error_code = ESLURM_INVALID_NODE_NAME;
-			goto fini;
-		} else if (req_bitmap) {
-			int i, i_first, i_last;
-			struct node_record *node_ptr;
-			info("sched: update_job: setting nodes to %s for "
-			     "job_id %u",
-			     job_specs->req_nodes, job_specs->job_id);
+		if ((job_specs->min_nodes == 0) && (job_ptr->node_cnt > 0) &&
+		    job_ptr->details && job_ptr->details->expanding_jobid) {
+			struct job_record *expand_job_ptr;
+			bitstr_t *orig_job_node_bitmap;
+
+			expand_job_ptr = find_job_record(job_ptr->details->
+							 expanding_jobid);
+			if (expand_job_ptr == NULL) {
+				info("Invalid node count (%u) for job %u "
+				     "update, job %u to expand not found",
+				     job_specs->min_nodes, job_specs->job_id,
+				     job_ptr->details->expanding_jobid);
+				error_code = ESLURM_INVALID_JOB_ID;
+				goto fini;
+			}
+			if (IS_JOB_SUSPENDED(job_ptr) ||
+			    IS_JOB_SUSPENDED(expand_job_ptr)) {
+				info("Can not expand job %u from job %u, "
+				     "job is suspended",
+				     expand_job_ptr->job_id, job_ptr->job_id);
+				error_code = ESLURM_JOB_SUSPENDED;
+				goto fini;
+			}
+			if ((job_ptr->step_list != NULL) &&
+			    (list_count(job_ptr->step_list) != 0)) {
+				info("Attempt to merge job %u with active "
+				     "steps into job %u",
+				     job_specs->job_id,
+				     job_ptr->details->expanding_jobid);
+				error_code = ESLURMD_STEP_EXISTS;
+				goto fini;
+			}
+			info("sched: killing job %u and moving all resources "
+			     "to job %u", job_specs->job_id,
+			     expand_job_ptr->job_id);
 			job_pre_resize_acctg(job_ptr);
-			i_first = bit_ffs(job_ptr->node_bitmap);
-			i_last  = bit_fls(job_ptr->node_bitmap);
-			for (i=i_first; i<=i_last; i++) {
-				if (bit_test(req_bitmap, i) ||
-				    !bit_test(job_ptr->node_bitmap, i))
-					continue;
-				node_ptr = node_record_table_ptr + i;
-				kill_step_on_node(job_ptr, node_ptr, false);
-				excise_node_from_job(job_ptr, node_ptr);
+			job_pre_resize_acctg(expand_job_ptr);
+			_send_job_kill(job_ptr);
+
+			xassert(job_ptr->job_resrcs);
+			xassert(job_ptr->job_resrcs->node_bitmap);
+			orig_job_node_bitmap = bit_copy(expand_job_ptr->
+							job_resrcs->
+							node_bitmap);
+			error_code = select_g_job_expand(job_ptr,
+							 expand_job_ptr);
+			if (error_code == SLURM_SUCCESS) {
+				_merge_job_licenses(job_ptr, expand_job_ptr);
+				rebuild_step_bitmaps(expand_job_ptr,
+						     orig_job_node_bitmap);
 			}
+			bit_free(orig_job_node_bitmap);
 			job_post_resize_acctg(job_ptr);
-			/* Since job_post_resize_acctg will restart
-			   things don't do it again. */
+			job_post_resize_acctg(expand_job_ptr);
+			/* Since job_post_resize_acctg will restart things,
+			 * don't do it again. */
 			update_accounting = false;
-		} else {
-			update_accounting = true;
-		}
-		FREE_NULL_BITMAP(req_bitmap);
-		xfree(job_specs->req_nodes);
-	}
-#endif
-
-	if (job_specs->req_nodes) {
-		if ((!IS_JOB_PENDING(job_ptr)) || (detail_ptr == NULL))
-			error_code = ESLURM_DISABLED;
-		else if (job_specs->req_nodes[0] == '\0') {
-			xfree(detail_ptr->req_nodes);
-			FREE_NULL_BITMAP(detail_ptr->req_node_bitmap);
-			xfree(detail_ptr->req_node_layout);
-		} else {
-			if (node_name2bitmap(job_specs->req_nodes, false,
-					     &req_bitmap)) {
-				info("sched: Invalid node list for "
-				     "job_update: %s", job_specs->req_nodes);
-				FREE_NULL_BITMAP(req_bitmap);
-				error_code = ESLURM_INVALID_NODE_NAME;
-			}
-			if (req_bitmap) {
-				xfree(detail_ptr->req_nodes);
-				detail_ptr->req_nodes =
-					job_specs->req_nodes;
-				FREE_NULL_BITMAP(detail_ptr->req_node_bitmap);
-				xfree(detail_ptr->req_node_layout);
-				detail_ptr->req_node_bitmap = req_bitmap;
-				info("sched: update_job: setting req_nodes to "
-				     "%s for job_id %u", job_specs->req_nodes,
-				     job_specs->job_id);
-				job_specs->req_nodes = NULL;
-			}
-		}
-	}
-	if (error_code != SLURM_SUCCESS)
-		goto fini;
-
-#ifndef HAVE_BG
-	if ((job_specs->min_nodes != NO_VAL) &&
-	    (IS_JOB_RUNNING(job_ptr) || IS_JOB_SUSPENDED(job_ptr))) {
-		/* Use req_nodes to change the nodes associated with a running
-		 * for lack of other field in the job request to use */
-		if ((job_specs->min_nodes == 0) ||
-		    (job_specs->min_nodes > job_ptr->node_cnt)) {
+			if (error_code)
+				goto fini;
+		} else if ((job_specs->min_nodes == 0) ||
+		           (job_specs->min_nodes > job_ptr->node_cnt) ||
+			   job_ptr->details->expanding_jobid) {
 			info("sched: Invalid node count (%u) for job %u update",
 			     job_specs->min_nodes, job_specs->job_id);
 			error_code = ESLURM_INVALID_NODE_COUNT;
@@ -6705,11 +7585,10 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid)
 			     "job_id %u",
 			     job_ptr->nodes, job_specs->job_id);
 			/* Since job_post_resize_acctg will restart
-			   things don't do it again. */
+			 * things don't do it again. */
 			update_accounting = false;
 		}
 	}
-#endif
 
 	if (job_specs->ntasks_per_node != (uint16_t) NO_VAL) {
 		if ((!IS_JOB_PENDING(job_ptr)) || (detail_ptr == NULL))
@@ -6739,6 +7618,8 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid)
 			if (rc != SLURM_SUCCESS)
 				error_code = rc;
 			else {
+				job_ptr->details->orig_dependency =
+					xstrdup(job_ptr->details->dependency);
 				info("sched: update_job: setting dependency to "
 				     "%s for job_id %u",
 				     job_ptr->details->dependency,
@@ -6751,12 +7632,11 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid)
 
 	if (job_specs->begin_time) {
 		if (IS_JOB_PENDING(job_ptr) && detail_ptr) {
-			/* Make sure this time is current, it does no
-			   good for accounting to say this job could had
-			   started in the past since the job isn't
-			   going to start until now as the earliest.
-			*/
-			if(job_specs->begin_time < now)
+			char time_str[32];
+			/* Make sure this time is current, it does no good for
+			 * accounting to say this job could have started before
+			 * now */
+			if (job_specs->begin_time < now)
 				job_specs->begin_time = now;
 
 			detail_ptr->begin_time = job_specs->begin_time;
@@ -6764,6 +7644,11 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid)
 			if ((job_ptr->priority == 1) &&
 			    (detail_ptr->begin_time <= now))
 				_set_job_prio(job_ptr);
+			slurm_make_time_str(&detail_ptr->begin_time, time_str,
+					    sizeof(time_str));
+			info("sched: update_job: setting begin to %s for "
+			     "job_id %u",
+			     time_str, job_ptr->job_id);
 		} else {
 			error_code = ESLURM_DISABLED;
 			goto fini;
@@ -6780,23 +7665,24 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid)
 			     job_specs->licenses);
 			error_code = ESLURM_INVALID_LICENSES;
 		} else if (IS_JOB_PENDING(job_ptr)) {
-			if (job_ptr->license_list)
-				list_destroy(job_ptr->license_list);
+			FREE_NULL_LIST(job_ptr->license_list);
 			job_ptr->license_list = license_list;
+			info("sched: update_job: changing licenses from '%s' "
+			     "to '%s' for pending job %u",
+			     job_ptr->licenses, job_specs->licenses,
+			     job_ptr->job_id);
 			xfree(job_ptr->licenses);
 			job_ptr->licenses = job_specs->licenses;
 			job_specs->licenses = NULL; /* nothing to free */
-			info("sched: update_job: setting licenses to %s for "
-			     "job %u", job_ptr->licenses, job_ptr->job_id);
-		} else if (IS_JOB_RUNNING(job_ptr) && authorized) {
+		} else if (IS_JOB_RUNNING(job_ptr) &&
+			   (authorized || (license_list == NULL))) {
 			/* NOTE: This can result in oversubscription of
 			 * licenses */
 			license_job_return(job_ptr);
-			if (job_ptr->license_list)
-				list_destroy(job_ptr->license_list);
+			FREE_NULL_LIST(job_ptr->license_list);
 			job_ptr->license_list = license_list;
-			info("sched: update_job: changing licenses from %s to "
-			     "%s for  running job %u",
+			info("sched: update_job: changing licenses from '%s' "
+			     "to '%s' for running job %u",
 			     job_ptr->licenses, job_specs->licenses,
 			     job_ptr->job_id);
 			xfree(job_ptr->licenses);
@@ -6809,12 +7695,30 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid)
 			info("sched: update_job: could not change licenses "
 			     "for job %u", job_ptr->job_id);
 			error_code = ESLURM_DISABLED;
-			list_destroy(license_list);
+			FREE_NULL_LIST(license_list);
 		}
 	}
 	if (error_code != SLURM_SUCCESS)
 		goto fini;
 
+	fail_reason = job_limits_check(&job_ptr);
+	if (fail_reason != WAIT_NO_REASON) {
+		if (fail_reason == WAIT_QOS_THRES)
+			error_code = ESLURM_QOS_THRES;
+		else if (fail_reason == WAIT_PART_TIME_LIMIT ||
+			 fail_reason == WAIT_PART_NODE_LIMIT)
+			error_code = SLURM_SUCCESS;
+		else
+			error_code = ESLURM_REQUESTED_PART_CONFIG_UNAVAILABLE;
+		job_ptr->priority = 1;      /* Move to end of queue */
+		job_ptr->state_reason = fail_reason;
+		xfree(job_ptr->state_desc);
+		return error_code;
+	} else if ((job_ptr->state_reason != WAIT_HELD) &&
+		   (job_ptr->state_reason != WAIT_HELD_USER)) {
+		job_ptr->state_reason = WAIT_NO_REASON;
+	}
+
 #ifdef HAVE_BG
 	select_g_select_jobinfo_get(job_specs->select_jobinfo,
 				    SELECT_JOBDATA_CONN_TYPE, &conn_type);
@@ -6823,7 +7727,7 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid)
 			error_code = ESLURM_DISABLED;
 		else {
 			if((conn_type >= SELECT_SMALL)
-			   && (detail_ptr->min_cpus >= cpus_per_bp)) {
+			   && (detail_ptr->min_cpus >= cpus_per_mp)) {
 				info("update_job: could not change "
 				     "conn_type to '%s' because cpu "
 				     "count is %u for job %u making "
@@ -6834,7 +7738,7 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid)
 				error_code = ESLURM_INVALID_NODE_COUNT;
 			} else if(((conn_type == SELECT_TORUS)
 				   || (conn_type == SELECT_MESH))
-				  && (detail_ptr->min_cpus < cpus_per_bp)) {
+				  && (detail_ptr->min_cpus < cpus_per_mp)) {
 				info("update_job: could not change "
 				     "conn_type to '%s' because cpu "
 				     "count is %u for job %u making "
@@ -6862,9 +7766,9 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid)
 				    SELECT_JOBDATA_CONN_TYPE, &conn_type);
 	if(detail_ptr &&
 	   (((conn_type >= SELECT_SMALL)
-	     && (detail_ptr->min_cpus >= cpus_per_bp))
+	     && (detail_ptr->min_cpus >= cpus_per_mp))
 	    || (((conn_type == SELECT_TORUS)|| (conn_type == SELECT_MESH))
-		&& (detail_ptr->min_cpus < cpus_per_bp)))) {
+		&& (detail_ptr->min_cpus < cpus_per_mp)))) {
 		info("update_job: With cpu count at %u our conn_type "
 		     "of '%s' is invalid for job %u.",
 		     detail_ptr->min_cpus,
@@ -7009,9 +7913,84 @@ fini:
 						    job_ptr);
 		}
 	}
+
+	/* If job update is successful and priority is calculated (not only
+	 * based upon job submit order), recalculate the job priority, since
+	 * many factors of an update may affect priority considerations. */
+	if ((error_code == SLURM_SUCCESS) &&
+	    strcmp(slurmctld_conf.priority_type, "priority/basic"))
+		_set_job_prio(job_ptr);
+
 	return error_code;
 }
 
+static void _send_job_kill(struct job_record *job_ptr)
+{
+	kill_job_msg_t *kill_job = NULL;
+	agent_arg_t *agent_args = NULL;
+#ifdef HAVE_FRONT_END
+	front_end_record_t *front_end_ptr;
+#else
+	int i;
+	struct node_record *node_ptr;
+#endif
+
+	xassert(job_ptr);
+	xassert(job_ptr->details);
+
+	agent_args = xmalloc(sizeof(agent_arg_t));
+	agent_args->msg_type = REQUEST_TERMINATE_JOB;
+	agent_args->retry = 0;	/* re_kill_job() resends as needed */
+	agent_args->hostlist = hostlist_create("");
+	if (agent_args->hostlist == NULL)
+		fatal("hostlist_create: malloc failure");
+	kill_job = xmalloc(sizeof(kill_job_msg_t));
+	last_node_update    = time(NULL);
+	kill_job->job_id    = job_ptr->job_id;
+	kill_job->step_id   = NO_VAL;
+	kill_job->job_state = job_ptr->job_state;
+	kill_job->job_uid   = job_ptr->user_id;
+	kill_job->nodes     = xstrdup(job_ptr->nodes);
+	kill_job->time      = time(NULL);
+	kill_job->start_time = job_ptr->start_time;
+	kill_job->select_jobinfo = select_g_select_jobinfo_copy(
+			job_ptr->select_jobinfo);
+	kill_job->spank_job_env = xduparray(job_ptr->spank_job_env_size,
+					    job_ptr->spank_job_env);
+	kill_job->spank_job_env_size = job_ptr->spank_job_env_size;
+
+#ifdef HAVE_FRONT_END
+	if (job_ptr->batch_host &&
+	    (front_end_ptr = job_ptr->front_end_ptr)) {
+		hostlist_push(agent_args->hostlist, job_ptr->batch_host);
+		agent_args->node_count++;
+	}
+#else
+	for (i = 0, node_ptr = node_record_table_ptr;
+	     i < node_record_count; i++, node_ptr++) {
+		if (!bit_test(job_ptr->node_bitmap, i))
+			continue;
+		hostlist_push(agent_args->hostlist, node_ptr->name);
+		agent_args->node_count++;
+	}
+#endif
+	if (agent_args->node_count == 0) {
+		if (job_ptr->details->expanding_jobid == 0) {
+			error("Job %u allocated no nodes to be killed on",
+			      job_ptr->job_id);
+		}
+		xfree(kill_job->nodes);
+		xfree(kill_job);
+		hostlist_destroy(agent_args->hostlist);
+		xfree(agent_args);
+		return;
+	}
+
+	agent_args->msg_args = kill_job;
+	agent_queue_request(agent_args);
+	return;
+}
+
 /* Record accounting information for a job immediately before changing size */
 extern void job_pre_resize_acctg(struct job_record *job_ptr)
 {
@@ -7103,7 +8082,7 @@ validate_jobs_on_node(slurm_node_registration_status_msg_t *reg_msg)
 			      reg_msg->job_id[i], reg_msg->step_id[i],
 			      reg_msg->node_name);
 			abort_job_on_node(reg_msg->job_id[i],
-					  job_ptr, node_ptr);
+					  job_ptr, node_ptr->name);
 		}
 
 		else if (IS_JOB_RUNNING(job_ptr) ||
@@ -7134,7 +8113,7 @@ validate_jobs_on_node(slurm_node_registration_status_msg_t *reg_msg)
 				      reg_msg->step_id[i],
 				      reg_msg->node_name);
 				abort_job_on_node(reg_msg->job_id[i], job_ptr,
-						node_ptr);
+						node_ptr->name);
 			}
 		}
 
@@ -7153,7 +8132,7 @@ validate_jobs_on_node(slurm_node_registration_status_msg_t *reg_msg)
 			      reg_msg->job_id[i], reg_msg->step_id[i],
 			      reg_msg->node_name);
 			abort_job_on_node(reg_msg->job_id[i],
-					  job_ptr, node_ptr);
+					  job_ptr, node_ptr->name);
 		}
 
 		else {		/* else job is supposed to be done */
@@ -7287,22 +8266,19 @@ static void _notify_srun_missing_step(struct job_record *job_ptr, int node_inx,
  *	agent request per node as they register.
  * IN job_id - id of the job to be killed
  * IN job_ptr - pointer to terminating job (NULL if unknown, e.g. orphaned)
- * IN node_ptr - pointer to the node on which the job resides
+ * IN node_name - name of the node on which the job resides
  */
 extern void
-abort_job_on_node(uint32_t job_id, struct job_record *job_ptr,
-		  struct node_record *node_ptr)
+abort_job_on_node(uint32_t job_id, struct job_record *job_ptr, char *node_name)
 {
 	agent_arg_t *agent_info;
 	kill_job_msg_t *kill_req;
 
-	debug("Aborting job %u on node %s", job_id, node_ptr->name);
-
 	kill_req = xmalloc(sizeof(kill_job_msg_t));
 	kill_req->job_id	= job_id;
 	kill_req->step_id	= NO_VAL;
 	kill_req->time          = time(NULL);
-	kill_req->nodes		= xstrdup(node_ptr->name);
+	kill_req->nodes		= xstrdup(node_name);
 	if (job_ptr) {  /* NULL if unknown */
 		kill_req->start_time = job_ptr->start_time;
 		kill_req->select_jobinfo =
@@ -7317,7 +8293,13 @@ abort_job_on_node(uint32_t job_id, struct job_record *job_ptr,
 	agent_info = xmalloc(sizeof(agent_arg_t));
 	agent_info->node_count	= 1;
 	agent_info->retry	= 0;
-	agent_info->hostlist	= hostlist_create(node_ptr->name);
+	agent_info->hostlist	= hostlist_create(node_name);
+#ifdef HAVE_FRONT_END
+	debug("Aborting job %u on front end node %s", job_id, node_name);
+#else
+
+	debug("Aborting job %u on node %s", job_id, node_name);
+#endif
 	agent_info->msg_type	= REQUEST_ABORT_JOB;
 	agent_info->msg_args	= kill_req;
 
@@ -7337,8 +8319,6 @@ kill_job_on_node(uint32_t job_id, struct job_record *job_ptr,
 	agent_arg_t *agent_info;
 	kill_job_msg_t *kill_req;
 
-	debug("Killing job %u on node %s", job_id, node_ptr->name);
-
 	kill_req = xmalloc(sizeof(kill_job_msg_t));
 	kill_req->job_id	= job_id;
 	kill_req->step_id	= NO_VAL;
@@ -7357,7 +8337,15 @@ kill_job_on_node(uint32_t job_id, struct job_record *job_ptr,
 	agent_info = xmalloc(sizeof(agent_arg_t));
 	agent_info->node_count	= 1;
 	agent_info->retry	= 0;
+#ifdef HAVE_FRONT_END
+	xassert(job_ptr->batch_host);
+	agent_info->hostlist	= hostlist_create(job_ptr->batch_host);
+	debug("Killing job %u on front end node %s", job_id,
+	      job_ptr->batch_host);
+#else
 	agent_info->hostlist	= hostlist_create(node_ptr->name);
+	debug("Killing job %u on node %s", job_id, node_ptr->name);
+#endif
 	agent_info->msg_type	= REQUEST_TERMINATE_JOB;
 	agent_info->msg_args	= kill_req;
 
@@ -7517,28 +8505,35 @@ static void _remove_defunct_batch_dirs(List batch_dirs)
 static void
 _xmit_new_end_time(struct job_record *job_ptr)
 {
+#ifndef HAVE_FRONT_END
+	int i;
+#endif
 	job_time_msg_t *job_time_msg_ptr;
 	agent_arg_t *agent_args;
-	int i;
 
 	agent_args = xmalloc(sizeof(agent_arg_t));
 	agent_args->msg_type = REQUEST_UPDATE_JOB_TIME;
 	agent_args->retry = 1;
 	agent_args->hostlist = hostlist_create("");
+	if (agent_args->hostlist == NULL)
+		fatal("hostlist_create: malloc failure");
 	job_time_msg_ptr = xmalloc(sizeof(job_time_msg_t));
 	job_time_msg_ptr->job_id          = job_ptr->job_id;
 	job_time_msg_ptr->expiration_time = job_ptr->end_time;
 
+#ifdef HAVE_FRONT_END
+	xassert(job_ptr->batch_host);
+	hostlist_push(agent_args->hostlist, job_ptr->batch_host);
+	agent_args->node_count  = 1;
+#else
 	for (i = 0; i < node_record_count; i++) {
 		if (bit_test(job_ptr->node_bitmap, i) == 0)
 			continue;
 		hostlist_push(agent_args->hostlist,
 			      node_record_table_ptr[i].name);
 		agent_args->node_count++;
-#ifdef HAVE_FRONT_END		/* operate only on front-end node */
-		break;
-#endif
 	}
+#endif
 
 	agent_args->msg_args = job_time_msg_ptr;
 	agent_queue_request(agent_args);
@@ -7557,6 +8552,9 @@ _xmit_new_end_time(struct job_record *job_ptr)
 extern bool job_epilog_complete(uint32_t job_id, char *node_name,
 		uint32_t return_code)
 {
+#ifdef HAVE_FRONT_END
+	int i;
+#endif
 	struct job_record  *job_ptr = find_job_record(job_id);
 	struct node_record *node_ptr;
 
@@ -7572,6 +8570,7 @@ extern bool job_epilog_complete(uint32_t job_id, char *node_name,
 	 * hasn't really started. Very rare obviously. */
 	if ((IS_JOB_PENDING(job_ptr) && (!IS_JOB_COMPLETING(job_ptr))) ||
 	    (job_ptr->node_bitmap == NULL)) {
+#ifndef HAVE_FRONT_END
 		uint16_t base_state = NODE_STATE_UNKNOWN;
 		node_ptr = find_node_record(node_name);
 		if (node_ptr)
@@ -7583,26 +8582,49 @@ extern bool job_epilog_complete(uint32_t job_id, char *node_name,
 			error("Epilog complete response for non-running job "
 			      "%u, slurmctld and slurmd out of sync", job_id);
 		}
+#endif
 		return false;
 	}
 
-#ifdef HAVE_FRONT_END		/* operate only on front-end node */
-{
-	int i;
+#ifdef HAVE_FRONT_END
+	xassert(job_ptr->batch_host);
+	if (return_code) {
+		error("Epilog error for job %u on %s, setting DOWN",
+		      job_ptr->job_id, job_ptr->batch_host);
+		if (job_ptr->front_end_ptr) {
+			set_front_end_down(job_ptr->front_end_ptr,
+					  "Epilog error");
+		}
+	} else if (job_ptr->front_end_ptr && IS_JOB_COMPLETING(job_ptr)) {
+		front_end_record_t *front_end_ptr = job_ptr->front_end_ptr;
+		if (front_end_ptr->job_cnt_comp)
+			front_end_ptr->job_cnt_comp--;
+		else {
+			error("job_cnt_comp underflow for for job %u on "
+			      "front end %s",
+			      job_ptr->job_id, front_end_ptr->name);
+		}
+		if (front_end_ptr->job_cnt_comp == 0)
+			front_end_ptr->node_state &= (~NODE_STATE_COMPLETING);
+	}
 
-	if (return_code)
-		error("Epilog error on %s, setting DOWN",
-			job_ptr->nodes);
-	for (i=0; i<node_record_count; i++) {
-		if (!bit_test(job_ptr->node_bitmap, i))
-			continue;
-		node_ptr = &node_record_table_ptr[i];
-		if (return_code)
-			set_node_down(node_ptr->name, "Epilog error");
-		else
-			make_node_idle(node_ptr, job_ptr);
+	if ((job_ptr->total_nodes == 0) && IS_JOB_COMPLETING(job_ptr)) {
+		/* Job resources moved into another job and
+		 *  tasks already killed */
+		front_end_record_t *front_end_ptr = job_ptr->front_end_ptr;
+		if (front_end_ptr)
+			front_end_ptr->node_state &= (~NODE_STATE_COMPLETING);
+	} else {
+		for (i = 0; i < node_record_count; i++) {
+			if (!bit_test(job_ptr->node_bitmap, i))
+				continue;
+			node_ptr = &node_record_table_ptr[i];
+			if (return_code)
+				set_node_down_ptr(node_ptr, "Epilog error");
+			else
+				make_node_idle(node_ptr, job_ptr);
+		}
 	}
-}
 #else
 	if (return_code) {
 		error("Epilog error on %s, setting DOWN", node_name);
@@ -7620,18 +8642,14 @@ extern bool job_epilog_complete(uint32_t job_id, char *node_name,
 	if (!IS_JOB_COMPLETING(job_ptr)) {	/* COMPLETED */
 		if (IS_JOB_PENDING(job_ptr) && (job_ptr->batch_flag)) {
 			info("requeue batch job %u", job_ptr->job_id);
-			/* Clear everything so this appears to
-			   be a new job and then restart it
-			   up in accounting.
-			*/
+			/* Clear everything so this appears to be a new job
+			 * and then restart it in accounting. */
 			job_ptr->start_time = job_ptr->end_time = 0;
 			job_ptr->total_cpus = 0;
 			/* Current code (<= 2.1) has it so we start the new
-			   job with the next step id.  This could be
-			   used when restarting to figure out which
-			   step the previous run of this job stopped
-			   on.
-			*/
+			 * job with the next step id.  This could be used
+			 * when restarting to figure out which step the
+			 * previous run of this job stopped on. */
 
 			//job_ptr->next_step_id = 0;
 			job_ptr->node_cnt = 0;
@@ -7676,7 +8694,6 @@ void job_fini (void)
 extern void job_completion_logger(struct job_record  *job_ptr, bool requeue)
 {
 	int base_state;
-	bool sent_start = false;
 
 	xassert(job_ptr);
 
@@ -7698,7 +8715,7 @@ extern void job_completion_logger(struct job_record  *job_ptr, bool requeue)
 				mail_job_info(job_ptr, MAIL_JOB_REQUEUE);
 			if (!requeue && (job_ptr->mail_type & MAIL_JOB_END))
 				mail_job_info(job_ptr, MAIL_JOB_END);
-		} else {	/* JOB_FAILED, JOB_NODE_FAIL, or JOB_TIMEOUT */
+		} else {	/* JOB_FAILED, JOB_TIMEOUT, etc. */
 			if (job_ptr->mail_type & MAIL_JOB_FAIL)
 				mail_job_info(job_ptr, MAIL_JOB_FAIL);
 			else if (job_ptr->mail_type & MAIL_JOB_END)
@@ -7729,7 +8746,6 @@ extern void job_completion_logger(struct job_record  *job_ptr, bool requeue)
 			/* we have to call job start again because the
 			 * associd does not get updated in job complete */
 			jobacct_storage_g_job_start(acct_db_conn, job_ptr);
-			sent_start = true;
 		}
 	}
 
@@ -7771,6 +8787,7 @@ extern bool job_independent(struct job_record *job_ptr, int will_run)
 		job_ptr->end_time	= now;
 		srun_allocate_abort(job_ptr);
 		job_completion_logger(job_ptr, false);
+		srun_allocate_abort(job_ptr);
 		return false;
 	}
 
@@ -7840,28 +8857,35 @@ extern int job_node_ready(uint32_t job_id, int *ready)
 /* Send specified signal to all steps associated with a job */
 static void _signal_job(struct job_record *job_ptr, int signal)
 {
+#ifndef HAVE_FRONT_END
+	int i;
+#endif
 	agent_arg_t *agent_args = NULL;
 	signal_job_msg_t *signal_job_msg = NULL;
-	int i;
 
 	agent_args = xmalloc(sizeof(agent_arg_t));
 	agent_args->msg_type = REQUEST_SIGNAL_JOB;
 	agent_args->retry = 1;
 	agent_args->hostlist = hostlist_create("");
+	if (agent_args->hostlist == NULL)
+		fatal("hostlist_create: malloc failure");
 	signal_job_msg = xmalloc(sizeof(kill_tasks_msg_t));
 	signal_job_msg->job_id = job_ptr->job_id;
 	signal_job_msg->signal = signal;
 
+#ifdef HAVE_FRONT_END
+	xassert(job_ptr->batch_host);
+	hostlist_push(agent_args->hostlist, job_ptr->batch_host);
+	agent_args->node_count = 1;
+#else
 	for (i = 0; i < node_record_count; i++) {
 		if (bit_test(job_ptr->node_bitmap, i) == 0)
 			continue;
 		hostlist_push(agent_args->hostlist,
 			      node_record_table_ptr[i].name);
 		agent_args->node_count++;
-#ifdef HAVE_FRONT_END	/* Operate only on front-end */
-		break;
-#endif
 	}
+#endif
 
 	if (agent_args->node_count == 0) {
 		xfree(signal_job_msg);
@@ -7877,9 +8901,11 @@ static void _signal_job(struct job_record *job_ptr, int signal)
 /* Send suspend request to slumrd of all nodes associated with a job */
 static void _suspend_job(struct job_record *job_ptr, uint16_t op)
 {
+#ifndef HAVE_FRONT_END
+	int i;
+#endif
 	agent_arg_t *agent_args;
 	suspend_msg_t *sus_ptr;
-	int i;
 
 	agent_args = xmalloc(sizeof(agent_arg_t));
 	agent_args->msg_type = REQUEST_SUSPEND;
@@ -7888,20 +8914,25 @@ static void _suspend_job(struct job_record *job_ptr, uint16_t op)
 				 * quickly induce huge backlog
 				 * of agent.c RPCs */
 	agent_args->hostlist = hostlist_create("");
+	if (agent_args->hostlist == NULL)
+		fatal("hostlist_create: malloc failure");
 	sus_ptr = xmalloc(sizeof(suspend_msg_t));
 	sus_ptr->job_id = job_ptr->job_id;
 	sus_ptr->op = op;
 
+#ifdef HAVE_FRONT_END
+	xassert(job_ptr->batch_host);
+	hostlist_push(agent_args->hostlist, job_ptr->batch_host);
+	agent_args->node_count = 1;
+#else
 	for (i = 0; i < node_record_count; i++) {
 		if (bit_test(job_ptr->node_bitmap, i) == 0)
 			continue;
 		hostlist_push(agent_args->hostlist,
 			      node_record_table_ptr[i].name);
 		agent_args->node_count++;
-#ifdef HAVE_FRONT_END	/* Operate only on front-end */
-		break;
-#endif
 	}
+#endif
 
 	if (agent_args->node_count == 0) {
 		xfree(sus_ptr);
@@ -7913,16 +8944,21 @@ static void _suspend_job(struct job_record *job_ptr, uint16_t op)
 	agent_queue_request(agent_args);
 	return;
 }
-/* Specified job is being suspended, release allocated nodes */
-static int _suspend_job_nodes(struct job_record *job_ptr, bool clear_prio)
+
+/*
+ * Specified job is being suspended, release allocated nodes
+ * job_ptr IN - job to be suspended
+ * indf_susp IN - set if job is being suspended indefinitely by user
+ *                or admin, otherwise suspended for gang scheduling
+ */
+static int _suspend_job_nodes(struct job_record *job_ptr, bool indf_susp)
 {
 	int i, rc = SLURM_SUCCESS;
 	struct node_record *node_ptr = node_record_table_ptr;
 	uint16_t node_flags;
 	time_t now = time(NULL);
 
-	if (clear_prio &&
-	    (rc = select_g_job_suspend(job_ptr)) != SLURM_SUCCESS)
+	if ((rc = select_g_job_suspend(job_ptr, indf_susp)) != SLURM_SUCCESS)
 		return rc;
 
 	for (i=0; i<node_record_count; i++, node_ptr++) {
@@ -7966,15 +9002,19 @@ static int _suspend_job_nodes(struct job_record *job_ptr, bool clear_prio)
 	return rc;
 }
 
-/* Specified job is being resumed, re-allocate the nodes */
-static int _resume_job_nodes(struct job_record *job_ptr, bool clear_prio)
+/*
+ * Specified job is being resumed, re-allocate the nodes
+ * job_ptr IN - job to be resumed
+ * indf_susp IN - set i f job is being resumed from indefinite suspend by user
+ *                or admin, otherwise resume from gang scheduling
+ */
+static int _resume_job_nodes(struct job_record *job_ptr, bool indf_susp)
 {
 	int i, rc = SLURM_SUCCESS;
 	struct node_record *node_ptr = node_record_table_ptr;
 	uint16_t node_flags;
 
-	if (clear_prio &&
-	    (rc = select_g_job_resume(job_ptr)) != SLURM_SUCCESS)
+	if ((rc = select_g_job_resume(job_ptr, indf_susp)) != SLURM_SUCCESS)
 		return rc;
 
 	for (i=0; i<node_record_count; i++, node_ptr++) {
@@ -8010,23 +9050,20 @@ static int _resume_job_nodes(struct job_record *job_ptr, bool clear_prio)
 	return rc;
 }
 
-
 /*
  * job_suspend - perform some suspend/resume operation
  * IN sus_ptr - suspend/resume request message
  * IN uid - user id of the user issuing the RPC
  * IN conn_fd - file descriptor on which to send reply,
  *              -1 if none
- * IN clear_prio - if set, then clear the job's priority after
- *		   suspending it, this is used to distinguish
- *		   jobs explicitly suspended by admins/users from
- *		   jobs suspended through automatic preemption
- *		   (the gang scheduler)
+ * indf_susp IN - set if job is being suspended indefinitely by user or admin
+ *                and we should clear it's priority, otherwise suspended
+ *		  temporarily for gang scheduling
  * IN protocol_version - slurm protocol version of client
  * RET 0 on success, otherwise ESLURM error code
  */
 extern int job_suspend(suspend_msg_t *sus_ptr, uid_t uid,
-		       slurm_fd_t conn_fd, bool clear_prio,
+		       slurm_fd_t conn_fd, bool indf_susp,
 		       uint16_t protocol_version)
 {
 	int rc = SLURM_SUCCESS;
@@ -8071,18 +9108,21 @@ extern int job_suspend(suspend_msg_t *sus_ptr, uid_t uid,
 		goto reply;
 	}
 
+	/* Notify salloc/srun of suspend/resume */
+	srun_job_suspend(job_ptr, sus_ptr->op);
+
 	/* perform the operation */
 	if (sus_ptr->op == SUSPEND_JOB) {
 		if (!IS_JOB_RUNNING(job_ptr)) {
 			rc = ESLURM_DISABLED;
 			goto reply;
 		}
-		rc = _suspend_job_nodes(job_ptr, clear_prio);
+		rc = _suspend_job_nodes(job_ptr, indf_susp);
 		if (rc != SLURM_SUCCESS)
 			goto reply;
 		_suspend_job(job_ptr, sus_ptr->op);
 		job_ptr->job_state = JOB_SUSPENDED;
-		if (clear_prio)
+		if (indf_susp)
 			job_ptr->priority = 0;
 		if (job_ptr->suspend_time) {
 			job_ptr->pre_sus_time +=
@@ -8099,7 +9139,7 @@ extern int job_suspend(suspend_msg_t *sus_ptr, uid_t uid,
 			rc = ESLURM_DISABLED;
 			goto reply;
 		}
-		rc = _resume_job_nodes(job_ptr, clear_prio);
+		rc = _resume_job_nodes(job_ptr, indf_susp);
 		if (rc != SLURM_SUCCESS)
 			goto reply;
 		_suspend_job(job_ptr, sus_ptr->op);
@@ -8149,10 +9189,11 @@ extern int job_suspend(suspend_msg_t *sus_ptr, uid_t uid,
  * IN job_id - id of the job to be requeued
  * IN conn_fd - file descriptor on which to send reply
  * IN protocol_version - slurm protocol version of client
+ * IN preempt - true if job being preempted
  * RET 0 on success, otherwise ESLURM error code
  */
 extern int job_requeue (uid_t uid, uint32_t job_id, slurm_fd_t conn_fd,
-			uint16_t protocol_version)
+			uint16_t protocol_version, bool preempt)
 {
 	int rc = SLURM_SUCCESS;
 	struct job_record *job_ptr = NULL;
@@ -8184,6 +9225,8 @@ extern int job_requeue (uid_t uid, uint32_t job_id, slurm_fd_t conn_fd,
 		goto reply;
 	}
 	if (IS_JOB_COMPLETING(job_ptr)) {
+		if (IS_JOB_PENDING(job_ptr))
+			goto reply;	/* already requeued */
 		rc = ESLURM_TRANSITION_STATE_NO_UPDATE;
 		goto reply;
 	}
@@ -8230,7 +9273,7 @@ extern int job_requeue (uid_t uid, uint32_t job_id, slurm_fd_t conn_fd,
 	 * job looks like a new job. */
 	job_ptr->job_state  = JOB_CANCELLED;
 	build_cg_bitmap(job_ptr);
-	deallocate_nodes(job_ptr, false, suspended);
+	deallocate_nodes(job_ptr, false, suspended, preempt);
 	xfree(job_ptr->details->req_node_layout);
 	job_completion_logger(job_ptr, true);
 	job_ptr->db_index = 0;
@@ -8354,7 +9397,7 @@ extern int job_cancel_by_assoc_id(uint32_t assoc_id)
 		info("Association deleted, cancelling job %u",
 		     job_ptr->job_id);
 		/* make sure the assoc_mgr_lock isn't locked before this. */
-		job_signal(job_ptr->job_id, SIGKILL, 0, 0);
+		job_signal(job_ptr->job_id, SIGKILL, 0, 0, false);
 		job_ptr->state_reason = FAIL_ACCOUNT;
 		xfree(job_ptr->state_desc);
 		cnt++;
@@ -8408,7 +9451,7 @@ extern int job_cancel_by_qos_id(uint32_t qos_id)
 		info("QOS deleted, cancelling job %u",
 		     job_ptr->job_id);
 		/* make sure the assoc_mgr_lock isn't locked before this. */
-		job_signal(job_ptr->job_id, SIGKILL, 0, 0);
+		job_signal(job_ptr->job_id, SIGKILL, 0, 0, false);
 		job_ptr->state_reason = FAIL_QOS;
 		xfree(job_ptr->state_desc);
 		cnt++;
@@ -8695,7 +9738,7 @@ extern int job_checkpoint(checkpoint_msg_t *ckpt_ptr, uid_t uid,
 			} else {
 				image_dir = xstrdup(step_ptr->ckpt_dir);
 			}
-			xstrfmtcat(image_dir, "/%u.%hu", job_ptr->job_id,
+			xstrfmtcat(image_dir, "/%u.%u", job_ptr->job_id,
 				   step_ptr->step_id);
 			update_rc = checkpoint_op(ckpt_ptr->job_id,
 						  step_ptr->step_id,
@@ -9196,9 +10239,12 @@ extern void build_cg_bitmap(struct job_record *job_ptr)
 	FREE_NULL_BITMAP(job_ptr->node_bitmap_cg);
 	if (job_ptr->node_bitmap) {
 		job_ptr->node_bitmap_cg = bit_copy(job_ptr->node_bitmap);
+		if (bit_set_count(job_ptr->node_bitmap_cg) == 0)
+			job_ptr->job_state &= (~JOB_COMPLETING);
 	} else {
 		error("build_cg_bitmap: node_bitmap is NULL");
 		job_ptr->node_bitmap_cg = bit_alloc(node_record_count);
+		job_ptr->job_state &= (~JOB_COMPLETING);
 	}
 	if (job_ptr->node_bitmap_cg == NULL)
 		fatal("bit_copy: memory allocation failure");
diff --git a/src/slurmctld/job_scheduler.c b/src/slurmctld/job_scheduler.c
index 046854ffc..d39c3cd9c 100644
--- a/src/slurmctld/job_scheduler.c
+++ b/src/slurmctld/job_scheduler.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -50,16 +50,19 @@
 
 #include "src/common/assoc_mgr.h"
 #include "src/common/env.h"
+#include "src/common/gres.h"
 #include "src/common/list.h"
 #include "src/common/macros.h"
 #include "src/common/node_select.h"
 #include "src/common/slurm_accounting_storage.h"
+#include "src/common/timers.h"
 #include "src/common/uid.h"
 #include "src/common/xassert.h"
 #include "src/common/xstring.h"
 
 #include "src/slurmctld/acct_policy.h"
 #include "src/slurmctld/agent.h"
+#include "src/slurmctld/front_end.h"
 #include "src/slurmctld/job_scheduler.h"
 #include "src/slurmctld/licenses.h"
 #include "src/slurmctld/locks.h"
@@ -85,6 +88,7 @@ static bool	_scan_depend(List dependency_list, uint32_t job_id);
 static int	_valid_feature_list(uint32_t job_id, List feature_list);
 static int	_valid_node_feature(char *feature);
 
+static int	save_last_part_update = 0;
 
 /*
  * _build_user_job_list - build list of jobs for a given user
@@ -149,6 +153,7 @@ extern List build_job_queue(bool clear_start)
 	struct job_record *job_ptr = NULL;
 	struct part_record *part_ptr;
 	bool job_is_pending;
+	bool job_indepen = false;
 
 	job_queue = list_create(_job_queue_rec_del);
 	if (job_queue == NULL)
@@ -161,6 +166,8 @@ extern List build_job_queue(bool clear_start)
 		job_is_pending = IS_JOB_PENDING(job_ptr);
 		if (!job_is_pending || IS_JOB_COMPLETING(job_ptr))
 			continue;
+		/* ensure dependency shows current values behind a hold */
+		job_indepen = job_independent(job_ptr, 0);
 		if (job_is_pending && clear_start)
 			job_ptr->start_time = (time_t) 0;
 		if (job_ptr->priority == 0)	{ /* held */
@@ -176,8 +183,15 @@ extern List build_job_queue(bool clear_start)
 			       job_reason_string(job_ptr->state_reason),
 			       job_ptr->priority);
 			continue;
+		} else if ((job_ptr->priority == 1) && !job_indepen &&
+			   ((job_ptr->state_reason == WAIT_HELD) ||
+			    (job_ptr->state_reason == WAIT_HELD_USER))) {
+			/* released behind active dependency? */
+			job_ptr->state_reason = WAIT_DEPENDENCY;
+			xfree(job_ptr->state_desc);
 		}
-		if (!job_independent(job_ptr, 0))	/* can not run now */
+
+		if (!job_indepen)	/* can not run now */
 			continue;
 		if (job_ptr->part_ptr_list) {
 			part_iterator = list_iterator_create(job_ptr->
@@ -327,8 +341,8 @@ extern int schedule(uint32_t job_limit)
 #ifdef HAVE_BG
 	char *ionodes = NULL;
 	char tmp_char[256];
-#endif
 	static bool backfill_sched = false;
+#endif
 	static time_t sched_update = 0;
 	static bool wiki_sched = false;
 	static int sched_timeout = 0;
@@ -348,9 +362,11 @@ extern int schedule(uint32_t job_limit)
 	if (sched_update != slurmctld_conf.last_update) {
 		char *sched_params, *tmp_ptr;
 		char *sched_type = slurm_get_sched_type();
+#ifdef HAVE_BG
 		/* On BlueGene, do FIFO only with sched/backfill */
 		if (strcmp(sched_type, "sched/backfill") == 0)
 			backfill_sched = true;
+#endif
 		/* Disable avoiding of fragmentation with sched/wiki */
 		if ((strcmp(sched_type, "sched/wiki") == 0) ||
 		    (strcmp(sched_type, "sched/wiki2") == 0))
@@ -359,8 +375,8 @@ extern int schedule(uint32_t job_limit)
 
 		sched_params = slurm_get_sched_params();
 		if (sched_params &&
-		    (tmp_ptr=strstr(sched_params, "default_queue_depth="))) {
-		/*                                 01234567890123456789 */
+		    (tmp_ptr = strstr(sched_params, "default_queue_depth="))) {
+		/*                                   01234567890123456789 */
 			i = atoi(tmp_ptr + 20);
 			if (i < 0) {
 				error("ignoring SchedulerParameters: "
@@ -376,6 +392,12 @@ extern int schedule(uint32_t job_limit)
 		job_limit = def_job_limit;
 
 	lock_slurmctld(job_write_lock);
+	if (!avail_front_end()) {
+		unlock_slurmctld(job_write_lock);
+		debug("sched: schedule() returning, no front end nodes are "
+		       "available");
+		return SLURM_SUCCESS;
+	}
 	/* Avoid resource fragmentation if important */
 	if ((!wiki_sched) && job_is_completing()) {
 		unlock_slurmctld(job_write_lock);
@@ -384,6 +406,20 @@ extern int schedule(uint32_t job_limit)
 		return SLURM_SUCCESS;
 	}
 
+#ifdef HAVE_CRAY
+	/*
+	 * Run a Basil Inventory immediately before scheduling, to avoid
+	 * race conditions caused by ALPS node state change (caused e.g.
+	 * by the node health checker).
+	 * This relies on the above write lock for the node state.
+	 */
+	if (select_g_reconfigure()) {
+		unlock_slurmctld(job_write_lock);
+		debug4("sched: not scheduling due to ALPS");
+		return SLURM_SUCCESS;
+	}
+#endif
+
 	failed_parts = xmalloc(sizeof(struct part_record *) *
 			       list_count(part_list));
 	save_avail_node_bitmap = bit_copy(avail_node_bitmap);
@@ -414,6 +450,21 @@ extern int schedule(uint32_t job_limit)
 			       job_ptr->priority);
 			continue;
 		}
+
+		/* If a partition update has occurred, then do a limit check. */
+		if (save_last_part_update != last_part_update) {
+			int fail_reason = job_limits_check(&job_ptr);
+			if (fail_reason != WAIT_NO_REASON) {
+				job_ptr->state_reason = fail_reason;
+				job_ptr->priority = 1;
+				continue;
+			}
+		} else if ((job_ptr->state_reason == WAIT_PART_TIME_LIMIT) ||
+			   (job_ptr->state_reason == WAIT_PART_NODE_LIMIT)) {
+				job_ptr->start_time = 0;
+				job_ptr->priority = 1;
+				continue;
+		}
 		if (job_ptr->part_ptr != part_ptr) {
 			/* Cycle through partitions usable for this job */
 			job_ptr->part_ptr = part_ptr;
@@ -581,6 +632,7 @@ extern int schedule(uint32_t job_limit)
 		}
 	}
 
+	save_last_part_update = last_part_update;
 	FREE_NULL_BITMAP(avail_node_bitmap);
 	avail_node_bitmap = save_avail_node_bitmap;
 	xfree(failed_parts);
@@ -634,11 +686,6 @@ extern void launch_job(struct job_record *job_ptr)
 {
 	batch_job_launch_msg_t *launch_msg_ptr;
 	agent_arg_t *agent_arg_ptr;
-	struct node_record *node_ptr;
-
-	node_ptr = find_first_node_record(job_ptr->node_bitmap);
-	if (node_ptr == NULL)
-		return;
 
 	/* Initialization of data structures */
 	launch_msg_ptr = (batch_job_launch_msg_t *)
@@ -653,6 +700,7 @@ extern void launch_job(struct job_record *job_ptr)
 	launch_msg_ptr->open_mode  = job_ptr->details->open_mode;
 	launch_msg_ptr->acctg_freq = job_ptr->details->acctg_freq;
 	launch_msg_ptr->cpus_per_task = job_ptr->details->cpus_per_task;
+	launch_msg_ptr->pn_min_memory = job_ptr->details->pn_min_memory;
 	launch_msg_ptr->restart_cnt   = job_ptr->restart_cnt;
 
 	if (make_batch_job_cred(launch_msg_ptr, job_ptr)) {
@@ -701,7 +749,8 @@ extern void launch_job(struct job_record *job_ptr)
 	agent_arg_ptr = (agent_arg_t *) xmalloc(sizeof(agent_arg_t));
 	agent_arg_ptr->node_count = 1;
 	agent_arg_ptr->retry = 0;
-	agent_arg_ptr->hostlist = hostlist_create(node_ptr->name);
+	xassert(job_ptr->batch_host);
+	agent_arg_ptr->hostlist = hostlist_create(job_ptr->batch_host);
 	agent_arg_ptr->msg_type = REQUEST_BATCH_JOB_LAUNCH;
 	agent_arg_ptr->msg_args = (void *) launch_msg_ptr;
 
@@ -739,7 +788,8 @@ extern int make_batch_job_cred(batch_job_launch_msg_t *launch_msg_ptr,
 /*	cred_arg.step_gres_list      = NULL; */
 
 #ifdef HAVE_FRONT_END
-	cred_arg.step_hostlist       = node_record_table_ptr[0].name;
+	xassert(job_ptr->batch_host);
+	cred_arg.step_hostlist       = job_ptr->batch_host;
 #else
 	cred_arg.step_hostlist       = launch_msg_ptr->nodes;
 #endif
@@ -792,6 +842,8 @@ extern void print_job_dependency(struct job_record *job_ptr)
 			dep_str = "afternotok";
 		else if (dep_ptr->depend_type == SLURM_DEPEND_AFTER_OK)
 			dep_str = "afterok";
+		else if (dep_ptr->depend_type == SLURM_DEPEND_EXPAND)
+			dep_str = "expand";
 		else
 			dep_str = "unknown";
 		info("  %s:%u", dep_str, dep_ptr->job_id);
@@ -809,25 +861,29 @@ extern int test_job_dependency(struct job_record *job_ptr)
 {
 	ListIterator depend_iter, job_iterator;
 	struct depend_spec *dep_ptr;
-	bool failure = false, depends = false;
+	bool failure = false, depends = false, expands = false;
  	List job_queue = NULL;
- 	int now;
+ 	bool run_now;
+	int count = 0;
  	struct job_record *qjob_ptr;
 
 	if ((job_ptr->details == NULL) ||
 	    (job_ptr->details->depend_list == NULL))
 		return 0;
 
+	count = list_count(job_ptr->details->depend_list);
 	depend_iter = list_iterator_create(job_ptr->details->depend_list);
 	if (!depend_iter)
 		fatal("list_iterator_create memory allocation failure");
 	while ((dep_ptr = list_next(depend_iter))) {
+		bool clear_dep = false;
+		count--;
  		if ((dep_ptr->depend_type == SLURM_DEPEND_SINGLETON) &&
  		    job_ptr->name) {
  			/* get user jobs with the same user and name */
  			job_queue = _build_user_job_list(job_ptr->user_id,
 							 job_ptr->name);
- 			now = 1;
+ 			run_now = true;
 			job_iterator = list_iterator_create(job_queue);
 			if (job_iterator == NULL)
 				fatal("list_iterator_create malloc failure");
@@ -839,14 +895,14 @@ extern int test_job_dependency(struct job_record *job_ptr)
 				    IS_JOB_SUSPENDED(qjob_ptr) ||
 				    (IS_JOB_PENDING(qjob_ptr) &&
 				     (qjob_ptr->job_id < job_ptr->job_id))) {
-					now = 0;
+					run_now = false;
 					break;
  				}
  			}
 			list_iterator_destroy(job_iterator);
 			list_destroy(job_queue);
 			/* job can run now, delete dependency */
- 			if (now)
+ 			if (run_now)
  				list_delete_item(depend_iter);
  			else
 				depends = true;
@@ -854,38 +910,71 @@ extern int test_job_dependency(struct job_record *job_ptr)
 			   (dep_ptr->job_ptr->job_id != dep_ptr->job_id)) {
 			/* job is gone, dependency lifted */
 			list_delete_item(depend_iter);
+			clear_dep = true;
 		} else if (dep_ptr->depend_type == SLURM_DEPEND_AFTER) {
-			if (!IS_JOB_PENDING(dep_ptr->job_ptr))
+			if (!IS_JOB_PENDING(dep_ptr->job_ptr)) {
 				list_delete_item(depend_iter);
-			else
+				clear_dep = true;
+			} else
 				depends = true;
 		} else if (dep_ptr->depend_type == SLURM_DEPEND_AFTER_ANY) {
-			if (IS_JOB_FINISHED(dep_ptr->job_ptr))
+			if (IS_JOB_FINISHED(dep_ptr->job_ptr)) {
 				list_delete_item(depend_iter);
-			else
+				clear_dep = true;
+			} else
 				depends = true;
 		} else if (dep_ptr->depend_type == SLURM_DEPEND_AFTER_NOT_OK) {
 			if (!IS_JOB_FINISHED(dep_ptr->job_ptr))
 				depends = true;
-			else if (!IS_JOB_COMPLETE(dep_ptr->job_ptr))
+			else if (!IS_JOB_COMPLETE(dep_ptr->job_ptr)) {
 				list_delete_item(depend_iter);
-			else {
+				clear_dep = true;
+			} else {
 				failure = true;
 				break;
 			}
 		} else if (dep_ptr->depend_type == SLURM_DEPEND_AFTER_OK) {
 			if (!IS_JOB_FINISHED(dep_ptr->job_ptr))
 				depends = true;
-			else if (IS_JOB_COMPLETE(dep_ptr->job_ptr))
+			else if (IS_JOB_COMPLETE(dep_ptr->job_ptr)) {
 				list_delete_item(depend_iter);
-			else {
+				clear_dep = true;
+			} else {
 				failure = true;
 				break;
 			}
+		} else if (dep_ptr->depend_type == SLURM_DEPEND_EXPAND) {
+			time_t now = time(NULL);
+			expands = true;
+			if (IS_JOB_PENDING(dep_ptr->job_ptr)) {
+				depends = true;
+			} else if (IS_JOB_FINISHED(dep_ptr->job_ptr)) {
+				failure = true;
+				break;
+			} else if ((dep_ptr->job_ptr->end_time != 0) &&
+				   (dep_ptr->job_ptr->end_time > now)) {
+				job_ptr->time_limit = dep_ptr->job_ptr->
+						      end_time - now;
+				job_ptr->time_limit /= 60;  /* sec to min */
+			}
+			if (job_ptr->details && dep_ptr->job_ptr->details) {
+				job_ptr->details->shared =
+					dep_ptr->job_ptr->details->shared;
+			}
 		} else
 			failure = true;
+		if (clear_dep) {
+ 			char *rmv_dep;
+ 			rmv_dep = xstrdup_printf(":%u",
+						 dep_ptr->job_ptr->job_id);
+			xstrsubstitute(job_ptr->details->dependency,
+				       rmv_dep, "");
+			xfree(rmv_dep);
+		}
 	}
 	list_iterator_destroy(depend_iter);
+	if (!depends && !expands && (count == 0))
+		xfree(job_ptr->details->dependency);
 
 	if (failure)
 		return 2;
@@ -912,11 +1001,13 @@ extern int update_job_dependency(struct job_record *job_ptr, char *new_depend)
 	struct depend_spec *dep_ptr;
 	struct job_record *dep_job_ptr;
 	char dep_buf[32];
+	bool expand_cnt = 0;
 
 	if (job_ptr->details == NULL)
 		return EINVAL;
 
 	/* Clear dependencies on NULL, "0", or empty dependency input */
+	job_ptr->details->expanding_jobid = 0;
 	if ((new_depend == NULL) || (new_depend[0] == '\0') ||
 	    ((new_depend[0] == '0') && (new_depend[1] == '\0'))) {
 		xfree(job_ptr->details->dependency);
@@ -989,7 +1080,13 @@ extern int update_job_dependency(struct job_record *job_ptr, char *new_depend)
 			depend_type = SLURM_DEPEND_AFTER_OK;
 		else if (strncasecmp(tok, "after", 5) == 0)
 			depend_type = SLURM_DEPEND_AFTER;
-		else {
+		else if (strncasecmp(tok, "expand", 6) == 0) {
+			if (!select_g_job_expand_allow()) {
+				rc = ESLURM_DEPENDENCY;
+				break;
+			}
+			depend_type = SLURM_DEPEND_EXPAND;
+		} else {
 			rc = ESLURM_DEPENDENCY;
 			break;
 		}
@@ -1004,6 +1101,29 @@ extern int update_job_dependency(struct job_record *job_ptr, char *new_depend)
 				break;
 			}
 			dep_job_ptr = find_job_record(job_id);
+			if ((depend_type == SLURM_DEPEND_EXPAND) &&
+			    ((expand_cnt++ > 0) || (dep_job_ptr == NULL) ||
+			     (!IS_JOB_RUNNING(dep_job_ptr))              ||
+			     (dep_job_ptr->qos_id != job_ptr->qos_id)    ||
+			     (dep_job_ptr->part_ptr == NULL)             ||
+			     (job_ptr->part_ptr     == NULL)             ||
+			     (dep_job_ptr->part_ptr != job_ptr->part_ptr))) {
+				/* Expand only jobs in the same QOS
+				 * and partition */
+				rc = ESLURM_DEPENDENCY;
+				break;
+			}
+			if (depend_type == SLURM_DEPEND_EXPAND) {
+				job_ptr->details->expanding_jobid = job_id;
+				/* GRES configuration of this job must match
+				 * the job being expanded */
+				xfree(job_ptr->gres);
+				job_ptr->gres = xstrdup(dep_job_ptr->gres);
+				if (job_ptr->gres_list)
+					list_destroy(job_ptr->gres_list);
+				gres_plugin_job_state_validate(job_ptr->gres,
+						&job_ptr->gres_list);
+			}
 			if (dep_job_ptr) {	/* job still active */
 				dep_ptr = xmalloc(sizeof(struct depend_spec));
 				dep_ptr->depend_type = depend_type;
@@ -1083,6 +1203,59 @@ static void _pre_list_del(void *x)
 	xfree(x);
 }
 
+/* If there are higher priority queued jobs in this job's partition, then
+ * delay the job's expected initiation time as needed to run those jobs.
+ * NOTE: This is only a rough estimate of the job's start time as it ignores
+ * job dependencies, feature requirements, specific node requirements, etc. */
+static void _delayed_job_start_time(struct job_record *job_ptr)
+{
+	uint32_t part_node_cnt, part_cpu_cnt, part_cpus_per_node;
+	uint32_t job_size_cpus, job_size_nodes, job_time;
+	uint64_t cume_space_time = 0;
+	struct job_record *job_q_ptr;
+	ListIterator job_iterator;
+
+	if (job_ptr->part_ptr == NULL)
+		return;
+	part_node_cnt = job_ptr->part_ptr->total_nodes;
+	part_cpu_cnt  = job_ptr->part_ptr->total_cpus;
+	if (part_node_cnt > part_cpu_cnt)
+		part_cpus_per_node = part_node_cnt / part_cpu_cnt;
+	else
+		part_cpus_per_node = 1;
+
+	job_iterator = list_iterator_create(job_list);
+	if (job_iterator == NULL)
+		fatal("list_iterator_create memory allocation failure");
+	while ((job_q_ptr = (struct job_record *) list_next(job_iterator))) {
+		if (!IS_JOB_PENDING(job_q_ptr) || !job_q_ptr->details ||
+		    (job_q_ptr->part_ptr != job_ptr->part_ptr) ||
+		    (job_q_ptr->priority < job_ptr->priority))
+			continue;
+		if (job_q_ptr->details->min_nodes == NO_VAL)
+			job_size_nodes = 1;
+		else
+			job_size_nodes = job_q_ptr->details->min_nodes;
+		if (job_q_ptr->details->min_cpus == NO_VAL)
+			job_size_cpus = 1;
+		else
+			job_size_cpus = job_q_ptr->details->min_nodes;
+		job_size_cpus = MAX(job_size_cpus,
+				    (job_size_nodes * part_cpus_per_node));
+		if (job_ptr->time_limit == NO_VAL)
+			job_time = job_q_ptr->part_ptr->max_time;
+		else
+			job_time = job_q_ptr->time_limit;
+		cume_space_time += job_size_cpus * job_time;
+	}
+	list_iterator_destroy(job_iterator);
+	cume_space_time /= part_cpu_cnt;/* Factor out size */
+	cume_space_time *= 60;		/* Minutes to seconds */
+	debug2("Increasing estimated start of job %u by %"PRIu64" secs",
+	       job_ptr->job_id, cume_space_time);
+	job_ptr->start_time += cume_space_time;
+}
+
 /* Determine if a pending job will run using only the specified nodes
  * (in job_desc_msg->req_nodes), build response message and return
  * SLURM_SUCCESS on success. Otherwise return an error code. Caller
@@ -1197,6 +1370,7 @@ extern int job_start_data(job_desc_msg_t *job_desc_msg,
 #else
 		resp_data->proc_cnt = job_ptr->total_cpus;
 #endif
+		_delayed_job_start_time(job_ptr);
 		resp_data->start_time = MAX(job_ptr->start_time,
 					    orig_start_time);
 		resp_data->start_time = MAX(resp_data->start_time, start_res);
@@ -1288,12 +1462,6 @@ static char **_build_env(struct job_record *job_ptr)
 				(const char **) job_ptr->spank_job_env);
 	}
 
-#ifdef HAVE_CRAY
-	name = select_g_select_jobinfo_xstrdup(job_ptr->select_jobinfo,
-						SELECT_PRINT_RESV_ID);
-	setenvf(&my_env, "BASIL_RESERVATION_ID", "%s", name);
-	xfree(name);
-#endif
 #ifdef HAVE_BG
 	select_g_select_jobinfo_get(job_ptr->select_jobinfo,
 				    SELECT_JOBDATA_BLOCK_ID, &name);
@@ -1312,6 +1480,11 @@ static char **_build_env(struct job_record *job_ptr)
 	}
 # endif
 	xfree(name);
+#elif defined HAVE_CRAY
+	name = select_g_select_jobinfo_xstrdup(job_ptr->select_jobinfo,
+						SELECT_PRINT_RESV_ID);
+	setenvf(&my_env, "BASIL_RESERVATION_ID", "%s", name);
+	xfree(name);
 #endif
 	setenvf(&my_env, "SLURM_JOB_ACCOUNT", "%s", job_ptr->account);
 	if (job_ptr->details) {
@@ -1386,7 +1559,7 @@ static void *_run_epilog(void *arg)
 		error("epilog_slurmctld job %u epilog exit status %u:%u",
 		      job_id, WEXITSTATUS(status), WTERMSIG(status));
 	} else
-		debug2("epilog_slurmctld job %u prolog completed", job_id);
+		debug2("epilog_slurmctld job %u epilog completed", job_id);
 
  fini:	xfree(argv[0]);
 	for (i=0; my_env[i]; i++)
@@ -1505,7 +1678,7 @@ static void *_run_prolog(void *arg)
 			     job_id);
 			kill_job = true;
 		} else if ((rc = job_requeue(0, job_id, -1,
-					     (uint16_t)NO_VAL))) {
+					     (uint16_t)NO_VAL, false))) {
 			info("unable to requeue job %u: %m", job_id);
 			kill_job = true;
 		} else
@@ -1513,7 +1686,7 @@ static void *_run_prolog(void *arg)
 		if (kill_job) {
 			srun_user_message(job_ptr,
 					  "PrologSlurmctld failed, job killed");
-			(void) job_signal(job_id, SIGKILL, 0, 0);
+			(void) job_signal(job_id, SIGKILL, 0, 0, false);
 		}
 
 		unlock_slurmctld(job_write_lock);
@@ -1568,7 +1741,7 @@ static void *_run_prolog(void *arg)
 extern int build_feature_list(struct job_record *job_ptr)
 {
 	struct job_details *detail_ptr = job_ptr->details;
-	char *tmp_requested, *str_ptr1, *str_ptr2, *feature = NULL;
+	char *tmp_requested, *str_ptr, *feature = NULL;
 	int bracket = 0, count = 0, i;
 	bool have_count = false, have_or = false;
 	struct feature_record *feat;
@@ -1579,20 +1752,19 @@ extern int build_feature_list(struct job_record *job_ptr)
 		return SLURM_SUCCESS;
 
 	tmp_requested = xstrdup(detail_ptr->features);
-	str_ptr1 = tmp_requested;
 	detail_ptr->feature_list = list_create(_feature_list_delete);
 	for (i=0; ; i++) {
 		if (tmp_requested[i] == '*') {
 			tmp_requested[i] = '\0';
 			have_count = true;
-			count = strtol(&tmp_requested[i+1], &str_ptr2, 10);
+			count = strtol(&tmp_requested[i+1], &str_ptr, 10);
 			if ((feature == NULL) || (count <= 0)) {
 				info("Job %u invalid constraint %s",
 					job_ptr->job_id, detail_ptr->features);
 				xfree(tmp_requested);
 				return ESLURM_INVALID_FEATURE;
 			}
-			i = str_ptr2 - tmp_requested - 1;
+			i = str_ptr - tmp_requested - 1;
 		} else if (tmp_requested[i] == '&') {
 			tmp_requested[i] = '\0';
 			if ((feature == NULL) || (bracket != 0)) {
diff --git a/src/slurmctld/job_scheduler.h b/src/slurmctld/job_scheduler.h
index 2b968cc1a..0ddd1f7d0 100644
--- a/src/slurmctld/job_scheduler.h
+++ b/src/slurmctld/job_scheduler.h
@@ -10,7 +10,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/slurmctld/job_submit.c b/src/slurmctld/job_submit.c
index 493bef05c..7ded61704 100644
--- a/src/slurmctld/job_submit.c
+++ b/src/slurmctld/job_submit.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -60,10 +60,10 @@
 #  include <stdint.h>
 #  include <string.h>
 #endif /* HAVE_CONFIG_H */
-
 #include <stdio.h>
-#include <slurm/slurm.h>
-#include <slurm/slurm_errno.h>
+
+#include "slurm/slurm.h"
+#include "slurm/slurm_errno.h"
 
 #include "src/common/macros.h"
 #include "src/common/plugin.h"
diff --git a/src/slurmctld/job_submit.h b/src/slurmctld/job_submit.h
index 70110ba5f..c05c606e8 100644
--- a/src/slurmctld/job_submit.h
+++ b/src/slurmctld/job_submit.h
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -39,7 +39,7 @@
 #ifndef _JOB_SUBMIT_H
 #define _JOB_SUBMIT_H
 
-#include <slurm/slurm.h>
+#include "slurm/slurm.h"
 
 /*
  * Initialize the job submit plugin.
diff --git a/src/slurmctld/licenses.c b/src/slurmctld/licenses.c
index 8f8d53fd6..53b9e027f 100644
--- a/src/slurmctld/licenses.c
+++ b/src/slurmctld/licenses.c
@@ -1,13 +1,13 @@
 /*****************************************************************************\
  *  licenses.c - Functions for handling cluster-wide consumable resources
  *****************************************************************************
- *  Copyright (C) 2008-2010 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2011 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette@llnl.gov>, et. al.
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -39,10 +39,11 @@
 #include <ctype.h>
 #include <errno.h>
 #include <pthread.h>
-#include <slurm/slurm_errno.h>
 #include <stdlib.h>
 #include <string.h>
 
+#include "slurm/slurm_errno.h"
+
 #include "src/common/list.h"
 #include "src/common/log.h"
 #include "src/common/macros.h"
@@ -121,25 +122,32 @@ static List _build_license_list(char *licenses, bool *valid)
 	tmp_str = xstrdup(licenses);
 	token = strtok_r(tmp_str, ",;", &last);
 	while (token && *valid) {
-		uint16_t num = 1;
-		for (i=0; token[i]; i++) {
+		uint32_t num = 1;
+		for (i = 0; token[i]; i++) {
 			if (isspace(token[i])) {
 				*valid = false;
 				break;
 			}
 			if (token[i] == '*') {
 				token[i++] = '\0';
-				num = (uint16_t)strtol(&token[i], &end_num,10);
+				num = (uint32_t)strtol(&token[i], &end_num,10);
 			}
 		}
 		if (num <= 0) {
 			*valid = false;
 			break;
 		}
-		license_entry = xmalloc(sizeof(licenses_t));
-		license_entry->name = xstrdup(token);
-		license_entry->total = num;
-		list_push(lic_list, license_entry);
+
+		license_entry = list_find_first(lic_list, _license_find_rec,
+						token);
+		if (license_entry) {
+			license_entry->total += num;
+		} else {
+			license_entry = xmalloc(sizeof(licenses_t));
+			license_entry->name = xstrdup(token);
+			license_entry->total = num;
+			list_push(lic_list, license_entry);
+		}
 		token = strtok_r(NULL, ",;", &last);
 	}
 	xfree(tmp_str);
@@ -151,6 +159,36 @@ static List _build_license_list(char *licenses, bool *valid)
 	return lic_list;
 }
 
+/* Given a list of license_t records, return a license string.
+ * This can be combined with _build_license_list() to eliminate duplicates
+ * (e.g. "tux*2,tux*3" gets changed to "tux*5"). */
+static char * _build_license_string(List license_list)
+{
+	char buf[128], *sep;
+	char *licenses = NULL;
+	ListIterator iter;
+	licenses_t *license_entry;
+
+	if (!license_list)
+		return licenses;
+
+	iter = list_iterator_create(license_list);
+	if (iter == NULL)
+		fatal("malloc failure from list_iterator_create");
+	while ((license_entry = (licenses_t *) list_next(iter))) {
+		if (licenses)
+			sep = ",";
+		else
+			sep = "";
+		snprintf(buf, sizeof(buf), "%s%s*%u", sep, license_entry->name,
+			 license_entry->total);
+		xstrcat(licenses, buf);
+	}
+	list_iterator_destroy(iter);
+
+	return licenses;
+}
+
 /* Initialize licenses on this system based upon slurm.conf */
 extern int license_init(char *licenses)
 {
@@ -262,7 +300,7 @@ extern List license_validate(char *licenses, bool *valid)
 			break;
 		} else if (license_entry->total > match->total) {
 			debug("job wants more %s licenses than configured",
-			     match->name);
+			      match->name);
 			*valid = false;
 			break;
 		}
@@ -277,6 +315,22 @@ extern List license_validate(char *licenses, bool *valid)
 	return job_license_list;
 }
 
+/*
+ * license_job_merge - The licenses from one job have just been merged into
+ *	another job by appending one job's licenses to another, possibly
+ *	including duplicate names. Reconstruct this job's licenses and
+ *	license_list fields to eliminate duplicates.
+ */
+extern void license_job_merge(struct job_record *job_ptr)
+{
+	bool valid;
+
+	FREE_NULL_LIST(job_ptr->license_list);
+	job_ptr->license_list = _build_license_list(job_ptr->licenses, &valid);
+	xfree(job_ptr->licenses);
+	job_ptr->licenses = _build_license_string(job_ptr->license_list);
+}
+
 /*
  * license_job_test - Test if the licenses required for a job are available
  * IN job_ptr - job identification
diff --git a/src/slurmctld/licenses.h b/src/slurmctld/licenses.h
index 511be695d..f8fa9f53a 100644
--- a/src/slurmctld/licenses.h
+++ b/src/slurmctld/licenses.h
@@ -1,13 +1,13 @@
 /*****************************************************************************\
  *  licenses.h - Definitions for handling cluster-wide consumable resources
  *****************************************************************************
- *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2011 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette@llnl.gov>, et. al.
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -44,8 +44,8 @@
 
 typedef struct licenses {
 	char *		name;		/* name associated with a license */
-	uint16_t	total;		/* total license configued */
-	uint16_t	used;		/* used licenses */
+	uint32_t	total;		/* total license configured */
+	uint32_t	used;		/* used licenses */
 } licenses_t;
 
 extern List license_list;
@@ -71,6 +71,14 @@ extern void license_free_rec(void *x);
  */
 extern int license_job_get(struct job_record *job_ptr);
 
+/*
+ * license_job_merge - The licenses from one job have just been merged into
+ *	another job by appending one job's licenses to another, possibly
+ *	including duplicate names. Reconstruct this job's licenses and
+ *	license_list fields to eliminate duplicates.
+ */
+extern void license_job_merge(struct job_record *job_ptr);
+
 /*
  * license_job_return - Return the licenses allocated to a job
  * IN job_ptr - job identification
diff --git a/src/slurmctld/locks.c b/src/slurmctld/locks.c
index 34ed56812..a54e356e2 100644
--- a/src/slurmctld/locks.c
+++ b/src/slurmctld/locks.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/slurmctld/locks.h b/src/slurmctld/locks.h
index 90f678d1e..0d8f38eab 100644
--- a/src/slurmctld/locks.h
+++ b/src/slurmctld/locks.h
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/slurmctld/node_mgr.c b/src/slurmctld/node_mgr.c
index 4e40d76b2..1763dedc2 100644
--- a/src/slurmctld/node_mgr.c
+++ b/src/slurmctld/node_mgr.c
@@ -11,7 +11,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -66,14 +66,16 @@
 #include "src/common/read_config.h"
 #include "src/common/slurm_accounting_storage.h"
 #include "src/slurmctld/agent.h"
+#include "src/slurmctld/front_end.h"
 #include "src/slurmctld/locks.h"
 #include "src/slurmctld/ping_nodes.h"
 #include "src/slurmctld/proc_req.h"
 #include "src/slurmctld/sched_plugin.h"
 #include "src/slurmctld/slurmctld.h"
 #include "src/slurmctld/state_save.h"
+#include "src/common/timers.h"
 #include "src/slurmctld/trigger_mgr.h"
-#include "src/plugins/select/bluegene/plugin/bg_boot_time.h"
+#include "src/plugins/select/bluegene/bg_enums.h"
 
 #define _DEBUG		0
 #define MAX_RETRIES	10
@@ -93,9 +95,10 @@ bitstr_t *up_node_bitmap    = NULL;  	/* bitmap of non-down nodes */
 
 static void 	_dump_node_state (struct node_record *dump_node_ptr,
 				  Buf buffer);
+static front_end_record_t * _front_end_reg(
+				slurm_node_registration_status_msg_t *reg_msg);
 static void 	_make_node_down(struct node_record *node_ptr,
 				time_t event_time);
-static void	_node_did_resp(struct node_record *node_ptr);
 static bool	_node_is_hidden(struct node_record *node_ptr);
 static int	_open_node_state_file(char **state_file);
 static void 	_pack_node (struct node_record *dump_node_ptr, Buf buffer,
@@ -107,9 +110,6 @@ static int	_update_node_features(char *node_names, char *features);
 static int	_update_node_gres(char *node_names, char *gres);
 static int	_update_node_weight(char *node_names, uint32_t weight);
 static bool 	_valid_node_state_change(uint16_t old, uint16_t new);
-#ifndef HAVE_FRONT_END
-static void	_node_not_resp (struct node_record *node_ptr, time_t msg_time);
-#endif
 
 
 /* dump_all_node_state - save the state of all nodes to file */
@@ -430,6 +430,14 @@ extern int load_all_node_state ( bool state_only )
 						hostset_insert(hs, node_name);
 					else
 						hs = hostset_create(node_name);
+					/* Recover hardware state for powered
+					 * down nodes */
+					node_ptr->cpus          = cpus;
+					node_ptr->sockets       = sockets;
+					node_ptr->cores         = cores;
+					node_ptr->threads       = threads;
+					node_ptr->real_memory   = real_memory;
+					node_ptr->tmp_disk      = tmp_disk;
 				}
 				if (node_state & NODE_STATE_POWER_UP) {
 					if (power_save_mode) {
@@ -487,11 +495,9 @@ extern int load_all_node_state ( bool state_only )
 			node_ptr->last_idle	= now;
 		}
 
-		if (node_ptr) {
-			select_g_update_node_state(
-				(node_ptr - node_record_table_ptr),
-				node_ptr->node_state);
-		}
+		if (node_ptr)
+			select_g_update_node_state(node_ptr);
+
 		xfree(features);
 		xfree(gres);
 		if (gres_list) {
@@ -585,8 +591,8 @@ extern void pack_all_node (char **buffer_ptr, int *buffer_size,
 	buffer = init_buf (BUF_SIZE*16);
 	nodes_packed = 0;
 
-	if(protocol_version >= SLURM_2_1_PROTOCOL_VERSION) {
-		/* write header: version and time */
+	if (protocol_version >= SLURM_2_1_PROTOCOL_VERSION) {
+		/* write header: count and time */
 		pack32(nodes_packed, buffer);
 		select_g_alter_node_cnt(SELECT_GET_NODE_SCALING,
 					&node_scaling);
@@ -653,8 +659,10 @@ extern void pack_all_node (char **buffer_ptr, int *buffer_size,
 static void _pack_node (struct node_record *dump_node_ptr, Buf buffer,
 			uint16_t protocol_version)
 {
-	if(protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_2_3_PROTOCOL_VERSION) {
 		packstr (dump_node_ptr->name, buffer);
+		packstr (dump_node_ptr->node_hostname, buffer);
+		packstr (dump_node_ptr->comm_name, buffer);
 		pack16  (dump_node_ptr->node_state, buffer);
 		if (slurmctld_conf.fast_schedule) {
 			/* Only data from config_record used for scheduling */
@@ -691,7 +699,45 @@ static void _pack_node (struct node_record *dump_node_ptr, Buf buffer,
 			packstr(dump_node_ptr->config_ptr->gres, buffer);
 		packstr(dump_node_ptr->os, buffer);
 		packstr(dump_node_ptr->reason, buffer);
-	} else if(protocol_version >= SLURM_2_1_PROTOCOL_VERSION) {
+	} else if (protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
+		packstr (dump_node_ptr->name, buffer);
+		pack16  (dump_node_ptr->node_state, buffer);
+		if (slurmctld_conf.fast_schedule) {
+			/* Only data from config_record used for scheduling */
+			pack16(dump_node_ptr->config_ptr->cpus, buffer);
+			pack16(dump_node_ptr->config_ptr->sockets, buffer);
+			pack16(dump_node_ptr->config_ptr->cores, buffer);
+			pack16(dump_node_ptr->config_ptr->threads, buffer);
+			pack32(dump_node_ptr->config_ptr->real_memory, buffer);
+			pack32(dump_node_ptr->config_ptr->tmp_disk, buffer);
+		} else {
+			/* Individual node data used for scheduling */
+			pack16(dump_node_ptr->cpus, buffer);
+			pack16(dump_node_ptr->sockets, buffer);
+			pack16(dump_node_ptr->cores, buffer);
+			pack16(dump_node_ptr->threads, buffer);
+			pack32(dump_node_ptr->real_memory, buffer);
+			pack32(dump_node_ptr->tmp_disk, buffer);
+		}
+		pack32(dump_node_ptr->config_ptr->weight, buffer);
+		pack32(dump_node_ptr->reason_uid, buffer);
+
+		pack_time(dump_node_ptr->boot_time, buffer);
+		pack_time(dump_node_ptr->reason_time, buffer);
+		pack_time(dump_node_ptr->slurmd_start_time, buffer);
+
+		select_g_select_nodeinfo_pack(dump_node_ptr->select_nodeinfo,
+					      buffer, protocol_version);
+
+		packstr(dump_node_ptr->arch, buffer);
+		packstr(dump_node_ptr->features, buffer);
+		if (dump_node_ptr->gres)
+			packstr(dump_node_ptr->gres, buffer);
+		else
+			packstr(dump_node_ptr->config_ptr->gres, buffer);
+		packstr(dump_node_ptr->os, buffer);
+		packstr(dump_node_ptr->reason, buffer);
+	} else if (protocol_version >= SLURM_2_1_PROTOCOL_VERSION) {
 		packstr (dump_node_ptr->name, buffer);
 		pack16  (dump_node_ptr->node_state, buffer);
 		if (slurmctld_conf.fast_schedule) {
@@ -734,6 +780,7 @@ static void _pack_node (struct node_record *dump_node_ptr, Buf buffer,
  */
 void set_slurmd_addr (void)
 {
+#ifndef HAVE_FRONT_END
 	int i;
 	struct node_record *node_ptr = node_record_table_ptr;
 	DEF_TIMERS;
@@ -757,7 +804,7 @@ void set_slurmd_addr (void)
 	}
 
 	END_TIMER2("set_slurmd_addr");
-	return;
+#endif
 }
 
 /*
@@ -965,8 +1012,7 @@ int update_node ( update_node_msg_t * update_node_msg )
 				node_ptr->node_state = state_val |
 						(node_ptr->node_state &
 						 NODE_STATE_FLAGS);
-				select_g_update_node_state(node_inx,
-							   state_val);
+				select_g_update_node_state(node_ptr);
 
 				info ("update_node: node %s state set to %s",
 					this_node_name,
@@ -976,9 +1022,11 @@ int update_node ( update_node_msg_t * update_node_msg )
 
 		if (!IS_NODE_DOWN(node_ptr) &&
 		    !IS_NODE_DRAIN(node_ptr) && !IS_NODE_FAIL(node_ptr)) {
-			xfree(node_ptr->reason);
-			node_ptr->reason_time = 0;
-			node_ptr->reason_uid = NO_VAL;
+			/* reason information is handled in
+			   clusteracct_storage_g_node_up()
+			*/
+			clusteracct_storage_g_node_up(
+				acct_db_conn, node_ptr, now);
 		}
 
 		free (this_node_name);
@@ -1399,7 +1447,7 @@ extern int drain_nodes ( char *nodes, char *reason, uint32_t reason_uid )
 							reason_uid);
 		}
 
-		select_g_update_node_state(node_inx, node_ptr->node_state);
+		select_g_update_node_state(node_ptr);
 
 		free (this_node_name);
 	}
@@ -1412,6 +1460,7 @@ extern int drain_nodes ( char *nodes, char *reason, uint32_t reason_uid )
 static bool _valid_node_state_change(uint16_t old, uint16_t new)
 {
 	uint16_t base_state, node_flags;
+
 	if (old == new)
 		return true;
 
@@ -1425,11 +1474,8 @@ static bool _valid_node_state_change(uint16_t old, uint16_t new)
 		case NODE_STATE_POWER_SAVE:
 		case NODE_STATE_POWER_UP:
 			return true;
-			break;
 
 		case NODE_RESUME:
-			if (base_state == NODE_STATE_UNKNOWN)
-				return false;
 			if ((base_state == NODE_STATE_DOWN)   ||
 			    (base_state == NODE_STATE_FUTURE) ||
 			    (node_flags & NODE_STATE_DRAIN)   ||
@@ -1619,20 +1665,17 @@ extern int validate_node_specs(slurm_node_registration_status_msg_t *reg_msg)
 		last_node_update = time (NULL);
 	} else if (reg_msg->status == ESLURMD_PROLOG_FAILED) {
 		if (!IS_NODE_DRAIN(node_ptr) && !IS_NODE_FAIL(node_ptr)) {
-#ifdef HAVE_BG
-			info("Prolog failure on node %s", reg_msg->node_name);
-#else
-			error("Prolog failure on node %s, state to DOWN",
-				reg_msg->node_name);
+			error("Prolog failure on node %s, setting state DOWN",
+			      reg_msg->node_name);
 			set_node_down(reg_msg->node_name, "Prolog failed");
 			last_node_update = time (NULL);
-#endif
 		}
 	} else {
 		if (IS_NODE_UNKNOWN(node_ptr)) {
 			reset_job_priority();
-			debug("validate_node_specs: node %s has registered",
-				reg_msg->node_name);
+			debug("validate_node_specs: node %s registered with "
+			      "%u jobs",
+			      reg_msg->node_name,reg_msg->job_count);
 			if (reg_msg->job_count) {
 				node_ptr->node_state = NODE_STATE_ALLOCATED |
 					node_flags;
@@ -1644,9 +1687,9 @@ extern int validate_node_specs(slurm_node_registration_status_msg_t *reg_msg)
 			last_node_update = now;
 			if (!IS_NODE_DRAIN(node_ptr)
 			    && !IS_NODE_FAIL(node_ptr)) {
-				xfree(node_ptr->reason);
-				node_ptr->reason_time = 0;
-				node_ptr->reason_uid = NO_VAL;
+				/* reason information is handled in
+				   clusteracct_storage_g_node_up()
+				*/
 				clusteracct_storage_g_node_up(
 					acct_db_conn, node_ptr, now);
 			}
@@ -1671,25 +1714,25 @@ extern int validate_node_specs(slurm_node_registration_status_msg_t *reg_msg)
 			last_node_update = now;
 			if (!IS_NODE_DRAIN(node_ptr)
 			    && !IS_NODE_FAIL(node_ptr)) {
-				xfree(node_ptr->reason);
-				node_ptr->reason_time = 0;
-				node_ptr->reason_uid = NO_VAL;
+				/* reason information is handled in
+				   clusteracct_storage_g_node_up()
+				*/
 				clusteracct_storage_g_node_up(
 					acct_db_conn, node_ptr, now);
 			}
 		} else if (node_ptr->last_response
 			   && (node_ptr->boot_time > node_ptr->last_response)
 			   && (slurmctld_conf.ret2service != 2)) {
-			if(!node_ptr->reason) {
+			if (!node_ptr->reason) {
 				node_ptr->reason_time = now;
 				node_ptr->reason_uid =
 					slurm_get_slurm_user_id();
 				node_ptr->reason = xstrdup(
-					"Node silently failed and came back");
+					"Node unexpectedly rebooted");
 			}
-			info("Node %s silently failed and came back",
+			info("Node %s unexpectedly rebooted",
 			     reg_msg->node_name);
-			_make_node_down(node_ptr, last_node_update);
+			_make_node_down(node_ptr, now);
 			kill_running_job_by_node_name(reg_msg->node_name);
 			last_node_update = now;
 			reg_msg->job_count = 0;
@@ -1725,7 +1768,7 @@ extern int validate_node_specs(slurm_node_registration_status_msg_t *reg_msg)
 		}
 
 		select_g_update_node_config(node_inx);
-		select_g_update_node_state(node_inx, node_ptr->node_state);
+		select_g_update_node_state(node_ptr);
 		_sync_bitmaps(node_ptr, reg_msg->job_count);
 	}
 
@@ -1734,6 +1777,59 @@ extern int validate_node_specs(slurm_node_registration_status_msg_t *reg_msg)
 	return error_code;
 }
 
+static front_end_record_t * _front_end_reg(
+		slurm_node_registration_status_msg_t *reg_msg)
+{
+	front_end_record_t *front_end_ptr;
+	uint16_t state_base, state_flags;
+	time_t now = time(NULL);
+
+	debug2("name:%s boot_time:%u up_time:%u",
+	       reg_msg->node_name, (unsigned int) reg_msg->slurmd_start_time,
+	       reg_msg->up_time);
+
+	front_end_ptr = find_front_end_record(reg_msg->node_name);
+	if (front_end_ptr == NULL) {
+		error("Registration message from unknown node %s",
+		      reg_msg->node_name);
+		return NULL;
+	}
+
+	front_end_ptr->boot_time = now - reg_msg->up_time;
+	if (front_end_ptr->last_response &&
+	    (front_end_ptr->boot_time > front_end_ptr->last_response) &&
+	    (slurmctld_conf.ret2service != 2)) {
+		set_front_end_down(front_end_ptr,
+				   "Front end unexpectedly rebooted");
+		info("Front end %s unexpectedly rebooted",
+		     reg_msg->node_name);
+		reg_msg->job_count = 0;
+	}
+
+	front_end_ptr->last_response = now;
+	front_end_ptr->slurmd_start_time = reg_msg->slurmd_start_time;
+	state_base  = front_end_ptr->node_state & JOB_STATE_BASE;
+	state_flags = front_end_ptr->node_state & JOB_STATE_FLAGS;
+	if ((state_base == NODE_STATE_DOWN) &&
+	    (!strncmp(front_end_ptr->reason, "Not responding", 14))) {
+		info("FrontEnd node %s returned to service",
+		     reg_msg->node_name);
+		state_base = NODE_STATE_IDLE;
+		xfree(front_end_ptr->reason);
+		front_end_ptr->reason_time = (time_t) 0;
+		front_end_ptr->reason_uid = 0;
+	}
+	if (state_base == NODE_STATE_UNKNOWN)
+		state_base = NODE_STATE_IDLE;
+#ifndef HAVE_CRAY
+	/* This is handled by the select/cray plugin */
+	state_flags &= (~NODE_STATE_NO_RESPOND);
+#endif
+	front_end_ptr->node_state = state_base | state_flags;
+	last_front_end_update = now;
+	return front_end_ptr;
+}
+
 /*
  * validate_nodes_via_front_end - validate all nodes on a cluster as having
  *	a valid configuration as soon as the front-end registers. Individual
@@ -1745,30 +1841,37 @@ extern int validate_node_specs(slurm_node_registration_status_msg_t *reg_msg)
 extern int validate_nodes_via_front_end(
 		slurm_node_registration_status_msg_t *reg_msg)
 {
-	int error_code = 0, i, jobs_on_node;
+	int error_code = 0, i, j;
 	bool update_node_state = false;
-#ifdef HAVE_BG
-	bool failure_logged = false;
-#endif
 	struct job_record *job_ptr;
 	struct config_record *config_ptr;
 	struct node_record *node_ptr;
 	time_t now = time(NULL);
 	ListIterator job_iterator;
-	hostlist_t return_hostlist = NULL, reg_hostlist = NULL;
-	hostlist_t prolog_hostlist = NULL;
+	hostlist_t reg_hostlist = NULL;
 	char *host_str = NULL;
 	uint16_t node_flags;
+	front_end_record_t *front_end_ptr;
 
 	if (reg_msg->up_time > now) {
-		error("Node up_time is invalid: %u>%u", reg_msg->up_time,
-		      (uint32_t) now);
+		error("Node up_time on %s is invalid: %u>%u",
+		      reg_msg->node_name, reg_msg->up_time, (uint32_t) now);
 		reg_msg->up_time = 0;
 	}
 
+	front_end_ptr = _front_end_reg(reg_msg);
+	if (front_end_ptr == NULL)
+		return ESLURM_INVALID_NODE_NAME;
+
+	if (reg_msg->status == ESLURMD_PROLOG_FAILED) {
+		error("Prolog failed on node %s", reg_msg->node_name);
+		/* Do NOT set the node DOWN here. Unlike non-front-end systems,
+		 * this failure is likely due to some problem in the underlying
+		 * infrastructure (e.g. the block failed to boot). */
+		/* set_front_end_down(front_end_ptr, "Prolog failed"); */
+	}
+
 	/* First validate the job info */
-	node_ptr = node_record_table_ptr;	/* All msg send to node zero,
-				 * the front-end for the whole cluster */
 	for (i = 0; i < reg_msg->job_count; i++) {
 		if ( (reg_msg->job_id[i] >= MIN_NOALLOC_JOBID) &&
 		     (reg_msg->job_id[i] <= MAX_NOALLOC_JOBID) ) {
@@ -1778,17 +1881,29 @@ extern int validate_nodes_via_front_end(
 		}
 
 		job_ptr = find_job_record(reg_msg->job_id[i]);
+		node_ptr = node_record_table_ptr;
+		if (job_ptr && job_ptr->node_bitmap &&
+		    ((j = bit_ffs(job_ptr->node_bitmap)) >= 0))
+			node_ptr += j;
+
 		if (job_ptr == NULL) {
-			error("Orphan job %u.%u reported",
-			      reg_msg->job_id[i], reg_msg->step_id[i]);
+			error("Orphan job %u.%u reported on %s",
+			      reg_msg->job_id[i], reg_msg->step_id[i],
+			      front_end_ptr->name);
 			abort_job_on_node(reg_msg->job_id[i],
-					  job_ptr, node_ptr);
+					  job_ptr, front_end_ptr->name);
+			continue;
+		} else if (job_ptr->batch_host == NULL) {
+			error("Resetting NULL batch_host of job %u to %s",
+			      reg_msg->job_id[i], front_end_ptr->name);
+			job_ptr->batch_host = xstrdup(front_end_ptr->name);
 		}
 
-		else if (IS_JOB_RUNNING(job_ptr) ||
-			 IS_JOB_SUSPENDED(job_ptr)) {
-			debug3("Registered job %u.%u",
-			       reg_msg->job_id[i], reg_msg->step_id[i]);
+
+		if (IS_JOB_RUNNING(job_ptr) || IS_JOB_SUSPENDED(job_ptr)) {
+			debug3("Registered job %u.%u on %s",
+			       reg_msg->job_id[i], reg_msg->step_id[i],
+			       front_end_ptr->name);
 			if (job_ptr->batch_flag) {
 				/* NOTE: Used for purging defunct batch jobs */
 				job_ptr->time_last_active = now;
@@ -1802,25 +1917,32 @@ extern int validate_nodes_via_front_end(
 					 node_ptr);
 		}
 
-
 		else if (IS_JOB_PENDING(job_ptr)) {
 			/* Typically indicates a job requeue and the hung
 			 * slurmd that went DOWN is now responding */
-			error("Registered PENDING job %u.%u",
-				reg_msg->job_id[i], reg_msg->step_id[i]);
+			error("Registered PENDING job %u.%u on %s",
+			      reg_msg->job_id[i], reg_msg->step_id[i],
+			      front_end_ptr->name);
 			abort_job_on_node(reg_msg->job_id[i], job_ptr,
-					  node_ptr);
+					  front_end_ptr->name);
 		}
 
 		else {		/* else job is supposed to be done */
-			error("Registered job %u.%u in state %s",
+			error("Registered job %u.%u in state %s on %s",
 				reg_msg->job_id[i], reg_msg->step_id[i],
-				job_state_string(job_ptr->job_state));
+				job_state_string(job_ptr->job_state),
+			        front_end_ptr->name);
 			kill_job_on_node(reg_msg->job_id[i], job_ptr,
 					 node_ptr);
 		}
 	}
 
+	if (reg_msg->job_count == 0) {
+		front_end_ptr->job_cnt_comp = 0;
+		front_end_ptr->node_state &= (~NODE_STATE_COMPLETING);
+	} else if (front_end_ptr->job_cnt_comp != 0)
+		front_end_ptr->node_state |= NODE_STATE_COMPLETING;
+
 	/* purge orphan batch jobs */
 	job_iterator = list_iterator_create(job_list);
 	while ((job_ptr = (struct job_record *) list_next(job_iterator))) {
@@ -1828,12 +1950,13 @@ extern int validate_nodes_via_front_end(
 		    IS_JOB_CONFIGURING(job_ptr) ||
 		    (job_ptr->batch_flag == 0))
 			continue;
+		if (job_ptr->front_end_ptr != front_end_ptr)
+			continue;
 #ifdef HAVE_BG
-		    /* slurmd does not report job presence until after prolog
-		     * completes which waits for bgblock boot to complete.
-		     * This can take several minutes on BlueGene. */
+		/* slurmd does not report job presence until after prolog
+		 * completes which waits for bgblock boot to complete.
+		 * This can take several minutes on BlueGene. */
 		if (difftime(now, job_ptr->time_last_active) <=
-
 		    (BG_FREE_PREVIOUS_BLOCK + BG_MIN_BLOCK_BOOT +
 		     BG_INCR_BLOCK_BOOT * job_ptr->node_cnt))
 			continue;
@@ -1841,7 +1964,6 @@ extern int validate_nodes_via_front_end(
 		if (difftime(now, job_ptr->time_last_active) <= 5)
 			continue;
 #endif
-
 		info("Killing orphan batch job %u", job_ptr->job_id);
 		job_complete(job_ptr->job_id, 0, false, false, 0);
 	}
@@ -1849,11 +1971,10 @@ extern int validate_nodes_via_front_end(
 
 	(void) gres_plugin_node_config_unpack(reg_msg->gres_info,
 					      node_record_table_ptr->name);
-	for (i=0; i<node_record_count; i++) {
-		node_ptr = &node_record_table_ptr[i];
+	for (i = 0, node_ptr = node_record_table_ptr; i < node_record_count;
+	     i++, node_ptr++) {
 		config_ptr = node_ptr->config_ptr;
-		jobs_on_node = node_ptr->run_job_cnt + node_ptr->comp_job_cnt;
-		node_ptr->last_response = time (NULL);
+		node_ptr->last_response = now;
 
 		(void) gres_plugin_node_config_validate(node_ptr->name,
 							config_ptr->gres,
@@ -1872,31 +1993,14 @@ extern int validate_nodes_via_front_end(
 
 		if (IS_NODE_NO_RESPOND(node_ptr)) {
 			update_node_state = true;
+#ifndef HAVE_CRAY
+			/* This is handled by the select/cray plugin */
 			node_ptr->node_state &= (~NODE_STATE_NO_RESPOND);
+#endif
 			node_ptr->node_state &= (~NODE_STATE_POWER_UP);
 		}
 
-		if (reg_msg->status == ESLURMD_PROLOG_FAILED) {
-			if (!IS_NODE_DRAIN(node_ptr) &&
-			    !IS_NODE_FAIL(node_ptr)) {
-#ifdef HAVE_BG
-				if (!failure_logged) {
-					error("Prolog failure");
-					failure_logged = true;
-				}
-#else
-				update_node_state = true;
-				if (prolog_hostlist)
-					(void) hostlist_push_host(
-						prolog_hostlist,
-						node_ptr->name);
-				else
-					prolog_hostlist = hostlist_create(
-						node_ptr->name);
-				set_node_down(node_ptr->name, "Prolog failed");
-#endif
-			}
-		} else {
+		if (reg_msg->status != ESLURMD_PROLOG_FAILED) {
 			if (reg_hostlist)
 				(void) hostlist_push_host(reg_hostlist,
 							  node_ptr->name);
@@ -1906,7 +2010,7 @@ extern int validate_nodes_via_front_end(
 			node_flags = node_ptr->node_state & NODE_STATE_FLAGS;
 			if (IS_NODE_UNKNOWN(node_ptr)) {
 				update_node_state = true;
-				if (jobs_on_node) {
+				if (node_ptr->run_job_cnt) {
 					node_ptr->node_state =
 						NODE_STATE_ALLOCATED |
 						node_flags;
@@ -1916,11 +2020,11 @@ extern int validate_nodes_via_front_end(
 						node_flags;
 					node_ptr->last_idle = now;
 				}
-				if (!IS_NODE_DRAIN(node_ptr)
-				    && !IS_NODE_FAIL(node_ptr)) {
-					xfree(node_ptr->reason);
-					node_ptr->reason_time = 0;
-					node_ptr->reason_uid = NO_VAL;
+				if (!IS_NODE_DRAIN(node_ptr) &&
+				    !IS_NODE_FAIL(node_ptr)) {
+					/* reason information is handled in
+					   clusteracct_storage_g_node_up()
+					*/
 					clusteracct_storage_g_node_up(
 						acct_db_conn,
 						node_ptr, now);
@@ -1932,7 +2036,7 @@ extern int validate_nodes_via_front_end(
 				     (strncmp(node_ptr->reason,
 					      "Not responding", 14) == 0)))) {
 				update_node_state = true;
-				if (jobs_on_node) {
+				if (node_ptr->run_job_cnt) {
 					node_ptr->node_state =
 						NODE_STATE_ALLOCATED |
 						node_flags;
@@ -1943,31 +2047,31 @@ extern int validate_nodes_via_front_end(
 					node_ptr->last_idle = now;
 				}
 				trigger_node_up(node_ptr);
-				if (!IS_NODE_DRAIN(node_ptr)
-				    && !IS_NODE_FAIL(node_ptr)) {
-					xfree(node_ptr->reason);
-					node_ptr->reason_time = 0;
-					node_ptr->reason_uid = NO_VAL;
+				if (!IS_NODE_DRAIN(node_ptr) &&
+				    !IS_NODE_FAIL(node_ptr)) {
+					/* reason information is handled in
+					   clusteracct_storage_g_node_up()
+					*/
 					clusteracct_storage_g_node_up(
 						acct_db_conn,
 						node_ptr, now);
 				}
 			} else if (IS_NODE_ALLOCATED(node_ptr) &&
-				   (jobs_on_node == 0)) {
+				   (node_ptr->run_job_cnt == 0)) {
 				/* job vanished */
 				update_node_state = true;
 				node_ptr->node_state = NODE_STATE_IDLE |
 					node_flags;
 				node_ptr->last_idle = now;
 			} else if (IS_NODE_COMPLETING(node_ptr) &&
-				   (jobs_on_node == 0)) {
+				   (node_ptr->comp_job_cnt == 0)) {
 				/* job already done */
 				update_node_state = true;
 				node_ptr->node_state &=
 					(~NODE_STATE_COMPLETING);
 				bit_clear(cg_node_bitmap, i);
 			} else if (IS_NODE_IDLE(node_ptr) &&
-				   (jobs_on_node != 0)) {
+				   (node_ptr->run_job_cnt != 0)) {
 				update_node_state = true;
 				node_ptr->node_state = NODE_STATE_ALLOCATED |
 						       node_flags;
@@ -1977,18 +2081,13 @@ extern int validate_nodes_via_front_end(
 			}
 
 			select_g_update_node_config(i);
-			select_g_update_node_state(i, node_ptr->node_state);
-			_sync_bitmaps(node_ptr, jobs_on_node);
+			select_g_update_node_state(node_ptr);
+			_sync_bitmaps(node_ptr,
+				      (node_ptr->run_job_cnt +
+				       node_ptr->comp_job_cnt));
 		}
 	}
 
-	if (prolog_hostlist) {
-		hostlist_uniq(prolog_hostlist);
-		host_str = hostlist_ranged_string_xmalloc(prolog_hostlist);
-		error("Prolog failure on nodes %s, set to DOWN", host_str);
-		xfree(host_str);
-		hostlist_destroy(prolog_hostlist);
-	}
 	if (reg_hostlist) {
 		hostlist_uniq(reg_hostlist);
 		host_str = hostlist_ranged_string_xmalloc(reg_hostlist);
@@ -1996,13 +2095,6 @@ extern int validate_nodes_via_front_end(
 		xfree(host_str);
 		hostlist_destroy(reg_hostlist);
 	}
-	if (return_hostlist) {
-		hostlist_uniq(return_hostlist);
-		host_str = hostlist_ranged_string_xmalloc(return_hostlist);
-		info("Nodes %s returned to service", host_str);
-		xfree(host_str);
-		hostlist_destroy(return_hostlist);
-	}
 
 	if (update_node_state) {
 		reset_job_priority();
@@ -2031,44 +2123,53 @@ static void _sync_bitmaps(struct node_record *node_ptr, int job_count)
 		bit_set   (up_node_bitmap, node_inx);
 }
 
-/*
- * node_did_resp - record that the specified node is responding
- * IN name - name of the node
- * NOTE: READ lock_slurmctld config before entry
- */
-void node_did_resp (char *name)
+#ifdef HAVE_FRONT_END
+static void _node_did_resp(front_end_record_t *fe_ptr)
 {
-	struct node_record *node_ptr;
-#ifdef HAVE_FRONT_END		/* Fake all other nodes */
-	int i;
+	uint16_t node_flags;
+	time_t now = time(NULL);
 
-	for (i=0; i<node_record_count; i++) {
-		node_ptr = &node_record_table_ptr[i];
-		_node_did_resp(node_ptr);
+	fe_ptr->last_response = now;
+#ifndef HAVE_CRAY
+	/* This is handled by the select/cray plugin */
+	if (IS_NODE_NO_RESPOND(fe_ptr)) {
+		info("Node %s now responding", fe_ptr->name);
+		last_front_end_update = now;
+		fe_ptr->node_state &= (~NODE_STATE_NO_RESPOND);
 	}
-	debug2("node_did_resp %s",name);
-#else
-	node_ptr = find_node_record (name);
-	if (node_ptr == NULL) {
-		error ("node_did_resp unable to find node %s", name);
-		return;
-	}
-	_node_did_resp(node_ptr);
-	debug2("node_did_resp %s",name);
 #endif
+	node_flags = fe_ptr->node_state & NODE_STATE_FLAGS;
+	if (IS_NODE_UNKNOWN(fe_ptr)) {
+		last_front_end_update = now;
+		fe_ptr->node_state = NODE_STATE_IDLE | node_flags;
+	}
+	if (IS_NODE_DOWN(fe_ptr) &&
+	    (slurmctld_conf.ret2service == 1) &&
+	    (fe_ptr->reason != NULL) &&
+	    (strncmp(fe_ptr->reason, "Not responding", 14) == 0)) {
+		last_front_end_update = now;
+		fe_ptr->node_state = NODE_STATE_IDLE | node_flags;
+		info("node_did_resp: node %s returned to service",
+		     fe_ptr->name);
+		trigger_front_end_up(fe_ptr);
+		if (!IS_NODE_DRAIN(fe_ptr) && !IS_NODE_FAIL(fe_ptr)) {
+			xfree(fe_ptr->reason);
+			fe_ptr->reason_time = 0;
+			fe_ptr->reason_uid = NO_VAL;
+		}
+	}
+	return;
 }
-
+#else
 static void _node_did_resp(struct node_record *node_ptr)
 {
 	int node_inx;
-	uint16_t resp_state, node_flags;
+	uint16_t node_flags;
 	time_t now = time(NULL);
 
 	node_inx = node_ptr - node_record_table_ptr;
 	node_ptr->last_response = now;
-	resp_state = node_ptr->node_state & (NODE_STATE_NO_RESPOND |
-					     NODE_STATE_POWER_UP);
-	if (resp_state) {
+	if (IS_NODE_NO_RESPOND(node_ptr) || IS_NODE_POWER_UP(node_ptr)) {
 		info("Node %s now responding", node_ptr->name);
 		reset_job_priority();
 		node_ptr->node_state &= (~NODE_STATE_NO_RESPOND);
@@ -2096,13 +2197,13 @@ static void _node_did_resp(struct node_record *node_ptr)
 		node_ptr->last_idle = now;
 		node_ptr->node_state = NODE_STATE_IDLE | node_flags;
 		info("node_did_resp: node %s returned to service",
-			node_ptr->name);
+		     node_ptr->name);
 		trigger_node_up(node_ptr);
 		last_node_update = now;
 		if (!IS_NODE_DRAIN(node_ptr) && !IS_NODE_FAIL(node_ptr)) {
-			xfree(node_ptr->reason);
-			node_ptr->reason_time = 0;
-			node_ptr->reason_uid = NO_VAL;
+			/* reason information is handled in
+			   clusteracct_storage_g_node_up()
+			*/
 			clusteracct_storage_g_node_up(acct_db_conn,
 						      node_ptr, now);
 		}
@@ -2122,6 +2223,29 @@ static void _node_did_resp(struct node_record *node_ptr)
 		bit_set   (up_node_bitmap, node_inx);
 	return;
 }
+#endif
+
+/*
+ * node_did_resp - record that the specified node is responding
+ * IN name - name of the node
+ * NOTE: READ lock_slurmctld config before entry
+ */
+void node_did_resp (char *name)
+{
+#ifdef HAVE_FRONT_END
+	front_end_record_t *node_ptr;
+	node_ptr = find_front_end_record (name);
+#else
+	struct node_record *node_ptr;
+	node_ptr = find_node_record (name);
+#endif
+	if (node_ptr == NULL) {
+		error ("node_did_resp unable to find node %s", name);
+		return;
+	}
+	_node_did_resp(node_ptr);
+	debug2("node_did_resp %s",name);
+}
 
 /*
  * node_not_resp - record that the specified node is not responding
@@ -2130,22 +2254,15 @@ static void _node_did_resp(struct node_record *node_ptr)
  */
 void node_not_resp (char *name, time_t msg_time)
 {
-	struct node_record *node_ptr;
-#ifdef HAVE_FRONT_END		/* Fake all other nodes */
-	int i;
-
-	for (i=0; i<node_record_count; i++) {
-		node_ptr = node_record_table_ptr + i;
-		if (!IS_NODE_DOWN(node_ptr)) {
-			node_ptr->not_responding = true;
-			bit_clear (avail_node_bitmap, i);
-			node_ptr->node_state |= NODE_STATE_NO_RESPOND;
-			last_node_update = time(NULL);
-		}
-	}
+#ifdef HAVE_FRONT_END
+	front_end_record_t *node_ptr;
 
+	node_ptr = find_front_end_record (name);
 #else
+	struct node_record *node_ptr;
+
 	node_ptr = find_node_record (name);
+#endif
 	if (node_ptr == NULL) {
 		error ("node_not_resp unable to find node %s", name);
 		return;
@@ -2154,8 +2271,23 @@ void node_not_resp (char *name, time_t msg_time)
 		/* Logged by node_no_resp_msg() on periodic basis */
 		node_ptr->not_responding = true;
 	}
-	_node_not_resp(node_ptr, msg_time);
+
+	if (IS_NODE_NO_RESPOND(node_ptr))
+		return;		/* Already known to be not responding */
+
+	if (node_ptr->last_response >= msg_time) {
+		debug("node_not_resp: node %s responded since msg sent",
+		      node_ptr->name);
+		return;
+	}
+	node_ptr->node_state |= NODE_STATE_NO_RESPOND;
+#ifdef HAVE_FRONT_END
+	last_front_end_update = time(NULL);
+#else
+	last_node_update = time(NULL);
+	bit_clear (avail_node_bitmap, (node_ptr - node_record_table_ptr));
 #endif
+	return;
 }
 
 /* For every node with the "not_responding" flag set, clear the flag
@@ -2187,29 +2319,8 @@ extern void node_no_resp_msg(void)
 	}
 }
 
-#ifndef HAVE_FRONT_END
-static void _node_not_resp (struct node_record *node_ptr, time_t msg_time)
-{
-	int i;
-
-	i = node_ptr - node_record_table_ptr;
-	if (IS_NODE_NO_RESPOND(node_ptr))
-		return;		/* Already known to be not responding */
-
-	if (node_ptr->last_response >= msg_time) {
-		debug("node_not_resp: node %s responded since msg sent",
-			node_ptr->name);
-		return;
-	}
-	bit_clear (avail_node_bitmap, i);
-	node_ptr->node_state |= NODE_STATE_NO_RESPOND;
-	last_node_update = time (NULL);
-	return;
-}
-#endif
-
 /*
- * set_node_down - make the specified node's state DOWN and
+ * set_node_down - make the specified compute node's state DOWN and
  *	kill jobs as needed
  * IN name - name of the node
  * IN reason - why the node is DOWN
@@ -2217,23 +2328,36 @@ static void _node_not_resp (struct node_record *node_ptr, time_t msg_time)
 void set_node_down (char *name, char *reason)
 {
 	struct node_record *node_ptr;
-	time_t now = time(NULL);
 
 	node_ptr = find_node_record (name);
 	if (node_ptr == NULL) {
 		error ("set_node_down unable to find node %s", name);
 		return;
 	}
+	set_node_down_ptr (node_ptr, reason);
+
+	return;
+}
+
+/*
+ * set_node_down_ptr - make the specified compute node's state DOWN and
+ *	kill jobs as needed
+ * IN node_ptr - node_ptr to the node
+ * IN reason - why the node is DOWN
+ */
+void set_node_down_ptr (struct node_record *node_ptr, char *reason)
+{
+	time_t now = time(NULL);
 
 	if ((node_ptr->reason == NULL) ||
 	    (strncmp(node_ptr->reason, "Not responding", 14) == 0)) {
 		xfree(node_ptr->reason);
 		node_ptr->reason = xstrdup(reason);
-		node_ptr->reason_time = time(NULL);
+		node_ptr->reason_time = now;
 		node_ptr->reason_uid = slurm_get_slurm_user_id();
 	}
 	_make_node_down(node_ptr, now);
-	(void) kill_running_job_by_node_name(name);
+	(void) kill_running_job_by_node_name(node_ptr->name);
 	_sync_bitmaps(node_ptr, 0);
 
 	return;
@@ -2266,9 +2390,15 @@ bool is_node_down (char *name)
  */
 bool is_node_resp (char *name)
 {
+#ifdef HAVE_FRONT_END
+	front_end_record_t *node_ptr;
+
+	node_ptr = find_front_end_record (name);
+#else
 	struct node_record *node_ptr;
 
 	node_ptr = find_node_record (name);
+#endif
 	if (node_ptr == NULL) {
 		error ("is_node_resp unable to find node %s", name);
 		return false;
@@ -2307,28 +2437,39 @@ void msg_to_slurmd (slurm_msg_type_t msg_type)
 	int i;
 	shutdown_msg_t *shutdown_req;
 	agent_arg_t *kill_agent_args;
+#ifdef HAVE_FRONT_END
+	front_end_record_t *front_end_ptr;
+#else
 	struct node_record *node_ptr;
+#endif
 
 	kill_agent_args = xmalloc (sizeof (agent_arg_t));
 	kill_agent_args->msg_type = msg_type;
 	kill_agent_args->retry = 0;
 	kill_agent_args->hostlist = hostlist_create("");
+	if (kill_agent_args->hostlist == NULL)
+		fatal("hostlist_create: malloc failure");
 	if (msg_type == REQUEST_SHUTDOWN) {
  		shutdown_req = xmalloc(sizeof(shutdown_msg_t));
 		shutdown_req->options = 0;
 		kill_agent_args->msg_args = shutdown_req;
 	}
 
+#ifdef HAVE_FRONT_END
+	for (i = 0, front_end_ptr = front_end_nodes;
+	     i < front_end_node_cnt; i++, front_end_ptr++) {
+		hostlist_push(kill_agent_args->hostlist, front_end_ptr->name);
+		kill_agent_args->node_count++;
+	}
+#else
 	node_ptr = node_record_table_ptr;
 	for (i = 0; i < node_record_count; i++, node_ptr++) {
 		if (IS_NODE_FUTURE(node_ptr))
 			continue;
 		hostlist_push(kill_agent_args->hostlist, node_ptr->name);
 		kill_agent_args->node_count++;
-#ifdef HAVE_FRONT_END		/* Operate only on front-end */
-		break;
-#endif
 	}
+#endif
 
 	if (kill_agent_args->node_count == 0) {
 		hostlist_destroy(kill_agent_args->hostlist);
@@ -2450,7 +2591,7 @@ static void _make_node_down(struct node_record *node_ptr, time_t event_time)
 	bit_set   (idle_node_bitmap,  inx);
 	bit_set   (share_node_bitmap, inx);
 	bit_clear (up_node_bitmap,    inx);
-	select_g_update_node_state(inx, node_ptr->node_state);
+	select_g_update_node_state(node_ptr);
 	trigger_node_down(node_ptr);
 	last_node_update = time (NULL);
 	clusteracct_storage_g_node_down(acct_db_conn,
@@ -2495,7 +2636,7 @@ void make_node_idle(struct node_record *node_ptr,
 						"%ld seconds", job_ptr->job_id,
 						(long) delay);
 				job_ptr->job_state &= (~JOB_COMPLETING);
-				delete_step_records(job_ptr, 0);
+				delete_step_records(job_ptr);
 				slurm_sched_schedule();
 			}
 		} else {
@@ -2537,13 +2678,20 @@ void make_node_idle(struct node_record *node_ptr,
 	if (IS_NODE_DOWN(node_ptr)) {
 		debug3("make_node_idle: Node %s being left DOWN",
 			node_ptr->name);
-	} else if ((IS_NODE_DRAIN(node_ptr) || IS_NODE_FAIL(node_ptr)) &&
-		   (node_ptr->run_job_cnt == 0) &&
-		   (node_ptr->comp_job_cnt == 0)) {
-		node_ptr->node_state = NODE_STATE_IDLE | node_flags;
+		return;
+	}
+	bit_set(up_node_bitmap, inx);
+
+	if (IS_NODE_DRAIN(node_ptr) || IS_NODE_FAIL(node_ptr) ||
+	    IS_NODE_NO_RESPOND(node_ptr))
 		bit_clear(avail_node_bitmap, inx);
+	else
+		bit_set(avail_node_bitmap, inx);
+
+	if ((IS_NODE_DRAIN(node_ptr) || IS_NODE_FAIL(node_ptr)) &&
+	    (node_ptr->run_job_cnt == 0) && (node_ptr->comp_job_cnt == 0)) {
+		node_ptr->node_state = NODE_STATE_IDLE | node_flags;
 		bit_set(idle_node_bitmap, inx);
-		bit_set(up_node_bitmap, inx);
 		debug3("make_node_idle: Node %s is DRAINED",
 		       node_ptr->name);
 		node_ptr->last_idle = now;
@@ -2556,7 +2704,6 @@ void make_node_idle(struct node_record *node_ptr,
 		if (!IS_NODE_NO_RESPOND(node_ptr) &&
 		     !IS_NODE_FAIL(node_ptr) && !IS_NODE_DRAIN(node_ptr))
 			bit_set(avail_node_bitmap, inx);
-		bit_set(up_node_bitmap, inx);
 	} else {
 		node_ptr->node_state = NODE_STATE_IDLE | node_flags;
 		if (!IS_NODE_NO_RESPOND(node_ptr) &&
@@ -2565,7 +2712,6 @@ void make_node_idle(struct node_record *node_ptr,
 		if (!IS_NODE_NO_RESPOND(node_ptr) &&
 		    !IS_NODE_COMPLETING(node_ptr))
 			bit_set(idle_node_bitmap, inx);
-		bit_set(up_node_bitmap, inx);
 		node_ptr->last_idle = now;
 	}
 	last_node_update = now;
diff --git a/src/slurmctld/node_scheduler.c b/src/slurmctld/node_scheduler.c
index bf20955fe..62e9625d4 100644
--- a/src/slurmctld/node_scheduler.c
+++ b/src/slurmctld/node_scheduler.c
@@ -10,7 +10,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -55,7 +55,7 @@
 #include <syslog.h>
 #include <unistd.h>
 
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm_errno.h"
 
 #include "src/common/assoc_mgr.h"
 #include "src/common/hostlist.h"
@@ -69,7 +69,7 @@
 
 #include "src/slurmctld/acct_policy.h"
 #include "src/slurmctld/agent.h"
-#include "src/slurmctld/basil_interface.h"
+#include "src/slurmctld/front_end.h"
 #include "src/slurmctld/job_scheduler.h"
 #include "src/slurmctld/licenses.h"
 #include "src/slurmctld/node_scheduler.h"
@@ -130,11 +130,21 @@ extern void allocate_nodes(struct job_record *job_ptr)
 {
 	int i;
 
+#ifdef HAVE_FRONT_END
+	job_ptr->front_end_ptr = assign_front_end();
+	xassert(job_ptr->front_end_ptr);
+	xfree(job_ptr->batch_host);
+	job_ptr->batch_host = xstrdup(job_ptr->front_end_ptr->name);
+#endif
+
 	for (i = 0; i < node_record_count; i++) {
-		if (bit_test(job_ptr->node_bitmap, i))
-			make_node_alloc(&node_record_table_ptr[i], job_ptr);
+		if (!bit_test(job_ptr->node_bitmap, i))
+			continue;
+		make_node_alloc(&node_record_table_ptr[i], job_ptr);
+		if (job_ptr->batch_host)
+			continue;
+		job_ptr->batch_host = xstrdup(node_record_table_ptr[i].name);
 	}
-
 	last_node_update = time(NULL);
 
 	license_job_get(job_ptr);
@@ -151,15 +161,19 @@ extern void allocate_nodes(struct job_record *job_ptr)
  *	RPC instead of REQUEST_TERMINATE_JOB
  * IN suspended - true if job was already suspended (node's job_run_cnt
  *	already decremented);
+ * IN preempted - true if job is being preempted
  */
 extern void deallocate_nodes(struct job_record *job_ptr, bool timeout,
-		bool suspended)
+		bool suspended, bool preempted)
 {
 	int i;
 	kill_job_msg_t *kill_job = NULL;
 	agent_arg_t *agent_args = NULL;
 	int down_node_cnt = 0;
 	struct node_record *node_ptr;
+#ifdef HAVE_FRONT_END
+	front_end_record_t *front_end_ptr;
+#endif
 
 	xassert(job_ptr);
 	xassert(job_ptr->details);
@@ -172,17 +186,17 @@ extern void deallocate_nodes(struct job_record *job_ptr, bool timeout,
 		error("select_g_job_fini(%u): %m", job_ptr->job_id);
 	(void) epilog_slurmctld(job_ptr);
 
-#ifdef HAVE_CRAY
-	basil_release(job_ptr);
-#endif /* HAVE_CRAY */
-
 	agent_args = xmalloc(sizeof(agent_arg_t));
 	if (timeout)
 		agent_args->msg_type = REQUEST_KILL_TIMELIMIT;
+	else if (preempted)
+		agent_args->msg_type = REQUEST_KILL_PREEMPTED;
 	else
 		agent_args->msg_type = REQUEST_TERMINATE_JOB;
 	agent_args->retry = 0;	/* re_kill_job() resends as needed */
 	agent_args->hostlist = hostlist_create("");
+	if (agent_args->hostlist == NULL)
+		fatal("hostlist_create: malloc failure");
 	kill_job = xmalloc(sizeof(kill_job_msg_t));
 	last_node_update    = time(NULL);
 	kill_job->job_id    = job_ptr->job_id;
@@ -198,9 +212,57 @@ extern void deallocate_nodes(struct job_record *job_ptr, bool timeout,
 					    job_ptr->spank_job_env);
 	kill_job->spank_job_env_size = job_ptr->spank_job_env_size;
 
-	for (i=0, node_ptr=node_record_table_ptr;
+#ifdef HAVE_FRONT_END
+	if (job_ptr->batch_host &&
+	    (front_end_ptr = job_ptr->front_end_ptr)) {
+		if (IS_NODE_DOWN(front_end_ptr)) {
+			/* Issue the KILL RPC, but don't verify response */
+			front_end_ptr->job_cnt_comp = 0;
+			front_end_ptr->job_cnt_run  = 0;
+			down_node_cnt++;
+			if (job_ptr->node_bitmap_cg) {
+				bit_nclear(job_ptr->node_bitmap_cg, 0,
+					   node_record_count - 1);
+			} else {
+				error("deallocate_nodes: node_bitmap_cg is "
+				      "not set");
+				/* Create empty node_bitmap_cg */
+				job_ptr->node_bitmap_cg =
+					bit_alloc(node_record_count);
+			}
+			job_ptr->cpu_cnt  = 0;
+			job_ptr->node_cnt = 0;
+		} else {
+			front_end_ptr->job_cnt_comp++;
+			if (front_end_ptr->job_cnt_run)
+				front_end_ptr->job_cnt_run--;
+			else {
+				error("front_end %s job_cnt_run underflow",
+				      front_end_ptr->name);
+			}
+			if (front_end_ptr->job_cnt_run == 0) {
+				uint16_t state_flags;
+				state_flags = front_end_ptr->node_state &
+					      NODE_STATE_FLAGS;
+				state_flags |= NODE_STATE_COMPLETING;
+				front_end_ptr->node_state = NODE_STATE_IDLE |
+							    state_flags;
+			}
+			for (i = 0, node_ptr = node_record_table_ptr;
+			     i < node_record_count; i++, node_ptr++) {
+				if (!bit_test(job_ptr->node_bitmap, i))
+					continue;
+				make_node_comp(node_ptr, job_ptr, suspended);
+			}
+		}
+
+		hostlist_push(agent_args->hostlist, job_ptr->batch_host);
+		agent_args->node_count++;
+	}
+#else
+	for (i = 0, node_ptr = node_record_table_ptr;
 	     i < node_record_count; i++, node_ptr++) {
-		if (bit_test(job_ptr->node_bitmap, i) == 0)
+		if (!bit_test(job_ptr->node_bitmap, i))
 			continue;
 		if (IS_NODE_DOWN(node_ptr)) {
 			/* Issue the KILL RPC, but don't verify response */
@@ -215,23 +277,23 @@ extern void deallocate_nodes(struct job_record *job_ptr, bool timeout,
 			job_ptr->node_cnt--;
 		}
 		make_node_comp(node_ptr, job_ptr, suspended);
-#ifdef HAVE_FRONT_END		/* Operate only on front-end */
-		if (agent_args->node_count > 0)
-			continue;
-#endif
+
 		hostlist_push(agent_args->hostlist, node_ptr->name);
 		agent_args->node_count++;
 	}
+#endif
 
 	if ((agent_args->node_count - down_node_cnt) == 0) {
 		job_ptr->job_state &= (~JOB_COMPLETING);
-		delete_step_records(job_ptr, 0);
+		delete_step_records(job_ptr);
 		slurm_sched_schedule();
 	}
 
 	if (agent_args->node_count == 0) {
-		error("Job %u allocated no nodes to be killed on",
-		      job_ptr->job_id);
+		if (job_ptr->details->expanding_jobid == 0) {
+			error("Job %u allocated no nodes to be killed on",
+			      job_ptr->job_id);
+		}
 		xfree(kill_job->nodes);
 		select_g_select_jobinfo_free(kill_job->select_jobinfo);
 		xfree(kill_job);
@@ -946,7 +1008,7 @@ _pick_best_nodes(struct node_set *node_set_ptr, int node_set_size,
 	/* The job is not able to start right now, return a
 	 * value indicating when the job can start */
 	if (!runable_avail)
-		error_code = ESLURM_REQUESTED_PART_CONFIG_UNAVAILABLE;
+		error_code = ESLURM_NODE_NOT_AVAIL;
 	if (!runable_ever) {
 		error_code = ESLURM_REQUESTED_NODE_CONFIG_UNAVAILABLE;
 		info("_pick_best_nodes: job %u never runnable",
@@ -975,12 +1037,14 @@ static void _preempt_jobs(List preemptee_job_list, int *error_code)
 	while ((job_ptr = (struct job_record *) list_next(iter))) {
 		mode = slurm_job_preempt_mode(job_ptr);
 		if (mode == PREEMPT_MODE_CANCEL) {
-			rc = job_signal(job_ptr->job_id, SIGKILL, 0, 0);
+			job_cnt++;
+			if (slurm_job_check_grace(job_ptr) == SLURM_SUCCESS)
+				continue;
+			rc = job_signal(job_ptr->job_id, SIGKILL, 0, 0, true);
 			if (rc == SLURM_SUCCESS) {
 				info("preempted job %u has been killed",
 				     job_ptr->job_id);
 			}
-			job_cnt++;
 		} else if (mode == PREEMPT_MODE_CHECKPOINT) {
 			checkpoint_msg_t ckpt_msg;
 			memset(&ckpt_msg, 0, sizeof(checkpoint_msg_t));
@@ -1002,7 +1066,7 @@ static void _preempt_jobs(List preemptee_job_list, int *error_code)
 			job_cnt++;
 		} else if (mode == PREEMPT_MODE_REQUEUE) {
 			rc = job_requeue(0, job_ptr->job_id, -1,
-					 (uint16_t)NO_VAL);
+					 (uint16_t)NO_VAL, true);
 			if (rc == SLURM_SUCCESS) {
 				info("preempted job %u has been requeued",
 				     job_ptr->job_id);
@@ -1017,7 +1081,7 @@ static void _preempt_jobs(List preemptee_job_list, int *error_code)
 		}
 
 		if (rc != SLURM_SUCCESS) {
-			rc = job_signal(job_ptr->job_id, SIGKILL, 0, 0);
+			rc = job_signal(job_ptr->job_id, SIGKILL, 0, 0, true);
 			if (rc == SLURM_SUCCESS)
 				info("preempted job %u had to be killed",
 				     job_ptr->job_id);
@@ -1110,14 +1174,13 @@ extern int select_nodes(struct job_record *job_ptr, bool test_only,
 		fail_reason = WAIT_PART_NODE_LIMIT;
 	else if (qos_ptr && assoc_ptr &&
 		 (qos_ptr->flags & QOS_FLAG_ENFORCE_USAGE_THRES) &&
-		 (qos_ptr->usage_thres != (double)NO_VAL)) {
+		 (!fuzzy_equal(qos_ptr->usage_thres, NO_VAL))) {
 		if (!job_ptr->prio_factors)
 			job_ptr->prio_factors =
 				xmalloc(sizeof(priority_factors_object_t));
 
 		if (!job_ptr->prio_factors->priority_fs) {
-			if (assoc_ptr->usage->usage_efctv
-			    == (long double)NO_VAL)
+			if (fuzzy_equal(assoc_ptr->usage->usage_efctv, NO_VAL))
 				priority_g_set_assoc_usage(assoc_ptr);
 			job_ptr->prio_factors->priority_fs =
 				priority_g_calc_fs_factor(
@@ -1178,6 +1241,17 @@ extern int select_nodes(struct job_record *job_ptr, bool test_only,
 		max_nodes = MIN(job_ptr->details->max_nodes,
 				part_ptr->max_nodes);
 
+	if (job_ptr->details->req_node_bitmap && job_ptr->details->max_nodes) {
+		i = bit_set_count(job_ptr->details->req_node_bitmap);
+		if (i > job_ptr->details->max_nodes) {
+			info("Job %u required node list has more node than "
+			     "the job can use (%d > %u)",
+			     job_ptr->job_id, i, job_ptr->details->max_nodes);
+			error_code = ESLURM_REQUESTED_NODE_CONFIG_UNAVAILABLE;
+			goto cleanup;
+		}
+	}
+
 	max_nodes = MIN(max_nodes, 500000);	/* prevent overflows */
 	if (!job_ptr->limit_set_max_nodes && job_ptr->details->max_nodes)
 		req_nodes = max_nodes;
@@ -1199,8 +1273,22 @@ extern int select_nodes(struct job_record *job_ptr, bool test_only,
 	 * free up. total_cpus is set within _get_req_features */
 	job_ptr->cpu_cnt = job_ptr->total_cpus;
 
-	if (!test_only && preemptee_job_list && (error_code == SLURM_SUCCESS))
-		_preempt_jobs(preemptee_job_list, &error_code);
+	if (!test_only && preemptee_job_list && (error_code == SLURM_SUCCESS)){
+		struct job_details *detail_ptr = job_ptr->details;
+		time_t now = time(NULL);
+		if ((detail_ptr->preempt_start_time != 0) &&
+		    (detail_ptr->preempt_start_time >
+		     (now - slurmctld_conf.kill_wait -
+		      slurmctld_conf.msg_timeout))) {
+			/* Job preemption still in progress,
+			 * do not preempt any more jobs yet */
+			error_code = ESLURM_NODES_BUSY;
+		} else {
+			_preempt_jobs(preemptee_job_list, &error_code);
+			if (error_code == ESLURM_NODES_BUSY)
+  				detail_ptr->preempt_start_time = now;
+		}
+	}
 	if (error_code) {
 		if (error_code == ESLURM_REQUESTED_PART_CONFIG_UNAVAILABLE) {
 			/* Too many nodes requested */
@@ -1237,15 +1325,6 @@ extern int select_nodes(struct job_record *job_ptr, bool test_only,
 		goto cleanup;
 	}
 
-#ifdef HAVE_CRAY
-	if (basil_reserve(job_ptr) != SLURM_SUCCESS) {
-		job_ptr->state_reason = WAIT_RESOURCES;
-		xfree(job_ptr->state_desc);
-		error_code = ESLURM_NODES_BUSY;
-		goto cleanup;
-	}
-#endif	/* HAVE_CRAY */
-
 	/* This job may be getting requeued, clear vestigial
 	 * state information before over-writing and leaking
 	 * memory. */
@@ -1982,16 +2061,26 @@ extern void re_kill_job(struct job_record *job_ptr)
 	int i;
 	kill_job_msg_t *kill_job;
 	agent_arg_t *agent_args;
-	hostlist_t kill_hostlist = hostlist_create("");
+	hostlist_t kill_hostlist;
 	char *host_str = NULL;
 	static uint32_t last_job_id = 0;
+	struct node_record *node_ptr;
+#ifdef HAVE_FRONT_END
+	front_end_record_t *front_end_ptr;
+#endif
 
 	xassert(job_ptr);
 	xassert(job_ptr->details);
 
+	kill_hostlist = hostlist_create("");
+	if (kill_hostlist == NULL)
+		fatal("hostlist_create: malloc failure");
+
 	agent_args = xmalloc(sizeof(agent_arg_t));
 	agent_args->msg_type = REQUEST_TERMINATE_JOB;
 	agent_args->hostlist = hostlist_create("");
+	if (agent_args->hostlist == NULL)
+		fatal("hostlist_create: malloc failure");
 	agent_args->retry = 0;
 	kill_job = xmalloc(sizeof(kill_job_msg_t));
 	kill_job->job_id    = job_ptr->job_id;
@@ -2006,40 +2095,65 @@ extern void re_kill_job(struct job_record *job_ptr)
 					    job_ptr->spank_job_env);
 	kill_job->spank_job_env_size = job_ptr->spank_job_env_size;
 
+#ifdef HAVE_FRONT_END
+	if (job_ptr->batch_host &&
+	    (front_end_ptr = find_front_end_record(job_ptr->batch_host))) {
+		if (IS_NODE_DOWN(front_end_ptr)) {
+			for (i = 0, node_ptr = node_record_table_ptr;
+			     i < node_record_count; i++, node_ptr++) {
+				if ((job_ptr->node_bitmap_cg == NULL) ||
+				    (!bit_test(job_ptr->node_bitmap_cg, i)))
+					continue;
+				bit_clear(job_ptr->node_bitmap_cg, i);
+				job_update_cpu_cnt(job_ptr, i);
+				if (node_ptr->comp_job_cnt)
+					(node_ptr->comp_job_cnt)--;
+				if ((job_ptr->node_cnt > 0) &&
+				    ((--job_ptr->node_cnt) == 0)) {
+					last_node_update = time(NULL);
+					job_ptr->job_state &= (~JOB_COMPLETING);
+					delete_step_records(job_ptr);
+					slurm_sched_schedule();
+				}
+			}
+		} else if (!IS_NODE_NO_RESPOND(front_end_ptr)) {
+			(void) hostlist_push_host(kill_hostlist,
+						  job_ptr->batch_host);
+			hostlist_push(agent_args->hostlist,
+				      job_ptr->batch_host);
+			agent_args->node_count++;
+		}
+	}
+#else
 	for (i = 0; i < node_record_count; i++) {
-		struct node_record *node_ptr = &node_record_table_ptr[i];
+		node_ptr = &node_record_table_ptr[i];
 		if ((job_ptr->node_bitmap_cg == NULL) ||
-		    (bit_test(job_ptr->node_bitmap_cg, i) == 0))
+		    (bit_test(job_ptr->node_bitmap_cg, i) == 0)) {
 			continue;
-		if (IS_NODE_DOWN(node_ptr) &&
-		    bit_test(job_ptr->node_bitmap_cg, i)) {
+		} else if (IS_NODE_DOWN(node_ptr)) {
 			/* Consider job already completed */
 			bit_clear(job_ptr->node_bitmap_cg, i);
 			job_update_cpu_cnt(job_ptr, i);
 			if (node_ptr->comp_job_cnt)
 				(node_ptr->comp_job_cnt)--;
-			if ((--job_ptr->node_cnt) == 0) {
+			if ((job_ptr->node_cnt > 0) &&
+			    ((--job_ptr->node_cnt) == 0)) {
 				job_ptr->job_state &= (~JOB_COMPLETING);
-				delete_step_records(job_ptr, 0);
+				delete_step_records(job_ptr);
 				slurm_sched_schedule();
 				last_node_update = time(NULL);
 			}
-			continue;
+		} else if (!IS_NODE_NO_RESPOND(node_ptr)) {
+			(void)hostlist_push_host(kill_hostlist, node_ptr->name);
+			hostlist_push(agent_args->hostlist, node_ptr->name);
+			agent_args->node_count++;
 		}
-		if (IS_NODE_DOWN(node_ptr) || IS_NODE_NO_RESPOND(node_ptr))
-			continue;
-		(void) hostlist_push_host(kill_hostlist, node_ptr->name);
-#ifdef HAVE_FRONT_END		/* Operate only on front-end */
-		if (agent_args->node_count > 0)
-			continue;
-#endif
-		hostlist_push(agent_args->hostlist, node_ptr->name);
-		agent_args->node_count++;
 	}
+#endif
 
 	if (agent_args->node_count == 0) {
 		slurm_free_kill_job_msg(kill_job);
-		if(agent_args->hostlist)
+		if (agent_args->hostlist)
 			hostlist_destroy(agent_args->hostlist);
 		xfree(agent_args);
 		hostlist_destroy(kill_hostlist);
diff --git a/src/slurmctld/node_scheduler.h b/src/slurmctld/node_scheduler.h
index a3d8cd102..57abf8f36 100644
--- a/src/slurmctld/node_scheduler.h
+++ b/src/slurmctld/node_scheduler.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -62,9 +62,10 @@ extern void build_node_details(struct job_record *job_ptr);
  *	RPC instead of REQUEST_TERMINATE_JOB
  * IN suspended - true if job was already suspended (node's job_run_cnt
  *	already decremented);
+ * IN preempted - true if job is being preempted
  */
 extern void deallocate_nodes(struct job_record *job_ptr, bool timeout,
-		bool suspended);
+		bool suspended, bool preempted);
 
 /*
  * re_kill_job - for a given job, deallocate its nodes for a second time,
diff --git a/src/slurmctld/partition_mgr.c b/src/slurmctld/partition_mgr.c
index 9d0589813..a5829f9cf 100644
--- a/src/slurmctld/partition_mgr.c
+++ b/src/slurmctld/partition_mgr.c
@@ -2,7 +2,6 @@
  *  partition_mgr.c - manage the partition information of slurm
  *	Note: there is a global partition list (part_list) and
  *	time stamp (last_part_update)
- *  $Id$
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Copyright (C) 2008-2010 Lawrence Livermore National Security.
@@ -12,7 +11,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -236,6 +235,7 @@ struct part_record *create_part_record(void)
 	part_ptr->max_share         = default_part.max_share;
 	part_ptr->preempt_mode      = default_part.preempt_mode;
 	part_ptr->priority          = default_part.priority;
+	part_ptr->grace_time 	    = default_part.grace_time;
 	if(part_max_priority)
 		part_ptr->norm_priority = (double)default_part.priority
 			/ (double)part_max_priority;
@@ -394,6 +394,7 @@ static void _dump_part_state(struct part_record *part_ptr, Buf buffer)
 		part_ptr->flags &= (~PART_FLAG_DEFAULT);
 
 	packstr(part_ptr->name,          buffer);
+	pack32(part_ptr->grace_time,	 buffer);
 	pack32(part_ptr->max_time,       buffer);
 	pack32(part_ptr->default_time,   buffer);
 	pack32(part_ptr->max_nodes_orig, buffer);
@@ -453,6 +454,7 @@ int load_all_part_state(void)
 	char *part_name = NULL, *allow_groups = NULL, *nodes = NULL;
 	char *state_file, *data = NULL;
 	uint32_t max_time, default_time, max_nodes, min_nodes;
+	uint32_t grace_time = 0;
 	time_t time;
 	uint16_t def_part_flag, flags, hidden, root_only;
 	uint16_t max_share, preempt_mode, priority, state_up;
@@ -522,8 +524,9 @@ int load_all_part_state(void)
 	safe_unpack_time(&time, buffer);
 
 	while (remaining_buf(buffer) > 0) {
-		if (protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
+		if (protocol_version >= SLURM_2_3_PROTOCOL_VERSION) {
 			safe_unpackstr_xmalloc(&part_name, &name_len, buffer);
+			safe_unpack32(&grace_time, buffer);
 			safe_unpack32(&max_time, buffer);
 			safe_unpack32(&default_time, buffer);
 			safe_unpack32(&max_nodes, buffer);
@@ -534,7 +537,37 @@ int load_all_part_state(void)
 			safe_unpack16(&preempt_mode, buffer);
 			safe_unpack16(&priority,     buffer);
 
-			if(priority > part_max_priority)
+			if (priority > part_max_priority)
+				part_max_priority = priority;
+
+			safe_unpack16(&state_up, buffer);
+			safe_unpackstr_xmalloc(&allow_groups,
+					       &name_len, buffer);
+			safe_unpackstr_xmalloc(&allow_alloc_nodes,
+					       &name_len, buffer);
+			safe_unpackstr_xmalloc(&alternate, &name_len, buffer);
+			safe_unpackstr_xmalloc(&nodes, &name_len, buffer);
+			if ((flags & PART_FLAG_DEFAULT_CLR) ||
+			    (flags & PART_FLAG_HIDDEN_CLR)  ||
+			    (flags & PART_FLAG_NO_ROOT_CLR) ||
+			    (flags & PART_FLAG_ROOT_ONLY_CLR)) {
+				error("Invalid data for partition %s: flags=%u",
+				      part_name, flags);
+				error_code = EINVAL;
+			}
+		} else if (protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
+			safe_unpackstr_xmalloc(&part_name, &name_len, buffer);
+			safe_unpack32(&max_time, buffer);
+			safe_unpack32(&default_time, buffer);
+			safe_unpack32(&max_nodes, buffer);
+			safe_unpack32(&min_nodes, buffer);
+
+			safe_unpack16(&flags,        buffer);
+			safe_unpack16(&max_share,    buffer);
+			safe_unpack16(&preempt_mode, buffer);
+			safe_unpack16(&priority,     buffer);
+
+			if (priority > part_max_priority)
 				part_max_priority = priority;
 
 			safe_unpack16(&state_up, buffer);
@@ -637,6 +670,7 @@ int load_all_part_state(void)
 		part_ptr->min_nodes      = min_nodes;
 		part_ptr->min_nodes_orig = min_nodes;
 		part_ptr->max_share      = max_share;
+		part_ptr->grace_time     = grace_time;
 		if (preempt_mode != (uint16_t) NO_VAL)
 			part_ptr->preempt_mode   = preempt_mode;
 		part_ptr->priority       = priority;
@@ -740,6 +774,7 @@ int init_part_conf(void)
 	default_part.norm_priority  = 0;
 	default_part.total_nodes    = 0;
 	default_part.total_cpus     = 0;
+	default_part.grace_time     = 0;
 	xfree(default_part.nodes);
 	xfree(default_part.allow_groups);
 	xfree(default_part.allow_uids);
@@ -927,7 +962,36 @@ void pack_part(struct part_record *part_ptr, Buf buffer,
 {
 	uint32_t altered;
 
-	if(protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_2_3_PROTOCOL_VERSION) {
+		if (default_part_loc == part_ptr)
+			part_ptr->flags |= PART_FLAG_DEFAULT;
+		else
+			part_ptr->flags &= (~PART_FLAG_DEFAULT);
+
+		packstr(part_ptr->name, buffer);
+		pack32(part_ptr->grace_time, buffer);
+		pack32(part_ptr->max_time, buffer);
+		pack32(part_ptr->default_time, buffer);
+		pack32(part_ptr->max_nodes_orig, buffer);
+		pack32(part_ptr->min_nodes_orig, buffer);
+		altered = part_ptr->total_nodes;
+		select_g_alter_node_cnt(SELECT_APPLY_NODE_MAX_OFFSET, &altered);
+		pack32(altered,              buffer);
+		pack32(part_ptr->total_cpus, buffer);
+		pack32(part_ptr->def_mem_per_cpu, buffer);
+		pack32(part_ptr->max_mem_per_cpu, buffer);
+		pack16(part_ptr->flags,      buffer);
+		pack16(part_ptr->max_share,  buffer);
+		pack16(part_ptr->preempt_mode, buffer);
+		pack16(part_ptr->priority,   buffer);
+
+		pack16(part_ptr->state_up, buffer);
+		packstr(part_ptr->allow_groups, buffer);
+		packstr(part_ptr->allow_alloc_nodes, buffer);
+		packstr(part_ptr->alternate, buffer);
+		packstr(part_ptr->nodes, buffer);
+		pack_bit_fmt(part_ptr->node_bitmap, buffer);
+	} else if (protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
 		if (default_part_loc == part_ptr)
 			part_ptr->flags |= PART_FLAG_DEFAULT;
 		else
@@ -1066,7 +1130,7 @@ extern int update_part (update_part_msg_t * part_desc, bool create_flag)
 		     part_desc->max_nodes, part_desc->name);
 		part_ptr->max_nodes      = part_desc->max_nodes;
 		part_ptr->max_nodes_orig = part_desc->max_nodes;
-		select_g_alter_node_cnt(SELECT_SET_BP_CNT,
+		select_g_alter_node_cnt(SELECT_SET_MP_CNT,
 					&part_ptr->max_nodes);
 	}
 
@@ -1075,10 +1139,16 @@ extern int update_part (update_part_msg_t * part_desc, bool create_flag)
 		     part_desc->min_nodes, part_desc->name);
 		part_ptr->min_nodes      = part_desc->min_nodes;
 		part_ptr->min_nodes_orig = part_desc->min_nodes;
-		select_g_alter_node_cnt(SELECT_SET_BP_CNT,
+		select_g_alter_node_cnt(SELECT_SET_MP_CNT,
 					&part_ptr->min_nodes);
 	}
 
+	if (part_desc->grace_time != NO_VAL) {
+		info("update_part: setting grace_time to %u for partition %s",
+		     part_desc->grace_time, part_desc->name);
+		part_ptr->grace_time = part_desc->grace_time;
+	}
+
 	if (part_desc->flags & PART_FLAG_HIDDEN) {
 		info("update_part: setting hidden for partition %s",
 		     part_desc->name);
@@ -1173,9 +1243,9 @@ extern int update_part (update_part_msg_t * part_desc, bool create_flag)
 		part_ptr->priority = part_desc->priority;
 
 		/* If the max_priority changes we need to change all
-		   the normalized priorities of all the other
-		   partitions.  If not then just set this partitions.
-		*/
+		 * the normalized priorities of all the other
+		 * partitions. If not then just set this partition.
+		 */
 		if(part_ptr->priority > part_max_priority) {
 			ListIterator itr = list_iterator_create(part_list);
 			struct part_record *part2 = NULL;
@@ -1243,6 +1313,35 @@ extern int update_part (update_part_msg_t * part_desc, bool create_flag)
 		     part_ptr->alternate, part_desc->name);
 	}
 
+	if (part_desc->def_mem_per_cpu != NO_VAL) {
+		char *key;
+		uint32_t value;
+		if (part_desc->def_mem_per_cpu & MEM_PER_CPU) {
+			key = "DefMemPerCpu";
+			value = part_desc->def_mem_per_cpu & (~MEM_PER_CPU);
+		} else {
+			key = "DefMemPerNode";
+			value = part_desc->def_mem_per_cpu;
+		}
+		info("update_part: setting %s to %u for partition %s",
+		     key, value, part_desc->name);
+		part_ptr->def_mem_per_cpu = part_desc->def_mem_per_cpu;
+	}
+
+	if (part_desc->max_mem_per_cpu != NO_VAL) {
+		char *key;
+		uint32_t value;
+		if (part_desc->max_mem_per_cpu & MEM_PER_CPU) {
+			key = "MaxMemPerCpu";
+			value = part_desc->max_mem_per_cpu & (~MEM_PER_CPU);
+		} else {
+			key = "MaxMemPerNode";
+			value = part_desc->max_mem_per_cpu;
+		}
+		info("update_part: setting %s to %u for partition %s",
+		     key, value, part_desc->name);
+		part_ptr->max_mem_per_cpu = part_desc->max_mem_per_cpu;
+	}
 
 	if (part_desc->nodes != NULL) {
 		char *backup_node_list = part_ptr->nodes;
@@ -1372,7 +1471,7 @@ void load_part_uid_allow_list(int force)
 
 
 /*
- * _get_groups_members - indentify the users in a list of group names
+ * _get_groups_members - identify the users in a list of group names
  * IN group_names - a comma delimited list of group names
  * RET a zero terminated list of its UIDs or NULL on error
  * NOTE: User root has implicitly access to every group
@@ -1465,6 +1564,9 @@ extern int delete_partition(delete_part_msg_t *part_desc_ptr)
 	if (part_ptr == NULL)	/* No such partition */
 		return ESLURM_INVALID_PARTITION_NAME;
 
+	if (partition_in_use(part_desc_ptr->name))
+		return ESLURM_PARTITION_IN_USE;
+
 	if (default_part_loc == part_ptr) {
 		error("Deleting default partition %s", part_ptr->name);
 		default_part_loc = NULL;
diff --git a/src/slurmctld/ping_nodes.c b/src/slurmctld/ping_nodes.c
index 57057e50b..6d7430556 100644
--- a/src/slurmctld/ping_nodes.c
+++ b/src/slurmctld/ping_nodes.c
@@ -2,13 +2,13 @@
  *  ping_nodes.c - ping the slurmd daemons to test if they respond
  *****************************************************************************
  *  Copyright (C) 2003-2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2011 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov> et. al.
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -51,6 +51,7 @@
 #include "src/common/hostlist.h"
 #include "src/common/read_config.h"
 #include "src/slurmctld/agent.h"
+#include "src/slurmctld/front_end.h"
 #include "src/slurmctld/ping_nodes.h"
 #include "src/slurmctld/slurmctld.h"
 
@@ -117,6 +118,7 @@ void ping_end (void)
  */
 void ping_nodes (void)
 {
+	static bool restart_flag = true;	/* system just restarted */
 	static int offset = 0;	/* mutex via node table write lock on entry */
 	static int max_reg_threads = 0;	/* max node registration threads
 					 * this can include DOWN nodes, so
@@ -125,12 +127,15 @@ void ping_nodes (void)
 	int i;
 	time_t now, still_live_time, node_dead_time;
 	static time_t last_ping_time = (time_t) 0;
-	bool restart_flag;
 	hostlist_t down_hostlist = NULL;
 	char *host_str = NULL;
 	agent_arg_t *ping_agent_args = NULL;
 	agent_arg_t *reg_agent_args = NULL;
-	struct node_record *node_ptr;
+#ifdef HAVE_FRONT_END
+	front_end_record_t *front_end_ptr = NULL;
+#else
+	struct node_record *node_ptr = NULL;
+#endif
 
 	now = time (NULL);
 
@@ -153,12 +158,12 @@ void ping_nodes (void)
 	 * Because of this, we extend the SlurmdTimeout by the
 	 * time needed to complete a ping of all nodes.
 	 */
-	if ((slurmctld_conf.slurmd_timeout == 0)
-	||  (last_ping_time == (time_t) 0)) {
+	if ((slurmctld_conf.slurmd_timeout == 0) ||
+	    (last_ping_time == (time_t) 0)) {
 		node_dead_time = (time_t) 0;
 	} else {
 		node_dead_time = last_ping_time -
-				slurmctld_conf.slurmd_timeout;
+				 slurmctld_conf.slurmd_timeout;
 	}
 	still_live_time = now - (slurmctld_conf.slurmd_timeout / 3);
 	last_ping_time  = now;
@@ -171,11 +176,71 @@ void ping_nodes (void)
 	    (offset >= (max_reg_threads * MAX_REG_FREQUENCY)))
 		offset = 0;
 
+#ifdef HAVE_FRONT_END
+	for (i = 0, front_end_ptr = front_end_nodes;
+	     i < front_end_node_cnt; i++, front_end_ptr++) {
+		if ((slurmctld_conf.slurmd_timeout == 0)	&&
+		    (!restart_flag)				&&
+		    (!IS_NODE_UNKNOWN(front_end_ptr))		&&
+		    (!IS_NODE_NO_RESPOND(front_end_ptr)))
+			continue;
+
+		if ((front_end_ptr->last_response != (time_t) 0)     &&
+		    (front_end_ptr->last_response <= node_dead_time) &&
+		    (!IS_NODE_DOWN(front_end_ptr))) {
+			if (down_hostlist)
+				(void) hostlist_push_host(down_hostlist,
+					front_end_ptr->name);
+			else {
+				down_hostlist =
+					hostlist_create(front_end_ptr->name);
+				if (down_hostlist == NULL)
+					fatal("hostlist_create: malloc error");
+			}
+			set_front_end_down(front_end_ptr, "Not responding");
+			front_end_ptr->not_responding = false;
+			continue;
+		}
+
+		if (restart_flag) {
+			front_end_ptr->last_response =
+				slurmctld_conf.last_update;
+		}
+
+		/* Request a node registration if its state is UNKNOWN or
+		 * on a periodic basis (about every MAX_REG_FREQUENCY ping,
+		 * this mechanism avoids an additional (per node) timer or
+		 * counter and gets updated configuration information
+		 * once in a while). We limit these requests since they
+		 * can generate a flood of incoming RPCs. */
+		if (IS_NODE_UNKNOWN(front_end_ptr) || restart_flag ||
+		    ((i >= offset) && (i < (offset + max_reg_threads)))) {
+			hostlist_push(reg_agent_args->hostlist,
+				      front_end_ptr->name);
+			reg_agent_args->node_count++;
+			continue;
+		}
+
+		if ((!IS_NODE_NO_RESPOND(front_end_ptr)) &&
+		    (front_end_ptr->last_response >= still_live_time))
+			continue;
+
+		/* Do not keep pinging down nodes since this can induce
+		 * huge delays in hierarchical communication fail-over */
+		if (IS_NODE_NO_RESPOND(front_end_ptr) &&
+		    IS_NODE_DOWN(front_end_ptr))
+			continue;
+
+		hostlist_push(ping_agent_args->hostlist, front_end_ptr->name);
+		ping_agent_args->node_count++;
+	}
+#else
 	for (i=0, node_ptr=node_record_table_ptr;
 	     i<node_record_count; i++, node_ptr++) {
 		if (IS_NODE_FUTURE(node_ptr) || IS_NODE_POWER_SAVE(node_ptr))
 			continue;
 		if ((slurmctld_conf.slurmd_timeout == 0) &&
+		    (!restart_flag)			 &&
 		    (!IS_NODE_UNKNOWN(node_ptr))         &&
 		    (!IS_NODE_NO_RESPOND(node_ptr)))
 			continue;
@@ -186,24 +251,19 @@ void ping_nodes (void)
 			if (down_hostlist)
 				(void) hostlist_push_host(down_hostlist,
 					node_ptr->name);
-			else
+			else {
 				down_hostlist =
 					hostlist_create(node_ptr->name);
-			set_node_down(node_ptr->name, "Not responding");
+				if (down_hostlist == NULL)
+					fatal("hostlist_create: malloc error");
+			}
+			set_node_down_ptr(node_ptr, "Not responding");
 			node_ptr->not_responding = false;  /* logged below */
 			continue;
 		}
 
-		if (node_ptr->last_response == (time_t) 0) {
-			restart_flag = true;	/* system just restarted */
+		if (restart_flag)
 			node_ptr->last_response = slurmctld_conf.last_update;
-		} else
-			restart_flag = false;
-
-#ifdef HAVE_FRONT_END		/* Operate only on front-end */
-		if (i > 0)
-			continue;
-#endif
 
 		/* Request a node registration if its state is UNKNOWN or
 		 * on a periodic basis (about every MAX_REG_FREQUENCY ping,
@@ -231,7 +291,9 @@ void ping_nodes (void)
 		hostlist_push(ping_agent_args->hostlist, node_ptr->name);
 		ping_agent_args->node_count++;
 	}
+#endif
 
+	restart_flag = false;
 	if (ping_agent_args->node_count == 0) {
 		hostlist_destroy(ping_agent_args->hostlist);
 		xfree (ping_agent_args);
@@ -271,29 +333,40 @@ void ping_nodes (void)
 /* Spawn health check function for every node that is not DOWN */
 extern void run_health_check(void)
 {
+#ifdef HAVE_FRONT_END
+	front_end_record_t *front_end_ptr;
+#else
+	struct node_record *node_ptr;
+#endif
 	int i;
 	char *host_str = NULL;
 	agent_arg_t *check_agent_args = NULL;
-	struct node_record *node_ptr;
 
 	check_agent_args = xmalloc (sizeof (agent_arg_t));
 	check_agent_args->msg_type = REQUEST_HEALTH_CHECK;
 	check_agent_args->retry = 0;
 	check_agent_args->hostlist = hostlist_create("");
+	if (check_agent_args->hostlist == NULL)
+		fatal("hostlist_create: malloc failure");
 
+#ifdef HAVE_FRONT_END
+	for (i = 0, front_end_ptr = front_end_nodes;
+	     i < front_end_node_cnt; i++, front_end_ptr++) {
+		if (IS_NODE_NO_RESPOND(front_end_ptr))
+			continue;
+		hostlist_push(check_agent_args->hostlist, front_end_ptr->name);
+		check_agent_args->node_count++;
+	}
+#else
 	for (i=0, node_ptr=node_record_table_ptr;
 	     i<node_record_count; i++, node_ptr++) {
-		if (IS_NODE_NO_RESPOND(node_ptr) || IS_NODE_FUTURE(node_ptr))
+		if (IS_NODE_NO_RESPOND(node_ptr) || IS_NODE_FUTURE(node_ptr) ||
+		    IS_NODE_POWER_SAVE(node_ptr))
 			continue;
-
-#ifdef HAVE_FRONT_END		/* Operate only on front-end */
-		if (i > 0)
-			continue;
-#endif
-
 		hostlist_push(check_agent_args->hostlist, node_ptr->name);
 		check_agent_args->node_count++;
 	}
+#endif
 
 	if (check_agent_args->node_count == 0) {
 		hostlist_destroy(check_agent_args->hostlist);
diff --git a/src/slurmctld/ping_nodes.h b/src/slurmctld/ping_nodes.h
index 53d321ca9..3fa9d62be 100644
--- a/src/slurmctld/ping_nodes.h
+++ b/src/slurmctld/ping_nodes.h
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/slurmctld/port_mgr.c b/src/slurmctld/port_mgr.c
index d72f22952..403a5085e 100644
--- a/src/slurmctld/port_mgr.c
+++ b/src/slurmctld/port_mgr.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/slurmctld/port_mgr.h b/src/slurmctld/port_mgr.h
index 7f3f1bd56..acd44e106 100644
--- a/src/slurmctld/port_mgr.h
+++ b/src/slurmctld/port_mgr.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/slurmctld/power_save.c b/src/slurmctld/power_save.c
index e8a3df033..755d3dfc5 100644
--- a/src/slurmctld/power_save.c
+++ b/src/slurmctld/power_save.c
@@ -13,7 +13,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/slurmctld/preempt.c b/src/slurmctld/preempt.c
index fb8c99732..2b536bbfe 100644
--- a/src/slurmctld/preempt.c
+++ b/src/slurmctld/preempt.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -189,6 +189,59 @@ static int _slurm_preempt_context_destroy(slurm_preempt_context_t *c)
 	return SLURM_SUCCESS;
 }
 
+/* *********************************************************************** */
+/*  TAG(                    _preempt_signal                             )  */
+/* *********************************************************************** */
+static void _preempt_signal(struct job_record *job_ptr, uint32_t grace_time)
+{
+	/* allow the job termination mechanism to signal the job */
+
+	job_ptr->preempt_time = time(NULL);
+	job_ptr->end_time = job_ptr->preempt_time + (time_t)grace_time;
+}
+/* *********************************************************************** */
+/*  TAG(                    slurm_job_check_grace                       )  */
+/* *********************************************************************** */
+extern int slurm_job_check_grace(struct job_record *job_ptr)
+{
+	/* Preempt modes: -1 (unset), 0 (none), 1 (partition), 2 (QOS) */
+	static int preempt_mode = 0;
+	static time_t last_update_time = 0;
+	int rc = SLURM_SUCCESS;
+	uint32_t grace_time = 0;
+
+	if (job_ptr->preempt_time) {
+		if (time(NULL) >= job_ptr->end_time)
+			rc = SLURM_ERROR;
+		return rc;
+	}
+
+	if (last_update_time != slurmctld_conf.last_update) {
+		char *preempt_type = slurm_get_preempt_type();
+		if ((strcmp(preempt_type, "preempt/partition_prio") == 0))
+			preempt_mode = 1;
+		else if ((strcmp(preempt_type, "preempt/qos") == 0))
+			preempt_mode = 2;
+		else
+			preempt_mode = 0;
+		xfree(preempt_type);
+	}
+
+	if (preempt_mode == 1)
+		grace_time = job_ptr->part_ptr->grace_time;
+	else if (preempt_mode == 2) {
+		slurmdb_qos_rec_t *qos_ptr = (slurmdb_qos_rec_t *)
+					     job_ptr->qos_ptr;
+		grace_time = qos_ptr->grace_time;
+	}
+
+	if (grace_time)
+		_preempt_signal(job_ptr, grace_time);
+	else
+		rc = SLURM_ERROR;
+
+	return rc;
+}
 
 /* *********************************************************************** */
 /*  TAG(                    slurm_preempt_init                        )  */
diff --git a/src/slurmctld/preempt.h b/src/slurmctld/preempt.h
index b36869d95..d1aacb473 100644
--- a/src/slurmctld/preempt.h
+++ b/src/slurmctld/preempt.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -40,8 +40,8 @@
 #ifndef __SLURM_CONTROLLER_PREEMPT_H__
 #define __SLURM_CONTROLLER_PREEMPT_H__
 
-#include <slurm/slurm.h>
-#include <src/slurmctld/slurmctld.h>
+#include "slurm/slurm.h"
+#include "src/slurmctld/slurmctld.h"
 #include "src/slurmctld/job_scheduler.h"
 
 /*
@@ -89,4 +89,8 @@ extern bool slurm_preemption_enabled(void);
 extern bool slurm_job_preempt_check(job_queue_rec_t *preemptor,
 				    job_queue_rec_t *preemptee);
 
+
+/* Returns a SLURM errno if preempt grace isn't allowed */
+extern int slurm_job_check_grace(struct job_record *job_ptr);
+
 #endif /*__SLURM_CONTROLLER_PREEMPT_H__*/
diff --git a/src/slurmctld/proc_req.c b/src/slurmctld/proc_req.c
index cbc4215b3..8ab0ace34 100644
--- a/src/slurmctld/proc_req.c
+++ b/src/slurmctld/proc_req.c
@@ -2,13 +2,13 @@
  *  proc_req.c - process incoming messages to slurmctld
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
- *  Copyright (C) 2008-2010 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2011 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette@llnl.gov>, et. al.
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -51,13 +51,14 @@
 #include <stdlib.h>
 #include <string.h>
 
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm_errno.h"
 
 #include "src/common/assoc_mgr.h"
 #include "src/common/checkpoint.h"
 #include "src/common/daemonize.h"
 #include "src/common/fd.h"
 #include "src/common/forward.h"
+#include "src/common/gres.h"
 #include "src/common/hostlist.h"
 #include "src/common/log.h"
 #include "src/common/macros.h"
@@ -73,6 +74,8 @@
 #include "src/common/xstring.h"
 
 #include "src/slurmctld/agent.h"
+#include "src/slurmctld/front_end.h"
+#include "src/slurmctld/gang.h"
 #include "src/slurmctld/job_scheduler.h"
 #include "src/slurmctld/locks.h"
 #include "src/slurmctld/proc_req.h"
@@ -84,7 +87,7 @@
 #include "src/slurmctld/state_save.h"
 #include "src/slurmctld/trigger_mgr.h"
 
-#include "src/plugins/select/bluegene/wrap_rm_api.h"
+#include "src/plugins/select/bluegene/bg_enums.h"
 
 static void         _fill_ctld_conf(slurm_ctl_conf_t * build_ptr);
 static void         _kill_job_on_msg_fail(uint32_t job_id);
@@ -93,8 +96,9 @@ static int 	    _launch_batch_step(job_desc_msg_t *job_desc_msg,
 static int          _make_step_cred(struct step_record *step_rec,
 				    slurm_cred_t **slurm_cred);
 
-inline static void  _slurm_rpc_accounting_update_msg(slurm_msg_t *msg);
 inline static void  _slurm_rpc_accounting_first_reg(slurm_msg_t *msg);
+inline static void  _slurm_rpc_accounting_register_ctld(slurm_msg_t *msg);
+inline static void  _slurm_rpc_accounting_update_msg(slurm_msg_t *msg);
 inline static void  _slurm_rpc_allocate_resources(slurm_msg_t * msg);
 inline static void  _slurm_rpc_checkpoint(slurm_msg_t * msg);
 inline static void  _slurm_rpc_checkpoint_comp(slurm_msg_t * msg);
@@ -103,15 +107,16 @@ inline static void  _slurm_rpc_delete_partition(slurm_msg_t * msg);
 inline static void  _slurm_rpc_complete_job_allocation(slurm_msg_t * msg);
 inline static void  _slurm_rpc_complete_batch_script(slurm_msg_t * msg);
 inline static void  _slurm_rpc_dump_conf(slurm_msg_t * msg);
+inline static void  _slurm_rpc_dump_front_end(slurm_msg_t * msg);
 inline static void  _slurm_rpc_dump_jobs(slurm_msg_t * msg);
 inline static void  _slurm_rpc_dump_job_single(slurm_msg_t * msg);
-inline static void  _slurm_rpc_get_shares(slurm_msg_t *msg);
-inline static void  _slurm_rpc_get_topo(slurm_msg_t * msg);
-inline static void  _slurm_rpc_get_priority_factors(slurm_msg_t *msg);
 inline static void  _slurm_rpc_dump_nodes(slurm_msg_t * msg);
 inline static void  _slurm_rpc_dump_partitions(slurm_msg_t * msg);
 inline static void  _slurm_rpc_end_time(slurm_msg_t * msg);
 inline static void  _slurm_rpc_epilog_complete(slurm_msg_t * msg);
+inline static void  _slurm_rpc_get_shares(slurm_msg_t *msg);
+inline static void  _slurm_rpc_get_topo(slurm_msg_t * msg);
+inline static void  _slurm_rpc_get_priority_factors(slurm_msg_t *msg);
 inline static void  _slurm_rpc_job_notify(slurm_msg_t * msg);
 inline static void  _slurm_rpc_job_ready(slurm_msg_t * msg);
 inline static void  _slurm_rpc_job_sbcast_cred(slurm_msg_t * msg);
@@ -131,6 +136,7 @@ inline static void  _slurm_rpc_resv_delete(slurm_msg_t * msg);
 inline static void  _slurm_rpc_resv_show(slurm_msg_t * msg);
 inline static void  _slurm_rpc_requeue(slurm_msg_t * msg);
 inline static void  _slurm_rpc_takeover(slurm_msg_t * msg);
+inline static void  _slurm_rpc_set_debug_flags(slurm_msg_t *msg);
 inline static void  _slurm_rpc_set_debug_level(slurm_msg_t *msg);
 inline static void  _slurm_rpc_set_schedlog_level(slurm_msg_t *msg);
 inline static void  _slurm_rpc_shutdown_controller(slurm_msg_t * msg);
@@ -145,10 +151,12 @@ inline static void  _slurm_rpc_trigger_clear(slurm_msg_t * msg);
 inline static void  _slurm_rpc_trigger_get(slurm_msg_t * msg);
 inline static void  _slurm_rpc_trigger_set(slurm_msg_t * msg);
 inline static void  _slurm_rpc_trigger_pull(slurm_msg_t * msg);
+inline static void  _slurm_rpc_update_front_end(slurm_msg_t * msg);
 inline static void  _slurm_rpc_update_job(slurm_msg_t * msg);
 inline static void  _slurm_rpc_update_node(slurm_msg_t * msg);
 inline static void  _slurm_rpc_update_partition(slurm_msg_t * msg);
 inline static void  _slurm_rpc_update_block(slurm_msg_t * msg);
+inline static void _slurm_rpc_dump_spank(slurm_msg_t * msg);
 
 inline static void  _update_cred_key(void);
 
@@ -196,6 +204,10 @@ void slurmctld_req (slurm_msg_t * msg)
 		_slurm_rpc_end_time(msg);
 		slurm_free_job_alloc_info_msg(msg->data);
 		break;
+	case REQUEST_FRONT_END_INFO:
+		_slurm_rpc_dump_front_end(msg);
+		slurm_free_front_end_info_request_msg(msg->data);
+		break;
 	case REQUEST_NODE_INFO:
 		_slurm_rpc_dump_nodes(msg);
 		slurm_free_node_info_request_msg(msg->data);
@@ -276,6 +288,10 @@ void slurmctld_req (slurm_msg_t * msg)
 		_slurm_rpc_submit_batch_job(msg);
 		slurm_free_job_desc_msg(msg->data);
 		break;
+	case REQUEST_UPDATE_FRONT_END:
+		_slurm_rpc_update_front_end(msg);
+		slurm_free_update_front_end_msg(msg->data);
+		break;
 	case REQUEST_UPDATE_JOB:
 		_slurm_rpc_update_job(msg);
 		slurm_free_job_desc_msg(msg->data);
@@ -378,6 +394,10 @@ void slurmctld_req (slurm_msg_t * msg)
 		_slurm_rpc_job_notify(msg);
 		slurm_free_job_notify_msg(msg->data);
 		break;
+	case REQUEST_SET_DEBUG_FLAGS:
+		_slurm_rpc_set_debug_flags(msg);
+		slurm_free_set_debug_flags_msg(msg->data);
+		break;
 	case REQUEST_SET_DEBUG_LEVEL:
 		_slurm_rpc_set_debug_level(msg);
 		slurm_free_set_debug_level_msg(msg->data);
@@ -394,10 +414,18 @@ void slurmctld_req (slurm_msg_t * msg)
 		_slurm_rpc_accounting_first_reg(msg);
 		/* No body to free */
 		break;
+	case ACCOUNTING_REGISTER_CTLD:
+		_slurm_rpc_accounting_register_ctld(msg);
+		/* No body to free */
+		break;
 	case REQUEST_TOPO_INFO:
 		_slurm_rpc_get_topo(msg);
 		/* No body to free */
 		break;
+	case REQUEST_SPANK_ENVIRONMENT:
+		_slurm_rpc_dump_spank(msg);
+		slurm_free_spank_env_request_msg(msg->data);
+		break;
 	default:
 		error("invalid RPC msg_type=%d", msg->msg_type);
 		slurm_send_rc_msg(msg, EINVAL);
@@ -429,6 +457,7 @@ void _fill_ctld_conf(slurm_ctl_conf_t * conf_ptr)
 	conf_ptr->accounting_storage_user =
 		xstrdup(conf->accounting_storage_user);
 	conf_ptr->accounting_storage_port = conf->accounting_storage_port;
+	conf_ptr->acctng_store_job_comment = conf->acctng_store_job_comment;
 	conf_ptr->authtype            = xstrdup(conf->authtype);
 
 	conf_ptr->backup_addr         = xstrdup(conf->backup_addr);
@@ -491,7 +520,9 @@ void _fill_ctld_conf(slurm_ctl_conf_t * conf_ptr)
 
 	conf_ptr->mail_prog           = xstrdup(conf->mail_prog);
 	conf_ptr->max_job_cnt         = conf->max_job_cnt;
+	conf_ptr->max_job_id          = conf->max_job_id;
 	conf_ptr->max_mem_per_cpu     = conf->max_mem_per_cpu;
+	conf_ptr->max_step_cnt        = conf->max_step_cnt;
 	conf_ptr->max_tasks_per_node  = conf->max_tasks_per_node;
 	conf_ptr->min_job_age         = conf->min_job_age;
 	conf_ptr->mpi_default         = xstrdup(conf->mpi_default);
@@ -689,7 +720,8 @@ static int _make_step_cred(struct step_record *step_ptr,
 
 	cred_arg.step_core_bitmap = step_ptr->core_bitmap_job;
 #ifdef HAVE_FRONT_END
-	cred_arg.step_hostlist   = node_record_table_ptr[0].name;
+	xassert(job_ptr->batch_host);
+	cred_arg.step_hostlist   = job_ptr->batch_host;
 #else
 	cred_arg.step_hostlist   = step_ptr->step_layout->node_list;
 #endif
@@ -746,6 +778,18 @@ static void _slurm_rpc_allocate_resources(slurm_msg_t * msg)
 		error("REQUEST_RESOURCE_ALLOCATE lacks alloc_node from uid=%d",
 		      uid);
 	}
+#if HAVE_CRAY
+	/*
+	 * Catch attempts to nest salloc sessions. It is not possible to use an
+	 * ALPS session which has the same alloc_sid, it fails even if PAGG
+	 * container IDs are used.
+	 */
+	if (allocated_session_in_use(job_desc_msg)) {
+		error_code = ESLURM_RESERVATION_BUSY;
+		error("attempt to nest ALPS allocation on %s:%d by uid=%d",
+			job_desc_msg->alloc_node, job_desc_msg->alloc_sid, uid);
+	}
+#endif
 	slurm_get_peer_addr(msg->conn_fd, &resp_addr);
 	job_desc_msg->resp_host = xmalloc(16);
 	slurm_get_ip_str(&resp_addr, &port, job_desc_msg->resp_host, 16);
@@ -805,6 +849,12 @@ static void _slurm_rpc_allocate_resources(slurm_msg_t * msg)
 		alloc_msg.node_list      = xstrdup(job_ptr->nodes);
 		alloc_msg.select_jobinfo =
 			select_g_select_jobinfo_copy(job_ptr->select_jobinfo);
+		if (job_ptr->details) {
+			alloc_msg.pn_min_memory = job_ptr->details->
+						  pn_min_memory;
+		} else {
+			alloc_msg.pn_min_memory = 0;
+		}
 		unlock_slurmctld(job_write_lock);
 
 		slurm_msg_t_init(&response_msg);
@@ -885,12 +935,12 @@ static void _slurm_rpc_dump_jobs(slurm_msg_t * msg)
 	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
 
 	START_TIMER;
-	debug2("Processing RPC: REQUEST_JOB_INFO from uid=%d", uid);
+	debug3("Processing RPC: REQUEST_JOB_INFO from uid=%d", uid);
 	lock_slurmctld(job_read_lock);
 
 	if ((job_info_request_msg->last_update - 1) >= last_job_update) {
 		unlock_slurmctld(job_read_lock);
-		debug2("_slurm_rpc_dump_jobs, no change");
+		debug3("_slurm_rpc_dump_jobs, no change");
 		slurm_send_rc_msg(msg, SLURM_NO_CHANGE_IN_DATA);
 	} else {
 		pack_all_jobs(&dump, &dump_size,
@@ -931,7 +981,7 @@ static void _slurm_rpc_dump_job_single(slurm_msg_t * msg)
 	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
 
 	START_TIMER;
-	debug2("Processing RPC: REQUEST_JOB_INFO_SINGLE from uid=%d", uid);
+	debug3("Processing RPC: REQUEST_JOB_INFO_SINGLE from uid=%d", uid);
 	lock_slurmctld(job_read_lock);
 
 	rc = pack_one_job(&dump, &dump_size, job_id_msg->job_id,
@@ -1049,6 +1099,51 @@ static void _slurm_rpc_end_time(slurm_msg_t * msg)
 	       time_req_msg->job_id, TIME_STR);
 }
 
+/* _slurm_rpc_dump_front_end - process RPC for front_end state information */
+static void _slurm_rpc_dump_front_end(slurm_msg_t * msg)
+{
+	DEF_TIMERS;
+	char *dump = NULL;
+	int dump_size = 0;
+	slurm_msg_t response_msg;
+	front_end_info_request_msg_t *front_end_req_msg =
+		(front_end_info_request_msg_t *) msg->data;
+	/* Locks: Read config, read node */
+	slurmctld_lock_t node_read_lock = {
+		READ_LOCK, NO_LOCK, NO_LOCK, READ_LOCK };
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+
+	START_TIMER;
+	debug3("Processing RPC: REQUEST_FRONT_END_INFO from uid=%d", uid);
+	lock_slurmctld(node_read_lock);
+
+	if ((front_end_req_msg->last_update - 1) >= last_front_end_update) {
+		unlock_slurmctld(node_read_lock);
+		debug3("_slurm_rpc_dump_front_end, no change");
+		slurm_send_rc_msg(msg, SLURM_NO_CHANGE_IN_DATA);
+	} else {
+		pack_all_front_end(&dump, &dump_size, uid,
+				   msg->protocol_version);
+		unlock_slurmctld(node_read_lock);
+		END_TIMER2("_slurm_rpc_dump_front_end");
+		debug2("_slurm_rpc_dump_front_end, size=%d %s",
+		       dump_size, TIME_STR);
+
+		/* init response_msg structure */
+		slurm_msg_t_init(&response_msg);
+		response_msg.flags = msg->flags;
+		response_msg.protocol_version = msg->protocol_version;
+		response_msg.address = msg->address;
+		response_msg.msg_type = RESPONSE_FRONT_END_INFO;
+		response_msg.data = dump;
+		response_msg.data_size = dump_size;
+
+		/* send message */
+		slurm_send_node_msg(msg->conn_fd, &response_msg);
+		xfree(dump);
+	}
+}
+
 /* _slurm_rpc_dump_nodes - process RPC for node state information */
 static void _slurm_rpc_dump_nodes(slurm_msg_t * msg)
 {
@@ -1058,18 +1153,19 @@ static void _slurm_rpc_dump_nodes(slurm_msg_t * msg)
 	slurm_msg_t response_msg;
 	node_info_request_msg_t *node_req_msg =
 		(node_info_request_msg_t *) msg->data;
-	/* Locks: Read config, read node, write node (for hiding) */
-	slurmctld_lock_t node_read_lock = {
-		READ_LOCK, NO_LOCK, READ_LOCK, WRITE_LOCK };
+	/* Locks: Read config, write node (reset allocated CPU count in some
+	 * select plugins) */
+	slurmctld_lock_t node_write_lock = {
+		READ_LOCK, NO_LOCK, WRITE_LOCK, NO_LOCK };
 	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
 
 	START_TIMER;
-	debug2("Processing RPC: REQUEST_NODE_INFO from uid=%d", uid);
-	lock_slurmctld(node_read_lock);
+	debug3("Processing RPC: REQUEST_NODE_INFO from uid=%d", uid);
+	lock_slurmctld(node_write_lock);
 
-	if ((slurmctld_conf.private_data & PRIVATE_DATA_NODES)
-	    &&  (!validate_operator(uid))) {
-		unlock_slurmctld(node_read_lock);
+	if ((slurmctld_conf.private_data & PRIVATE_DATA_NODES) &&
+	    (!validate_operator(uid))) {
+		unlock_slurmctld(node_write_lock);
 		error("Security violation, REQUEST_NODE_INFO RPC from uid=%d",
 		      uid);
 		slurm_send_rc_msg(msg, ESLURM_ACCESS_DENIED);
@@ -1079,16 +1175,16 @@ static void _slurm_rpc_dump_nodes(slurm_msg_t * msg)
 	select_g_select_nodeinfo_set_all(node_req_msg->last_update - 1);
 
 	if ((node_req_msg->last_update - 1) >= last_node_update) {
-		unlock_slurmctld(node_read_lock);
-		debug2("_slurm_rpc_dump_nodes, no change");
+		unlock_slurmctld(node_write_lock);
+		debug3("_slurm_rpc_dump_nodes, no change");
 		slurm_send_rc_msg(msg, SLURM_NO_CHANGE_IN_DATA);
 	} else {
 
 		pack_all_node(&dump, &dump_size, node_req_msg->show_flags,
 			      uid, msg->protocol_version);
-		unlock_slurmctld(node_read_lock);
+		unlock_slurmctld(node_write_lock);
 		END_TIMER2("_slurm_rpc_dump_nodes");
-		debug2("_slurm_rpc_dump_nodes, size=%d %s",
+		debug3("_slurm_rpc_dump_nodes, size=%d %s",
 		       dump_size, TIME_STR);
 
 		/* init response_msg structure */
@@ -1229,7 +1325,8 @@ static void _slurm_rpc_job_step_kill(slurm_msg_t * msg)
 		/* NOTE: SLURM_BATCH_SCRIPT == NO_VAL */
 		error_code = job_signal(job_step_kill_msg->job_id,
 					job_step_kill_msg->signal,
-					job_step_kill_msg->batch_flag, uid);
+					job_step_kill_msg->batch_flag, uid,
+					false);
 		unlock_slurmctld(job_write_lock);
 		END_TIMER2("_slurm_rpc_job_step_kill");
 
@@ -1353,6 +1450,7 @@ static void _slurm_rpc_complete_batch_script(slurm_msg_t * msg)
 	bool job_requeue = false;
 	bool dump_job = false, dump_node = false;
 	struct job_record *job_ptr = NULL;
+	char *msg_title = "node(s)";
 	char *nodes = comp_msg->node_name;
 #ifdef HAVE_BG
 	update_block_msg_t block_desc;
@@ -1376,15 +1474,15 @@ static void _slurm_rpc_complete_batch_script(slurm_msg_t * msg)
 	lock_slurmctld(job_write_lock);
 
 	/* Send batch step info to accounting */
-	if (association_based_accounting
-	    && (job_ptr = find_job_record(comp_msg->job_id))) {
+	if (association_based_accounting &&
+	    (job_ptr = find_job_record(comp_msg->job_id))) {
 		struct step_record batch_step;
 		memset(&batch_step, 0, sizeof(struct step_record));
 		batch_step.job_ptr = job_ptr;
 		batch_step.step_id = SLURM_BATCH_SCRIPT;
 		batch_step.jobacct = comp_msg->jobacct;
 		batch_step.exit_code = comp_msg->job_rc;
-#ifdef HAVE_BG
+#ifdef HAVE_FRONT_END
 		nodes = job_ptr->nodes;
 #endif
 		batch_step.gres = nodes;
@@ -1393,47 +1491,78 @@ static void _slurm_rpc_complete_batch_script(slurm_msg_t * msg)
 		batch_step.requid = -1;
 		batch_step.start_time = job_ptr->start_time;
 		batch_step.name = "batch";
+		batch_step.select_jobinfo = job_ptr->select_jobinfo;
+
 		jobacct_storage_g_step_start(acct_db_conn, &batch_step);
 		jobacct_storage_g_step_complete(acct_db_conn, &batch_step);
 		FREE_NULL_BITMAP(batch_step.step_node_bitmap);
-	} else if (!association_based_accounting) {
-#ifdef HAVE_BG
-		job_ptr = find_job_record(comp_msg->job_id);
+	}
 
-		if (job_ptr)
-			nodes = job_ptr->nodes;
+#ifdef HAVE_FRONT_END
+	if (!job_ptr)
+		job_ptr = find_job_record(comp_msg->job_id);
+	if (job_ptr && job_ptr->front_end_ptr)
+		nodes = job_ptr->front_end_ptr->name;
+	msg_title = "front_end";
 #endif
-	}
 
 	/* do RPC call */
 	/* First set node DOWN if fatal error */
-	if (comp_msg->slurm_rc == ESLURM_ALREADY_DONE) {
+	if ((comp_msg->slurm_rc == ESLURM_ALREADY_DONE) ||
+	    (comp_msg->slurm_rc == ESLURMD_CREDENTIAL_REVOKED)) {
 		/* race condition on job termination, not a real error */
-		info("slurmd error running JobId=%u from node(s)=%s: %s",
+		info("slurmd error running JobId=%u from %s=%s: %s",
 		     comp_msg->job_id,
-		     nodes,
+		     msg_title, nodes,
 		     slurm_strerror(comp_msg->slurm_rc));
 		comp_msg->slurm_rc = SLURM_SUCCESS;
-	}
-
+#ifdef HAVE_CRAY
+	} else if (comp_msg->slurm_rc == ESLURM_RESERVATION_NOT_USABLE) {
+		/*
+		 * Confirmation of ALPS reservation failed.
+		 *
+		 * This is non-fatal, it may be a transient error (e.g. ALPS
+		 * temporary unavailable). Give job one more chance to run.
+		 */
+		error("ALPS reservation for JobId %u failed: %s",
+			comp_msg->job_id, slurm_strerror(comp_msg->slurm_rc));
+		dump_job = job_requeue = true;
+#endif
 	/* Handle non-fatal errors here */
-	if (comp_msg->slurm_rc == SLURM_COMMUNICATIONS_SEND_ERROR
-	    || comp_msg->slurm_rc == ESLURMD_CREDENTIAL_REVOKED
-	    || comp_msg->slurm_rc == ESLURM_USER_ID_MISSING) {
-		error("slurmd error %u running JobId=%u on node(s)=%s: %s",
-		      comp_msg->slurm_rc,
-		      comp_msg->job_id,
-		      nodes,
+	} else if ((comp_msg->slurm_rc == SLURM_COMMUNICATIONS_SEND_ERROR) ||
+	           (comp_msg->slurm_rc == ESLURM_USER_ID_MISSING) ||
+		   (comp_msg->slurm_rc == ESLURMD_UID_NOT_FOUND) ||
+		   (comp_msg->slurm_rc == ESLURMD_GID_NOT_FOUND)) {
+		error("Slurmd error running JobId=%u on %s=%s: %s",
+		      comp_msg->job_id, msg_title, nodes,
 		      slurm_strerror(comp_msg->slurm_rc));
 	} else if (comp_msg->slurm_rc != SLURM_SUCCESS) {
-		error("Fatal slurmd error %u running JobId=%u "
-		      "on node(s)=%s: %s",
-		      comp_msg->slurm_rc,
+		error("slurmd error running JobId=%u on %s=%s: %s",
 		      comp_msg->job_id,
-		      nodes,
+		      msg_title, nodes,
 		      slurm_strerror(comp_msg->slurm_rc));
 		if (error_code == SLURM_SUCCESS) {
-#ifndef HAVE_BG
+#ifdef HAVE_BG
+			if (job_ptr) {
+				select_g_select_jobinfo_get(
+					job_ptr->select_jobinfo,
+					SELECT_JOBDATA_BLOCK_ID,
+					&block_desc.bg_block_id);
+			}
+#else
+#ifdef HAVE_FRONT_END
+			if (job_ptr && job_ptr->front_end_ptr) {
+				update_front_end_msg_t update_node_msg;
+				memset(&update_node_msg, 0,
+				       sizeof(update_front_end_msg_t));
+				update_node_msg.name = job_ptr->front_end_ptr->
+						       name;
+				update_node_msg.node_state = NODE_STATE_DRAIN;
+				update_node_msg.reason =
+					"batch job complete failure";
+				error_code = update_front_end(&update_node_msg);
+			}
+#else
 			update_node_msg_t update_node_msg;
 			memset(&update_node_msg, 0, sizeof(update_node_msg_t));
 			update_node_msg.node_names = comp_msg->node_name;
@@ -1441,14 +1570,8 @@ static void _slurm_rpc_complete_batch_script(slurm_msg_t * msg)
 			update_node_msg.reason = "batch job complete failure";
 			update_node_msg.weight = NO_VAL;
 			error_code = update_node(&update_node_msg);
-#else
-			if (job_ptr) {
-				select_g_select_jobinfo_get(
-					job_ptr->select_jobinfo,
-					SELECT_JOBDATA_BLOCK_ID,
-					&block_desc.bg_block_id);
-			}
-#endif
+#endif	/* !HAVE_FRONT_END */
+#endif	/* !HAVE_BG */
 			if (comp_msg->job_rc != SLURM_SUCCESS)
 				job_requeue = true;
 			dump_job = true;
@@ -1465,7 +1588,7 @@ static void _slurm_rpc_complete_batch_script(slurm_msg_t * msg)
 #ifdef HAVE_BG
 	if (block_desc.bg_block_id) {
 		block_desc.reason = slurm_strerror(comp_msg->slurm_rc);
-		block_desc.state = RM_PARTITION_ERROR;
+		block_desc.state = BG_BLOCK_ERROR_FLAG;
 		i = select_g_update_block(&block_desc);
 		error_code = MAX(error_code, i);
 		xfree(block_desc.bg_block_id);
@@ -1494,7 +1617,7 @@ static void _slurm_rpc_complete_batch_script(slurm_msg_t * msg)
 		(void) schedule_node_save();	/* Has own locking */
 }
 
-/* _slurm_rpc_job_step_create - process RPC to creates/registers a job step
+/* _slurm_rpc_job_step_create - process RPC to create/register a job step
  *	with the step_mgr */
 static void _slurm_rpc_job_step_create(slurm_msg_t * msg)
 {
@@ -1524,7 +1647,7 @@ static void _slurm_rpc_job_step_create(slurm_msg_t * msg)
 		return;
 	}
 
-#ifdef HAVE_FRONT_END	/* Limited job step support */
+#if defined HAVE_FRONT_END && !defined HAVE_BGQ	/* Limited job step support */
 	/* Non-super users not permitted to run job steps on front-end.
 	 * A single slurmd can not handle a heavy load. */
 	if (!validate_slurm_user(uid)) {
@@ -1545,8 +1668,13 @@ static void _slurm_rpc_job_step_create(slurm_msg_t * msg)
 	/* return result */
 	if (error_code) {
 		unlock_slurmctld(job_write_lock);
-		info("_slurm_rpc_job_step_create: %s",
-		     slurm_strerror(error_code));
+		if (error_code == ESLURM_PROLOG_RUNNING) {
+			debug("_slurm_rpc_job_step_create for job %u: %s",
+			      req_step_msg->job_id, slurm_strerror(error_code));
+		} else {
+			info("_slurm_rpc_job_step_create for job %u: %s",
+			     req_step_msg->job_id, slurm_strerror(error_code));
+		}
 		slurm_send_rc_msg(msg, error_code);
 	} else {
 		slurm_step_layout_t *layout = step_rec->step_layout;
@@ -1556,12 +1684,17 @@ static void _slurm_rpc_job_step_create(slurm_msg_t * msg)
 		     req_step_msg->node_list, TIME_STR);
 
 		job_step_resp.job_step_id = step_rec->step_id;
-		job_step_resp.resv_ports  = xstrdup(step_rec->resv_ports);
-		job_step_resp.step_layout = slurm_step_layout_copy(layout);
-
-		job_step_resp.cred        = slurm_cred;
-		job_step_resp.switch_job  = switch_copy_jobinfo(
-			step_rec->switch_job);
+		job_step_resp.resv_ports  = step_rec->resv_ports;
+		job_step_resp.step_layout = layout;
+#ifdef HAVE_FRONT_END
+		if (step_rec->job_ptr->batch_host) {
+			job_step_resp.step_layout->front_end =
+				xstrdup(step_rec->job_ptr->batch_host);
+		}
+#endif
+		job_step_resp.cred           = slurm_cred;
+		job_step_resp.select_jobinfo = step_rec->select_jobinfo;
+		job_step_resp.switch_job     =  step_rec->switch_job;
 
 		unlock_slurmctld(job_write_lock);
 		slurm_msg_t_init(&resp);
@@ -1572,10 +1705,7 @@ static void _slurm_rpc_job_step_create(slurm_msg_t * msg)
 		resp.data = &job_step_resp;
 
 		slurm_send_node_msg(msg->conn_fd, &resp);
-		xfree(job_step_resp.resv_ports);
-		slurm_step_layout_destroy(job_step_resp.step_layout);
 		slurm_cred_destroy(slurm_cred);
-		switch_free_jobinfo(job_step_resp.switch_job);
 		schedule_job_save();	/* Sets own locks */
 	}
 }
@@ -1595,13 +1725,13 @@ static void _slurm_rpc_job_step_get_info(slurm_msg_t * msg)
 	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
 
 	START_TIMER;
-	debug2("Processing RPC: REQUEST_JOB_STEP_INFO from uid=%d", uid);
+	debug3("Processing RPC: REQUEST_JOB_STEP_INFO from uid=%d", uid);
 
 	lock_slurmctld(job_read_lock);
 
 	if ((request->last_update - 1) >= last_job_update) {
 		unlock_slurmctld(job_read_lock);
-		debug2("_slurm_rpc_job_step_get_info, no change");
+		debug3("_slurm_rpc_job_step_get_info, no change");
 		error_code = SLURM_NO_CHANGE_IN_DATA;
 	} else {
 		Buf buffer = init_buf(BUF_SIZE);
@@ -1614,13 +1744,13 @@ static void _slurm_rpc_job_step_get_info(slurm_msg_t * msg)
 		if (error_code) {
 			/* job_id:step_id not found or otherwise *\
 			   \* error message is printed elsewhere    */
-			debug2("_slurm_rpc_job_step_get_info: %s",
+			debug3("_slurm_rpc_job_step_get_info: %s",
 			       slurm_strerror(error_code));
 			free_buf(buffer);
 		} else {
 			resp_buffer_size = get_buf_offset(buffer);
 			resp_buffer = xfer_buf_data(buffer);
-			debug2("_slurm_rpc_job_step_get_info size=%d %s",
+			debug3("_slurm_rpc_job_step_get_info size=%d %s",
 			       resp_buffer_size, TIME_STR);
 		}
 	}
@@ -1738,9 +1868,9 @@ static void _slurm_rpc_node_registration(slurm_msg_t * msg)
 	}
 	if (error_code == SLURM_SUCCESS) {
 		/* do RPC call */
-		if(!(slurm_get_debug_flags() & DEBUG_FLAG_NO_CONF_HASH)
-		   && (node_reg_stat_msg->hash_val != NO_VAL)
-		   && (node_reg_stat_msg->hash_val != slurm_get_hash_val())) {
+		if (!(slurm_get_debug_flags() & DEBUG_FLAG_NO_CONF_HASH) &&
+		    (node_reg_stat_msg->hash_val != NO_VAL) &&
+		    (node_reg_stat_msg->hash_val != slurm_get_hash_val())) {
 			error("Node %s appears to have a different slurm.conf "
 			      "than the slurmctld.  This could cause issues "
 			      "with communication and functionality.  "
@@ -2323,7 +2453,7 @@ static void _slurm_rpc_step_layout(slurm_msg_t *msg)
 	}
 
 	step_ptr = find_step_record(job_ptr, req->step_id);
-	if(!step_ptr) {
+	if (!step_ptr) {
 		unlock_slurmctld(job_read_lock);
 		debug2("_slurm_rpc_step_layout: "
 		       "JobId=%u.%u Not Found",
@@ -2332,6 +2462,10 @@ static void _slurm_rpc_step_layout(slurm_msg_t *msg)
 		return;
 	}
 	step_layout = slurm_step_layout_copy(step_ptr->step_layout);
+#ifdef HAVE_FRONT_END
+	if (job_ptr->batch_host)
+		step_layout->front_end = xstrdup(job_ptr->batch_host);
+#endif
 	unlock_slurmctld(job_read_lock);
 
 	slurm_msg_t_init(&response_msg);
@@ -2422,7 +2556,7 @@ static void _slurm_rpc_submit_batch_job(slurm_msg_t * msg)
 			job_ptr = NULL;
 
 		if (job_ptr) {	/* Active job allocation */
-#ifdef HAVE_FRONT_END	/* Limited job step support */
+#if defined HAVE_FRONT_END && !defined HAVE_BGQ	/* Limited job step support */
 			/* Non-super users not permitted to run job steps on
 			 * front-end. A single slurmd can not handle a heavy
 			 * load. */
@@ -2537,8 +2671,8 @@ static void _slurm_rpc_update_job(slurm_msg_t * msg)
 
 	/* return result */
 	if (error_code) {
-		error("_slurm_rpc_update_job JobId=%u uid=%d: %s",
-		      job_desc_msg->job_id, uid, slurm_strerror(error_code));
+		info("_slurm_rpc_update_job JobId=%u uid=%d: %s",
+		     job_desc_msg->job_id, uid, slurm_strerror(error_code));
 		slurm_send_rc_msg(msg, error_code);
 	} else {
 		info("_slurm_rpc_update_job complete JobId=%u uid=%d %s",
@@ -2603,8 +2737,54 @@ extern int slurm_fail_job(uint32_t job_id)
 	return error_code;
 }
 
-/* _slurm_rpc_update_node - process RPC to update the configuration of a
- *	node (e.g. UP/DOWN) */
+/*
+ * _slurm_rpc_update_front_end - process RPC to update the configuration of a
+ *	front_end node (e.g. UP/DOWN)
+ */
+static void _slurm_rpc_update_front_end(slurm_msg_t * msg)
+{
+	int error_code = SLURM_SUCCESS;
+	DEF_TIMERS;
+	update_front_end_msg_t *update_front_end_msg_ptr =
+		(update_front_end_msg_t *) msg->data;
+	/* Locks: write node */
+	slurmctld_lock_t node_write_lock = {
+		NO_LOCK, NO_LOCK, WRITE_LOCK, NO_LOCK };
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+
+	START_TIMER;
+	debug2("Processing RPC: REQUEST_UPDATE_FRONT_END from uid=%d", uid);
+	if (!validate_super_user(uid)) {
+		error_code = ESLURM_USER_ID_MISSING;
+		error("Security violation, UPDATE_FRONT_END RPC from uid=%d",
+		      uid);
+	}
+
+	if (error_code == SLURM_SUCCESS) {
+		/* do RPC call */
+		lock_slurmctld(node_write_lock);
+		error_code = update_front_end(update_front_end_msg_ptr);
+		unlock_slurmctld(node_write_lock);
+		END_TIMER2("_slurm_rpc_update_front_end");
+	}
+
+	/* return result */
+	if (error_code) {
+		info("_slurm_rpc_update_front_end for %s: %s",
+		     update_front_end_msg_ptr->name,
+		     slurm_strerror(error_code));
+		slurm_send_rc_msg(msg, error_code);
+	} else {
+		debug2("_slurm_rpc_update_front_end complete for %s %s",
+		       update_front_end_msg_ptr->name, TIME_STR);
+		slurm_send_rc_msg(msg, SLURM_SUCCESS);
+	}
+}
+
+/*
+ * _slurm_rpc_update_node - process RPC to update the configuration of a
+ *	node (e.g. UP/DOWN)
+ */
 static void _slurm_rpc_update_node(slurm_msg_t * msg)
 {
 	int error_code = SLURM_SUCCESS;
@@ -2966,8 +3146,8 @@ static void _slurm_rpc_update_block(slurm_msg_t * msg)
 		error("Security violation, UPDATE_BLOCK RPC from uid=%d", uid);
 		if (block_desc_ptr->bg_block_id) {
 			name = block_desc_ptr->bg_block_id;
-		} else if (block_desc_ptr->nodes) {
-			name = block_desc_ptr->nodes;
+		} else if (block_desc_ptr->mp_str) {
+			name = block_desc_ptr->mp_str;
 		}
 	}
 
@@ -2977,10 +3157,10 @@ static void _slurm_rpc_update_block(slurm_msg_t * msg)
 			error_code = select_g_update_block(block_desc_ptr);
 			END_TIMER2("_slurm_rpc_update_block");
 			name = block_desc_ptr->bg_block_id;
-		} else if (block_desc_ptr->nodes) {
+		} else if (block_desc_ptr->mp_str) {
 			error_code = select_g_update_sub_node(block_desc_ptr);
 			END_TIMER2("_slurm_rpc_update_subbp");
-			name = block_desc_ptr->nodes;
+			name = block_desc_ptr->mp_str;
 		} else {
 			error("Unknown update for blocks");
 			error_code = SLURM_ERROR;
@@ -3156,7 +3336,7 @@ inline static void _slurm_rpc_requeue(slurm_msg_t * msg)
 
 	lock_slurmctld(job_write_lock);
 	error_code = job_requeue(uid, requeue_ptr->job_id,
-				 msg->conn_fd, msg->protocol_version);
+				 msg->conn_fd, msg->protocol_version, false);
 	unlock_slurmctld(job_write_lock);
 	END_TIMER2("_slurm_rpc_requeue");
 
@@ -3454,6 +3634,10 @@ int _launch_batch_step(job_desc_msg_t *job_desc_msg, uid_t uid,
 	launch_msg_ptr->uid = uid;
 	launch_msg_ptr->nodes = xstrdup(job_ptr->nodes);
 	launch_msg_ptr->restart_cnt = job_ptr->restart_cnt;
+	if (job_ptr->details) {
+		launch_msg_ptr->pn_min_memory = job_ptr->details->
+						pn_min_memory;
+	}
 
 	if (make_batch_job_cred(launch_msg_ptr, job_ptr)) {
 		error("aborting batch step %u.%u", job_ptr->job_id,
@@ -3519,7 +3703,10 @@ int _launch_batch_step(job_desc_msg_t *job_desc_msg, uid_t uid,
 	agent_arg_ptr = (agent_arg_t *) xmalloc(sizeof(agent_arg_t));
 	agent_arg_ptr->node_count = 1;
 	agent_arg_ptr->retry = 0;
-	agent_arg_ptr->hostlist = hostlist_create(node_ptr->name);
+	xassert(job_ptr->batch_host);
+	agent_arg_ptr->hostlist = hostlist_create(job_ptr->batch_host);
+	if (agent_arg_ptr->hostlist == NULL)
+		fatal("hostlist_create: malloc failure");
 	agent_arg_ptr->msg_type = REQUEST_BATCH_JOB_LAUNCH;
 	agent_arg_ptr->msg_args = (void *) launch_msg_ptr;
 
@@ -3654,46 +3841,82 @@ inline static void  _slurm_rpc_get_topo(slurm_msg_t * msg)
 
 inline static void  _slurm_rpc_job_notify(slurm_msg_t * msg)
 {
-	int error_code = SLURM_SUCCESS;
+	int error_code;
 	/* Locks: read job */
 	slurmctld_lock_t job_read_lock = {
 		NO_LOCK, READ_LOCK, NO_LOCK, NO_LOCK };
 	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
 	job_notify_msg_t * notify_msg = (job_notify_msg_t *) msg->data;
+	struct job_record *job_ptr;
 	DEF_TIMERS;
 
 	START_TIMER;
 	debug("Processing RPC: REQUEST_JOB_NOTIFY from uid=%d", uid);
-	if (!validate_slurm_user(uid)) {
-		error_code = ESLURM_USER_ID_MISSING;
-		error("Security violation, REQUEST_JOB_NOTIFY RPC from uid=%d",
-		      uid);
-	}
 
-	if (error_code == SLURM_SUCCESS) {
-		/* do RPC call */
-		struct job_record *job_ptr;
-		lock_slurmctld(job_read_lock);
-		job_ptr = find_job_record(notify_msg->job_id);
-		if (job_ptr) {
-			error_code = srun_user_message(job_ptr,
-						       notify_msg->message);
-		} else
-			error_code = ESLURM_INVALID_JOB_ID;
-		unlock_slurmctld(job_read_lock);
+	/* do RPC call */
+	lock_slurmctld(job_read_lock);
+	job_ptr = find_job_record(notify_msg->job_id);
+	if (!job_ptr)
+		error_code = ESLURM_INVALID_JOB_ID;
+	else if ((job_ptr->user_id == uid) || validate_slurm_user(uid))
+		error_code = srun_user_message(job_ptr, notify_msg->message);
+	else {
+		error_code = ESLURM_USER_ID_MISSING;
+		error("Security violation, REQUEST_JOB_NOTIFY RPC "
+		      "from uid=%d for jobid %u owner %d",
+		      uid, notify_msg->job_id, job_ptr->user_id);
 	}
+	unlock_slurmctld(job_read_lock);
 
 	END_TIMER2("_slurm_rpc_job_notify");
 	slurm_send_rc_msg(msg, error_code);
 }
 
-/* defined in controller.c */
+inline static void  _slurm_rpc_set_debug_flags(slurm_msg_t *msg)
+{
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	slurmctld_lock_t config_write_lock =
+		{ WRITE_LOCK, NO_LOCK, NO_LOCK, NO_LOCK };
+	set_debug_flags_msg_t *request_msg =
+		(set_debug_flags_msg_t *) msg->data;
+	uint32_t debug_flags;
+	char *flag_string;
+
+	debug2("Processing RPC: REQUEST_SET_DEBUG_FLAGS from uid=%d", uid);
+	if (!validate_super_user(uid)) {
+		error("set debug flags request from non-super user uid=%d",
+		      uid);
+		slurm_send_rc_msg(msg, EACCES);
+		return;
+	}
+
+	lock_slurmctld (config_write_lock);
+	debug_flags  = slurm_get_debug_flags();
+	debug_flags &= (~request_msg->debug_flags_minus);
+	debug_flags |= request_msg->debug_flags_plus;
+	slurm_set_debug_flags(debug_flags);
+	slurmctld_conf.last_update = time(NULL);
+
+	/* Reset cached debug_flags values */
+	gs_reconfig();
+	gres_plugin_reconfig(NULL);
+	priority_g_reconfig();
+	select_g_reconfigure();
+	(void) slurm_sched_reconfig();
+
+	unlock_slurmctld (config_write_lock);
+	flag_string = debug_flags2str(debug_flags);
+	info("Set DebugFlags to %s", flag_string);
+	xfree(flag_string);
+	slurm_send_rc_msg(msg, SLURM_SUCCESS);
+}
+
 inline static void  _slurm_rpc_set_debug_level(slurm_msg_t *msg)
 {
 	int debug_level, old_debug_level;
 	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
-	slurmctld_lock_t config_read_lock =
-		{ READ_LOCK, NO_LOCK, NO_LOCK, NO_LOCK };
+	slurmctld_lock_t config_write_lock =
+		{ WRITE_LOCK, NO_LOCK, NO_LOCK, NO_LOCK };
 	set_debug_level_msg_t *request_msg =
 		(set_debug_level_msg_t *) msg->data;
 	log_options_t log_opts = LOG_OPTS_INITIALIZER;
@@ -3712,7 +3935,7 @@ inline static void  _slurm_rpc_set_debug_level(slurm_msg_t *msg)
 	debug_level = MIN (request_msg->debug_level, (LOG_LEVEL_END - 1));
 	debug_level = MAX (debug_level, LOG_LEVEL_QUIET);
 
-	lock_slurmctld (config_read_lock);
+	lock_slurmctld (config_write_lock);
 	if (slurmctld_config.daemonize) {
 		log_opts.stderr_level = LOG_LEVEL_QUIET;
 		if (slurmctld_conf.slurmctld_logfile) {
@@ -3731,7 +3954,7 @@ inline static void  _slurm_rpc_set_debug_level(slurm_msg_t *msg)
 			log_opts.logfile_level = LOG_LEVEL_QUIET;
 	}
 	log_alter(log_opts, LOG_DAEMON, slurmctld_conf.slurmctld_logfile);
-	unlock_slurmctld (config_read_lock);
+	unlock_slurmctld (config_write_lock);
 
 	conf = slurm_conf_lock();
 	old_debug_level = conf->slurmctld_debug;
@@ -3763,6 +3986,19 @@ inline static void  _slurm_rpc_set_schedlog_level(slurm_msg_t *msg)
 		return;
 	}
 
+	/*
+	 * If slurmctld_conf.sched_logfile is NULL, then this operation
+	 *  will fail, since there is no sched logfile for which to alter
+	 *  the log level. (Calling sched_log_alter with a NULL filename
+	 *  is likely to cause a segfault at the next sched log call)
+	 *  So just give up and return "Operation Disabled"
+	 */
+	if (slurmctld_conf.sched_logfile == NULL) {
+		error("set scheduler log level failed: no log file!");
+		slurm_send_rc_msg (msg, ESLURM_DISABLED);
+		return;
+	}
+
 	schedlog_level = MIN (request_msg->debug_level, (LOG_LEVEL_QUIET + 1));
 	schedlog_level = MAX (schedlog_level, LOG_LEVEL_QUIET);
 
@@ -3830,3 +4066,80 @@ inline static void  _slurm_rpc_accounting_first_reg(slurm_msg_t *msg)
 
 	END_TIMER2("_slurm_rpc_accounting_first_reg");
 }
+
+inline static void  _slurm_rpc_accounting_register_ctld(slurm_msg_t *msg)
+{
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+
+	DEF_TIMERS;
+
+	START_TIMER;
+	debug2("Processing RPC: ACCOUNTING_REGISTER_CTLD from uid=%d", uid);
+	if (!validate_slurm_user(uid)
+	    && (assoc_mgr_get_admin_level(acct_db_conn, uid)
+		< SLURMDB_ADMIN_SUPER_USER)) {
+		error("Registration request from non-super user uid=%d",
+		      uid);
+		return;
+	}
+
+	clusteracct_storage_g_register_ctld(acct_db_conn,
+					    slurmctld_conf.slurmctld_port);
+
+	END_TIMER2("_slurm_rpc_accounting_register_ctld");
+}
+
+inline static void _slurm_rpc_dump_spank(slurm_msg_t * msg)
+{
+	int rc = SLURM_SUCCESS;
+	spank_env_request_msg_t *spank_req_msg = (spank_env_request_msg_t *)
+						 msg->data;
+	spank_env_responce_msg_t *spank_resp_msg;
+	/* Locks: read job */
+	slurmctld_lock_t job_read_lock = {
+		NO_LOCK, READ_LOCK, NO_LOCK, NO_LOCK };
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	slurm_msg_t response_msg;
+	DEF_TIMERS;
+
+	START_TIMER;
+	debug("Processing RPC: REQUEST_SPANK_ENVIRONMENT from uid=%d JobId=%u",
+	      uid, spank_req_msg->job_id);
+	if (!validate_slurm_user(uid)) {
+		rc = ESLURM_USER_ID_MISSING;
+		error("Security violation, REQUEST_SPANK_ENVIRONMENT RPC "
+		      "from uid=%d", uid);
+	}
+
+	spank_resp_msg = xmalloc(sizeof(spank_env_responce_msg_t));
+	if (rc == SLURM_SUCCESS) {
+		/* do RPC call */
+		struct job_record *job_ptr;
+		uint32_t i;
+
+		lock_slurmctld(job_read_lock);
+		job_ptr = find_job_record(spank_req_msg->job_id);
+		if (job_ptr) {
+			spank_resp_msg->spank_job_env_size =
+				job_ptr->spank_job_env_size;
+			spank_resp_msg->spank_job_env = xmalloc(
+				spank_resp_msg->spank_job_env_size *
+				sizeof(char *));
+			for (i = 0; i < spank_resp_msg->spank_job_env_size; i++)
+				spank_resp_msg->spank_job_env[i] = xstrdup(
+					job_ptr->spank_job_env[i]);
+		} else
+			rc = ESLURM_INVALID_JOB_ID;
+		unlock_slurmctld(job_read_lock);
+	}
+	END_TIMER2("_slurm_rpc_dump_spank");
+
+	slurm_msg_t_init(&response_msg);
+	response_msg.flags = msg->flags;
+	response_msg.protocol_version = msg->protocol_version;
+	response_msg.address  = msg->address;
+	response_msg.msg_type = RESPONCE_SPANK_ENVIRONMENT;
+	response_msg.data     = spank_resp_msg;
+	slurm_send_node_msg(msg->conn_fd, &response_msg);
+	slurm_free_spank_env_responce_msg(spank_resp_msg);
+}
diff --git a/src/slurmctld/proc_req.h b/src/slurmctld/proc_req.h
index d2cc6a50f..5e335dba8 100644
--- a/src/slurmctld/proc_req.h
+++ b/src/slurmctld/proc_req.h
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/slurmctld/read_config.c b/src/slurmctld/read_config.c
index 7c388adf0..a4442e605 100644
--- a/src/slurmctld/read_config.c
+++ b/src/slurmctld/read_config.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -73,7 +73,7 @@
 #include "src/common/xstring.h"
 
 #include "src/slurmctld/acct_policy.h"
-#include "src/slurmctld/basil_interface.h"
+#include "src/slurmctld/front_end.h"
 #include "src/slurmctld/gang.h"
 #include "src/slurmctld/job_scheduler.h"
 #include "src/slurmctld/job_submit.h"
@@ -120,6 +120,51 @@ static int  _update_preempt(uint16_t old_enable_preempt);
 static void _validate_node_proc_count(void);
 #endif
 
+/*
+ * _reorder_node_record_table - order node table in ascending order of node_rank
+ * This depends on the TopologyPlugin and/or SelectPlugin, which may generate
+ * such a ranking.
+ */
+static void _reorder_node_record_table(void)
+{
+	struct node_record *node_ptr, *node_ptr2;
+	int i, j, min_inx;
+	uint32_t min_val;
+
+	/* Now we need to sort the node records */
+	for (i = 0; i < node_record_count; i++) {
+		min_val = node_record_table_ptr[i].node_rank;
+		min_inx = i;
+		for (j = i + 1; j < node_record_count; j++) {
+			if (node_record_table_ptr[j].node_rank < min_val) {
+				min_val = node_record_table_ptr[j].node_rank;
+				min_inx = j;
+			}
+		}
+
+		if (min_inx != i) {	/* swap records */
+			struct node_record node_record_tmp;
+
+			j = sizeof(struct node_record);
+			node_ptr =  node_record_table_ptr + i;
+			node_ptr2 = node_record_table_ptr + min_inx;
+
+			memcpy(&node_record_tmp, node_ptr, j);
+			memcpy(node_ptr, node_ptr2, j);
+			memcpy(node_ptr2, &node_record_tmp, j);
+		}
+	}
+
+#if _DEBUG
+	/* Log the results */
+	for (i=0, node_ptr = node_record_table_ptr; i < node_record_count;
+	     i++, node_ptr++) {
+		info("%s: %u", node_ptr->name, node_ptr->node_rank);
+	}
+#endif
+}
+
+
 /*
  * _build_bitmaps_pre_select - recover some state for jobs and nodes prior to
  *	calling the select_* functions
@@ -327,7 +372,7 @@ static int _handle_downnodes_line(slurm_conf_downnodes_t *down)
 	int state_val = NODE_STATE_DOWN;
 
 	if (down->state != NULL) {
-		state_val = state_str2int(down->state);
+		state_val = state_str2int(down->state, down->nodenames);
 		if (state_val == NO_VAL) {
 			error("Invalid State \"%s\"", down->state);
 			goto cleanup;
@@ -399,6 +444,7 @@ static int _build_all_nodeline_info(void)
 
 	/* Load the node table here */
 	rc = build_all_nodeline_info(false);
+	rc = MAX(build_all_frontend_info(false), rc);
 
 	/* Now perform operations on the node table as needed by slurmctld */
 #ifdef HAVE_BG
@@ -490,8 +536,10 @@ static int _build_single_partitionline_info(slurm_conf_partition_t *part)
 	if (part->root_only_flag)
 		part_ptr->flags |= PART_FLAG_ROOT_ONLY;
 	part_ptr->max_time       = part->max_time;
+	part_ptr->def_mem_per_cpu = part->def_mem_per_cpu;
 	part_ptr->default_time   = part->default_time;
 	part_ptr->max_share      = part->max_share;
+	part_ptr->max_mem_per_cpu = part->max_mem_per_cpu;
 	part_ptr->max_nodes      = part->max_nodes;
 	part_ptr->max_nodes_orig = part->max_nodes;
 	part_ptr->min_nodes      = part->min_nodes;
@@ -499,13 +547,15 @@ static int _build_single_partitionline_info(slurm_conf_partition_t *part)
 	part_ptr->preempt_mode   = part->preempt_mode;
 	part_ptr->priority       = part->priority;
 	part_ptr->state_up       = part->state_up;
+	part_ptr->grace_time     = part->grace_time;
+
 	if (part->allow_groups) {
 		xfree(part_ptr->allow_groups);
 		part_ptr->allow_groups = xstrdup(part->allow_groups);
 	}
  	if (part->allow_alloc_nodes) {
  		if (part_ptr->allow_alloc_nodes) {
- 			int cnt_tot, cnt_uniq, buf_size;
+ 			int cnt_tot, cnt_uniq;
  			hostlist_t hl = hostlist_create(part_ptr->
 							allow_alloc_nodes);
 
@@ -517,8 +567,6 @@ static int _build_single_partitionline_info(slurm_conf_partition_t *part)
  				fatal("Duplicate Allowed Allocating Nodes for "
 				      "Partition %s", part->name);
  			}
- 			buf_size = strlen(part_ptr->allow_alloc_nodes) + 1 +
-				   strlen(part->allow_alloc_nodes) + 1;
  			xfree(part_ptr->allow_alloc_nodes);
  			part_ptr->allow_alloc_nodes =
 				hostlist_ranged_string_xmalloc(hl);
@@ -534,7 +582,7 @@ static int _build_single_partitionline_info(slurm_conf_partition_t *part)
 	}
 	if (part->nodes) {
 		if (part_ptr->nodes) {
-			int cnt_tot, cnt_uniq, buf_size;
+			int cnt_tot, cnt_uniq;
 			hostlist_t hl = hostlist_create(part_ptr->nodes);
 
 			hostlist_push(hl, part->nodes);
@@ -545,8 +593,6 @@ static int _build_single_partitionline_info(slurm_conf_partition_t *part)
 				fatal("Duplicate Nodes for Partition %s",
 					part->name);
 			}
-			buf_size = strlen(part_ptr->nodes) + 1 +
-				   strlen(part->nodes) + 1;
 			xfree(part_ptr->nodes);
 			part_ptr->nodes = hostlist_ranged_string_xmalloc(hl);
 			hostlist_destroy(hl);
@@ -631,6 +677,7 @@ int read_slurm_conf(int recover, bool reconfig)
 	int error_code, i, rc, load_job_ret = SLURM_SUCCESS;
 	int old_node_record_count = 0;
 	struct node_record *old_node_table_ptr = NULL, *node_ptr;
+	bool do_reorder_nodes = false;
 	List old_part_list = NULL;
 	char *old_def_part_name = NULL;
 	char *old_auth_type       = xstrdup(slurmctld_conf.authtype);
@@ -688,6 +735,8 @@ int read_slurm_conf(int recover, bool reconfig)
 	_build_all_nodeline_info();
 	_handle_all_downnodes();
 	_build_all_partitionline_info();
+	if (!reconfig)
+		restore_front_end_state(recover);
 
 	update_logging();
 	g_slurm_jobcomp_init(slurmctld_conf.job_comp_loc);
@@ -707,6 +756,17 @@ int read_slurm_conf(int recover, bool reconfig)
 		return EINVAL;
 	}
 
+	/*
+	 * Node reordering needs to be done by the topology and/or select
+	 * plugin. Reordering the table must be done before hashing the
+	 * nodes, and before any position-relative bitmaps are created.
+	 */
+	do_reorder_nodes |= slurm_topo_generate_node_ranking();
+	do_reorder_nodes |= select_g_node_ranking(node_record_table_ptr,
+						  node_record_count);
+	if (do_reorder_nodes)
+		_reorder_node_record_table();
+
 	rehash_node();
 	rehash_jobs();
 	set_slurmd_addr();
@@ -727,21 +787,24 @@ int read_slurm_conf(int recover, bool reconfig)
 		load_last_job_id();
 		reset_first_job_id();
 		(void) slurm_sched_reconfig();
-		xfree(state_save_dir);	/* No select plugin state restore */
 	} else if (recover == 0) {	/* Build everything from slurm.conf */
 		load_last_job_id();
 		reset_first_job_id();
 		(void) slurm_sched_reconfig();
-		xfree(state_save_dir);	/* No select plugin state restore */
 	} else if (recover == 1) {	/* Load job & node state files */
 		(void) load_all_node_state(true);
+		(void) load_all_front_end_state(true);
 		load_job_ret = load_all_job_state();
+		sync_job_priorities();
 	} else if (recover > 1) {	/* Load node, part & job state files */
 		(void) load_all_node_state(false);
+		(void) load_all_front_end_state(false);
 		(void) load_all_part_state();
 		load_job_ret = load_all_job_state();
+		sync_job_priorities();
 	}
 
+	sync_front_end_state();
 	_sync_part_prio();
 	_build_bitmaps_pre_select();
 	if ((select_g_node_init(node_record_table_ptr, node_record_count)
@@ -822,10 +885,6 @@ int read_slurm_conf(int recover, bool reconfig)
 	if (load_job_ret)
 		_acct_restore_active_jobs();
 
-#ifdef HAVE_CRAY
-	basil_query();
-#endif
-
 	/* Sync select plugin with synchronized job/node/part data */
 	select_g_reconfigure();
 
@@ -927,6 +986,8 @@ static int _restore_node_state(int recover,
 		node_ptr->tmp_disk      = old_node_ptr->tmp_disk;
 		node_ptr->weight        = old_node_ptr->weight;
 
+		node_ptr->sus_job_cnt   = old_node_ptr->sus_job_cnt;
+
 		if (node_ptr->gres_list)
 			list_destroy(node_ptr->gres_list);
 		node_ptr->gres_list = old_node_ptr->gres_list;
@@ -1108,6 +1169,11 @@ static int  _restore_part_state(List old_part_list, char *old_def_part_name)
 				      "slurm.conf", part_ptr->name);
 				part_ptr->max_time = old_part_ptr->max_time;
 			}
+			if (part_ptr->grace_time != old_part_ptr->grace_time) {
+				error("Partition %s GraceTime differs from "
+				      "slurm.conf", part_ptr->name);
+				part_ptr->grace_time = old_part_ptr->grace_time;
+			}
 			if (part_ptr->min_nodes_orig !=
 			    old_part_ptr->min_nodes_orig) {
 				error("Partition %s MinNodes differs from "
@@ -1158,6 +1224,7 @@ static int  _restore_part_state(List old_part_list, char *old_def_part_name)
 						   max_nodes_orig;
 			part_ptr->max_share = old_part_ptr->max_share;
 			part_ptr->max_time = old_part_ptr->max_time;
+			part_ptr->grace_time = old_part_ptr->grace_time;
 			part_ptr->min_nodes = old_part_ptr->min_nodes;
 			part_ptr->min_nodes_orig = old_part_ptr->
 						   min_nodes_orig;
@@ -1362,7 +1429,7 @@ static int _sync_nodes_to_comp_job(void)
 			info("Job %u in completing state", job_ptr->job_id);
 			if (!job_ptr->node_bitmap_cg)
 				build_cg_bitmap(job_ptr);
-			deallocate_nodes(job_ptr, false, false);
+			deallocate_nodes(job_ptr, false, false, false);
 			/* The job in completing state at slurmctld restart or
 			 * reconfiguration, do not log completion again.
 			 * job_completion_logger(job_ptr, false); */
@@ -1414,7 +1481,7 @@ static int _sync_nodes_to_active_job(struct job_record *job_ptr)
 			excise_node_from_job(job_ptr, node_ptr);
 			job_post_resize_acctg(job_ptr);
 			accounting_enforce = save_accounting_enforce;
-		} else if (IS_NODE_DOWN(node_ptr)) {
+		} else if (IS_NODE_DOWN(node_ptr) && IS_JOB_RUNNING(job_ptr)) {
 			time_t now = time(NULL);
 			info("Killing job %u on DOWN node %s",
 			     job_ptr->job_id, node_ptr->name);
@@ -1432,6 +1499,10 @@ static int _sync_nodes_to_active_job(struct job_record *job_ptr)
 					       node_flags;
 		}
 	}
+
+	if (IS_JOB_RUNNING(job_ptr) && job_ptr->front_end_ptr)
+		job_ptr->front_end_ptr->job_cnt_run++;
+
 	return cnt;
 }
 
@@ -1452,7 +1523,7 @@ static void _sync_nodes_to_suspended_job(struct job_record *job_ptr)
 
 #ifdef 	HAVE_ELAN
 /* Every node in a given partition must have the same processor count
- * at present, this function insure it */
+ * at present, ensured by this function. */
 static void _validate_node_proc_count(void)
 {
 	ListIterator part_iterator;
diff --git a/src/slurmctld/read_config.h b/src/slurmctld/read_config.h
index 41c58ea26..43ea8487e 100644
--- a/src/slurmctld/read_config.h
+++ b/src/slurmctld/read_config.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/slurmctld/reservation.c b/src/slurmctld/reservation.c
index a9b45c991..098cecf30 100644
--- a/src/slurmctld/reservation.c
+++ b/src/slurmctld/reservation.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -49,11 +49,12 @@
 #include <stdlib.h>
 #include <time.h>
 #include <unistd.h>
-#include <slurm/slurm.h>
-#include <slurm/slurm_errno.h>
 #include <sys/stat.h>
 #include <sys/types.h>
 
+#include "slurm/slurm.h"
+#include "slurm/slurm_errno.h"
+
 #include "src/common/assoc_mgr.h"
 #include "src/common/bitstring.h"
 #include "src/common/hostlist.h"
@@ -112,14 +113,12 @@ static List _list_dup(List license_list);
 static int  _open_resv_state_file(char **state_file);
 static void _pack_resv(slurmctld_resv_t *resv_ptr, Buf buffer,
 		       bool internal);
+static bitstr_t *_pick_idle_nodes(bitstr_t *avail_nodes,
+				  resv_desc_msg_t *resv_desc_ptr);
 static int  _post_resv_create(slurmctld_resv_t *resv_ptr);
 static int  _post_resv_delete(slurmctld_resv_t *resv_ptr);
 static int  _post_resv_update(slurmctld_resv_t *resv_ptr,
 			      slurmctld_resv_t *old_resv_ptr);
-static bitstr_t *_pick_idle_nodes(bitstr_t *avail_nodes,
-				  resv_desc_msg_t *resv_desc_ptr);
-static bitstr_t *_pick_idle_nodes2(bitstr_t *avail_nodes,
-				   resv_desc_msg_t *resv_desc_ptr);
 static int  _resize_resv(slurmctld_resv_t *resv_ptr, uint32_t node_cnt);
 static bool _resv_overlap(time_t start_time, time_t end_time,
 			  uint16_t flags, bitstr_t *node_bitmap,
@@ -172,7 +171,7 @@ static List _list_dup(List license_list)
 	if (lic_list == NULL)
 		fatal("list_create malloc failure");
 	iter = list_iterator_create(license_list);
-	if (iter == NULL)
+	if (!iter)
 		fatal("list_interator_create malloc failure");
 	while ((license_src = (licenses_t *) list_next(iter))) {
 		license_dest = xmalloc(sizeof(licenses_t));
@@ -473,20 +472,22 @@ static int _set_assoc_list(slurmctld_resv_t *resv_ptr)
 		rc = SLURM_ERROR;
 	}
 
-	if(list_count(assoc_list)) {
+	if (list_count(assoc_list)) {
 		ListIterator itr = list_iterator_create(assoc_list);
+		if (!itr)
+			fatal("malloc: list_iterator_create");
 		xfree(resv_ptr->assoc_list);	/* clear for modify */
-		while((assoc_ptr = list_next(itr))) {
-			if(resv_ptr->assoc_list)
+		while ((assoc_ptr = list_next(itr))) {
+			if (resv_ptr->assoc_list) {
 				xstrfmtcat(resv_ptr->assoc_list, "%u,",
 					   assoc_ptr->id);
-			else
+			} else {
 				xstrfmtcat(resv_ptr->assoc_list, ",%u,",
 					   assoc_ptr->id);
+			}
 		}
 		list_iterator_destroy(itr);
 	}
-	//info("list is '%s'", resv_ptr->assoc_list);
 
 end_it:
 	list_destroy(assoc_list);
@@ -1069,6 +1070,8 @@ static bool _job_overlap(time_t start_time, uint16_t flags,
 		return overlap;
 
 	job_iterator = list_iterator_create(job_list);
+	if (!job_iterator)
+		fatal("malloc: list_iterator_create");
 	while ((job_ptr = (struct job_record *) list_next(job_iterator))) {
 		if (IS_JOB_RUNNING(job_ptr)		&&
 		    (job_ptr->end_time > start_time)	&&
@@ -1214,7 +1217,8 @@ extern int create_resv(resv_desc_msg_t *resv_desc_ptr)
 					RESERVE_FLAG_OVERLAP  |
 					RESERVE_FLAG_IGN_JOBS |
 					RESERVE_FLAG_DAILY    |
-					RESERVE_FLAG_WEEKLY;
+					RESERVE_FLAG_WEEKLY   |
+					RESERVE_FLAG_LIC_ONLY;
 	}
 	if (resv_desc_ptr->partition) {
 		part_ptr = find_part_record(resv_desc_ptr->partition);
@@ -1458,6 +1462,10 @@ extern int update_resv(resv_desc_msg_t *resv_desc_ptr)
 			resv_ptr->flags |= RESERVE_FLAG_WEEKLY;
 		if (resv_desc_ptr->flags & RESERVE_FLAG_NO_WEEKLY)
 			resv_ptr->flags &= (~RESERVE_FLAG_WEEKLY);
+		if (resv_desc_ptr->flags & RESERVE_FLAG_LIC_ONLY)
+			resv_ptr->flags |= RESERVE_FLAG_LIC_ONLY;
+		if (resv_desc_ptr->flags & RESERVE_FLAG_NO_LIC_ONLY)
+			resv_ptr->flags &= (~RESERVE_FLAG_LIC_ONLY);
 	}
 	if (resv_desc_ptr->partition && (resv_desc_ptr->partition[0] == '\0')){
 		/* Clear the partition */
@@ -1659,9 +1667,10 @@ extern int update_resv(resv_desc_msg_t *resv_desc_ptr)
 		val2  = resv_ptr->users;
 	} else
 		name2 = val2 = "";
-	info("sched: Updated reservation %s%s%s%s%s nodes=%s start=%s end=%s",
+	info("sched: Updated reservation %s%s%s%s%s nodes=%s licenses=%s "
+	     "start=%s end=%s",
 	     resv_ptr->name, name1, val1, name2, val2,
-	     resv_ptr->node_list, start_time, end_time);
+	     resv_ptr->node_list, resv_ptr->licenses, start_time, end_time);
 
 	_post_resv_update(resv_ptr, resv_backup);
 	_del_resv_rec(resv_backup);
@@ -1683,6 +1692,8 @@ static bool _is_resv_used(slurmctld_resv_t *resv_ptr)
 	bool match = false;
 
 	job_iterator = list_iterator_create(job_list);
+	if (!job_iterator)
+		fatal("malloc: list_iterator_create");
 	while ((job_ptr = (struct job_record *) list_next(job_iterator))) {
 		if ((!IS_JOB_FINISHED(job_ptr)) &&
 		    (job_ptr->resv_id == resv_ptr->resv_id)) {
@@ -1702,6 +1713,8 @@ static void _clear_job_resv(slurmctld_resv_t *resv_ptr)
 	struct job_record *job_ptr;
 
 	job_iterator = list_iterator_create(job_list);
+	if (!job_iterator)
+		fatal("malloc: list_iterator_create");
 	while ((job_ptr = (struct job_record *) list_next(job_iterator))) {
 		if (job_ptr->resv_ptr != resv_ptr)
 			continue;
@@ -2019,6 +2032,8 @@ static void _validate_all_reservations(void)
 
 	/* Validate all job reservation pointers */
 	iter = list_iterator_create(job_list);
+	if (!iter)
+		fatal("malloc: list_iterator_create");
 	while ((job_ptr = (struct job_record *) list_next(iter))) {
 		if (job_ptr->resv_name == NULL)
 			continue;
@@ -2042,7 +2057,7 @@ static void _validate_all_reservations(void)
 }
 
 /*
- * Validate the the reserved nodes are not DOWN or DRAINED and
+ * Validate that the reserved nodes are not DOWN or DRAINED and
  *	select different nodes as needed.
  */
 static void _validate_node_choice(slurmctld_resv_t *resv_ptr)
@@ -2470,27 +2485,11 @@ static int  _select_nodes(resv_desc_msg_t *resv_desc_ptr,
 		/* Nodes must be available */
 		bit_and(node_bitmap, avail_node_bitmap);
 	}
-	*resv_bitmap = NULL;
-	if (rc != SLURM_SUCCESS)
-		;
-	else if (bit_set_count(node_bitmap) < resv_desc_ptr->node_cnt)
-		verbose("reservation requests more nodes than are available");
-	else if ((i = bit_overlap(node_bitmap, idle_node_bitmap)) >=
-		 resv_desc_ptr->node_cnt) {	/* Reserve idle nodes */
-		bit_and(node_bitmap, idle_node_bitmap);
-		*resv_bitmap = bit_pick_cnt(node_bitmap,
-					    resv_desc_ptr->node_cnt);
-	} else if (resv_desc_ptr->flags & RESERVE_FLAG_IGN_JOBS) {
-		/* Reserve nodes that are idle first, then busy nodes */
-		*resv_bitmap = _pick_idle_nodes2(node_bitmap,
-						 resv_desc_ptr);
-	} else {
-		/* Reserve nodes that are or will be idle.
-		 * This algorithm is slower than above logic that just
-		 * selects from the idle nodes. */
-		*resv_bitmap = _pick_idle_nodes(node_bitmap, resv_desc_ptr);
-	}
 
+	*resv_bitmap = NULL;
+	if (rc == SLURM_SUCCESS)
+		*resv_bitmap = _pick_idle_nodes(node_bitmap,
+						resv_desc_ptr);
 	FREE_NULL_BITMAP(node_bitmap);
 	if (*resv_bitmap == NULL) {
 		if (rc == SLURM_SUCCESS)
@@ -2502,67 +2501,80 @@ static int  _select_nodes(resv_desc_msg_t *resv_desc_ptr,
 	return SLURM_SUCCESS;
 }
 
-/*
- * Select nodes for a reservation to use
- * IN,OUT avail_nodes - nodes to choose from with proper features, partition
- *                      destructively modified by this function
- * IN resv_desc_ptr - reservation request
- * RET bitmap of selected nodes or NULL if request can not be satisfied
- */
-static bitstr_t *_pick_idle_nodes(bitstr_t *avail_nodes,
+static bitstr_t *_pick_idle_nodes(bitstr_t *avail_bitmap,
 				  resv_desc_msg_t *resv_desc_ptr)
 {
 	ListIterator job_iterator;
 	struct job_record *job_ptr;
+	bitstr_t *save_bitmap, *ret_bitmap, *tmp_bitmap;
 
-	job_iterator = list_iterator_create(job_list);
-	while ((job_ptr = (struct job_record *) list_next(job_iterator))) {
-		if (!IS_JOB_RUNNING(job_ptr) ||
-		    (job_ptr->end_time < resv_desc_ptr->start_time))
-			continue;
-		bit_not(job_ptr->node_bitmap);
-		bit_and(avail_nodes, job_ptr->node_bitmap);
-		bit_not(job_ptr->node_bitmap);
+	if (bit_set_count(avail_bitmap) < resv_desc_ptr->node_cnt) {
+		verbose("reservation requests more nodes than are available");
+		return NULL;
 	}
-	list_iterator_destroy(job_iterator);
 
-	return bit_pick_cnt(avail_nodes, resv_desc_ptr->node_cnt);
-}
-
-/*
- * Select nodes for a reservation to use
- * IN,OUT avail_nodes - nodes to choose from with proper features, partition
- *                      destructively modified by this function
- * IN resv_desc_ptr - reservation request
- * RET bitmap of selected nodes or NULL if request can not be satisfied
- */
-static bitstr_t *_pick_idle_nodes2(bitstr_t *avail_nodes,
-				   resv_desc_msg_t *resv_desc_ptr)
-{
-	ListIterator job_iterator;
-	struct job_record *job_ptr;
-	bitstr_t *tmp_bitmap;
+	save_bitmap = bit_copy(avail_bitmap);
+	/* First: Try to reserve nodes that are currently IDLE */
+	if (bit_overlap(avail_bitmap, idle_node_bitmap) >=
+	    resv_desc_ptr->node_cnt) {
+		bit_and(avail_bitmap, idle_node_bitmap);
+		ret_bitmap = select_g_resv_test(avail_bitmap,
+						resv_desc_ptr->node_cnt);
+		if (ret_bitmap)
+			goto fini;
+	}
 
+	/* Second: Try to reserve nodes that will be IDLE */
+	bit_or(avail_bitmap, save_bitmap);	/* restore avail_bitmap */
 	job_iterator = list_iterator_create(job_list);
+	if (job_iterator == NULL)
+		fatal("list_iterator_create: malloc failure");
 	while ((job_ptr = (struct job_record *) list_next(job_iterator))) {
-		if (!IS_JOB_RUNNING(job_ptr) ||
-		    (job_ptr->end_time < resv_desc_ptr->start_time))
+		if (!IS_JOB_RUNNING(job_ptr) && !IS_JOB_SUSPENDED(job_ptr))
+			continue;
+		if (job_ptr->end_time < resv_desc_ptr->start_time)
 			continue;
-		tmp_bitmap = bit_copy(avail_nodes);
-		if (tmp_bitmap == NULL)
-			fatal("malloc failure");
 		bit_not(job_ptr->node_bitmap);
-		bit_and(avail_nodes, job_ptr->node_bitmap);
+		bit_and(avail_bitmap, job_ptr->node_bitmap);
 		bit_not(job_ptr->node_bitmap);
-		if (bit_set_count(avail_nodes) < resv_desc_ptr->node_cnt) {
-			/* Removed too many nodes, put them back */
-			bit_or(avail_nodes, tmp_bitmap);
-		}
-		FREE_NULL_BITMAP(tmp_bitmap);
 	}
 	list_iterator_destroy(job_iterator);
+	ret_bitmap = select_g_resv_test(avail_bitmap, resv_desc_ptr->node_cnt);
+	if (ret_bitmap)
+		goto fini;
+
+	/* Third: Try to reserve nodes that will be allocated to a limited
+	 * number of running jobs. We could sort the jobs by priority, QOS,
+	 * size or other criterion if desired. Right now we just go down
+	 * the unsorted job list. */
+	if (resv_desc_ptr->flags & RESERVE_FLAG_IGN_JOBS) {
+		job_iterator = list_iterator_create(job_list);
+		if (!job_iterator)
+			fatal("list_iterator_create: malloc failure");
+		while ((job_ptr = (struct job_record *)
+			list_next(job_iterator))) {
+			if (!IS_JOB_RUNNING(job_ptr) &&
+			    !IS_JOB_SUSPENDED(job_ptr))
+				continue;
+			if (job_ptr->end_time < resv_desc_ptr->start_time)
+				continue;
+			tmp_bitmap = bit_copy(save_bitmap);
+			bit_and(tmp_bitmap, job_ptr->node_bitmap);
+			if (bit_set_count(tmp_bitmap) > 0) {
+				bit_or(avail_bitmap, tmp_bitmap);
+				ret_bitmap = select_g_resv_test(avail_bitmap,
+								resv_desc_ptr->
+								node_cnt);
+			}
+			FREE_NULL_BITMAP(tmp_bitmap);
+			if (ret_bitmap)
+				break;
+		}
+		list_iterator_destroy(job_iterator);
+	}
 
-	return bit_pick_cnt(avail_nodes, resv_desc_ptr->node_cnt);
+fini:	FREE_NULL_BITMAP(save_bitmap);
+	return ret_bitmap;
 }
 
 /* Determine if a job has access to a reservation
@@ -2658,7 +2670,8 @@ extern int job_test_resv_now(struct job_record *job_ptr)
 		/* reservation ended earlier */
 		return ESLURM_RESERVATION_INVALID;
 	}
-	if (resv_ptr->node_cnt == 0) {
+	if ((resv_ptr->node_cnt == 0) &&
+	    !(resv_ptr->flags & RESERVE_FLAG_LIC_ONLY)) {
 		/* empty reservation treated like it will start later */
 		return ESLURM_INVALID_TIME_VALUE;
 	}
@@ -2681,14 +2694,17 @@ extern void job_time_adj_resv(struct job_record *job_ptr)
 	while ((resv_ptr = (slurmctld_resv_t *) list_next(iter))) {
 		if (resv_ptr->end_time <= now)
 			_advance_resv_time(resv_ptr);
-		if ((job_ptr->resv_ptr == resv_ptr) ||
-		    (resv_ptr->start_time <= now))
+		if (job_ptr->resv_ptr == resv_ptr)
 			continue;	/* authorized user of reservation */
+		if (resv_ptr->start_time <= now)
+			continue;	/* already validated */
 		if (resv_ptr->start_time >= job_ptr->end_time)
 			continue;	/* reservation starts after job ends */
-		if ((resv_ptr->node_bitmap == NULL) ||
-		    (bit_overlap(resv_ptr->node_bitmap,
-				 job_ptr->node_bitmap) == 0))
+		if (!license_list_overlap(job_ptr->license_list,
+					  resv_ptr->license_list) &&
+		    ((resv_ptr->node_bitmap == NULL) ||
+		     (bit_overlap(resv_ptr->node_bitmap,
+				  job_ptr->node_bitmap) == 0)))
 			continue;	/* disjoint resources */
 		resv_begin_time = difftime(resv_ptr->start_time, now) / 60;
 		job_ptr->time_limit = MIN(job_ptr->time_limit,resv_begin_time);
@@ -2710,7 +2726,7 @@ static int _license_cnt(List license_list, char *lic_name)
 		return lic_cnt;
 
 	iter = list_iterator_create(license_list);
-	if (iter == NULL)
+	if (!iter)
 		fatal("list_interator_create malloc failure");
 	while ((license_ptr = list_next(iter))) {
 		if (strcmp(license_ptr->name, lic_name) == 0)
@@ -2819,7 +2835,6 @@ extern int job_test_resv(struct job_record *job_ptr, time_t *when,
 	*node_bitmap = (bitstr_t *) NULL;
 
 	if (job_ptr->resv_name) {
-		bool overlap_resv = false;
 		resv_ptr = (slurmctld_resv_t *) list_find_first (resv_list,
 				_find_resv_name, job_ptr->resv_name);
 		job_ptr->resv_ptr = resv_ptr;
@@ -2834,7 +2849,8 @@ extern int job_test_resv(struct job_record *job_ptr, time_t *when,
 			*when = resv_ptr->start_time;
 			return ESLURM_INVALID_TIME_VALUE;
 		}
-		if (resv_ptr->node_cnt == 0) {
+		if ((resv_ptr->node_cnt == 0) &&
+		    (!(resv_ptr->flags & RESERVE_FLAG_LIC_ONLY))) {
 			/* empty reservation treated like it will start later */
 			*when = now + 600;
 			return ESLURM_INVALID_TIME_VALUE;
@@ -2846,11 +2862,18 @@ extern int job_test_resv(struct job_record *job_ptr, time_t *when,
 			return ESLURM_RESERVATION_INVALID;
 		}
 		if (job_ptr->details->req_node_bitmap &&
+		    (!(resv_ptr->flags & RESERVE_FLAG_LIC_ONLY)) &&
 		    !bit_super_set(job_ptr->details->req_node_bitmap,
 				   resv_ptr->node_bitmap)) {
 			return ESLURM_RESERVATION_INVALID;
 		}
-		*node_bitmap = bit_copy(resv_ptr->node_bitmap);
+		if (resv_ptr->flags & RESERVE_FLAG_LIC_ONLY) {
+			*node_bitmap = bit_alloc(node_record_count);
+			if (*node_bitmap == NULL)
+				fatal("bit_alloc: malloc failure");
+			bit_nset(*node_bitmap, 0, (node_record_count - 1));
+		} else
+			*node_bitmap = bit_copy(resv_ptr->node_bitmap);
 
 		/* if there are any overlapping reservations, we need to
 		 * prevent the job from using those nodes (e.g. MAINT nodes) */
@@ -2868,7 +2891,6 @@ extern int job_test_resv(struct job_record *job_ptr, time_t *when,
 			bit_not(res2_ptr->node_bitmap);
 			bit_and(*node_bitmap, res2_ptr->node_bitmap);
 			bit_not(res2_ptr->node_bitmap);
-			overlap_resv = true;
 		}
 		list_iterator_destroy(iter);
 
@@ -2883,6 +2905,8 @@ extern int job_test_resv(struct job_record *job_ptr, time_t *when,
 
 	job_ptr->resv_ptr = NULL;	/* should be redundant */
 	*node_bitmap = bit_alloc(node_record_count);
+	if (*node_bitmap == NULL)
+		fatal("bit_alloc: malloc failure");
 	bit_nset(*node_bitmap, 0, (node_record_count - 1));
 	if (list_count(resv_list) == 0)
 		return SLURM_SUCCESS;
@@ -2909,6 +2933,9 @@ extern int job_test_resv(struct job_record *job_ptr, time_t *when,
 				rc = ESLURM_NODES_BUSY;
 				break;
 			}
+			/* FIXME: This only tracks when ANY licenses required
+			 * by the job are freed by any reservation without
+			 * counting them, so the results are not accurate. */
 			if (license_list_overlap(job_ptr->license_list,
 						 resv_ptr->license_list)) {
 				if ((lic_resv_time == (time_t) 0) ||
@@ -2924,7 +2951,9 @@ extern int job_test_resv(struct job_record *job_ptr, time_t *when,
 		if ((rc == SLURM_SUCCESS) && move_time) {
 			if (license_job_test(job_ptr, job_start_time)
 			    == EAGAIN) {
-				/* Need to postpone for licenses */
+				/* Need to postpone for licenses. Time returned
+				 * is best case; first reservation with those
+				 * licenses ends. */
 				rc = ESLURM_NODES_BUSY;
 				*when = lic_resv_time;
 			}
@@ -3084,6 +3113,8 @@ extern int send_resvs_to_accounting(void)
 		return SLURM_SUCCESS;
 
 	itr = list_iterator_create(resv_list);
+	if (!itr)
+		fatal("malloc: list_iterator_create");
 	while ((resv_ptr = list_next(itr))) {
 		_post_resv_create(resv_ptr);
 	}
diff --git a/src/slurmctld/reservation.h b/src/slurmctld/reservation.h
index afb9e65e7..27128d0cc 100644
--- a/src/slurmctld/reservation.h
+++ b/src/slurmctld/reservation.h
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -41,7 +41,8 @@
 
 #include <time.h>
 #include <unistd.h>
-#include <slurm/slurm.h>
+
+#include "slurm/slurm.h"
 #include "src/common/bitstring.h"
 #include "src/slurmctld/slurmctld.h"
 
diff --git a/src/slurmctld/sched_plugin.c b/src/slurmctld/sched_plugin.c
index db4ab44fe..55d8908a9 100644
--- a/src/slurmctld/sched_plugin.c
+++ b/src/slurmctld/sched_plugin.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/slurmctld/sched_plugin.h b/src/slurmctld/sched_plugin.h
index 03365cb39..1d3e76f51 100644
--- a/src/slurmctld/sched_plugin.h
+++ b/src/slurmctld/sched_plugin.h
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -39,8 +39,8 @@
 #ifndef __SLURM_CONTROLLER_SCHED_PLUGIN_API_H__
 #define __SLURM_CONTROLLER_SCHED_PLUGIN_API_H__
 
-#include <slurm/slurm.h>
-#include <src/slurmctld/slurmctld.h>
+#include "slurm/slurm.h"
+#include "src/slurmctld/slurmctld.h"
 
 /*
  * Initialize the external scheduler adapter.
diff --git a/src/slurmctld/slurmctld.h b/src/slurmctld/slurmctld.h
index 0cf870c2f..a0e633bfc 100644
--- a/src/slurmctld/slurmctld.h
+++ b/src/slurmctld/slurmctld.h
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -64,7 +64,7 @@
 #  include <pthread.h>
 #endif				/* WITH_PTHREADS */
 
-#include <slurm/slurm.h>
+#include "slurm/slurm.h"
 
 #include "src/common/bitstring.h"
 #include "src/common/checkpoint.h"
@@ -206,6 +206,39 @@ extern bitstr_t *power_node_bitmap;	/* Powered down nodes */
 extern bitstr_t *share_node_bitmap;	/* bitmap of sharable nodes */
 extern bitstr_t *up_node_bitmap;	/* bitmap of up nodes, not DOWN */
 
+/*****************************************************************************\
+ *  FRONT_END parameters and data structures
+\*****************************************************************************/
+#define FRONT_END_MAGIC 0xfe9b82fe
+
+typedef struct front_end_record {
+	time_t boot_time;		/* Time of node boot,
+					 * computed from up_time */
+	char *comm_name;		/* communications path name to node */
+	uint32_t job_cnt_comp;		/* count of completing jobs on node */
+	uint16_t job_cnt_run;		/* count of running jobs on node */
+	time_t last_response;		/* Time of last communication */
+	uint32_t magic;			/* magic cookie to test data integrity */
+	char *name;			/* frontend node name */
+	uint16_t node_state;		/* enum node_states, ORed with
+					 * NODE_STATE_NO_RESPOND if not
+					 * responding */
+	bool not_responding;		/* set if fails to respond,
+					 * clear after logging this */
+	slurm_addr_t slurm_addr;	/* network address */
+	uint16_t port;			/* frontend specific port */
+	char *reason;			/* reason for down frontend node */
+	time_t reason_time;		/* Time stamp when reason was set,
+					 * ignore if no reason is set. */
+	uint32_t reason_uid;   		/* User that set the reason, ignore if
+					 * no reason is set. */
+	time_t slurmd_start_time;	/* Time of slurmd startup */
+} front_end_record_t;
+
+extern front_end_record_t *front_end_nodes;
+extern uint16_t front_end_node_cnt;
+extern time_t last_front_end_update;	/* time of last front_end update */
+
 /*****************************************************************************\
  *  PARTITION parameters and data structures
 \*****************************************************************************/
@@ -219,9 +252,12 @@ struct part_record {
 				 * NULL indicates all */
 	uid_t *allow_uids;	/* zero terminated list of allowed users */
 	char *alternate; 	/* name of alternate partition */
+	uint32_t def_mem_per_cpu; /* default MB memory per allocated CPU */
 	uint32_t default_time;	/* minutes, NO_VAL or INFINITE */
 	uint16_t flags;		/* see PART_FLAG_* in slurm.h */
+	uint32_t grace_time;	/* default preempt grace time in seconds */
 	uint32_t magic;		/* magic cookie to test data integrity */
+	uint32_t max_mem_per_cpu; /* maximum MB memory per allocated CPU */
 	uint32_t max_nodes;	/* per job or INFINITE */
 	uint32_t max_nodes_orig;/* unscaled value (c-nodes on BlueGene) */
 	uint32_t max_offset;	/* select plugin max offset */
@@ -324,11 +360,13 @@ struct job_details {
 					 * each task */
 	List depend_list;		/* list of job_ptr:state pairs */
 	char *dependency;		/* wait for other jobs */
+	char *orig_dependency;		/* original value (for archiving) */
 	uint16_t env_cnt;		/* size of env_sup (see below) */
 	char **env_sup;			/* supplemental environment variables
 					 * as set by Moab */
 	bitstr_t *exc_node_bitmap;	/* bitmap of excluded nodes */
 	char *exc_nodes;		/* excluded nodes */
+	uint32_t expanding_jobid;	/* ID of job to be expanded */
 	List feature_list;		/* required features with
 					 * node counts */
 	char *features;			/* required features */
@@ -360,6 +398,8 @@ struct job_details {
 					 * for this job while it was pending */
 	bitstr_t *req_node_bitmap;	/* bitmap of required nodes */
 	uint16_t *req_node_layout;	/* task layout for required nodes */
+	time_t preempt_start_time;	/* time that preemption began to start
+					 * this job */
 	char *req_nodes;		/* required nodes */
 	uint16_t requeue;		/* controls ability requeue job */
 	char *restart_dir;	        /* restart execution from ckpt images
@@ -375,6 +415,7 @@ struct job_details {
 	uint16_t task_dist;		/* task layout for this job. Only
 					 * useful when Consumable Resources
                                          * is enabled */
+	uint32_t usable_nodes;		/* node count needed by preemption */
 	char *work_dir;			/* pathname of working directory */
 };
 
@@ -390,6 +431,7 @@ struct job_record {
 					 * value before use */
 	uint16_t batch_flag;		/* 1 or 2 if batch job (with script),
 					 * 2 indicates retry mode (one retry) */
+	char *batch_host;		/* host executing batch script */
 	check_jobinfo_t check_job;      /* checkpoint context, opaque */
 	uint16_t ckpt_interval;	        /* checkpoint interval in minutes */
 	time_t ckpt_time;	        /* last time job was periodically
@@ -418,12 +460,15 @@ struct job_record {
 					 * actual or expected */
 	uint32_t exit_code;		/* exit code for job (status from
 					 * wait call) */
+	front_end_record_t *front_end_ptr; /* Pointer to front-end node running
+					 * this job */
 	char *gres;			/* generic resources */
 	List gres_list;			/* generic resource allocation detail */
 	uint32_t group_id;		/* group submitted under */
 	uint32_t job_id;		/* job ID */
 	struct job_record *job_next;	/* next entry with same hash index */
-	enum job_states job_state;	/* state of the job */
+	job_resources_t *job_resrcs;	/* details of allocated cores */
+	uint16_t job_state;	        /* state of the job */
 	uint16_t kill_on_node_fail;	/* 1 if job should be killed on
 					 * node failure */
 	char *licenses;			/* licenses required by the job */
@@ -461,6 +506,7 @@ struct job_record {
 					 * partition */
 	struct part_record *part_ptr;	/* pointer to the partition record */
 	time_t pre_sus_time;		/* time job ran prior to last suspend */
+	time_t preempt_time;		/* job preemption signal time */
 	uint32_t priority;		/* relative priority of the job,
 					 * zero == held (don't initiate) */
 	priority_factors_object_t *prio_factors; /* cached value used
@@ -481,7 +527,6 @@ struct job_record {
 	uint32_t requid;            	/* requester user ID */
 	char *resp_host;		/* host for srun communications */
 	dynamic_plugin_data_t *select_jobinfo;/* opaque data, BlueGene */
-	job_resources_t *job_resrcs;	/* details of allocated cores */
 	char **spank_job_env;		/* environment variables for job prolog
 					 * and epilog scripts as set by SPANK
 					 * plugins */
@@ -511,6 +556,12 @@ struct job_record {
 	uint16_t warn_time;		/* when to send signal before
 					 * end_time (secs) */
 	char *wckey;		        /* optional wckey */
+
+	/* Request number of switches support */
+	uint32_t req_switch;  /* Minimum number of switches                */
+	uint32_t wait4switch; /* Maximum time to wait for minimum switches */
+	bool     best_switch; /* true=min number of switches met           */
+	time_t wait4switch_start; /* Time started waiting for switch       */
 };
 
 /* Job dependency specification, used in "depend_list" within job_record */
@@ -521,6 +572,7 @@ struct job_record {
 						 * successfully */
 #define SLURM_DEPEND_SINGLETON		5	/* Only one job for this
 						 * user/name at a time */
+#define SLURM_DEPEND_EXPAND		6	/* Expand running job */
 struct	depend_spec {
 	uint16_t	depend_type;	/* SLURM_DEPEND_* type */
 	uint32_t	job_id;		/* SLURM job_id */
@@ -561,6 +613,7 @@ struct 	step_record {
 	uint32_t requid;            	/* requester user ID */
 	time_t start_time;      	/* step allocation start time */
 	uint32_t time_limit;      	/* step allocation time limit */
+	dynamic_plugin_data_t *select_jobinfo;/* opaque data, BlueGene */
 	uint32_t step_id;		/* step number */
 	slurm_step_layout_t *step_layout;/* info about how tasks are laid out
 					  * in the step */
@@ -588,14 +641,14 @@ extern List job_list;			/* list of job_record entries */
 */
 enum select_plugindata_info {
 	SELECT_CR_PLUGIN,    /* data-> uint32 1 if CR plugin */
-	SELECT_BITMAP,       /* data-> partially_idle_bitmap (CR support) */
+	SELECT_BITMAP,       /* Unused since version 2.0 */
 	SELECT_ALLOC_CPUS,   /* data-> uint16 alloc cpus (CR support) */
 	SELECT_ALLOC_LPS,    /* data-> uint32 alloc lps  (CR support) */
 	SELECT_AVAIL_MEMORY, /* data-> uint32 avail mem  (CR support) */
-	SELECT_STATIC_PART,   /* data-> uint16, 1 if static partitioning
-			      * BlueGene support */
-	SELECT_CONFIG_INFO  /* data-> List get .conf info from select
-			     * plugin */
+	SELECT_STATIC_PART,  /* data-> uint16, 1 if static partitioning
+	                      * BlueGene support */
+	SELECT_CONFIG_INFO   /* data-> List get .conf info from select
+			      * plugin */
 } ;
 
 /*****************************************************************************\
@@ -612,10 +665,10 @@ enum select_plugindata_info {
  *	agent request per node as they register.
  * IN job_id - id of the job to be killed
  * IN job_ptr - pointer to terminating job (NULL if unknown, e.g. orphaned)
- * IN node_ptr - pointer to the node on which the job resides
+ * IN node_name - name of the node on which the job resides
  */
 extern void abort_job_on_node(uint32_t job_id, struct job_record *job_ptr,
-			      struct node_record *node_ptr);
+			      char *node_name);
 
 /* Build a bitmap of nodes completing this job */
 extern void build_cg_bitmap(struct job_record *job_ptr);
@@ -646,13 +699,11 @@ extern struct job_record * create_job_record (int *error_code);
 extern struct part_record *create_part_record (void);
 
 /*
- * delete_step_records - delete step record for specified job_ptr
- * IN job_ptr - pointer to job table entry to have step records removed
- * IN filter  - determine which job steps to delete
- *              0: delete all job steps
- *              1: delete only job steps without a switch allocation
+ * job_limits_check - check the limits specified for the job.
+ * IN job_ptr - pointer to job table entry.
+ * RET WAIT_NO_REASON on success, fail status otherwise.
  */
-extern void delete_step_records (struct job_record *job_ptr, int filter);
+extern int job_limits_check(struct job_record **job_pptr);
 
 /*
  * delete_job_details - delete a job's detail record and clear it's pointer
@@ -679,6 +730,12 @@ extern int delete_partition(delete_part_msg_t *part_desc_ptr);
  */
 extern int delete_step_record (struct job_record *job_ptr, uint32_t step_id);
 
+/*
+ * delete_step_records - delete step record for specified job_ptr
+ * IN job_ptr - pointer to job table entry to have step records removed
+ */
+extern void delete_step_records (struct job_record *job_ptr);
+
 /*
  * drain_nodes - drain one or more nodes,
  *  no-op for nodes already drained or draining
@@ -835,6 +892,21 @@ extern bool is_node_down (char *name);
  */
 extern bool is_node_resp (char *name);
 
+/*
+ * allocated_session_in_use - check if an interactive session is already running
+ * IN new_alloc - allocation (alloc_node:alloc_sid) to test for
+ * Returns true if an interactive session of the same node:sid already exists.
+ */
+extern bool allocated_session_in_use(job_desc_msg_t *new_alloc);
+
+/*
+ * job_alloc_info - get details about an existing job allocation
+ * IN uid - job issuing the code
+ * IN job_id - ID of job for which info is requested
+ * OUT job_pptr - set to pointer to job record
+ */
+extern int job_alloc_info(uint32_t uid, uint32_t job_id,
+			  struct job_record **job_pptr);
 /*
  * job_allocate - create job_records for the supplied job specification and
  *	allocate nodes for it.
@@ -950,12 +1022,11 @@ extern int job_restart(checkpoint_msg_t *ckpt_ptr, uid_t uid,
  * IN signal - signal to send, SIGKILL == cancel the job
  * IN batch_flag - signal batch shell only if set
  * IN uid - uid of requesting user
+ * IN preempt - true if job being preempted
  * RET 0 on success, otherwise ESLURM error code
- * global: job_list - pointer global job list
- *	last_job_update - time of last job table update
  */
 extern int job_signal(uint32_t job_id, uint16_t signal, uint16_t batch_flag,
-		      uid_t uid);
+		      uid_t uid, bool preempt);
 
 /*
  * job_step_cancel - cancel the specified job step
@@ -1006,15 +1077,14 @@ extern int job_step_checkpoint_task_comp(checkpoint_task_comp_msg_t *ckpt_ptr,
  * IN uid - user id of the user issuing the RPC
  * IN conn_fd - file descriptor on which to send reply,
  *              -1 if none
- * IN clear_prio - if set, then clear the job's priority after
- *		   suspending it, this is used to distinguish
- *		   jobs explicitly suspended by admins/users from
- *		   jobs suspended though automatic preemption
+ * indf_susp IN - set if job is being suspended indefinitely by user or admin
+ *                and we should clear its priority, otherwise suspended
+ *		  temporarily for gang scheduling
  * IN protocol_version - slurm protocol version of client
  * RET 0 on success, otherwise ESLURM error code
  */
 extern int job_suspend(suspend_msg_t *sus_ptr, uid_t uid,
-		       slurm_fd_t conn_fd, bool clear_prio,
+		       slurm_fd_t conn_fd, bool indf_susp,
 		       uint16_t protocol_version);
 
 /*
@@ -1032,7 +1102,7 @@ extern int job_complete(uint32_t job_id, uid_t uid, bool requeue,
 			bool node_fail, uint32_t job_return_code);
 
 /*
- * job_independent - determine if this job has a depenentent job pending
+ * job_independent - determine if this job has a dependent job pending
  *	or if the job's scheduled begin time is in the future
  * IN job_ptr - pointer to job being tested
  * IN will_run - is this a test for will_run or not
@@ -1055,12 +1125,13 @@ extern int job_req_node_filter(struct job_record *job_ptr,
  * job_requeue - Requeue a running or pending batch job
  * IN uid - user id of user issuing the RPC
  * IN job_id - id of the job to be requeued
- * IN conn_fd - file descriptor on which to send reply, -1 if none
+ * IN conn_fd - file descriptor on which to send reply
  * IN protocol_version - slurm protocol version of client
+ * IN preempt - true if job being preempted
  * RET 0 on success, otherwise ESLURM error code
  */
 extern int job_requeue (uid_t uid, uint32_t job_id, slurm_fd_t conn_fd,
-			uint16_t protocol_version);
+			uint16_t protocol_version, bool preempt);
 
 /*
  * job_step_complete - note normal completion the specified job step
@@ -1131,6 +1202,14 @@ extern int kill_job_by_part_name(char *part_name);
 extern void kill_job_on_node(uint32_t job_id, struct job_record *job_ptr,
 			     struct node_record *node_ptr);
 
+/*
+ * kill_job_by_front_end_name - Given a front end node name, deallocate
+ *	resource for its jobs and kill them.
+ * IN node_name - name of a front end node
+ * RET number of jobs associated with this front end node
+ */
+extern int kill_job_by_front_end_name(char *node_name);
+
 /*
  * kill_running_job_by_node_name - Given a node name, deallocate RUNNING
  *	or COMPLETING jobs from the node or kill them
@@ -1254,15 +1333,6 @@ extern void node_not_resp (char *name, time_t msg_time);
  * and log that the node is not responding using a hostlist expression */
 extern void node_no_resp_msg(void);
 
-/*
- * job_alloc_info - get details about an existing job allocation
- * IN uid - job issuing the code
- * IN job_id - ID of job for which info is requested
- * OUT job_pptr - set to pointer to job record
- */
-extern int job_alloc_info(uint32_t uid, uint32_t job_id,
-			  struct job_record **job_pptr);
-
 /*
  * pack_all_jobs - dump all job information for all jobs in
  *	machine independent form (for network transmission)
@@ -1335,11 +1405,12 @@ extern void pack_all_part(char **buffer_ptr, int *buffer_size,
  * IN show_flags - job filtering options
  * IN/OUT buffer - buffer in which data is placed, pointers automatically
  *	updated
+ * IN uid - user requesting the data
  * NOTE: change _unpack_job_desc_msg() in common/slurm_protocol_pack.c
  *	  whenever the data format changes
  */
 extern void pack_job (struct job_record *dump_job_ptr, uint16_t show_flags,
-		      Buf buffer, uint16_t protocol_version);
+		      Buf buffer, uint16_t protocol_version, uid_t uid);
 
 /*
  * pack_part - dump all configuration information about a specific partition
@@ -1381,6 +1452,14 @@ extern void part_filter_set(uid_t uid);
 /* part_fini - free all memory associated with partition records */
 extern void part_fini (void);
 
+/*
+ * partition_in_use - determine whether a partition is in use by a RUNNING
+ *	PENDING or SUSPENDED job
+ * IN part_name - name of a partition
+ * RET true if the partition is in use, else false
+ */
+extern bool partition_in_use(char *part_name);
+
 /*
  * purge_old_job - purge old job records.
  *	The jobs must have completed at least MIN_JOB_AGE minutes ago.
@@ -1396,6 +1475,14 @@ void purge_old_job(void);
  */
 extern void rehash_jobs(void);
 
+/*
+ * Rebuild a job step's core_bitmap_job after a job has just changed size
+ * job_ptr IN - job that was just re-sized
+ * orig_job_node_bitmap IN - The job's original node bitmap
+ */
+extern void rebuild_step_bitmaps(struct job_record *job_ptr,
+				 bitstr_t *orig_job_node_bitmap);
+
 /* update first assigned job id as needed on reconfigure */
 extern void reset_first_job_id(void);
 
@@ -1455,6 +1542,14 @@ extern int send_nodes_to_accounting(time_t event_time);
  */
 extern void set_node_down (char *name, char *reason);
 
+/*
+ * set_node_down_ptr - make the specified compute node's state DOWN and
+ *	kill jobs as needed
+ * IN node_ptr - node_ptr to the node
+ * IN reason - why the node is DOWN
+ */
+void set_node_down_ptr (struct node_record *node_ptr, char *reason);
+
 /*
  * set_slurmctld_state_loc - create state directory as needed and "cd" to it
  */
@@ -1568,6 +1663,10 @@ extern void suspend_job_step(struct job_record *job_ptr);
  */
 extern int sync_job_files(void);
 
+/* After recovering job state, if using priority/basic then we increment the
+ * priorities of all jobs to avoid decrementing the base down to zero */
+extern void sync_job_priorities(void);
+
 /*
  * update_job - update a job's parameters per the supplied specifications
  * IN job_specs - a job's specification
diff --git a/src/slurmctld/srun_comm.c b/src/slurmctld/srun_comm.c
index 747423361..3108bca7f 100644
--- a/src/slurmctld/srun_comm.c
+++ b/src/slurmctld/srun_comm.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -96,6 +96,10 @@ extern void srun_allocate (uint32_t job_id)
 		msg_arg->num_cpu_groups	= job_resrcs_ptr->cpu_array_cnt;
 		msg_arg->cpus_per_node  = xmalloc(sizeof(uint16_t) *
 					  job_resrcs_ptr->cpu_array_cnt);
+		if (job_ptr->details) {
+			msg_arg->pn_min_memory = job_ptr->details->
+						 pn_min_memory;
+		}
 		memcpy(msg_arg->cpus_per_node,
 		       job_resrcs_ptr->cpu_array_value,
 		       (sizeof(uint16_t) * job_resrcs_ptr->cpu_array_cnt));
@@ -142,9 +146,11 @@ extern void srun_allocate_abort(struct job_record *job_ptr)
  */
 extern void srun_node_fail (uint32_t job_id, char *node_name)
 {
+#ifndef HAVE_FRONT_END
 	struct node_record *node_ptr;
+#endif
 	struct job_record *job_ptr = find_job_record (job_id);
-	int bit_position;
+	int bit_position = -1;
 	slurm_addr_t * addr;
 	srun_node_fail_msg_t *msg_arg;
 	ListIterator step_iterator;
@@ -155,13 +161,18 @@ extern void srun_node_fail (uint32_t job_id, char *node_name)
 	if (!job_ptr || !IS_JOB_RUNNING(job_ptr))
 		return;
 
+#ifdef HAVE_FRONT_END
+	/* Purge all jobs steps in front end mode */
+#else
 	if (!node_name || (node_ptr = find_node_record(node_name)) == NULL)
 		return;
 	bit_position = node_ptr - node_record_table_ptr;
+#endif
 
 	step_iterator = list_iterator_create(job_ptr->step_list);
 	while ((step_ptr = (struct step_record *) list_next(step_iterator))) {
-		if (!bit_test(step_ptr->step_node_bitmap, bit_position))
+		if ((bit_position >= 0) &&
+		    (!bit_test(step_ptr->step_node_bitmap, bit_position)))
 			continue;	/* job step not on this node */
 		if ( (step_ptr->port    == 0)    ||
 		     (step_ptr->host    == NULL) ||
@@ -298,21 +309,31 @@ extern int srun_user_message(struct job_record *job_ptr, char *msg)
 				   msg_arg);
 		return SLURM_SUCCESS;
 	} else if (job_ptr->batch_flag && IS_JOB_RUNNING(job_ptr)) {
+#ifndef HAVE_FRONT_END
 		struct node_record *node_ptr;
+#endif
 		job_notify_msg_t *notify_msg_ptr;
 		agent_arg_t *agent_arg_ptr;
-
+#ifdef HAVE_FRONT_END
+		if (job_ptr->batch_host == NULL)
+			return ESLURM_DISABLED;	/* no allocated nodes */
+		agent_arg_ptr = (agent_arg_t *) xmalloc(sizeof(agent_arg_t));
+		agent_arg_ptr->hostlist = hostlist_create(job_ptr->batch_host);
+#else
 		node_ptr = find_first_node_record(job_ptr->node_bitmap);
 		if (node_ptr == NULL)
 			return ESLURM_DISABLED;	/* no allocated nodes */
+		agent_arg_ptr = (agent_arg_t *) xmalloc(sizeof(agent_arg_t));
+		agent_arg_ptr->hostlist = hostlist_create(node_ptr->name);
+#endif
+		if (agent_arg_ptr->hostlist == NULL)
+			fatal("hostlist_create: malloc failure");
 		notify_msg_ptr = (job_notify_msg_t *) 
 				 xmalloc(sizeof(job_notify_msg_t));
 		notify_msg_ptr->job_id = job_ptr->job_id;
 		notify_msg_ptr->message = xstrdup(msg);
-		agent_arg_ptr = (agent_arg_t *) xmalloc(sizeof(agent_arg_t));
 		agent_arg_ptr->node_count = 1;
 		agent_arg_ptr->retry = 0;
-		agent_arg_ptr->hostlist = hostlist_create(node_ptr->name);
 		agent_arg_ptr->msg_type = REQUEST_JOB_NOTIFY;
 		agent_arg_ptr->msg_args = (void *) notify_msg_ptr;
 		/* Launch the RPC via agent */
@@ -354,6 +375,33 @@ extern void srun_job_complete (struct job_record *job_ptr)
 	list_iterator_destroy(step_iterator);
 }
 
+/*
+ * srun_job_suspend - notify salloc of suspend/resume operation
+ * IN job_ptr - pointer to the slurmctld job record
+ * IN op - SUSPEND_JOB or RESUME_JOB (enum suspend_opts from slurm.h)
+ * RET - true if message send, otherwise false
+ */
+extern bool srun_job_suspend (struct job_record *job_ptr, uint16_t op)
+{
+	slurm_addr_t * addr;
+	suspend_msg_t *msg_arg;
+	bool msg_sent = false;
+
+	xassert(job_ptr);
+
+	if (job_ptr->other_port && job_ptr->alloc_node && job_ptr->resp_host) {
+		addr = xmalloc(sizeof(struct sockaddr_in));
+		slurm_set_addr(addr, job_ptr->other_port, job_ptr->resp_host);
+		msg_arg = xmalloc(sizeof(suspend_msg_t));
+		msg_arg->job_id  = job_ptr->job_id;
+		msg_arg->op     = op;
+		_srun_agent_launch(addr, job_ptr->alloc_node,
+				   SRUN_REQUEST_SUSPEND, msg_arg);
+		msg_sent = true;
+	}
+	return msg_sent;
+}
+
 /*
  * srun_step_complete - notify srun of a job step's termination
  * IN step_ptr - pointer to the slurmctld job step record
diff --git a/src/slurmctld/srun_comm.h b/src/slurmctld/srun_comm.h
index f213dfd7e..bc5cf3ef3 100644
--- a/src/slurmctld/srun_comm.h
+++ b/src/slurmctld/srun_comm.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -65,13 +65,21 @@ extern void srun_allocate_abort(struct job_record *job_ptr);
  */
 extern void srun_exec(struct step_record *step_ptr, char **argv);
 
-
 /*
  * srun_job_complete - notify srun of a job's termination
  * IN job_ptr - pointer to the slurmctld job record
  */
 extern void srun_job_complete (struct job_record *job_ptr);
 
+
+/*
+ * srun_job_suspend - notify salloc of suspend/resume operation
+ * IN job_ptr - pointer to the slurmctld job record
+ * IN op - SUSPEND_JOB or RESUME_JOB (enum suspend_opts from slurm.h)
+ * RET - true if message send, otherwise false
+ */
+extern bool srun_job_suspend (struct job_record *job_ptr, uint16_t op);
+
 /*
  * srun_step_complete - notify srun of a job step's termination
  * IN step_ptr - pointer to the slurmctld job step record
diff --git a/src/slurmctld/state_save.c b/src/slurmctld/state_save.c
index 0e176549c..45c43fbd5 100644
--- a/src/slurmctld/state_save.c
+++ b/src/slurmctld/state_save.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -46,6 +46,7 @@
 #endif                          /* WITH_PTHREADS */
 
 #include "src/common/macros.h"
+#include "src/slurmctld/front_end.h"
 #include "src/slurmctld/reservation.h"
 #include "src/slurmctld/slurmctld.h"
 #include "src/slurmctld/trigger_mgr.h"
@@ -55,7 +56,7 @@
 static pthread_mutex_t state_save_lock = PTHREAD_MUTEX_INITIALIZER;
 static pthread_cond_t  state_save_cond = PTHREAD_COND_INITIALIZER;
 static int save_jobs = 0, save_nodes = 0, save_parts = 0;
-static int save_triggers = 0, save_resv = 0;
+static int save_front_end = 0, save_triggers = 0, save_resv = 0;
 static bool run_save_thread = true;
 
 /* fsync() and close() a file,
@@ -90,6 +91,15 @@ extern int fsync_and_close(int fd, char *file_type)
 	return rc;
 }
 
+/* Queue saving of front_end state information */
+extern void schedule_front_end_save(void)
+{
+	slurm_mutex_lock(&state_save_lock);
+	save_front_end++;
+	pthread_cond_broadcast(&state_save_cond);
+	slurm_mutex_unlock(&state_save_lock);
+}
+
 /* Queue saving of job state information */
 extern void schedule_job_save(void)
 {
@@ -163,7 +173,8 @@ extern void *slurmctld_state_save(void *no_data)
 		slurm_mutex_lock(&state_save_lock);
 		while (1) {
 			save_count = save_jobs + save_nodes + save_parts +
-				     save_resv + save_triggers;
+				     save_front_end + save_resv +
+				     save_triggers;
 			now = time(NULL);
 			save_delay = difftime(now, last_save);
 			if (save_count &&
@@ -186,9 +197,20 @@ extern void *slurmctld_state_save(void *no_data)
 			}
 		}
 
-		/* save job info if necessary */
+		/* save front_end node info if necessary */
 		run_save = false;
 		/* slurm_mutex_lock(&state_save_lock); done above */
+		if (save_front_end) {
+			run_save = true;
+			save_front_end = 0;
+		}
+		slurm_mutex_unlock(&state_save_lock);
+		if (run_save)
+			(void)dump_all_front_end_state();
+
+		/* save job info if necessary */
+		run_save = false;
+		slurm_mutex_lock(&state_save_lock);
 		if (save_jobs) {
 			run_save = true;
 			save_jobs = 0;
diff --git a/src/slurmctld/state_save.h b/src/slurmctld/state_save.h
index 8b70927fe..32dc75e0c 100644
--- a/src/slurmctld/state_save.h
+++ b/src/slurmctld/state_save.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -45,6 +45,9 @@
  * RET 0 on success or -1 on error */
 extern int fsync_and_close(int fd, char *file_type);
 
+/* Queue saving of front_end state information */
+extern void schedule_front_end_save(void);
+
 /* Queue saving of job state information */
 extern void schedule_job_save(void);
 
@@ -65,8 +68,9 @@ extern void shutdown_state_save(void);
 
 /*
  * Run as pthread to keep saving slurmctld state information as needed,
- * Use schedule_job_save(),  schedule_node_save(), schedule_part_save(),
- * schedule_trigger_save() to queue state save of each data structure
+ * Use schedule_job_save(), schedule_node_save(), schedule_part_save(),
+ * schedule_front_end_save(), and schedule_trigger_save() to queue state save
+ * of each data structures
  * no_data IN - unused
  * RET - NULL
  */
diff --git a/src/slurmctld/step_mgr.c b/src/slurmctld/step_mgr.c
index c5441c680..b9b389eb2 100644
--- a/src/slurmctld/step_mgr.c
+++ b/src/slurmctld/step_mgr.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -52,7 +52,7 @@
 #include <strings.h>
 #include <unistd.h>
 
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm_errno.h"
 
 #include "src/common/assoc_mgr.h"
 #include "src/common/bitstring.h"
@@ -76,22 +76,77 @@
 
 static int  _count_cpus(struct job_record *job_ptr, bitstr_t *bitmap,
 			uint32_t *usable_cpu_cnt);
-static struct step_record * _create_step_record (struct job_record *job_ptr);
+static struct step_record * _create_step_record(struct job_record *job_ptr);
 static void _dump_step_layout(struct step_record *step_ptr);
 static void _free_step_rec(struct step_record *step_ptr);
 static bool _is_mem_resv(void);
+static int  _opt_cpu_cnt(uint32_t step_min_cpus, bitstr_t *node_bitmap,
+			 uint32_t *usable_cpu_cnt);
+static int  _opt_node_cnt(uint32_t step_min_nodes, uint32_t step_max_nodes,
+			  int nodes_avail, int nodes_picked_cnt);
 static void _pack_ctld_job_step_info(struct step_record *step, Buf buffer,
 				     uint16_t protocol_version);
-static bitstr_t * _pick_step_nodes (struct job_record  *job_ptr,
-				    job_step_create_request_msg_t *step_spec,
-				    List step_gres_list, int cpus_per_task,
-				    bool batch_step, int *return_code);
+static bitstr_t * _pick_step_nodes(struct job_record *job_ptr,
+				   job_step_create_request_msg_t *step_spec,
+				   List step_gres_list, int cpus_per_task,
+				   uint32_t node_count,
+				   dynamic_plugin_data_t *select_jobinfo,
+				   int *return_code);
+static bitstr_t *_pick_step_nodes_cpus(struct job_record *job_ptr,
+				       bitstr_t *nodes_bitmap, int node_cnt,
+				       int cpu_cnt, uint32_t *usable_cpu_cnt);
 static hostlist_t _step_range_to_hostlist(struct step_record *step_ptr,
 				uint32_t range_first, uint32_t range_last);
 static int _step_hostname_to_inx(struct step_record *step_ptr,
 				char *node_name);
 static void _step_dealloc_lps(struct step_record *step_ptr);
 
+/* Determine how many more CPUs are required for a job step */
+static int  _opt_cpu_cnt(uint32_t step_min_cpus, bitstr_t *node_bitmap,
+			 uint32_t *usable_cpu_cnt)
+{
+	int rem_cpus = step_min_cpus;
+	int first_bit, last_bit, i;
+
+	if (!node_bitmap)
+		return rem_cpus;
+	xassert(usable_cpu_cnt);
+	first_bit = bit_ffs(node_bitmap);
+	if (first_bit >= 0)
+		last_bit = bit_fls(node_bitmap);
+	else
+		last_bit = first_bit - 1;
+	for (i = first_bit; i <= last_bit; i++) {
+		if (!bit_test(node_bitmap, i))
+			continue;
+		if (usable_cpu_cnt[i] >= rem_cpus)
+			return 0;
+		rem_cpus -= usable_cpu_cnt[i];
+	}
+
+	return rem_cpus;
+}
+
+/* Select the optimal node count for a job step based upon it's min and 
+ * max target, available resources, and nodes already picked */
+static int _opt_node_cnt(uint32_t step_min_nodes, uint32_t step_max_nodes,
+			 int nodes_avail, int nodes_picked_cnt)
+{
+	int target_node_cnt;
+
+	if ((step_max_nodes > step_min_nodes) && (step_max_nodes != NO_VAL))
+		target_node_cnt = step_max_nodes;
+	else
+		target_node_cnt = step_min_nodes;
+	if (target_node_cnt > nodes_picked_cnt)
+		target_node_cnt -= nodes_picked_cnt;
+	else
+		target_node_cnt = 0;
+	if (nodes_avail < target_node_cnt)
+		target_node_cnt = nodes_avail;
+
+	return target_node_cnt;
+}
 
 /*
  * _create_step_record - create an empty step_record for the specified job.
@@ -112,15 +167,14 @@ static struct step_record * _create_step_record(struct job_record *job_ptr)
 		return NULL;
 	}
 
-	step_ptr = (struct step_record *) xmalloc(sizeof (struct step_record));
+	step_ptr = (struct step_record *) xmalloc(sizeof(struct step_record));
 
 	last_job_update = time(NULL);
 	step_ptr->job_ptr = job_ptr;
-	step_ptr->start_time = time(NULL) ;
-	step_ptr->time_limit = INFINITE ;
+	step_ptr->start_time = time(NULL);
+	step_ptr->time_limit = INFINITE;
 	step_ptr->jobacct = jobacct_gather_g_create(NULL);
 	step_ptr->requid = -1;
-
 	if (list_append (job_ptr->step_list, step_ptr) == NULL)
 		fatal ("_create_step_record: unable to allocate memory");
 
@@ -131,12 +185,8 @@ static struct step_record * _create_step_record(struct job_record *job_ptr)
 /*
  * delete_step_records - delete step record for specified job_ptr
  * IN job_ptr - pointer to job table entry to have step records removed
- * IN filter  - determine which job steps to delete
- *              0: delete all job steps
- *              1: delete only job steps without a switch allocation
  */
-extern void
-delete_step_records (struct job_record *job_ptr, int filter)
+extern void delete_step_records (struct job_record *job_ptr)
 {
 	ListIterator step_iterator;
 	struct step_record *step_ptr;
@@ -146,9 +196,6 @@ delete_step_records (struct job_record *job_ptr, int filter)
 
 	last_job_update = time(NULL);
 	while ((step_ptr = (struct step_record *) list_next (step_iterator))) {
-		if ((filter == 1) && (step_ptr->switch_job))
-			continue;
-
 		list_remove (step_iterator);
 		_free_step_rec(step_ptr);
 	}
@@ -185,6 +232,7 @@ static void _free_step_rec(struct step_record *step_ptr)
 	xfree(step_ptr->gres);
 	if (step_ptr->gres_list)
 		list_destroy(step_ptr->gres_list);
+	select_g_select_jobinfo_free(step_ptr->select_jobinfo);
 	xfree(step_ptr);
 }
 
@@ -204,6 +252,9 @@ delete_step_record (struct job_record *job_ptr, uint32_t step_id)
 
 	xassert(job_ptr);
 	error_code = ENOENT;
+	if (!job_ptr->step_list)
+		return error_code;
+
 	step_iterator = list_iterator_create (job_ptr->step_list);
 	last_job_update = time(NULL);
 	while ((step_ptr = (struct step_record *) list_next (step_iterator))) {
@@ -337,7 +388,9 @@ int job_step_signal(uint32_t job_id, uint32_t step_id,
 void signal_step_tasks(struct step_record *step_ptr, uint16_t signal,
 		       slurm_msg_type_t msg_type)
 {
+#ifndef HAVE_FRONT_END
 	int i;
+#endif
 	kill_tasks_msg_t *kill_tasks_msg;
 	agent_arg_t *agent_args = NULL;
 
@@ -353,16 +406,21 @@ void signal_step_tasks(struct step_record *step_ptr, uint16_t signal,
 	kill_tasks_msg->job_step_id = step_ptr->step_id;
 	kill_tasks_msg->signal      = signal;
 
+#ifdef HAVE_FRONT_END
+	xassert(step_ptr->job_ptr->batch_host);
+	hostlist_push(agent_args->hostlist, step_ptr->job_ptr->batch_host);
+	if (agent_args->hostlist == NULL)
+		fatal("hostlist_create: malloc failure");
+	agent_args->node_count = 1;
+#else
 	for (i = 0; i < node_record_count; i++) {
 		if (bit_test(step_ptr->step_node_bitmap, i) == 0)
 			continue;
 		hostlist_push(agent_args->hostlist,
 			      node_record_table_ptr[i].name);
 		agent_args->node_count++;
-#ifdef HAVE_FRONT_END		/* Operate only on front-end */
-		break;
-#endif
 	}
+#endif
 
 	if (agent_args->node_count == 0) {
 		xfree(kill_tasks_msg);
@@ -394,8 +452,14 @@ void signal_step_tasks_on_node(char* node_name, struct step_record *step_ptr,
 	agent_args = xmalloc(sizeof(agent_arg_t));
 	agent_args->msg_type = msg_type;
 	agent_args->retry    = 1;
+#ifdef HAVE_FRONT_END
+	xassert(step_ptr->job_ptr->batch_host);
+	agent_args->node_count++;
+	agent_args->hostlist = hostlist_create(step_ptr->job_ptr->batch_host);
+#else
 	agent_args->node_count++;
 	agent_args->hostlist = hostlist_create(node_name);
+#endif
 	if (agent_args->hostlist == NULL)
 		fatal("hostlist_create: malloc failure");
 	kill_tasks_msg = xmalloc(sizeof(kill_tasks_msg_t));
@@ -441,6 +505,8 @@ int job_step_complete(uint32_t job_id, uint32_t step_id, uid_t uid,
 	if (step_ptr == NULL)
 		return ESLURM_INVALID_JOB_ID;
 
+	select_g_step_finish(step_ptr);
+
 	jobacct_storage_g_step_complete(acct_db_conn, step_ptr);
 	job_ptr->derived_ec = MAX(job_ptr->derived_ec, step_ptr->exit_code);
 
@@ -458,6 +524,109 @@ int job_step_complete(uint32_t job_id, uint32_t step_id, uid_t uid,
 	return SLURM_SUCCESS;
 }
 
+/* Pick nodes to be allocated to a job step. If a CPU count is also specified,
+ * then select nodes with a sufficient CPU count. */
+static bitstr_t *_pick_step_nodes_cpus(struct job_record *job_ptr,
+				       bitstr_t *nodes_bitmap, int node_cnt,
+				       int cpu_cnt, uint32_t *usable_cpu_cnt)
+{
+	bitstr_t *picked_node_bitmap = NULL;
+	int *usable_cpu_array;
+	int first_bit, last_bit;
+	int cpu_target;	/* Target number of CPUs per allocated node */
+	int rem_nodes, rem_cpus, save_rem_nodes, save_rem_cpus;
+	int i;
+
+	xassert(node_cnt > 0);
+	xassert(nodes_bitmap);
+	xassert(usable_cpu_cnt);
+	cpu_target = (cpu_cnt + node_cnt - 1) / node_cnt;
+	if (cpu_target > 1024)
+		info("_pick_step_nodes_cpus: high cpu_target (%d)",cpu_target);
+	if ((cpu_cnt <= node_cnt) || (cpu_target > 1024))
+		return bit_pick_cnt(nodes_bitmap, node_cnt);
+
+	/* Need to satisfy both a node count and a cpu count */
+	picked_node_bitmap = bit_alloc(node_record_count);
+	usable_cpu_array = xmalloc(sizeof(int) * cpu_target);
+	rem_nodes = node_cnt;
+	rem_cpus  = cpu_cnt;
+	first_bit = bit_ffs(nodes_bitmap);
+	if (first_bit >= 0)
+		last_bit  = bit_fls(nodes_bitmap);
+	else
+		last_bit = first_bit - 1;
+	for (i = first_bit; i <= last_bit; i++) {
+		if (!bit_test(nodes_bitmap, i))
+			continue;
+		if (usable_cpu_cnt[i] < cpu_target) {
+			usable_cpu_array[usable_cpu_cnt[i]]++;
+			continue;
+		}
+		bit_set(picked_node_bitmap, i);
+		rem_cpus -= usable_cpu_cnt[i];
+		rem_nodes--;
+		if ((rem_cpus <= 0) && (rem_nodes <= 0)) {
+			/* Satisfied request */
+			xfree(usable_cpu_array);
+			return picked_node_bitmap;
+		}
+		if (rem_nodes == 0) {	/* Reached node limit, not CPU limit */
+			xfree(usable_cpu_array);
+			bit_free(picked_node_bitmap);
+			return NULL;
+		}
+	}
+
+	/* Need more resources. Determine what CPU counts per node to use */
+	save_rem_nodes = rem_nodes;
+	save_rem_cpus  = rem_cpus;
+	usable_cpu_array[0] = 0;
+	for (i = (cpu_target - 1); i > 0; i--) {
+		if (usable_cpu_array[i] == 0)
+			continue;
+		if (usable_cpu_array[i] > rem_nodes)
+			usable_cpu_array[i] = rem_nodes;
+		if (rem_nodes > 0) {
+			rem_nodes -= usable_cpu_array[i];
+			rem_cpus  -= (usable_cpu_array[i] * i);
+		}
+	}
+	if ((rem_cpus > 0) || (rem_nodes > 0)){	/* Can not satisfy request */
+		xfree(usable_cpu_array);
+		bit_free(picked_node_bitmap);
+		return NULL;
+	}
+	rem_nodes = save_rem_nodes;
+	rem_cpus  = save_rem_cpus;
+
+	/* Pick nodes with CPU counts below original target */
+	for (i = first_bit; i <= last_bit; i++) {
+		if (!bit_test(nodes_bitmap, i))
+			continue;
+		if (usable_cpu_cnt[i] >= cpu_target)
+			continue;	/* already picked */
+		if (usable_cpu_array[usable_cpu_cnt[i]] == 0)
+			continue;
+		usable_cpu_array[usable_cpu_cnt[i]]--;
+		bit_set(picked_node_bitmap, i);
+		rem_cpus -= usable_cpu_cnt[i];
+		rem_nodes--;
+		if ((rem_cpus <= 0) && (rem_nodes <= 0)) {
+			/* Satisfied request */
+			xfree(usable_cpu_array);
+			return picked_node_bitmap;
+		}
+		if (rem_nodes == 0)	/* Reached node limit */
+			break;
+	}
+
+	/* Can not satisfy request */
+	xfree(usable_cpu_array);
+	bit_free(picked_node_bitmap);
+	return NULL;
+}
+
 /*
  * _pick_step_nodes - select nodes for a job step that satisfy its requirements
  *	we satisfy the super-set of constraints.
@@ -465,7 +634,7 @@ int job_step_complete(uint32_t job_id, uint32_t step_id, uid_t uid,
  * IN step_spec - job step specification
  * IN step_gres_list - job step's gres requirement details
  * IN cpus_per_task - NOTE could be zero
- * IN batch_step - if set then step is a batch script
+ * IN node_count - How many real nodes a select plugin should be looking for
  * OUT return_code - exit code or SLURM_SUCCESS
  * global: node_record_table_ptr - pointer to global node table
  * NOTE: returns all of a job's nodes if step_spec->node_count == INFINITE
@@ -474,9 +643,10 @@ int job_step_complete(uint32_t job_id, uint32_t step_id, uid_t uid,
 static bitstr_t *
 _pick_step_nodes (struct job_record  *job_ptr,
 		  job_step_create_request_msg_t *step_spec,
-		  List step_gres_list, int cpus_per_task,
-		  bool batch_step, int *return_code)
+		  List step_gres_list, int cpus_per_task, uint32_t node_count,
+		  dynamic_plugin_data_t *select_jobinfo, int *return_code)
 {
+	int node_inx, first_bit, last_bit;
 	struct node_record *node_ptr;
 	bitstr_t *nodes_avail = NULL, *nodes_idle = NULL;
 	bitstr_t *nodes_picked = NULL, *node_tmp = NULL;
@@ -504,6 +674,16 @@ _pick_step_nodes (struct job_record  *job_ptr,
 		return NULL;
 	}
 
+	/* If we have a select plugin that figures this out for us
+	 * just return.  Else just do the normal operations.
+	 */
+	if ((nodes_picked = select_g_step_pick_nodes(
+		     job_ptr, select_jobinfo, node_count)))
+		return nodes_picked;
+#ifdef HAVE_BGQ
+	*return_code = ESLURM_NODES_BUSY;
+	return NULL;
+#endif
 	nodes_avail = bit_copy (job_ptr->node_bitmap);
 	if (nodes_avail == NULL)
 		fatal("bit_copy malloc failure");
@@ -692,14 +872,14 @@ _pick_step_nodes (struct job_record  *job_ptr,
 	}
 
 	if ((step_spec->mem_per_cpu && _is_mem_resv()) || step_spec->gres) {
-		int node_inx = -1, first_bit, last_bit;
 		int fail_mode = ESLURM_INVALID_TASK_MEMORY;
 		uint32_t tmp_mem, tmp_cpus, avail_cpus, total_cpus;
 		uint32_t avail_tasks, total_tasks;
+
 		usable_cpu_cnt = xmalloc(sizeof(uint32_t) * node_record_count);
 		first_bit = bit_ffs(job_resrcs_ptr->node_bitmap);
 		last_bit  = bit_fls(job_resrcs_ptr->node_bitmap);
-		for (i=first_bit; i<=last_bit; i++) {
+		for (i=first_bit, node_inx=-1; i<=last_bit; i++) {
 			if (!bit_test(job_resrcs_ptr->node_bitmap, i))
 				continue;
 			node_inx++;
@@ -903,11 +1083,16 @@ _pick_step_nodes (struct job_record  *job_ptr,
 	}
 
 	if (slurm_get_debug_flags() & DEBUG_FLAG_STEPS) {
-		char *temp1, *temp2;
+		char *temp1, *temp2, *temp3;
 		temp1 = bitmap2node_name(nodes_avail);
 		temp2 = bitmap2node_name(nodes_idle);
-		info("step pick %u-%u nodes, avail:%s idle:%s",
-		     step_spec->min_nodes, step_spec->max_nodes, temp1, temp2);
+		if (step_spec->node_list)
+			temp3 = step_spec->node_list;
+		else
+			temp3 = "NONE";
+		info("step pick %u-%u nodes, avail:%s idle:%s picked:%s",
+		     step_spec->min_nodes, step_spec->max_nodes, temp1, temp2,
+		     temp3);
 		xfree(temp1);
 		xfree(temp2);
 	}
@@ -930,39 +1115,83 @@ _pick_step_nodes (struct job_record  *job_ptr,
 			*return_code = ESLURM_TOO_MANY_REQUESTED_CPUS;
 			goto cleanup;
 		}
-		//step_spec->cpu_count = 0;
 	}
 
 	if (step_spec->min_nodes) {
+		int cpus_needed, node_avail_cnt, nodes_needed;
+
+		if (usable_cpu_cnt == NULL) {
+			usable_cpu_cnt = xmalloc(sizeof(uint32_t) *
+						 node_record_count);
+			first_bit = bit_ffs(job_resrcs_ptr->node_bitmap);
+			last_bit  = bit_fls(job_resrcs_ptr->node_bitmap);
+			for (i=first_bit, node_inx=-1; i<=last_bit; i++) {
+				if (!bit_test(job_resrcs_ptr->node_bitmap, i))
+					continue;
+				node_inx++;
+				usable_cpu_cnt[i] = job_resrcs_ptr->
+						    cpus[node_inx];
+			}
+
+		}
 		nodes_picked_cnt = bit_set_count(nodes_picked);
 		if (slurm_get_debug_flags() & DEBUG_FLAG_STEPS) {
-			verbose("got %u %d", step_spec->min_nodes,
-				nodes_picked_cnt);
-		}
-		if (nodes_idle &&
-		    (bit_set_count(nodes_idle) >= step_spec->min_nodes) &&
-		    (step_spec->min_nodes > nodes_picked_cnt)) {
-			node_tmp = bit_pick_cnt(nodes_idle,
-						(step_spec->min_nodes -
-						 nodes_picked_cnt));
-			if (node_tmp == NULL)
-				goto cleanup;
-			bit_or  (nodes_picked, node_tmp);
-			bit_not (node_tmp);
-			bit_and (nodes_idle, node_tmp);
-			bit_and (nodes_avail, node_tmp);
-			FREE_NULL_BITMAP (node_tmp);
-			node_tmp = NULL;
-			nodes_picked_cnt = step_spec->min_nodes;
+			verbose("step picked %d of %u nodes",
+				nodes_picked_cnt, step_spec->min_nodes);
+		}
+		if (nodes_idle)
+			node_avail_cnt = bit_set_count(nodes_idle);
+		else
+			node_avail_cnt = 0;
+		nodes_needed = step_spec->min_nodes - nodes_picked_cnt;
+		if ((nodes_needed > 0) &&
+		    (node_avail_cnt >= nodes_needed)) {
+			cpus_needed = _opt_cpu_cnt(step_spec->cpu_count,
+						   nodes_picked,
+						   usable_cpu_cnt);
+			nodes_needed = _opt_node_cnt(step_spec->min_nodes,
+						     step_spec->max_nodes,
+						     node_avail_cnt,
+						     nodes_picked_cnt);
+			node_tmp = _pick_step_nodes_cpus(job_ptr, nodes_idle,
+							 nodes_needed,
+							 cpus_needed,
+							 usable_cpu_cnt);
+			if (node_tmp) {
+				bit_or  (nodes_picked, node_tmp);
+				bit_not (node_tmp);
+				bit_and (nodes_idle, node_tmp);
+				bit_and (nodes_avail, node_tmp);
+				FREE_NULL_BITMAP (node_tmp);
+				node_tmp = NULL;
+				nodes_picked_cnt = step_spec->min_nodes;
+				nodes_needed = 0;
+			}
 		}
-		if (step_spec->min_nodes > nodes_picked_cnt) {
-			node_tmp = bit_pick_cnt(nodes_avail,
-						(step_spec->min_nodes -
-						 nodes_picked_cnt));
+		if (nodes_avail)
+			node_avail_cnt = bit_set_count(nodes_avail);
+		else
+			node_avail_cnt = 0;
+		if ((nodes_needed > 0) &&
+		    (node_avail_cnt >= nodes_needed)) {
+			cpus_needed = _opt_cpu_cnt(step_spec->cpu_count,
+						   nodes_picked,
+						   usable_cpu_cnt);
+			nodes_needed = _opt_node_cnt(step_spec->min_nodes,
+						     step_spec->max_nodes,
+						     node_avail_cnt,
+						     nodes_picked_cnt);
+			node_tmp = _pick_step_nodes_cpus(job_ptr, nodes_avail,
+							 nodes_needed,
+							 cpus_needed,
+							 usable_cpu_cnt);
 			if (node_tmp == NULL) {
-				if (step_spec->min_nodes <=
-				    (bit_set_count(nodes_avail) +
-				     nodes_picked_cnt + mem_blocked_nodes)) {
+				int avail_node_cnt = bit_set_count(nodes_avail);
+				if ((avail_node_cnt <
+				     bit_set_count(nodes_idle)) &&
+				    (step_spec->min_nodes <=
+				     (avail_node_cnt + nodes_picked_cnt +
+				      mem_blocked_nodes))) {
 					*return_code = ESLURM_NODES_BUSY;
 				} else if (!bit_super_set(job_ptr->node_bitmap,
 							  up_node_bitmap)) {
@@ -976,10 +1205,18 @@ _pick_step_nodes (struct job_record  *job_ptr,
 			FREE_NULL_BITMAP (node_tmp);
 			node_tmp = NULL;
 			nodes_picked_cnt = step_spec->min_nodes;
+			nodes_needed = 0;
+		} else if (nodes_needed > 0) {
+			if (step_spec->min_nodes <=
+			    (nodes_picked_cnt + mem_blocked_nodes)) {
+				*return_code = ESLURM_NODES_BUSY;
+			} else if (!bit_super_set(job_ptr->node_bitmap,
+						  up_node_bitmap)) {
+				*return_code = ESLURM_NODE_NOT_AVAIL;
+			}
+			goto cleanup;
 		}
-	}
-
-	if (step_spec->cpu_count) {
+	} else if (step_spec->cpu_count) {
 		/* make sure the selected nodes have enough cpus */
 		cpus_picked_cnt = _count_cpus(job_ptr, nodes_picked,
 					      usable_cpu_cnt);
@@ -1072,7 +1309,7 @@ static int _count_cpus(struct job_record *job_ptr, bitstr_t *bitmap,
 
 	if (job_ptr->job_resrcs && job_ptr->job_resrcs->cpus &&
 	    job_ptr->job_resrcs->node_bitmap) {
-		int node_inx = 0;
+		int node_inx = -1;
 		for (i = 0, node_ptr = node_record_table_ptr;
 		     i < node_record_count; i++, node_ptr++) {
 			if (!bit_test(job_ptr->job_resrcs->node_bitmap, i))
@@ -1086,7 +1323,7 @@ static int _count_cpus(struct job_record *job_ptr, bitstr_t *bitmap,
 			if (usable_cpu_cnt)
 				sum += usable_cpu_cnt[i];
 			else
-				sum += job_ptr->job_resrcs->cpus[node_inx-1];
+				sum += job_ptr->job_resrcs->cpus[node_inx];
 		}
 	} else {
 		error("job %u lacks cpus array", job_ptr->job_id);
@@ -1117,11 +1354,12 @@ static void _pick_step_cores(struct step_record *step_ptr,
 	bool use_all_cores;
 	static int last_core_inx;
 
-	if (!step_ptr->core_bitmap_job) {
-		step_ptr->core_bitmap_job = bit_alloc(bit_size(job_resrcs_ptr->
-							       core_bitmap));
-	}
-	if (get_job_resources_cnt(job_resrcs_ptr, job_node_inx, &sockets, &cores))
+	if (!step_ptr->core_bitmap_job)
+		step_ptr->core_bitmap_job =
+			bit_alloc(bit_size(job_resrcs_ptr->core_bitmap));
+
+	if (get_job_resources_cnt(job_resrcs_ptr, job_node_inx,
+				  &sockets, &cores))
 		fatal("get_job_resources_cnt");
 
 	if (task_cnt == (cores * sockets))
@@ -1290,7 +1528,8 @@ static void _dump_step_layout(struct step_record *step_ptr)
 	int i, bit_inx, core_inx, node_inx, rep, sock_inx;
 
 	if ((step_ptr->core_bitmap_job == NULL) ||
-	    (job_resrcs_ptr == NULL) || (job_resrcs_ptr->cores_per_socket == NULL))
+	    (job_resrcs_ptr == NULL) ||
+	    (job_resrcs_ptr->cores_per_socket == NULL))
 		return;
 
 	info("====================");
@@ -1432,12 +1671,18 @@ step_create(job_step_create_request_msg_t *step_specs,
 	struct step_record *step_ptr;
 	struct job_record  *job_ptr;
 	bitstr_t *nodeset;
-	int cpus_per_task, node_count, ret_code, i;
+	int cpus_per_task, ret_code, i;
+	uint32_t node_count = 0;
 	time_t now = time(NULL);
 	char *step_node_list = NULL;
 	uint32_t orig_cpu_count;
 	List step_gres_list = (List) NULL;
-
+	dynamic_plugin_data_t *select_jobinfo = NULL;
+#if defined HAVE_BG
+	static uint16_t cpus_per_mp = (uint16_t)NO_VAL;
+#else
+	uint32_t max_tasks;
+#endif
 	*new_step_record = NULL;
 	job_ptr = find_job_record (step_specs->job_id);
 	if (job_ptr == NULL)
@@ -1494,6 +1739,52 @@ step_create(job_step_create_request_msg_t *step_specs,
 	    _test_strlen(step_specs->node_list, "node_list", 1024*64))
 		return ESLURM_PATHNAME_TOO_LONG;
 
+	if (job_ptr->next_step_id >= slurmctld_conf.max_step_cnt)
+		return ESLURM_STEP_LIMIT;
+
+#if defined HAVE_BG
+	select_g_select_jobinfo_get(job_ptr->select_jobinfo,
+				    SELECT_JOBDATA_NODE_CNT,
+				    &node_count);
+
+#if defined HAVE_BGQ
+	if (step_specs->min_nodes < node_count) {
+		if (step_specs->min_nodes > 512) {
+			error("step asked for more than 512 nodes but "
+			      "less than the allocation, on a "
+			      "bluegene/Q system that isn't allowed.");
+			return ESLURM_INVALID_NODE_COUNT;
+		}
+		/* We are asking for less than we have. */
+		node_count = step_specs->min_nodes;
+
+		step_specs->min_nodes = 1;
+		step_specs->max_nodes = 1;
+	} else if (node_count == step_specs->min_nodes) {
+		step_specs->min_nodes = job_ptr->details->min_nodes;
+		step_specs->max_nodes = job_ptr->details->max_nodes;
+	} else {
+		error("bad node count %u only have %u", step_specs->min_nodes,
+		      node_count);
+		return ESLURM_INVALID_NODE_COUNT;
+	}
+#else
+	/* No sub-block steps in BGL/P, always give them the full allocation */
+	step_specs->min_nodes = job_ptr->details->min_nodes;
+	step_specs->max_nodes = job_ptr->details->max_nodes;
+#endif
+
+	if (cpus_per_mp == (uint16_t)NO_VAL)
+		select_g_alter_node_cnt(SELECT_GET_NODE_CPU_CNT,
+					&cpus_per_mp);
+	/* The computation below yields the correct cpu_count; we will
+	   reset cpu_count to 0 later, so just pretend that we are
+	   overcommitting.
+	*/
+	step_specs->cpu_count = node_count * cpus_per_mp;
+	step_specs->overcommit = 1;
+	step_specs->exclusive = 0;
+#endif
 	/* if the overcommit flag is checked, we 0 set cpu_count=0
 	 * which makes it so we don't check to see the available cpus
 	 */
@@ -1527,6 +1818,10 @@ step_create(job_step_create_request_msg_t *step_specs,
 	if (step_specs->no_kill > 1)
 		step_specs->no_kill = 1;
 
+	if (step_specs->gres && !strcasecmp(step_specs->gres, "NONE"))
+		xfree(step_specs->gres);
+	else if (step_specs->gres == NULL)
+		step_specs->gres = xstrdup(job_ptr->gres);
 	i = gres_plugin_step_state_validate(step_specs->gres, &step_gres_list,
 					    job_ptr->gres_list, job_ptr->job_id,
 					    NO_VAL);
@@ -1537,15 +1832,32 @@ step_create(job_step_create_request_msg_t *step_specs,
 	}
 
 	job_ptr->time_last_active = now;
+
+	/* make sure this exists since we need it so we don't core on
+	 * a xassert */
+	select_jobinfo = select_g_select_jobinfo_alloc();
+
 	nodeset = _pick_step_nodes(job_ptr, step_specs, step_gres_list,
-				   cpus_per_task, batch_step, &ret_code);
+				   cpus_per_task, node_count, select_jobinfo,
+				   &ret_code);
 	if (nodeset == NULL) {
 		if (step_gres_list)
 			list_destroy(step_gres_list);
+		select_g_select_jobinfo_free(select_jobinfo);
 		return ret_code;
 	}
+#ifdef HAVE_BGQ
+	/* Things might have changed here since sometimes users ask for
+	   the wrong size in cnodes to make a block.
+	*/
+	select_g_select_jobinfo_get(select_jobinfo,
+				    SELECT_JOBDATA_NODE_CNT,
+				    &node_count);
+	step_specs->cpu_count = node_count * cpus_per_mp;
+	orig_cpu_count =  step_specs->cpu_count;
+#else
 	node_count = bit_set_count(nodeset);
-
+#endif
 	if (step_specs->num_tasks == NO_VAL) {
 		if (step_specs->cpu_count != NO_VAL)
 			step_specs->num_tasks = step_specs->cpu_count;
@@ -1553,21 +1865,24 @@ step_create(job_step_create_request_msg_t *step_specs,
 			step_specs->num_tasks = node_count;
 	}
 
-	if (step_specs->num_tasks >
-			(node_count*slurmctld_conf.max_tasks_per_node)) {
-		error("step has invalid task count: %u",
-		      step_specs->num_tasks);
+#ifndef HAVE_BG
+	max_tasks = node_count * slurmctld_conf.max_tasks_per_node;
+	if (step_specs->num_tasks > max_tasks) {
+		error("step has invalid task count: %u max is %u",
+		      step_specs->num_tasks, max_tasks);
 		if (step_gres_list)
 			list_destroy(step_gres_list);
 		FREE_NULL_BITMAP(nodeset);
+		select_g_select_jobinfo_free(select_jobinfo);
 		return ESLURM_BAD_TASK_COUNT;
 	}
-
-	step_ptr = _create_step_record (job_ptr);
+#endif
+	step_ptr = _create_step_record(job_ptr);
 	if (step_ptr == NULL) {
 		if (step_gres_list)
 			list_destroy(step_gres_list);
 		FREE_NULL_BITMAP(nodeset);
+		select_g_select_jobinfo_free(select_jobinfo);
 		return ESLURMD_TOOMANYSTEPS;
 	}
 	step_ptr->step_id = job_ptr->next_step_id++;
@@ -1631,6 +1946,9 @@ step_create(job_step_create_request_msg_t *step_specs,
 	else
 		step_ptr->network = xstrdup(job_ptr->network);
 
+	step_ptr->select_jobinfo = select_jobinfo;
+	select_jobinfo = NULL;
+
 	/* the step time_limit is recorded as submitted (INFINITE
 	 * or partition->max_time by default), but the allocation
 	 * time limits may cut it short */
@@ -1654,8 +1972,7 @@ step_create(job_step_create_request_msg_t *step_specs,
 	if (!batch_step) {
 		step_ptr->step_layout =
 			step_layout_create(step_ptr,
-					   step_node_list,
-					   step_specs->min_nodes,
+					   step_node_list, node_count,
 					   step_specs->num_tasks,
 					   (uint16_t)cpus_per_task,
 					   step_specs->task_dist,
@@ -1708,7 +2025,7 @@ step_create(job_step_create_request_msg_t *step_specs,
 		fatal ("step_create: checkpoint_alloc_jobinfo error");
 	*new_step_record = step_ptr;
 
-	if(!with_slurmdbd && !job_ptr->db_index)
+	if (!with_slurmdbd && !job_ptr->db_index)
 		jobacct_storage_g_job_start(acct_db_conn, job_ptr);
 
 	jobacct_storage_g_step_start(acct_db_conn, step_ptr);
@@ -1845,17 +2162,26 @@ extern slurm_step_layout_t *step_layout_create(struct step_record *step_ptr,
 static void _pack_ctld_job_step_info(struct step_record *step_ptr, Buf buffer,
 				     uint16_t protocol_version)
 {
-	int task_cnt;
+	uint32_t task_cnt, cpu_cnt;
 	char *node_list = NULL;
 	time_t begin_time, run_time;
 	bitstr_t *pack_bitstr;
-#ifdef HAVE_FRONT_END
+
+//#if defined HAVE_FRONT_END && (!defined HAVE_BGQ || !defined HAVE_BG_FILES)
+#if defined HAVE_FRONT_END && (!defined HAVE_BGQ)
 	/* On front-end systems, the steps only execute on one node.
 	 * We need to make them appear like they are running on the job's
 	 * entire allocation (which they really are). */
 	task_cnt = step_ptr->job_ptr->cpu_cnt;
 	node_list = step_ptr->job_ptr->nodes;
 	pack_bitstr = step_ptr->job_ptr->node_bitmap;
+
+	if (step_ptr->job_ptr->total_cpus)
+		cpu_cnt = step_ptr->job_ptr->total_cpus;
+	else if(step_ptr->job_ptr->details)
+		cpu_cnt = step_ptr->job_ptr->details->min_cpus;
+	else
+		cpu_cnt = step_ptr->job_ptr->cpu_cnt;
 #else
 	pack_bitstr = step_ptr->step_node_bitmap;
 	if (step_ptr->step_layout) {
@@ -1868,23 +2194,45 @@ static void _pack_ctld_job_step_info(struct step_record *step_ptr, Buf buffer,
 			task_cnt = step_ptr->job_ptr->cpu_cnt;
 		node_list = step_ptr->job_ptr->nodes;
 	}
+	cpu_cnt = step_ptr->cpu_count;
 #endif
 
-	if(protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_2_3_PROTOCOL_VERSION) {
 		pack32(step_ptr->job_ptr->job_id, buffer);
 		pack32(step_ptr->step_id, buffer);
 		pack16(step_ptr->ckpt_interval, buffer);
 		pack32(step_ptr->job_ptr->user_id, buffer);
-#ifdef HAVE_BG
-		if (step_ptr->job_ptr->total_cpus)
-			pack32(step_ptr->job_ptr->total_cpus, buffer);
-		else if(step_ptr->job_ptr->details)
-			pack32(step_ptr->job_ptr->details->min_cpus, buffer);
-		else
-			pack32(step_ptr->job_ptr->cpu_cnt, buffer);
-#else
-		pack32(step_ptr->cpu_count, buffer);
-#endif
+		pack32(cpu_cnt, buffer);
+		pack32(task_cnt, buffer);
+		pack32(step_ptr->time_limit, buffer);
+
+		pack_time(step_ptr->start_time, buffer);
+		if (IS_JOB_SUSPENDED(step_ptr->job_ptr)) {
+			run_time = step_ptr->pre_sus_time;
+		} else {
+			begin_time = MAX(step_ptr->start_time,
+					 step_ptr->job_ptr->suspend_time);
+			run_time = step_ptr->pre_sus_time +
+				difftime(time(NULL), begin_time);
+		}
+		pack_time(run_time, buffer);
+
+		packstr(step_ptr->job_ptr->partition, buffer);
+		packstr(step_ptr->resv_ports, buffer);
+		packstr(node_list, buffer);
+		packstr(step_ptr->name, buffer);
+		packstr(step_ptr->network, buffer);
+		pack_bit_fmt(pack_bitstr, buffer);
+		packstr(step_ptr->ckpt_dir, buffer);
+		packstr(step_ptr->gres, buffer);
+		select_g_select_jobinfo_pack(step_ptr->select_jobinfo, buffer,
+					     protocol_version);
+	} else if (protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
+		pack32(step_ptr->job_ptr->job_id, buffer);
+		pack32(step_ptr->step_id, buffer);
+		pack16(step_ptr->ckpt_interval, buffer);
+		pack32(step_ptr->job_ptr->user_id, buffer);
+		pack32(cpu_cnt, buffer);
 		pack32(task_cnt, buffer);
 		pack32(step_ptr->time_limit, buffer);
 
@@ -1912,16 +2260,7 @@ static void _pack_ctld_job_step_info(struct step_record *step_ptr, Buf buffer,
 		pack32(step_ptr->step_id, buffer);
 		pack16(step_ptr->ckpt_interval, buffer);
 		pack32(step_ptr->job_ptr->user_id, buffer);
-#ifdef HAVE_BG
-		if (step_ptr->job_ptr->total_cpus)
-			pack32(step_ptr->job_ptr->total_cpus, buffer);
-		else if(step_ptr->job_ptr->details)
-			pack32(step_ptr->job_ptr->details->min_cpus, buffer);
-		else
-			pack32(step_ptr->job_ptr->cpu_cnt, buffer);
-#else
-		pack32(step_ptr->cpu_count, buffer);
-#endif
+		pack32(cpu_cnt, buffer);
 		pack32(task_cnt, buffer);
 		pack32(step_ptr->time_limit, buffer);
 
@@ -2321,28 +2660,34 @@ extern int step_partial_comp(step_complete_msg_t *req, uid_t uid,
 	if (!step_ptr->exit_node_bitmap) {
 		/* initialize the node bitmap for exited nodes */
 		nodes = bit_set_count(step_ptr->step_node_bitmap);
-		if (req->range_last >= nodes) {	/* range is zero origin */
-			error("step_partial_comp: StepID=%u.%u last=%u "
-			      "nodes=%d",
-			      req->job_id, req->job_step_id, req->range_last,
-			      nodes);
-			return EINVAL;
-		}
+#ifdef HAVE_BGQ
+		/* For BGQ we only have 1 real task, so if it exits,
+		   the whole step is ending as well.
+		*/
+		req->range_last = nodes - 1;
+#endif
 		step_ptr->exit_node_bitmap = bit_alloc(nodes);
 		if (step_ptr->exit_node_bitmap == NULL)
 			fatal("bit_alloc: %m");
 		step_ptr->exit_code = req->step_rc;
 	} else {
 		nodes = _bitstr_bits(step_ptr->exit_node_bitmap);
-		if (req->range_last >= nodes) {	/* range is zero origin */
-			error("step_partial_comp: StepID=%u.%u last=%u "
-			      "nodes=%d",
-			      req->job_id, req->job_step_id, req->range_last,
-			      nodes);
-			return EINVAL;
-		}
+#ifdef HAVE_BGQ
+		/* For BGQ we only have 1 real task, so if it exits,
+		   the whole step is ending as well.
+		*/
+		req->range_last = nodes - 1;
+#endif
 		step_ptr->exit_code = MAX(step_ptr->exit_code, req->step_rc);
 	}
+	if ((req->range_first >= nodes) || (req->range_last >= nodes) ||
+	    (req->range_first > req->range_last)) {
+		/* range is zero origin */
+		error("step_partial_comp: StepID=%u.%u range=%u-%u nodes=%d",
+		      req->job_id, req->job_step_id, req->range_first,
+		      req->range_last, nodes);
+		return EINVAL;
+	}
 
 	bit_nset(step_ptr->exit_node_bitmap,
 		 req->range_first, req->range_last);
@@ -2590,6 +2935,9 @@ extern void dump_job_step_state(struct job_record *job_ptr,
 	}
 	checkpoint_pack_jobinfo(step_ptr->check_job, buffer,
 				SLURM_PROTOCOL_VERSION);
+	select_g_select_jobinfo_pack(step_ptr->select_jobinfo, buffer,
+				     SLURM_PROTOCOL_VERSION);
+
 }
 
 /*
@@ -2615,8 +2963,77 @@ extern int load_step_state(struct job_record *job_ptr, Buf buffer,
 	check_jobinfo_t check_tmp = NULL;
 	slurm_step_layout_t *step_layout = NULL;
 	List gres_list = NULL;
+	dynamic_plugin_data_t *select_jobinfo = NULL;
 
-	if(protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_2_3_PROTOCOL_VERSION) {
+		safe_unpack32(&step_id, buffer);
+		safe_unpack16(&cyclic_alloc, buffer);
+		safe_unpack16(&port, buffer);
+		safe_unpack16(&ckpt_interval, buffer);
+		safe_unpack16(&cpus_per_task, buffer);
+		safe_unpack16(&resv_port_cnt, buffer);
+
+		safe_unpack8(&no_kill, buffer);
+
+		safe_unpack32(&cpu_count, buffer);
+		safe_unpack32(&mem_per_cpu, buffer);
+		safe_unpack32(&exit_code, buffer);
+		if (exit_code != NO_VAL) {
+			safe_unpackstr_xmalloc(&bit_fmt, &name_len, buffer);
+			safe_unpack16(&bit_cnt, buffer);
+		}
+		safe_unpack32(&core_size, buffer);
+		if (core_size)
+			safe_unpackstr_xmalloc(&core_job, &name_len, buffer);
+
+		safe_unpack32(&time_limit, buffer);
+		safe_unpack_time(&start_time, buffer);
+		safe_unpack_time(&pre_sus_time, buffer);
+		safe_unpack_time(&tot_sus_time, buffer);
+		safe_unpack_time(&ckpt_time, buffer);
+
+		safe_unpackstr_xmalloc(&host, &name_len, buffer);
+		safe_unpackstr_xmalloc(&resv_ports, &name_len, buffer);
+		safe_unpackstr_xmalloc(&name, &name_len, buffer);
+		safe_unpackstr_xmalloc(&network, &name_len, buffer);
+		safe_unpackstr_xmalloc(&ckpt_dir, &name_len, buffer);
+
+		safe_unpackstr_xmalloc(&gres, &name_len, buffer);
+		if (gres_plugin_step_state_unpack(&gres_list, buffer,
+						  job_ptr->job_id, step_id,
+						  protocol_version)
+		    != SLURM_SUCCESS)
+			goto unpack_error;
+
+		safe_unpack16(&batch_step, buffer);
+		if (!batch_step) {
+			if (unpack_slurm_step_layout(&step_layout, buffer,
+						     protocol_version))
+				goto unpack_error;
+			switch_alloc_jobinfo(&switch_tmp);
+			if (switch_unpack_jobinfo(switch_tmp, buffer))
+				goto unpack_error;
+		}
+		checkpoint_alloc_jobinfo(&check_tmp);
+		if (checkpoint_unpack_jobinfo(check_tmp, buffer,
+					      protocol_version))
+			goto unpack_error;
+
+		if (select_g_select_jobinfo_unpack(&select_jobinfo, buffer,
+						   protocol_version))
+			goto unpack_error;
+		/* validity test as possible */
+		if (cyclic_alloc > 1) {
+			error("Invalid data for job %u.%u: cyclic_alloc=%u",
+			      job_ptr->job_id, step_id, cyclic_alloc);
+			goto unpack_error;
+		}
+		if (no_kill > 1) {
+			error("Invalid data for job %u.%u: no_kill=%u",
+			      job_ptr->job_id, step_id, no_kill);
+			goto unpack_error;
+		}
+	} else if (protocol_version >= SLURM_2_2_PROTOCOL_VERSION) {
 		safe_unpack32(&step_id, buffer);
 		safe_unpack16(&cyclic_alloc, buffer);
 		safe_unpack16(&port, buffer);
@@ -2739,6 +3156,7 @@ extern int load_step_state(struct job_record *job_ptr, Buf buffer,
 			goto unpack_error;
 		}
 	}
+
 	step_ptr = find_step_record(job_ptr, step_id);
 	if (step_ptr == NULL)
 		step_ptr = _create_step_record(job_ptr);
@@ -2770,6 +3188,11 @@ extern int load_step_state(struct job_record *job_ptr, Buf buffer,
 	step_ptr->tot_sus_time = tot_sus_time;
 	step_ptr->ckpt_time    = ckpt_time;
 
+	if (!select_jobinfo)
+		select_jobinfo = select_g_select_jobinfo_alloc();
+	step_ptr->select_jobinfo = select_jobinfo;
+	select_jobinfo = NULL;
+
 	slurm_step_layout_destroy(step_ptr->step_layout);
 	step_ptr->step_layout  = step_layout;
 
@@ -2808,7 +3231,7 @@ extern int load_step_state(struct job_record *job_ptr, Buf buffer,
 	info("recovered job step %u.%u", job_ptr->job_id, step_id);
 	return SLURM_SUCCESS;
 
-      unpack_error:
+unpack_error:
 	xfree(host);
 	xfree(resv_ports);
 	xfree(name);
@@ -2822,6 +3245,7 @@ extern int load_step_state(struct job_record *job_ptr, Buf buffer,
 	if (switch_tmp)
 		switch_free_jobinfo(switch_tmp);
 	slurm_step_layout_destroy(step_layout);
+	select_g_select_jobinfo_free(select_jobinfo);
 	return SLURM_FAILURE;
 }
 
@@ -2921,7 +3345,9 @@ extern void step_checkpoint(void)
 static void _signal_step_timelimit(struct job_record *job_ptr,
 				   struct step_record *step_ptr, time_t now)
 {
+#ifndef HAVE_FRONT_END
 	int i;
+#endif
 	kill_job_msg_t *kill_step;
 	agent_arg_t *agent_args = NULL;
 
@@ -2930,6 +3356,8 @@ static void _signal_step_timelimit(struct job_record *job_ptr,
 	agent_args->msg_type = REQUEST_KILL_TIMELIMIT;
 	agent_args->retry = 1;
 	agent_args->hostlist = hostlist_create("");
+	if (agent_args->hostlist == NULL)
+		fatal("hostlist_create: malloc failure");
 	kill_step = xmalloc(sizeof(kill_job_msg_t));
 	kill_step->job_id    = job_ptr->job_id;
 	kill_step->step_id   = step_ptr->step_id;
@@ -2941,16 +3369,19 @@ static void _signal_step_timelimit(struct job_record *job_ptr,
 	kill_step->select_jobinfo = select_g_select_jobinfo_copy(
 			job_ptr->select_jobinfo);
 
+#ifdef HAVE_FRONT_END
+	xassert(job_ptr->batch_host);
+	hostlist_push(agent_args->hostlist, job_ptr->batch_host);
+	agent_args->node_count++;
+#else
 	for (i = 0; i < node_record_count; i++) {
 		if (bit_test(step_ptr->step_node_bitmap, i) == 0)
 			continue;
 		hostlist_push(agent_args->hostlist,
 			node_record_table_ptr[i].name);
 		agent_args->node_count++;
-#ifdef HAVE_FRONT_END		/* Operate only on front-end */
-		break;
-#endif
 	}
+#endif
 
 	if (agent_args->node_count == 0) {
 		hostlist_destroy(agent_args->hostlist);
@@ -3042,7 +3473,9 @@ extern int update_step(step_update_request_msg_t *req, uid_t uid)
 	/* No need to limit step time limit as job time limit will kill
 	 * any steps with any time limit */
 	if (req->step_id == NO_VAL) {
-		step_iterator = list_iterator_create (job_ptr->step_list);
+		step_iterator = list_iterator_create(job_ptr->step_list);
+		if (step_iterator == NULL)
+			fatal("list_iterator_create: malloc failure");
 		while ((step_ptr = (struct step_record *)
 				   list_next (step_iterator))) {
 			step_ptr->time_limit = req->time_limit;
@@ -3066,3 +3499,86 @@ extern int update_step(step_update_request_msg_t *req, uid_t uid)
 
 	return SLURM_SUCCESS;
 }
+
+/* Return the total core count on a given node index */
+static int _get_node_cores(int node_inx)
+{
+	struct node_record *node_ptr;
+	int socks, cores;
+
+	node_ptr = node_record_table_ptr + node_inx;
+	if (slurmctld_conf.fast_schedule) {
+		socks = node_ptr->config_ptr->sockets;
+		cores = node_ptr->config_ptr->cores;
+	} else {
+		socks = node_ptr->sockets;
+		cores = node_ptr->cores;
+	}
+	return socks * cores;
+}
+
+/*
+ * Rebuild a job step's core_bitmap_job after a job has just changed size
+ * job_ptr IN - job that was just re-sized
+ * orig_job_node_bitmap IN - The job's original node bitmap
+ */
+extern void rebuild_step_bitmaps(struct job_record *job_ptr,
+				 bitstr_t *orig_job_node_bitmap)
+{
+	struct step_record *step_ptr;
+	ListIterator step_iterator;
+	bitstr_t *orig_step_core_bitmap;
+	int i, j, i_first, i_last, i_size;
+	int old_core_offset, new_core_offset, node_core_count;
+	bool old_node_set, new_node_set;
+
+	if (job_ptr->step_list == NULL)
+		return;
+
+	step_iterator = list_iterator_create(job_ptr->step_list);
+	if (step_iterator == NULL)
+		fatal("list_iterator_create: malloc failure");
+	while ((step_ptr = (struct step_record *)
+			   list_next (step_iterator))) {
+		gres_plugin_step_state_rebase(step_ptr->gres_list,
+					orig_job_node_bitmap,
+					job_ptr->job_resrcs->node_bitmap);
+		if (step_ptr->core_bitmap_job == NULL)
+			continue;
+		orig_step_core_bitmap = step_ptr->core_bitmap_job;
+		i_size = bit_size(job_ptr->job_resrcs->core_bitmap);
+		step_ptr->core_bitmap_job = bit_alloc(i_size);
+		old_core_offset = 0;
+		new_core_offset = 0;
+		i_first = MIN(bit_ffs(orig_job_node_bitmap),
+			      bit_ffs(job_ptr->job_resrcs->node_bitmap));
+		i_last  = MAX(bit_fls(orig_job_node_bitmap),
+			      bit_fls(job_ptr->job_resrcs->node_bitmap));
+		for (i = i_first; i <= i_last; i++) {
+			old_node_set = bit_test(orig_job_node_bitmap, i);
+			new_node_set = bit_test(job_ptr->job_resrcs->
+						node_bitmap, i);
+			if (!old_node_set && !new_node_set)
+				continue;
+			node_core_count = _get_node_cores(i);
+			if (old_node_set && new_node_set) {
+				for (j = 0; j < node_core_count; j++) {
+					if (!bit_test(orig_step_core_bitmap,
+						      old_core_offset + j))
+						continue;
+					bit_set(step_ptr->core_bitmap_job,
+						new_core_offset + j);
+					bit_set(job_ptr->job_resrcs->
+						core_bitmap_used,
+						new_core_offset + j);
+				}
+			}
+			if (old_node_set)
+				old_core_offset += node_core_count;
+			if (new_node_set)
+				new_core_offset += node_core_count;
+		}
+		bit_free(orig_step_core_bitmap);
+	}
+	list_iterator_destroy (step_iterator);
+}
diff --git a/src/slurmctld/trigger_mgr.c b/src/slurmctld/trigger_mgr.c
index 7b79314b6..aa31de005 100644
--- a/src/slurmctld/trigger_mgr.c
+++ b/src/slurmctld/trigger_mgr.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -75,6 +75,8 @@
 List trigger_list;
 uint32_t next_trigger_id = 1;
 static pthread_mutex_t trigger_mutex = PTHREAD_MUTEX_INITIALIZER;
+bitstr_t *trigger_down_front_end_bitmap = NULL;
+bitstr_t *trigger_up_front_end_bitmap = NULL;
 bitstr_t *trigger_down_nodes_bitmap = NULL;
 bitstr_t *trigger_drained_nodes_bitmap = NULL;
 bitstr_t *trigger_fail_nodes_bitmap = NULL;
@@ -193,7 +195,7 @@ static bool _validate_trigger(trig_mgr_info_t *trig_in)
 
 extern int trigger_pull(trigger_info_msg_t *msg)
 {
-	int rc = ESRCH;
+	int rc = SLURM_SUCCESS;
 	ListIterator trig_iter;
 	trigger_info_t *trig_in;
 	trig_mgr_info_t *trig_test;
@@ -253,7 +255,6 @@ extern int trigger_pull(trigger_info_msg_t *msg)
 				break;
 			}
 		}
-		rc = SLURM_SUCCESS;
 	}
 	list_iterator_destroy(trig_iter);
 
@@ -449,6 +450,28 @@ fini:	slurm_mutex_unlock(&trigger_mutex);
 	return rc;
 }
 
+extern void trigger_front_end_down(front_end_record_t *front_end_ptr)
+{
+	int inx = front_end_ptr - front_end_nodes;
+
+	slurm_mutex_lock(&trigger_mutex);
+	if (trigger_down_front_end_bitmap == NULL)
+		trigger_down_front_end_bitmap = bit_alloc(front_end_node_cnt);
+	bit_set(trigger_down_front_end_bitmap, inx);
+	slurm_mutex_unlock(&trigger_mutex);
+}
+
+extern void trigger_front_end_up(front_end_record_t *front_end_ptr)
+{
+	int inx = front_end_ptr - front_end_nodes;
+
+	slurm_mutex_lock(&trigger_mutex);
+	if (trigger_up_front_end_bitmap == NULL)
+		trigger_up_front_end_bitmap = bit_alloc(front_end_node_cnt);
+	bit_set(trigger_up_front_end_bitmap, inx);
+	slurm_mutex_unlock(&trigger_mutex);
+}
+
 extern void trigger_node_down(struct node_record *node_ptr)
 {
 	int inx = node_ptr - node_record_table_ptr;
@@ -511,7 +534,7 @@ extern void trigger_primary_ctld_fail(void)
 }
 
 extern void trigger_primary_ctld_res_op(void)
-{	
+{
 	slurm_mutex_lock(&trigger_mutex);
 	trigger_pri_ctld_res_op = true;
 	ctld_failure = 0;
@@ -560,7 +583,7 @@ extern void trigger_backup_ctld_as_ctrl(void)
 extern void trigger_primary_dbd_fail(void)
 {
 	slurm_mutex_lock(&trigger_mutex);
-	if (dbd_failure != 1) {	
+	if (dbd_failure != 1) {
 		trigger_pri_dbd_fail = true;
 		dbd_failure = 1;
 	}
@@ -578,7 +601,7 @@ extern void trigger_primary_dbd_res_op(void)
 extern void trigger_primary_db_fail(void)
 {
 	slurm_mutex_lock(&trigger_mutex);
-	if (db_failure != 1) {	
+	if (db_failure != 1) {
 		trigger_pri_db_fail = true;
 		db_failure = 1;
 	}
@@ -899,6 +922,25 @@ fini:	verbose("State of %d triggers recovered", trigger_cnt);
 	return SLURM_FAILURE;
 }
 
+static bool _front_end_job_test(bitstr_t *front_end_bitmap,
+				struct job_record *job_ptr)
+{
+#ifdef HAVE_FRONT_END
+	int i;
+
+	if ((front_end_bitmap == NULL) || (job_ptr->batch_host == NULL))
+		return false;
+
+	for (i = 0; i < front_end_node_cnt; i++) {
+		if (bit_test(front_end_bitmap, i) &&
+		    !strcmp(front_end_nodes[i].name, job_ptr->batch_host)) {
+			return true;
+		}
+	}
+#endif
+	return false;
+}
+
 /* Test if the event has been triggered, change trigger state as needed */
 static void _trigger_job_event(trig_mgr_info_t *trig_in, time_t now)
 {
@@ -943,6 +985,20 @@ static void _trigger_job_event(trig_mgr_info_t *trig_in, time_t now)
 		}
 	}
 
+	if (trig_in->trig_type & TRIGGER_TYPE_DOWN) {
+		if (_front_end_job_test(trigger_down_front_end_bitmap,
+					trig_in->job_ptr)) {
+			if (slurm_get_debug_flags() & DEBUG_FLAG_TRIGGERS) {
+				info("trigger[%u] for job %u down",
+				     trig_in->trig_id, trig_in->job_id);
+			}
+			trig_in->state = 1;
+			trig_in->trig_time = now +
+					    (trig_in->trig_time - 0x8000);
+			return;
+		}
+	}
+
 	if (trig_in->trig_type & TRIGGER_TYPE_DOWN) {
 		if (trigger_down_nodes_bitmap &&
 		    bit_overlap(trig_in->job_ptr->node_bitmap,
@@ -989,6 +1045,52 @@ static void _trigger_job_event(trig_mgr_info_t *trig_in, time_t now)
 	}
 }
 
+
+static void _trigger_front_end_event(trig_mgr_info_t *trig_in, time_t now)
+{
+	int i;
+
+	if ((trig_in->trig_type & TRIGGER_TYPE_DOWN) &&
+	    (trigger_down_front_end_bitmap != NULL) &&
+	    ((i = bit_ffs(trigger_down_front_end_bitmap)) != -1)) {
+		xfree(trig_in->res_id);
+		for (i = 0; i < front_end_node_cnt; i++) {
+			if (!bit_test(trigger_down_front_end_bitmap, i))
+				continue;
+			if (trig_in->res_id != NULL)
+				xstrcat(trig_in->res_id, ",");
+			xstrcat(trig_in->res_id, front_end_nodes[i].name);
+		}
+		trig_in->state = 1;
+		trig_in->trig_time = now + (trig_in->trig_time - 0x8000);
+		if (slurm_get_debug_flags() & DEBUG_FLAG_TRIGGERS) {
+			info("trigger[%u] for node %s down",
+			     trig_in->trig_id, trig_in->res_id);
+		}
+		return;
+	}
+
+	if ((trig_in->trig_type & TRIGGER_TYPE_UP) &&
+	    (trigger_up_front_end_bitmap != NULL) &&
+	    ((i = bit_ffs(trigger_up_front_end_bitmap)) != -1)) {
+		xfree(trig_in->res_id);
+		for (i = 0; i < front_end_node_cnt; i++) {
+			if (!bit_test(trigger_up_front_end_bitmap, i))
+				continue;
+			if (trig_in->res_id != NULL)
+				xstrcat(trig_in->res_id, ",");
+			xstrcat(trig_in->res_id, front_end_nodes[i].name);
+		}
+		trig_in->state = 1;
+		trig_in->trig_time = now + (trig_in->trig_time - 0x8000);
+		if (slurm_get_debug_flags() & DEBUG_FLAG_TRIGGERS) {
+			info("trigger[%u] for node %s up",
+			     trig_in->trig_id, trig_in->res_id);
+		}
+		return;
+	}
+}
+
 static void _trigger_node_event(trig_mgr_info_t *trig_in, time_t now)
 {
 	if ((trig_in->trig_type & TRIGGER_TYPE_BLOCK_ERR) &&
@@ -1001,7 +1103,7 @@ static void _trigger_node_event(trig_mgr_info_t *trig_in, time_t now)
 	}
 
 	if ((trig_in->trig_type & TRIGGER_TYPE_DOWN) &&
-	    trigger_down_nodes_bitmap               &&
+	    trigger_down_nodes_bitmap                &&
 	    (bit_ffs(trigger_down_nodes_bitmap) != -1)) {
 		if (trig_in->nodes_bitmap == NULL) {	/* all nodes */
 			xfree(trig_in->res_id);
@@ -1029,7 +1131,7 @@ static void _trigger_node_event(trig_mgr_info_t *trig_in, time_t now)
 	}
 
 	if ((trig_in->trig_type & TRIGGER_TYPE_DRAINED) &&
-	    trigger_drained_nodes_bitmap               &&
+	    trigger_drained_nodes_bitmap                &&
 	    (bit_ffs(trigger_drained_nodes_bitmap) != -1)) {
 		if (trig_in->nodes_bitmap == NULL) {	/* all nodes */
 			xfree(trig_in->res_id);
@@ -1057,7 +1159,7 @@ static void _trigger_node_event(trig_mgr_info_t *trig_in, time_t now)
 	}
 
 	if ((trig_in->trig_type & TRIGGER_TYPE_FAIL) &&
-	    trigger_fail_nodes_bitmap               &&
+	    trigger_fail_nodes_bitmap                &&
 	    (bit_ffs(trigger_fail_nodes_bitmap) != -1)) {
 		if (trig_in->nodes_bitmap == NULL) {	/* all nodes */
 			xfree(trig_in->res_id);
@@ -1125,7 +1227,7 @@ static void _trigger_node_event(trig_mgr_info_t *trig_in, time_t now)
 	}
 
 	if ((trig_in->trig_type & TRIGGER_TYPE_UP) &&
-	    trigger_up_nodes_bitmap               &&
+	    trigger_up_nodes_bitmap                &&
 	    (bit_ffs(trigger_up_nodes_bitmap) != -1)) {
 		if (trig_in->nodes_bitmap == NULL) {	/* all nodes */
 			xfree(trig_in->res_id);
@@ -1372,6 +1474,14 @@ static void _trigger_run_program(trig_mgr_info_t *trig_in)
 
 static void _clear_event_triggers(void)
 {
+	if (trigger_down_front_end_bitmap) {
+		bit_nclear(trigger_down_front_end_bitmap,
+			   0, (bit_size(trigger_down_front_end_bitmap) - 1));
+	}
+	if (trigger_up_front_end_bitmap) {
+		bit_nclear(trigger_up_front_end_bitmap,
+			   0, (bit_size(trigger_up_front_end_bitmap) - 1));
+	}
 	if (trigger_down_nodes_bitmap) {
 		bit_nclear(trigger_down_nodes_bitmap,
 			   0, (bit_size(trigger_down_nodes_bitmap) - 1));
@@ -1425,8 +1535,12 @@ extern void trigger_process(void)
 			else if (trig_in->res_type ==
 				 TRIGGER_RES_TYPE_SLURMDBD)
 				_trigger_slurmdbd_event(trig_in, now);
-			else /* TRIGGER_RES_TYPE_DATABASE */
+			else if (trig_in->res_type ==
+				 TRIGGER_RES_TYPE_DATABASE)
 			 	_trigger_database_event(trig_in, now);
+			else if (trig_in->res_type ==
+				 TRIGGER_RES_TYPE_FRONT_END)
+			 	_trigger_front_end_event(trig_in, now);
 		}
 		if ((trig_in->state == 1) &&
 		    (trig_in->trig_time <= now)) {
@@ -1502,6 +1616,8 @@ extern void trigger_fini(void)
 		list_destroy(trigger_list);
 		trigger_list = NULL;
 	}
+	FREE_NULL_BITMAP(trigger_down_front_end_bitmap);
+	FREE_NULL_BITMAP(trigger_up_front_end_bitmap);
 	FREE_NULL_BITMAP(trigger_down_nodes_bitmap);
 	FREE_NULL_BITMAP(trigger_drained_nodes_bitmap);
 	FREE_NULL_BITMAP(trigger_fail_nodes_bitmap);
diff --git a/src/slurmctld/trigger_mgr.h b/src/slurmctld/trigger_mgr.h
index 3087367e2..bb4eb6dec 100644
--- a/src/slurmctld/trigger_mgr.h
+++ b/src/slurmctld/trigger_mgr.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -42,7 +42,8 @@
 
 #include <unistd.h>
 #include <sys/types.h>
-#include <src/common/slurm_protocol_defs.h>
+#include "src/common/slurm_protocol_defs.h"
+#include "src/slurmctld/slurmctld.h"
 
 
 /* User RPC processing to set, get, clear, and pull triggers */
@@ -53,6 +54,8 @@ extern int trigger_pull(trigger_info_msg_t *msg);
 
 /* Note the some event has occured and flag triggers as needed */
 extern void trigger_block_error(void);
+extern void trigger_front_end_down(front_end_record_t *front_end_ptr);
+extern void trigger_front_end_up(front_end_record_t *front_end_ptr);
 extern void trigger_node_down(struct node_record *node_ptr);
 extern void trigger_node_drained(struct node_record *node_ptr);
 extern void trigger_node_failing(struct node_record *node_ptr);
diff --git a/src/slurmd/Makefile.am b/src/slurmd/Makefile.am
index 89380c871..ca7d43aa3 100644
--- a/src/slurmd/Makefile.am
+++ b/src/slurmd/Makefile.am
@@ -1,2 +1,2 @@
 
-SUBDIRS = slurmd slurmstepd
+SUBDIRS = common slurmd slurmstepd
diff --git a/src/slurmd/Makefile.in b/src/slurmd/Makefile.in
index 896b7dbb7..afc45a935 100644
--- a/src/slurmd/Makefile.in
+++ b/src/slurmd/Makefile.in
@@ -60,6 +60,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -70,6 +71,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -131,7 +133,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -168,6 +173,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -225,6 +231,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -260,6 +267,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
@@ -312,7 +320,7 @@ target_vendor = @target_vendor@
 top_build_prefix = @top_build_prefix@
 top_builddir = @top_builddir@
 top_srcdir = @top_srcdir@
-SUBDIRS = slurmd slurmstepd
+SUBDIRS = common slurmd slurmstepd
 all: all-recursive
 
 .SUFFIXES:
diff --git a/src/slurmd/common/Makefile.am b/src/slurmd/common/Makefile.am
new file mode 100644
index 000000000..dda570360
--- /dev/null
+++ b/src/slurmd/common/Makefile.am
@@ -0,0 +1,18 @@
+# Makefile.am for slurmd/common
+
+AUTOMAKE_OPTIONS = foreign
+CLEANFILES = core.*
+
+INCLUDES = -I$(top_srcdir)
+
+# making a .la
+
+noinst_LTLIBRARIES = libslurmd_common.la
+libslurmd_common_la_SOURCES =    \
+	proctrack.c proctrack.h \
+	setproctitle.c setproctitle.h \
+	slurmstepd_init.c slurmstepd_init.h \
+	run_script.c run_script.h \
+	task_plugin.c task_plugin.h \
+	set_oomadj.c set_oomadj.h \
+	reverse_tree.h
diff --git a/src/slurmd/common/Makefile.in b/src/slurmd/common/Makefile.in
new file mode 100644
index 000000000..5aff5aa02
--- /dev/null
+++ b/src/slurmd/common/Makefile.in
@@ -0,0 +1,608 @@
+# Makefile.in generated by automake 1.11.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+# 2003, 2004, 2005, 2006, 2007, 2008, 2009  Free Software Foundation,
+# Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# Makefile.am for slurmd/common
+
+VPATH = @srcdir@
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+target_triplet = @target@
+subdir = src/slurmd/common
+DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
+	$(top_srcdir)/auxdir/libtool.m4 \
+	$(top_srcdir)/auxdir/ltoptions.m4 \
+	$(top_srcdir)/auxdir/ltsugar.m4 \
+	$(top_srcdir)/auxdir/ltversion.m4 \
+	$(top_srcdir)/auxdir/lt~obsolete.m4 \
+	$(top_srcdir)/auxdir/slurm.m4 \
+	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
+	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
+	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
+	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
+	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_databases.m4 \
+	$(top_srcdir)/auxdir/x_ac_debug.m4 \
+	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
+	$(top_srcdir)/auxdir/x_ac_federation.m4 \
+	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
+	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
+	$(top_srcdir)/auxdir/x_ac_munge.m4 \
+	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_pam.m4 \
+	$(top_srcdir)/auxdir/x_ac_printf_null.m4 \
+	$(top_srcdir)/auxdir/x_ac_ptrace.m4 \
+	$(top_srcdir)/auxdir/x_ac_readline.m4 \
+	$(top_srcdir)/auxdir/x_ac_setpgrp.m4 \
+	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
+	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
+	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
+	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
+	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+	$(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+LTLIBRARIES = $(noinst_LTLIBRARIES)
+libslurmd_common_la_LIBADD =
+am_libslurmd_common_la_OBJECTS = proctrack.lo setproctitle.lo \
+	slurmstepd_init.lo run_script.lo task_plugin.lo set_oomadj.lo
+libslurmd_common_la_OBJECTS = $(am_libslurmd_common_la_OBJECTS)
+DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm
+depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp
+am__depfiles_maybe = depfiles
+am__mv = mv -f
+COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
+	$(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+	--mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+	$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+CCLD = $(CC)
+LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+	--mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \
+	$(LDFLAGS) -o $@
+SOURCES = $(libslurmd_common_la_SOURCES)
+DIST_SOURCES = $(libslurmd_common_la_SOURCES)
+ETAGS = etags
+CTAGS = ctags
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AR = @AR@
+AUTHD_CFLAGS = @AUTHD_CFLAGS@
+AUTHD_LIBS = @AUTHD_LIBS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
+BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
+BLUEGENE_LOADED = @BLUEGENE_LOADED@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CMD_LDFLAGS = @CMD_LDFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DSYMUTIL = @DSYMUTIL@
+DUMPBIN = @DUMPBIN@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+ELAN_LIBS = @ELAN_LIBS@
+EXEEXT = @EXEEXT@
+FEDERATION_LDFLAGS = @FEDERATION_LDFLAGS@
+FGREP = @FGREP@
+GREP = @GREP@
+GTK_CFLAGS = @GTK_CFLAGS@
+GTK_LIBS = @GTK_LIBS@
+HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@
+HAVEPGCONFIG = @HAVEPGCONFIG@
+HAVE_AIX = @HAVE_AIX@
+HAVE_ELAN = @HAVE_ELAN@
+HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
+HAVE_OPENSSL = @HAVE_OPENSSL@
+HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
+HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
+HWLOC_LDFLAGS = @HWLOC_LDFLAGS@
+HWLOC_LIBS = @HWLOC_LIBS@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+LD = @LD@
+LDFLAGS = @LDFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIB_LDFLAGS = @LIB_LDFLAGS@
+LIPO = @LIPO@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MKDIR_P = @MKDIR_P@
+MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@
+MUNGE_LDFLAGS = @MUNGE_LDFLAGS@
+MUNGE_LIBS = @MUNGE_LIBS@
+MYSQL_CFLAGS = @MYSQL_CFLAGS@
+MYSQL_LIBS = @MYSQL_LIBS@
+NCURSES = @NCURSES@
+NM = @NM@
+NMEDIT = @NMEDIT@
+NUMA_LIBS = @NUMA_LIBS@
+OBJDUMP = @OBJDUMP@
+OBJEXT = @OBJEXT@
+OTOOL = @OTOOL@
+OTOOL64 = @OTOOL64@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PAM_DIR = @PAM_DIR@
+PAM_LIBS = @PAM_LIBS@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PGSQL_CFLAGS = @PGSQL_CFLAGS@
+PGSQL_LIBS = @PGSQL_LIBS@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROCTRACKDIR = @PROCTRACKDIR@
+PROJECT = @PROJECT@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+RANLIB = @RANLIB@
+READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
+RELEASE = @RELEASE@
+SED = @SED@
+SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
+SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SLURMCTLD_PORT = @SLURMCTLD_PORT@
+SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
+SLURMDBD_PORT = @SLURMDBD_PORT@
+SLURMD_PORT = @SLURMD_PORT@
+SLURM_API_AGE = @SLURM_API_AGE@
+SLURM_API_CURRENT = @SLURM_API_CURRENT@
+SLURM_API_MAJOR = @SLURM_API_MAJOR@
+SLURM_API_REVISION = @SLURM_API_REVISION@
+SLURM_API_VERSION = @SLURM_API_VERSION@
+SLURM_MAJOR = @SLURM_MAJOR@
+SLURM_MICRO = @SLURM_MICRO@
+SLURM_MINOR = @SLURM_MINOR@
+SLURM_PREFIX = @SLURM_PREFIX@
+SLURM_VERSION_NUMBER = @SLURM_VERSION_NUMBER@
+SLURM_VERSION_STRING = @SLURM_VERSION_STRING@
+SO_LDFLAGS = @SO_LDFLAGS@
+SSL_CPPFLAGS = @SSL_CPPFLAGS@
+SSL_LDFLAGS = @SSL_LDFLAGS@
+SSL_LIBS = @SSL_LIBS@
+STRIP = @STRIP@
+UTIL_LIBS = @UTIL_LIBS@
+VERSION = @VERSION@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+lt_ECHO = @lt_ECHO@
+lua_CFLAGS = @lua_CFLAGS@
+lua_LIBS = @lua_LIBS@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target = @target@
+target_alias = @target_alias@
+target_cpu = @target_cpu@
+target_os = @target_os@
+target_vendor = @target_vendor@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+AUTOMAKE_OPTIONS = foreign
+CLEANFILES = core.*
+INCLUDES = -I$(top_srcdir)
+
+# making a .la
+noinst_LTLIBRARIES = libslurmd_common.la
+libslurmd_common_la_SOURCES = \
+	proctrack.c proctrack.h \
+	setproctitle.c setproctitle.h \
+	slurmstepd_init.c slurmstepd_init.h \
+	run_script.c run_script.h \
+	task_plugin.c task_plugin.h \
+	set_oomadj.c set_oomadj.h \
+	reverse_tree.h
+
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .c .lo .o .obj
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am  $(am__configure_deps)
+	@for dep in $?; do \
+	  case '$(am__configure_deps)' in \
+	    *$$dep*) \
+	      ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+	        && { if test -f $@; then exit 0; else break; fi; }; \
+	      exit 1;; \
+	  esac; \
+	done; \
+	echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/slurmd/common/Makefile'; \
+	$(am__cd) $(top_srcdir) && \
+	  $(AUTOMAKE) --foreign src/slurmd/common/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+	@case '$?' in \
+	  *config.status*) \
+	    cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+	  *) \
+	    echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+	    cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+	esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+clean-noinstLTLIBRARIES:
+	-test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES)
+	@list='$(noinst_LTLIBRARIES)'; for p in $$list; do \
+	  dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \
+	  test "$$dir" != "$$p" || dir=.; \
+	  echo "rm -f \"$${dir}/so_locations\""; \
+	  rm -f "$${dir}/so_locations"; \
+	done
+libslurmd_common.la: $(libslurmd_common_la_OBJECTS) $(libslurmd_common_la_DEPENDENCIES) 
+	$(LINK)  $(libslurmd_common_la_OBJECTS) $(libslurmd_common_la_LIBADD) $(LIBS)
+
+mostlyclean-compile:
+	-rm -f *.$(OBJEXT)
+
+distclean-compile:
+	-rm -f *.tab.c
+
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/proctrack.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/run_script.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/set_oomadj.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/setproctitle.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/slurmstepd_init.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/task_plugin.Plo@am__quote@
+
+.c.o:
+@am__fastdepCC_TRUE@	$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(COMPILE) -c $<
+
+.c.obj:
+@am__fastdepCC_TRUE@	$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(COMPILE) -c `$(CYGPATH_W) '$<'`
+
+.c.lo:
+@am__fastdepCC_TRUE@	$(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LTCOMPILE) -c -o $@ $<
+
+mostlyclean-libtool:
+	-rm -f *.lo
+
+clean-libtool:
+	-rm -rf .libs _libs
+
+ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
+	list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+	unique=`for i in $$list; do \
+	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+	  done | \
+	  $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+	      END { if (nonempty) { for (i in files) print i; }; }'`; \
+	mkid -fID $$unique
+tags: TAGS
+
+TAGS:  $(HEADERS) $(SOURCES)  $(TAGS_DEPENDENCIES) \
+		$(TAGS_FILES) $(LISP)
+	set x; \
+	here=`pwd`; \
+	list='$(SOURCES) $(HEADERS)  $(LISP) $(TAGS_FILES)'; \
+	unique=`for i in $$list; do \
+	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+	  done | \
+	  $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+	      END { if (nonempty) { for (i in files) print i; }; }'`; \
+	shift; \
+	if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+	  test -n "$$unique" || unique=$$empty_fix; \
+	  if test $$# -gt 0; then \
+	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	      "$$@" $$unique; \
+	  else \
+	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	      $$unique; \
+	  fi; \
+	fi
+ctags: CTAGS
+CTAGS:  $(HEADERS) $(SOURCES)  $(TAGS_DEPENDENCIES) \
+		$(TAGS_FILES) $(LISP)
+	list='$(SOURCES) $(HEADERS)  $(LISP) $(TAGS_FILES)'; \
+	unique=`for i in $$list; do \
+	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+	  done | \
+	  $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+	      END { if (nonempty) { for (i in files) print i; }; }'`; \
+	test -z "$(CTAGS_ARGS)$$unique" \
+	  || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+	     $$unique
+
+GTAGS:
+	here=`$(am__cd) $(top_builddir) && pwd` \
+	  && $(am__cd) $(top_srcdir) \
+	  && gtags -i $(GTAGS_ARGS) "$$here"
+
+distclean-tags:
+	-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+	@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	list='$(DISTFILES)'; \
+	  dist_files=`for file in $$list; do echo $$file; done | \
+	  sed -e "s|^$$srcdirstrip/||;t" \
+	      -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+	case $$dist_files in \
+	  */*) $(MKDIR_P) `echo "$$dist_files" | \
+			   sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+			   sort -u` ;; \
+	esac; \
+	for file in $$dist_files; do \
+	  if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+	  if test -d $$d/$$file; then \
+	    dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+	    if test -d "$(distdir)/$$file"; then \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+	      cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+	  else \
+	    test -f "$(distdir)/$$file" \
+	    || cp -p $$d/$$file "$(distdir)/$$file" \
+	    || exit 1; \
+	  fi; \
+	done
+check-am: all-am
+check: check-am
+all-am: Makefile $(LTLIBRARIES)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+	$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	  install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	  `test -z '$(STRIP)' || \
+	    echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
+mostlyclean-generic:
+
+clean-generic:
+	-test -z "$(CLEANFILES)" || rm -f $(CLEANFILES)
+
+distclean-generic:
+	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+	-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+	@echo "This command is intended for maintainers to use"
+	@echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \
+	mostlyclean-am
+
+distclean: distclean-am
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+distclean-am: clean-am distclean-compile distclean-generic \
+	distclean-tags
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-compile mostlyclean-generic \
+	mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \
+	clean-libtool clean-noinstLTLIBRARIES ctags distclean \
+	distclean-compile distclean-generic distclean-libtool \
+	distclean-tags distdir dvi dvi-am html html-am info info-am \
+	install install-am install-data install-data-am install-dvi \
+	install-dvi-am install-exec install-exec-am install-html \
+	install-html-am install-info install-info-am install-man \
+	install-pdf install-pdf-am install-ps install-ps-am \
+	install-strip installcheck installcheck-am installdirs \
+	maintainer-clean maintainer-clean-generic mostlyclean \
+	mostlyclean-compile mostlyclean-generic mostlyclean-libtool \
+	pdf pdf-am ps ps-am tags uninstall uninstall-am
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/src/slurmd/common/proctrack.c b/src/slurmd/common/proctrack.c
index 4d3209b53..5e007f32d 100644
--- a/src/slurmd/common/proctrack.c
+++ b/src/slurmd/common/proctrack.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -51,12 +51,12 @@
 typedef struct slurm_proctrack_ops {
 	int              (*create)    (slurmd_job_t * job);
 	int              (*add)       (slurmd_job_t * job, pid_t pid);
-	int              (*signal)    (uint32_t id, int signal);
-	int              (*destroy)   (uint32_t id);
-	 uint32_t        (*find_cont) (pid_t pid);
-	 bool            (*has_pid)   (uint32_t id, pid_t pid);
-	int              (*wait)      (uint32_t id);
-	int              (*get_pids)  (uint32_t id, pid_t ** pids, int *npids);
+	int              (*signal)    (uint64_t id, int signal);
+	int              (*destroy)   (uint64_t id);
+	uint64_t         (*find_cont) (pid_t pid);
+	bool             (*has_pid)   (uint64_t id, pid_t pid);
+	int              (*wait)      (uint64_t id);
+	int              (*get_pids)  (uint64_t id, pid_t ** pids, int *npids);
 } slurm_proctrack_ops_t;
 
 
@@ -85,14 +85,14 @@ _proctrack_get_ops(slurm_proctrack_context_t * c)
 	 * Must be synchronized with slurm_proctrack_ops_t above.
 	 */
 	static const char *syms[] = {
-		"slurm_container_create",
-		"slurm_container_add",
-		"slurm_container_signal",
-		"slurm_container_destroy",
-		"slurm_container_find",
-		"slurm_container_has_pid",
-		"slurm_container_wait",
-		"slurm_container_get_pids"
+		"slurm_container_plugin_create",
+		"slurm_container_plugin_add",
+		"slurm_container_plugin_signal",
+		"slurm_container_plugin_destroy",
+		"slurm_container_plugin_find",
+		"slurm_container_plugin_has_pid",
+		"slurm_container_plugin_wait",
+		"slurm_container_plugin_get_pids"
 	};
 	int n_syms = sizeof(syms) / sizeof(char *);
 
@@ -290,7 +290,7 @@ extern int slurm_container_add(slurmd_job_t * job, pid_t pid)
  *
  * Returns a SLURM errno.
  */
-extern int slurm_container_signal(uint32_t cont_id, int signal)
+extern int slurm_container_signal(uint64_t cont_id, int signal)
 {
 	if (slurm_proctrack_init() < 0) {
 		return SLURM_ERROR;
@@ -304,7 +304,7 @@ extern int slurm_container_signal(uint32_t cont_id, int signal)
  *
  * Returns a SLURM errno.
 */
-extern int slurm_container_destroy(uint32_t cont_id)
+extern int slurm_container_destroy(uint64_t cont_id)
 {
 	if (slurm_proctrack_init() < 0)
 		return SLURM_ERROR;
@@ -317,7 +317,7 @@ extern int slurm_container_destroy(uint32_t cont_id)
  *
  * Returns zero if no container found for the given pid.
  */
-extern uint32_t slurm_container_find(pid_t pid)
+extern uint64_t slurm_container_find(pid_t pid)
 {
 	if (slurm_proctrack_init() < 0)
 		return SLURM_ERROR;
@@ -329,7 +329,7 @@ extern uint32_t slurm_container_find(pid_t pid)
  * Return "true" if the container "cont_id" contains the process with
  * ID "pid".
  */
-extern bool slurm_container_has_pid(uint32_t cont_id, pid_t pid)
+extern bool slurm_container_has_pid(uint64_t cont_id, pid_t pid)
 {
 	if (slurm_proctrack_init() < 0)
 		return SLURM_ERROR;
@@ -347,7 +347,7 @@ extern bool slurm_container_has_pid(uint32_t cont_id, pid_t pid)
  *
  * Return SLURM_SUCCESS or SLURM_ERROR.
  */
-extern int slurm_container_wait(uint32_t cont_id)
+extern int slurm_container_wait(uint64_t cont_id)
 {
 	if (slurm_proctrack_init() < 0)
 		return SLURM_ERROR;
@@ -368,7 +368,7 @@ extern int slurm_container_wait(uint32_t cont_id)
  *   plugin does not implement the call.
  */
 extern int
-slurm_container_get_pids(uint32_t cont_id, pid_t ** pids, int *npids)
+slurm_container_get_pids(uint64_t cont_id, pid_t ** pids, int *npids)
 {
 	if (slurm_proctrack_init() < 0)
 		return SLURM_ERROR;
diff --git a/src/slurmd/common/proctrack.h b/src/slurmd/common/proctrack.h
index 976db3f15..4ca94998e 100644
--- a/src/slurmd/common/proctrack.h
+++ b/src/slurmd/common/proctrack.h
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -39,10 +39,11 @@
 #ifndef __PROC_TRACK_H__
 #define __PROC_TRACK_H__
 
-#include <slurm/slurm.h>
-#include "src/slurmd/slurmstepd/slurmstepd_job.h"
 #include <stdbool.h>
 
+#include "slurm/slurm.h"
+#include "src/slurmd/slurmstepd/slurmstepd_job.h"
+
 /*
  * Initialize the process tracking plugin.
  *
@@ -92,7 +93,7 @@ extern int slurm_container_add(slurmd_job_t *job, pid_t pid);
  *
  * Returns a SLURM errno.
  */
-extern int slurm_container_signal(uint32_t cont_id, int signal);
+extern int slurm_container_signal(uint64_t cont_id, int signal);
 
 
 /* 
@@ -102,20 +103,20 @@ extern int slurm_container_signal(uint32_t cont_id, int signal);
  *
  * Returns a SLURM errno.
  */
-extern int slurm_container_destroy(uint32_t cont_id);
+extern int slurm_container_destroy(uint64_t cont_id);
 
 /*
  * Get container ID for given process ID
  *
  * Returns zero if no container found for the given pid.
  */
-extern uint32_t slurm_container_find(pid_t pid);
+extern uint64_t slurm_container_find(pid_t pid);
 
 /*
  * Return "true" if the container "cont_id" contains the process with
  * ID "pid".
  */
-extern bool slurm_container_has_pid(uint32_t cont_id, pid_t pid);
+extern bool slurm_container_has_pid(uint64_t cont_id, pid_t pid);
 
 /*
  * Wait for all processes within a container to exit.
@@ -127,7 +128,7 @@ extern bool slurm_container_has_pid(uint32_t cont_id, pid_t pid);
  *
  * Return SLURM_SUCCESS or SLURM_ERROR.
  */
-extern int slurm_container_wait(uint32_t cont_id);
+extern int slurm_container_wait(uint64_t cont_id);
 
 /*
  * Get all process IDs within a container.
@@ -141,7 +142,7 @@ extern int slurm_container_wait(uint32_t cont_id);
  *   pids NULL), return SLURM_ERROR if container does not exist, or
  *   plugin does not implement the call.
  */
-extern int slurm_container_get_pids(uint32_t cont_id, pid_t **pids, int *npids);
+extern int slurm_container_get_pids(uint64_t cont_id, pid_t **pids, int *npids);
 
 /* Collect accounting information for all processes within a container */
 
diff --git a/src/slurmd/common/reverse_tree.h b/src/slurmd/common/reverse_tree.h
index 7703c9044..66d6af95b 100644
--- a/src/slurmd/common/reverse_tree.h
+++ b/src/slurmd/common/reverse_tree.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/slurmd/common/run_script.c b/src/slurmd/common/run_script.c
index 4a63fa249..6fc3309d4 100644
--- a/src/slurmd/common/run_script.c
+++ b/src/slurmd/common/run_script.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/slurmd/common/run_script.h b/src/slurmd/common/run_script.h
index dbab911af..82463410e 100644
--- a/src/slurmd/common/run_script.h
+++ b/src/slurmd/common/run_script.h
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/slurmd/common/set_oomadj.c b/src/slurmd/common/set_oomadj.c
index c1e82042b..ef123af62 100644
--- a/src/slurmd/common/set_oomadj.c
+++ b/src/slurmd/common/set_oomadj.c
@@ -6,7 +6,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -48,14 +48,27 @@ extern int set_oom_adj(int adj)
 {
 	int fd;
 	char oom_adj[16];
+	char *oom_adj_file = "/proc/self/oom_score_adj";
 
-	fd = open("/proc/self/oom_adj", O_WRONLY);
+	fd = open(oom_adj_file, O_WRONLY);
 	if (fd < 0) {
-		if (errno == ENOENT)
-			debug("failed to open /proc/self/oom_adj: %m");
-		else
-			error("failed to open /proc/self/oom_adj: %m");
-		return -1;
+		if (errno == ENOENT) {
+			debug("%s not found. Falling back to oom_adj",
+			      oom_adj_file);
+			oom_adj_file = "/proc/self/oom_adj";
+			fd = open(oom_adj_file, O_WRONLY);
+			if (fd < 0) {
+				if (errno == ENOENT)
+					error("%s not found", oom_adj_file);
+				else
+					error("failed to open %s: %m",
+					      oom_adj_file);
+				return -1;
+			}
+		} else {
+			error("failed to open %s: %m", oom_adj_file);
+			return -1;
+		}
 	}
 	if (snprintf(oom_adj, 16, "%d", adj) >= 16) {
 		close(fd);
diff --git a/src/slurmd/common/set_oomadj.h b/src/slurmd/common/set_oomadj.h
index c725900b4..068aeec6c 100644
--- a/src/slurmd/common/set_oomadj.h
+++ b/src/slurmd/common/set_oomadj.h
@@ -6,7 +6,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/slurmd/common/setproctitle.c b/src/slurmd/common/setproctitle.c
index 977744fa7..91a5e8e3c 100644
--- a/src/slurmd/common/setproctitle.c
+++ b/src/slurmd/common/setproctitle.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/slurmd/common/setproctitle.h b/src/slurmd/common/setproctitle.h
index 64e0525af..eda7743f7 100644
--- a/src/slurmd/common/setproctitle.h
+++ b/src/slurmd/common/setproctitle.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/slurmd/common/slurmstepd_init.c b/src/slurmd/common/slurmstepd_init.c
index bcd3590a8..e33b7b28c 100644
--- a/src/slurmd/common/slurmstepd_init.c
+++ b/src/slurmd/common/slurmstepd_init.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -45,6 +45,7 @@ extern void pack_slurmd_conf_lite(slurmd_conf_t *conf, Buf buffer)
 	pack16(conf->sockets, buffer);
 	pack16(conf->cores, buffer);
 	pack16(conf->threads, buffer);
+	pack32(conf->real_memory_size, buffer);
 	packstr(conf->spooldir, buffer);
 	packstr(conf->node_name, buffer);
 	packstr(conf->logfile, buffer);
@@ -70,6 +71,7 @@ extern int unpack_slurmd_conf_lite_no_alloc(slurmd_conf_t *conf, Buf buffer)
 	safe_unpack16(&conf->sockets, buffer);
 	safe_unpack16(&conf->cores, buffer);
 	safe_unpack16(&conf->threads, buffer);
+	safe_unpack32(&conf->real_memory_size, buffer);
 	safe_unpackstr_xmalloc(&conf->spooldir,    &uint32_tmp, buffer);
 	safe_unpackstr_xmalloc(&conf->node_name,   &uint32_tmp, buffer);
 	safe_unpackstr_xmalloc(&conf->logfile,     &uint32_tmp, buffer);
diff --git a/src/slurmd/common/slurmstepd_init.h b/src/slurmd/common/slurmstepd_init.h
index f9a810d8a..493f41ccf 100644
--- a/src/slurmd/common/slurmstepd_init.h
+++ b/src/slurmd/common/slurmstepd_init.h
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/slurmd/common/task_plugin.c b/src/slurmd/common/task_plugin.c
index d7f662a3a..7e44b63dc 100644
--- a/src/slurmd/common/task_plugin.c
+++ b/src/slurmd/common/task_plugin.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -48,21 +48,22 @@
 #include "src/slurmd/slurmstepd/slurmstepd_job.h"
 
 typedef struct slurmd_task_ops {
-	int	(*slurmd_batch_request)		(uint32_t job_id,
-						 batch_job_launch_msg_t *req);
-	int	(*slurmd_launch_request)	(uint32_t job_id,
-						 launch_tasks_request_msg_t *req,
-						 uint32_t node_id);
-	int	(*slurmd_reserve_resources)	(uint32_t job_id,
-						 launch_tasks_request_msg_t *req,
-						 uint32_t node_id);
-	int	(*slurmd_suspend_job)		(uint32_t job_id);
-	int	(*slurmd_resume_job)		(uint32_t job_id);
-	int	(*slurmd_release_resources)	(uint32_t job_id);
-
-	int	(*pre_setuid)			(slurmd_job_t *job);
-	int	(*pre_launch)			(slurmd_job_t *job);
-	int	(*post_term)			(slurmd_job_t *job);
+	int	(*slurmd_batch_request)	    (uint32_t job_id,
+					     batch_job_launch_msg_t *req);
+	int	(*slurmd_launch_request)    (uint32_t job_id,
+					     launch_tasks_request_msg_t *req,
+					     uint32_t node_id);
+	int	(*slurmd_reserve_resources) (uint32_t job_id,
+					     launch_tasks_request_msg_t *req,
+					     uint32_t node_id);
+	int	(*slurmd_suspend_job)	    (uint32_t job_id);
+	int	(*slurmd_resume_job)	    (uint32_t job_id);
+	int	(*slurmd_release_resources) (uint32_t job_id);
+
+	int	(*pre_setuid)		    (slurmd_job_t *job);
+	int	(*pre_launch)		    (slurmd_job_t *job);
+	int	(*post_term)		    (slurmd_job_t *job);
+	int	(*post_step)		    (slurmd_job_t *job);
 } slurmd_task_ops_t;
 
 
@@ -73,7 +74,8 @@ typedef struct slurmd_task_context {
 	slurmd_task_ops_t	ops;
 } slurmd_task_context_t;
 
-static slurmd_task_context_t	*g_task_context = NULL;
+static slurmd_task_context_t	**g_task_context = NULL;
+static int			g_task_context_num = -1;
 static pthread_mutex_t		g_task_context_lock = PTHREAD_MUTEX_INITIALIZER;
 
 
@@ -93,6 +95,7 @@ _slurmd_task_get_ops(slurmd_task_context_t *c)
 		"task_pre_setuid",
 		"task_pre_launch",
 		"task_post_term",
+		"task_post_step",
 	};
 	int n_syms = sizeof( syms ) / sizeof( char * );
 
@@ -195,34 +198,58 @@ _slurmd_task_context_destroy(slurmd_task_context_t *c)
  */
 extern int slurmd_task_init(void)
 {
-	int retval = SLURM_SUCCESS;
+	int retval = SLURM_SUCCESS, i;
 	char *task_plugin_type = NULL;
+	char *last = NULL, *task_plugin_list, *task_plugin = NULL;
 
 	slurm_mutex_lock( &g_task_context_lock );
 
-	if ( g_task_context )
+	if ( g_task_context_num >= 0 )
 		goto done;
 
 	task_plugin_type = slurm_get_task_plugin();
-	g_task_context = _slurmd_task_context_create( task_plugin_type );
-	if ( g_task_context == NULL ) {
-		error( "cannot create task context for %s",
-			 task_plugin_type );
-		retval = SLURM_ERROR;
+	g_task_context_num = 0; /* mark it before anything else */
+	if (task_plugin_type == NULL || task_plugin_type[0] == '\0')
 		goto done;
-	}
 
-	if ( _slurmd_task_get_ops( g_task_context ) == NULL ) {
-		error( "cannot resolve task plugin operations" );
-		_slurmd_task_context_destroy( g_task_context );
-		g_task_context = NULL;
-		retval = SLURM_ERROR;
+	task_plugin_list = task_plugin_type;
+	while ((task_plugin = strtok_r(task_plugin_list, ",", &last))) {
+		i = g_task_context_num++;
+		xrealloc(g_task_context,
+			 (sizeof(slurmd_task_context_t *) * g_task_context_num));
+		if (strncmp(task_plugin, "task/", 5) == 0)
+			task_plugin += 5; /* backward compatibility */
+		task_plugin = xstrdup_printf("task/%s", task_plugin);
+		g_task_context[i] = _slurmd_task_context_create( task_plugin );
+		if ( g_task_context[i] == NULL ) {
+			error( "cannot create task context for %s",
+				 task_plugin );
+			goto error;
+		}
+
+		if ( _slurmd_task_get_ops( g_task_context[i] ) == NULL ) {
+			error( "cannot resolve task plugin operations for %s",
+			       task_plugin );
+			goto error;
+		}
+		xfree(task_plugin);
+		task_plugin_list = NULL; /* for next iteration */
 	}
 
  done:
 	slurm_mutex_unlock( &g_task_context_lock );
 	xfree(task_plugin_type);
 	return retval;
+
+error:
+	xfree(task_plugin);
+	retval = SLURM_ERROR;
+	for (i = 0; i < g_task_context_num; i++)
+		if (g_task_context[i])
+			_slurmd_task_context_destroy(g_task_context[i]);
+	xfree(g_task_context);
+	g_task_context_num = -1;
+	goto done;
 }
 
 /*
@@ -232,13 +259,24 @@ extern int slurmd_task_init(void)
  */
 extern int slurmd_task_fini(void)
 {
-	int rc;
+	int i, rc = SLURM_SUCCESS;
 
+	slurm_mutex_lock( &g_task_context_lock );
 	if (!g_task_context)
-		return SLURM_SUCCESS;
+		goto done;
 
-	rc = _slurmd_task_context_destroy(g_task_context);
-	g_task_context = NULL;
+	for (i = 0; i < g_task_context_num; i++) {
+		if (_slurmd_task_context_destroy(g_task_context[i]) !=
+		    SLURM_SUCCESS) {
+			rc = SLURM_ERROR;
+		}
+	}
+
+	xfree(g_task_context);
+	g_task_context_num = -1;
+
+done:
+	slurm_mutex_unlock( &g_task_context_lock );
 	return rc;
 }
 
@@ -249,10 +287,19 @@ extern int slurmd_task_fini(void)
  */
 extern int slurmd_batch_request(uint32_t job_id, batch_job_launch_msg_t *req)
 {
+	int i, rc = SLURM_SUCCESS;
+
 	if (slurmd_task_init())
 		return SLURM_ERROR;
 
-	return (*(g_task_context->ops.slurmd_batch_request))(job_id, req);
+	slurm_mutex_lock( &g_task_context_lock );
+	for (i = 0; ((i < g_task_context_num) && (rc == SLURM_SUCCESS)); i++) {
+		rc = (*(g_task_context[i]->ops.slurmd_batch_request))(job_id,
+								      req);
+	}
+	slurm_mutex_unlock( &g_task_context_lock );
+
+	return (rc);
 }
 
 /*
@@ -264,10 +311,19 @@ extern int slurmd_launch_request(uint32_t job_id,
 				 launch_tasks_request_msg_t *req,
 				 uint32_t node_id)
 {
+	int i, rc = SLURM_SUCCESS;
+
 	if (slurmd_task_init())
 		return SLURM_ERROR;
 
-	return (*(g_task_context->ops.slurmd_launch_request))(job_id, req, node_id);
+	slurm_mutex_lock( &g_task_context_lock );
+	for (i = 0; ((i < g_task_context_num) && (rc == SLURM_SUCCESS)); i++) {
+		rc = (*(g_task_context[i]->ops.slurmd_launch_request))
+					(job_id, req, node_id);
+	}
+	slurm_mutex_unlock( &g_task_context_lock );
+
+	return (rc);
 }
 
 /*
@@ -279,10 +335,19 @@ extern int slurmd_reserve_resources(uint32_t job_id,
 				    launch_tasks_request_msg_t *req,
 				    uint32_t node_id )
 {
+	int i, rc = SLURM_SUCCESS;
+
 	if (slurmd_task_init())
 		return SLURM_ERROR;
 
-	return (*(g_task_context->ops.slurmd_reserve_resources))(job_id, req, node_id);
+	slurm_mutex_lock( &g_task_context_lock );
+	for (i = 0; ((i < g_task_context_num) && (rc == SLURM_SUCCESS)); i++) {
+		rc = (*(g_task_context[i]->ops.slurmd_reserve_resources))
+					(job_id, req, node_id);
+	}
+	slurm_mutex_unlock( &g_task_context_lock );
+
+	return (rc);
 }
 
 /*
@@ -292,10 +357,17 @@ extern int slurmd_reserve_resources(uint32_t job_id,
  */
 extern int slurmd_suspend_job(uint32_t job_id)
 {
+	int i, rc = SLURM_SUCCESS;
+
 	if (slurmd_task_init())
 		return SLURM_ERROR;
 
-	return (*(g_task_context->ops.slurmd_suspend_job))(job_id);
+	slurm_mutex_lock( &g_task_context_lock );
+	for (i = 0; ((i < g_task_context_num) && (rc == SLURM_SUCCESS)); i++)
+		rc = (*(g_task_context[i]->ops.slurmd_suspend_job))(job_id);
+	slurm_mutex_unlock( &g_task_context_lock );
+
+	return (rc);
 }
 
 /*
@@ -305,10 +377,17 @@ extern int slurmd_suspend_job(uint32_t job_id)
  */
 extern int slurmd_resume_job(uint32_t job_id)
 {
+	int i, rc = SLURM_SUCCESS;
+
 	if (slurmd_task_init())
 		return SLURM_ERROR;
 
-	return (*(g_task_context->ops.slurmd_resume_job))(job_id);
+	slurm_mutex_lock( &g_task_context_lock );
+	for (i = 0; ((i < g_task_context_num) && (rc == SLURM_SUCCESS)); i++)
+		rc = (*(g_task_context[i]->ops.slurmd_resume_job))(job_id);
+	slurm_mutex_unlock( &g_task_context_lock );
+
+	return (rc);
 }
 
 /*
@@ -318,10 +397,19 @@ extern int slurmd_resume_job(uint32_t job_id)
  */
 extern int slurmd_release_resources(uint32_t job_id)
 {
+	int i, rc = SLURM_SUCCESS;
+
 	if (slurmd_task_init())
 		return SLURM_ERROR;
 
-	return (*(g_task_context->ops.slurmd_release_resources))(job_id);
+	slurm_mutex_lock( &g_task_context_lock );
+	for (i = 0; ((i < g_task_context_num) && (rc == SLURM_SUCCESS)); i++) {
+		rc = (*(g_task_context[i]->ops.slurmd_release_resources))
+				(job_id);
+	}
+	slurm_mutex_unlock( &g_task_context_lock );
+
+	return (rc);
 }
 
 /*
@@ -332,10 +420,17 @@ extern int slurmd_release_resources(uint32_t job_id)
  */
 extern int pre_setuid(slurmd_job_t *job)
 {
+	int i, rc = SLURM_SUCCESS;
+
 	if (slurmd_task_init())
 		return SLURM_ERROR;
 
-	return (*(g_task_context->ops.pre_setuid))(job);
+	slurm_mutex_lock( &g_task_context_lock );
+	for (i = 0; ((i < g_task_context_num) && (rc == SLURM_SUCCESS)); i++)
+		rc = (*(g_task_context[i]->ops.pre_setuid))(job);
+	slurm_mutex_unlock( &g_task_context_lock );
+
+	return (rc);
 }
 
 /*
@@ -345,10 +440,17 @@ extern int pre_setuid(slurmd_job_t *job)
  */
 extern int pre_launch(slurmd_job_t *job)
 {
+	int i, rc = SLURM_SUCCESS;
+
 	if (slurmd_task_init())
 		return SLURM_ERROR;
 
-	return (*(g_task_context->ops.pre_launch))(job);
+	slurm_mutex_lock( &g_task_context_lock );
+	for (i = 0; ((i < g_task_context_num) && (rc == SLURM_SUCCESS)); i++)
+		rc = (*(g_task_context[i]->ops.pre_launch))(job);
+	slurm_mutex_unlock( &g_task_context_lock );
+
+	return (rc);
 }
 
 /*
@@ -358,8 +460,35 @@ extern int pre_launch(slurmd_job_t *job)
  */
 extern int post_term(slurmd_job_t *job)
 {
+	int i, rc = SLURM_SUCCESS;
+
 	if (slurmd_task_init())
 		return SLURM_ERROR;
 
-	return (*(g_task_context->ops.post_term))(job);
+	slurm_mutex_lock( &g_task_context_lock );
+	for (i = 0; ((i < g_task_context_num) && (rc == SLURM_SUCCESS)); i++)
+		rc = (*(g_task_context[i]->ops.post_term))(job);
+	slurm_mutex_unlock( &g_task_context_lock );
+
+	return (rc);
+}
+
+/*
+ * Note that a step has terminated.
+ *
+ * RET - slurm error code
+ */
+extern int post_step(slurmd_job_t *job)
+{
+	int i, rc = SLURM_SUCCESS;
+
+	if (slurmd_task_init())
+		return SLURM_ERROR;
+
+	slurm_mutex_lock( &g_task_context_lock );
+	for (i = 0; ((i < g_task_context_num) && (rc == SLURM_SUCCESS)); i++)
+		rc = (*(g_task_context[i]->ops.post_step))(job);
+	slurm_mutex_unlock( &g_task_context_lock );
+
+	return (rc);
 }
diff --git a/src/slurmd/common/task_plugin.h b/src/slurmd/common/task_plugin.h
index 228fdf276..4419c8ef9 100644
--- a/src/slurmd/common/task_plugin.h
+++ b/src/slurmd/common/task_plugin.h
@@ -6,32 +6,32 @@
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
  *  CODE-OCEC-09-009. All rights reserved.
- *  
+ *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
- *  
+ *
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
  *  Software Foundation; either version 2 of the License, or (at your option)
  *  any later version.
  *
- *  In addition, as a special exception, the copyright holders give permission 
- *  to link the code of portions of this program with the OpenSSL library under 
- *  certain conditions as described in each individual source file, and 
- *  distribute linked combinations including the two. You must obey the GNU 
- *  General Public License in all respects for all of the code used other than 
- *  OpenSSL. If you modify file(s) with this exception, you may extend this 
- *  exception to your version of the file(s), but you are not obligated to do 
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
  *  so. If you do not wish to do so, delete this exception statement from your
- *  version.  If you delete this exception statement from all source files in 
+ *  version.  If you delete this exception statement from all source files in
  *  the program, then also delete it here.
- *  
+ *
  *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
  *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
  *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
  *  details.
- *  
+ *
  *  You should have received a copy of the GNU General Public License along
  *  with SLURM; if not, write to the Free Software Foundation, Inc.,
  *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
@@ -51,7 +51,7 @@ extern int slurmd_task_init( void );
 
 /*
  * Terminate the task plugin, free memory.
- * 
+ *
  * RET - slurm error code
  */
 extern int slurmd_task_fini(void);
@@ -74,8 +74,8 @@ extern int slurmd_batch_request(uint32_t job_id, batch_job_launch_msg_t *req);
  *
  * RET - slurm error code
  */
-extern int slurmd_launch_request(uint32_t job_id, 
-				 launch_tasks_request_msg_t *req, 
+extern int slurmd_launch_request(uint32_t job_id,
+				 launch_tasks_request_msg_t *req,
 				 uint32_t node_id );
 
 /*
@@ -83,8 +83,8 @@ extern int slurmd_launch_request(uint32_t job_id,
  *
  * RET - slurm error code
  */
-extern int slurmd_reserve_resources(uint32_t job_id, 
-				    launch_tasks_request_msg_t *req, 
+extern int slurmd_reserve_resources(uint32_t job_id,
+				    launch_tasks_request_msg_t *req,
 				    uint32_t node_id );
 
 /*
@@ -130,5 +130,11 @@ extern int pre_launch(slurmd_job_t *job);
  */
 extern int post_term(slurmd_job_t *job);
 
-#endif /* _SLURMD_TASK_PLUGIN_H_ */
+/*
+ * Note that a step has terminated.
+ *
+ * RET - slurm error code
+ */
+extern int post_step(slurmd_job_t *job);
 
+#endif /* _SLURMD_TASK_PLUGIN_H_ */
diff --git a/src/slurmd/slurmd/Makefile.am b/src/slurmd/slurmd/Makefile.am
index 30afd9de1..1cd8f8551 100644
--- a/src/slurmd/slurmd/Makefile.am
+++ b/src/slurmd/slurmd/Makefile.am
@@ -11,7 +11,8 @@ INCLUDES = -I$(top_srcdir)
 slurmd_LDADD = 					   \
 	$(top_builddir)/src/common/libdaemonize.la \
 	$(top_builddir)/src/api/libslurm.o -ldl	   \
-	$(PLPA_LIBS)
+	$(PLPA_LIBS) \
+	../common/libslurmd_common.la
 
 SLURMD_SOURCES = \
 	slurmd.c slurmd.h \
@@ -19,20 +20,7 @@ SLURMD_SOURCES = \
 	get_mach_stat.c get_mach_stat.h	\
 	read_proc.c 	        	\
 	reverse_tree_math.c reverse_tree_math.h \
-	xcpu.c xcpu.h			\
-	$(top_builddir)/src/slurmd/common/proctrack.c \
-	$(top_builddir)/src/slurmd/common/proctrack.h \
-	$(top_builddir)/src/slurmd/common/setproctitle.c \
-	$(top_builddir)/src/slurmd/common/setproctitle.h \
-	$(top_builddir)/src/slurmd/common/slurmstepd_init.c \
-	$(top_builddir)/src/slurmd/common/slurmstepd_init.h \
-	$(top_builddir)/src/slurmd/common/run_script.c \
-	$(top_builddir)/src/slurmd/common/run_script.h \
-	$(top_builddir)/src/slurmd/common/task_plugin.c \
-	$(top_builddir)/src/slurmd/common/task_plugin.h \
-	$(top_builddir)/src/slurmd/common/set_oomadj.c \
-	$(top_builddir)/src/slurmd/common/set_oomadj.h \
-	$(top_builddir)/src/slurmd/common/reverse_tree.h
+	xcpu.c xcpu.h
 
 slurmd_SOURCES = $(SLURMD_SOURCES)
 
diff --git a/src/slurmd/slurmd/Makefile.in b/src/slurmd/slurmd/Makefile.in
index 8b06bc391..6504a5d5f 100644
--- a/src/slurmd/slurmd/Makefile.in
+++ b/src/slurmd/slurmd/Makefile.in
@@ -65,6 +65,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -75,6 +76,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -86,14 +88,12 @@ CONFIG_CLEAN_VPATH_FILES =
 am__installdirs = "$(DESTDIR)$(sbindir)"
 PROGRAMS = $(sbin_PROGRAMS)
 am__objects_1 = slurmd.$(OBJEXT) req.$(OBJEXT) get_mach_stat.$(OBJEXT) \
-	read_proc.$(OBJEXT) reverse_tree_math.$(OBJEXT) xcpu.$(OBJEXT) \
-	proctrack.$(OBJEXT) setproctitle.$(OBJEXT) \
-	slurmstepd_init.$(OBJEXT) run_script.$(OBJEXT) \
-	task_plugin.$(OBJEXT) set_oomadj.$(OBJEXT)
+	read_proc.$(OBJEXT) reverse_tree_math.$(OBJEXT) xcpu.$(OBJEXT)
 am_slurmd_OBJECTS = $(am__objects_1)
 slurmd_OBJECTS = $(am_slurmd_OBJECTS)
 slurmd_DEPENDENCIES = $(top_builddir)/src/common/libdaemonize.la \
-	$(top_builddir)/src/api/libslurm.o
+	$(top_builddir)/src/api/libslurm.o \
+	../common/libslurmd_common.la
 slurmd_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
 	--mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(slurmd_LDFLAGS) \
 	$(LDFLAGS) -o $@
@@ -125,7 +125,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -162,6 +165,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -219,6 +223,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -254,6 +259,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
@@ -311,7 +317,8 @@ INCLUDES = -I$(top_srcdir)
 slurmd_LDADD = \
 	$(top_builddir)/src/common/libdaemonize.la \
 	$(top_builddir)/src/api/libslurm.o -ldl	   \
-	$(PLPA_LIBS)
+	$(PLPA_LIBS) \
+	../common/libslurmd_common.la
 
 SLURMD_SOURCES = \
 	slurmd.c slurmd.h \
@@ -319,20 +326,7 @@ SLURMD_SOURCES = \
 	get_mach_stat.c get_mach_stat.h	\
 	read_proc.c 	        	\
 	reverse_tree_math.c reverse_tree_math.h \
-	xcpu.c xcpu.h			\
-	$(top_builddir)/src/slurmd/common/proctrack.c \
-	$(top_builddir)/src/slurmd/common/proctrack.h \
-	$(top_builddir)/src/slurmd/common/setproctitle.c \
-	$(top_builddir)/src/slurmd/common/setproctitle.h \
-	$(top_builddir)/src/slurmd/common/slurmstepd_init.c \
-	$(top_builddir)/src/slurmd/common/slurmstepd_init.h \
-	$(top_builddir)/src/slurmd/common/run_script.c \
-	$(top_builddir)/src/slurmd/common/run_script.h \
-	$(top_builddir)/src/slurmd/common/task_plugin.c \
-	$(top_builddir)/src/slurmd/common/task_plugin.h \
-	$(top_builddir)/src/slurmd/common/set_oomadj.c \
-	$(top_builddir)/src/slurmd/common/set_oomadj.h \
-	$(top_builddir)/src/slurmd/common/reverse_tree.h
+	xcpu.c xcpu.h
 
 slurmd_SOURCES = $(SLURMD_SOURCES)
 @HAVE_AIX_FALSE@slurmd_LDFLAGS = -export-dynamic $(CMD_LDFLAGS)
@@ -428,16 +422,10 @@ distclean-compile:
 	-rm -f *.tab.c
 
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/get_mach_stat.Po@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/proctrack.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/read_proc.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/req.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/reverse_tree_math.Po@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/run_script.Po@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/set_oomadj.Po@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/setproctitle.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/slurmd.Po@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/slurmstepd_init.Po@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/task_plugin.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/xcpu.Po@am__quote@
 
 .c.o:
@@ -461,90 +449,6 @@ distclean-compile:
 @AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
 @am__fastdepCC_FALSE@	$(LTCOMPILE) -c -o $@ $<
 
-proctrack.o: $(top_builddir)/src/slurmd/common/proctrack.c
-@am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT proctrack.o -MD -MP -MF $(DEPDIR)/proctrack.Tpo -c -o proctrack.o `test -f '$(top_builddir)/src/slurmd/common/proctrack.c' || echo '$(srcdir)/'`$(top_builddir)/src/slurmd/common/proctrack.c
-@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/proctrack.Tpo $(DEPDIR)/proctrack.Po
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$(top_builddir)/src/slurmd/common/proctrack.c' object='proctrack.o' libtool=no @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o proctrack.o `test -f '$(top_builddir)/src/slurmd/common/proctrack.c' || echo '$(srcdir)/'`$(top_builddir)/src/slurmd/common/proctrack.c
-
-proctrack.obj: $(top_builddir)/src/slurmd/common/proctrack.c
-@am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT proctrack.obj -MD -MP -MF $(DEPDIR)/proctrack.Tpo -c -o proctrack.obj `if test -f '$(top_builddir)/src/slurmd/common/proctrack.c'; then $(CYGPATH_W) '$(top_builddir)/src/slurmd/common/proctrack.c'; else $(CYGPATH_W) '$(srcdir)/$(top_builddir)/src/slurmd/common/proctrack.c'; fi`
-@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/proctrack.Tpo $(DEPDIR)/proctrack.Po
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$(top_builddir)/src/slurmd/common/proctrack.c' object='proctrack.obj' libtool=no @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o proctrack.obj `if test -f '$(top_builddir)/src/slurmd/common/proctrack.c'; then $(CYGPATH_W) '$(top_builddir)/src/slurmd/common/proctrack.c'; else $(CYGPATH_W) '$(srcdir)/$(top_builddir)/src/slurmd/common/proctrack.c'; fi`
-
-setproctitle.o: $(top_builddir)/src/slurmd/common/setproctitle.c
-@am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT setproctitle.o -MD -MP -MF $(DEPDIR)/setproctitle.Tpo -c -o setproctitle.o `test -f '$(top_builddir)/src/slurmd/common/setproctitle.c' || echo '$(srcdir)/'`$(top_builddir)/src/slurmd/common/setproctitle.c
-@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/setproctitle.Tpo $(DEPDIR)/setproctitle.Po
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$(top_builddir)/src/slurmd/common/setproctitle.c' object='setproctitle.o' libtool=no @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o setproctitle.o `test -f '$(top_builddir)/src/slurmd/common/setproctitle.c' || echo '$(srcdir)/'`$(top_builddir)/src/slurmd/common/setproctitle.c
-
-setproctitle.obj: $(top_builddir)/src/slurmd/common/setproctitle.c
-@am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT setproctitle.obj -MD -MP -MF $(DEPDIR)/setproctitle.Tpo -c -o setproctitle.obj `if test -f '$(top_builddir)/src/slurmd/common/setproctitle.c'; then $(CYGPATH_W) '$(top_builddir)/src/slurmd/common/setproctitle.c'; else $(CYGPATH_W) '$(srcdir)/$(top_builddir)/src/slurmd/common/setproctitle.c'; fi`
-@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/setproctitle.Tpo $(DEPDIR)/setproctitle.Po
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$(top_builddir)/src/slurmd/common/setproctitle.c' object='setproctitle.obj' libtool=no @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o setproctitle.obj `if test -f '$(top_builddir)/src/slurmd/common/setproctitle.c'; then $(CYGPATH_W) '$(top_builddir)/src/slurmd/common/setproctitle.c'; else $(CYGPATH_W) '$(srcdir)/$(top_builddir)/src/slurmd/common/setproctitle.c'; fi`
-
-slurmstepd_init.o: $(top_builddir)/src/slurmd/common/slurmstepd_init.c
-@am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT slurmstepd_init.o -MD -MP -MF $(DEPDIR)/slurmstepd_init.Tpo -c -o slurmstepd_init.o `test -f '$(top_builddir)/src/slurmd/common/slurmstepd_init.c' || echo '$(srcdir)/'`$(top_builddir)/src/slurmd/common/slurmstepd_init.c
-@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/slurmstepd_init.Tpo $(DEPDIR)/slurmstepd_init.Po
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$(top_builddir)/src/slurmd/common/slurmstepd_init.c' object='slurmstepd_init.o' libtool=no @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o slurmstepd_init.o `test -f '$(top_builddir)/src/slurmd/common/slurmstepd_init.c' || echo '$(srcdir)/'`$(top_builddir)/src/slurmd/common/slurmstepd_init.c
-
-slurmstepd_init.obj: $(top_builddir)/src/slurmd/common/slurmstepd_init.c
-@am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT slurmstepd_init.obj -MD -MP -MF $(DEPDIR)/slurmstepd_init.Tpo -c -o slurmstepd_init.obj `if test -f '$(top_builddir)/src/slurmd/common/slurmstepd_init.c'; then $(CYGPATH_W) '$(top_builddir)/src/slurmd/common/slurmstepd_init.c'; else $(CYGPATH_W) '$(srcdir)/$(top_builddir)/src/slurmd/common/slurmstepd_init.c'; fi`
-@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/slurmstepd_init.Tpo $(DEPDIR)/slurmstepd_init.Po
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$(top_builddir)/src/slurmd/common/slurmstepd_init.c' object='slurmstepd_init.obj' libtool=no @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o slurmstepd_init.obj `if test -f '$(top_builddir)/src/slurmd/common/slurmstepd_init.c'; then $(CYGPATH_W) '$(top_builddir)/src/slurmd/common/slurmstepd_init.c'; else $(CYGPATH_W) '$(srcdir)/$(top_builddir)/src/slurmd/common/slurmstepd_init.c'; fi`
-
-run_script.o: $(top_builddir)/src/slurmd/common/run_script.c
-@am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT run_script.o -MD -MP -MF $(DEPDIR)/run_script.Tpo -c -o run_script.o `test -f '$(top_builddir)/src/slurmd/common/run_script.c' || echo '$(srcdir)/'`$(top_builddir)/src/slurmd/common/run_script.c
-@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/run_script.Tpo $(DEPDIR)/run_script.Po
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$(top_builddir)/src/slurmd/common/run_script.c' object='run_script.o' libtool=no @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o run_script.o `test -f '$(top_builddir)/src/slurmd/common/run_script.c' || echo '$(srcdir)/'`$(top_builddir)/src/slurmd/common/run_script.c
-
-run_script.obj: $(top_builddir)/src/slurmd/common/run_script.c
-@am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT run_script.obj -MD -MP -MF $(DEPDIR)/run_script.Tpo -c -o run_script.obj `if test -f '$(top_builddir)/src/slurmd/common/run_script.c'; then $(CYGPATH_W) '$(top_builddir)/src/slurmd/common/run_script.c'; else $(CYGPATH_W) '$(srcdir)/$(top_builddir)/src/slurmd/common/run_script.c'; fi`
-@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/run_script.Tpo $(DEPDIR)/run_script.Po
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$(top_builddir)/src/slurmd/common/run_script.c' object='run_script.obj' libtool=no @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o run_script.obj `if test -f '$(top_builddir)/src/slurmd/common/run_script.c'; then $(CYGPATH_W) '$(top_builddir)/src/slurmd/common/run_script.c'; else $(CYGPATH_W) '$(srcdir)/$(top_builddir)/src/slurmd/common/run_script.c'; fi`
-
-task_plugin.o: $(top_builddir)/src/slurmd/common/task_plugin.c
-@am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT task_plugin.o -MD -MP -MF $(DEPDIR)/task_plugin.Tpo -c -o task_plugin.o `test -f '$(top_builddir)/src/slurmd/common/task_plugin.c' || echo '$(srcdir)/'`$(top_builddir)/src/slurmd/common/task_plugin.c
-@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/task_plugin.Tpo $(DEPDIR)/task_plugin.Po
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$(top_builddir)/src/slurmd/common/task_plugin.c' object='task_plugin.o' libtool=no @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o task_plugin.o `test -f '$(top_builddir)/src/slurmd/common/task_plugin.c' || echo '$(srcdir)/'`$(top_builddir)/src/slurmd/common/task_plugin.c
-
-task_plugin.obj: $(top_builddir)/src/slurmd/common/task_plugin.c
-@am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT task_plugin.obj -MD -MP -MF $(DEPDIR)/task_plugin.Tpo -c -o task_plugin.obj `if test -f '$(top_builddir)/src/slurmd/common/task_plugin.c'; then $(CYGPATH_W) '$(top_builddir)/src/slurmd/common/task_plugin.c'; else $(CYGPATH_W) '$(srcdir)/$(top_builddir)/src/slurmd/common/task_plugin.c'; fi`
-@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/task_plugin.Tpo $(DEPDIR)/task_plugin.Po
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$(top_builddir)/src/slurmd/common/task_plugin.c' object='task_plugin.obj' libtool=no @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o task_plugin.obj `if test -f '$(top_builddir)/src/slurmd/common/task_plugin.c'; then $(CYGPATH_W) '$(top_builddir)/src/slurmd/common/task_plugin.c'; else $(CYGPATH_W) '$(srcdir)/$(top_builddir)/src/slurmd/common/task_plugin.c'; fi`
-
-set_oomadj.o: $(top_builddir)/src/slurmd/common/set_oomadj.c
-@am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT set_oomadj.o -MD -MP -MF $(DEPDIR)/set_oomadj.Tpo -c -o set_oomadj.o `test -f '$(top_builddir)/src/slurmd/common/set_oomadj.c' || echo '$(srcdir)/'`$(top_builddir)/src/slurmd/common/set_oomadj.c
-@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/set_oomadj.Tpo $(DEPDIR)/set_oomadj.Po
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$(top_builddir)/src/slurmd/common/set_oomadj.c' object='set_oomadj.o' libtool=no @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o set_oomadj.o `test -f '$(top_builddir)/src/slurmd/common/set_oomadj.c' || echo '$(srcdir)/'`$(top_builddir)/src/slurmd/common/set_oomadj.c
-
-set_oomadj.obj: $(top_builddir)/src/slurmd/common/set_oomadj.c
-@am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT set_oomadj.obj -MD -MP -MF $(DEPDIR)/set_oomadj.Tpo -c -o set_oomadj.obj `if test -f '$(top_builddir)/src/slurmd/common/set_oomadj.c'; then $(CYGPATH_W) '$(top_builddir)/src/slurmd/common/set_oomadj.c'; else $(CYGPATH_W) '$(srcdir)/$(top_builddir)/src/slurmd/common/set_oomadj.c'; fi`
-@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/set_oomadj.Tpo $(DEPDIR)/set_oomadj.Po
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$(top_builddir)/src/slurmd/common/set_oomadj.c' object='set_oomadj.obj' libtool=no @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o set_oomadj.obj `if test -f '$(top_builddir)/src/slurmd/common/set_oomadj.c'; then $(CYGPATH_W) '$(top_builddir)/src/slurmd/common/set_oomadj.c'; else $(CYGPATH_W) '$(srcdir)/$(top_builddir)/src/slurmd/common/set_oomadj.c'; fi`
-
 mostlyclean-libtool:
 	-rm -f *.lo
 
diff --git a/src/slurmd/slurmd/get_mach_stat.c b/src/slurmd/slurmd/get_mach_stat.c
index ed3edefcb..f1346620e 100644
--- a/src/slurmd/slurmd/get_mach_stat.c
+++ b/src/slurmd/slurmd/get_mach_stat.c
@@ -1,9 +1,9 @@
 /*****************************************************************************\
- *  get_mach_stat.c - Get the status of the current machine 
+ *  get_mach_stat.c - Get the status of the current machine
  *
  *  NOTE: Some of these functions are system dependent. Built on RedHat2.4
  *  NOTE: While not currently used by SLURM, this code can also get a node's
- *       OS name and CPU speed. See code ifdef'ed out via USE_OS_NAME and 
+ *       OS name and CPU speed. See code ifdef'ed out via USE_OS_NAME and
  *       USE_CPU_SPEED
  *****************************************************************************
  *  Copyright (C) 2006 Hewlett-Packard Development Company, L.P.
@@ -11,32 +11,32 @@
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>.
  *  CODE-OCEC-09-009. All rights reserved.
- *  
+ *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
- *  
+ *
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
  *  Software Foundation; either version 2 of the License, or (at your option)
  *  any later version.
  *
- *  In addition, as a special exception, the copyright holders give permission 
- *  to link the code of portions of this program with the OpenSSL library under 
- *  certain conditions as described in each individual source file, and 
- *  distribute linked combinations including the two. You must obey the GNU 
- *  General Public License in all respects for all of the code used other than 
- *  OpenSSL. If you modify file(s) with this exception, you may extend this 
- *  exception to your version of the file(s), but you are not obligated to do 
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
  *  so. If you do not wish to do so, delete this exception statement from your
- *  version.  If you delete this exception statement from all source files in 
+ *  version.  If you delete this exception statement from all source files in
  *  the program, then also delete it here.
- *  
+ *
  *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
  *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
  *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
  *  details.
- *  
+ *
  *  You should have received a copy of the GNU General Public License along
  *  with SLURM; if not, write to the Free Software Foundation, Inc.,
  *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
@@ -57,9 +57,9 @@
 #ifdef HAVE_SYS_SYSCTL_H
 # include <sys/sysctl.h>
 #endif
- 
+
 #include <errno.h>
-#include <fcntl.h> 
+#include <fcntl.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
@@ -97,13 +97,6 @@
 #include "src/slurmctld/slurmctld.h"
 #include "src/slurmd/slurmd/get_mach_stat.h"
 
-static char* _cpuinfo_path = "/proc/cpuinfo";
-
-static int _compute_block_map(uint16_t numproc,
-			      uint16_t **block_map, uint16_t **block_map_inv);
-static int _chk_cpuinfo_str(char *buffer, char *keyword, char **valptr);
-static int _chk_cpuinfo_uint32(char *buffer, char *keyword, uint32_t *val);
-
 /* #define DEBUG_DETAIL	1 */	/* enable detailed debugging within SLURM */
 
 #if DEBUG_MODULE
@@ -119,8 +112,8 @@ static int _chk_cpuinfo_uint32(char *buffer, char *keyword, uint32_t *val);
 #define xfree	free
 /* main is used here for testing purposes only:				*/
 /* % gcc -DDEBUG_MODULE get_mach_stat.c -I../../.. -g -DUSE_CPU_SPEED	*/
-int 
-main(int argc, char * argv[]) 
+int
+main(int argc, char * argv[])
 {
 	int error_code;
 	uint16_t sockets, cores, threads;
@@ -132,6 +125,7 @@ main(int argc, char * argv[])
 	uint16_t testnumproc = 0;
 	uint32_t up_time = 0;
 	int days, hours, mins, secs;
+	char* _cpuinfo_path = "/proc/cpuinfo";
 
 	if (argc > 1) {
 	    	_cpuinfo_path = argv[1];
@@ -140,7 +134,7 @@ main(int argc, char * argv[])
 	debug3("%s:", _cpuinfo_path);
 
 	error_code = get_mach_name(node_name);
-	if (error_code != 0) 
+	if (error_code != 0)
 		exit(1);    /* The show is all over without a node name */
 
 	error_code += get_procs(&this_node.cpus);
@@ -171,15 +165,15 @@ main(int argc, char * argv[])
 	days  = (up_time / 86400);
 	debug3("\tUpTime=%u=%u-%2.2u:%2.2u:%2.2u",
 	       up_time, days, hours, mins, secs);
-	if (error_code != 0) 
+	if (error_code != 0)
 		debug3("get_mach_stat error_code=%d encountered", error_code);
 	exit (error_code);
 }
 
 
-/* gethostname_short - equivalent to gethostname, but return only the first 
- * component of the fully qualified name 
- * (e.g. "linux123.foo.bar" becomes "linux123") 
+/* gethostname_short - equivalent to gethostname, but return only the first
+ * component of the fully qualified name
+ * (e.g. "linux123.foo.bar" becomes "linux123")
  * OUT name
  */
 int
@@ -207,69 +201,15 @@ gethostname_short (char *name, size_t len)
 }
 #endif
 
-
-/*
- * get_procs - Return the count of procs on this system 
- * Input: procs - buffer for the CPU count
- * Output: procs - filled in with CPU count, "1" if error
- *         return code - 0 if no error, otherwise errno
- */
-extern int 
-get_procs(uint16_t *procs) 
-{
-#ifdef LPAR_INFO_FORMAT2
-	/* AIX 5.3 only */
-	lpar_info_format2_t info;
-
-	*procs = 1;
-	if (lpar_get_info(LPAR_INFO_FORMAT2, &info, sizeof(info)) != 0) {
-		error("lpar_get_info() failed");
-		return EINVAL;
-	}
-	
-	*procs = (uint16_t) info.online_vcpus;
-#else /* !LPAR_INFO_FORMAT2 */
-
-#  ifdef _SC_NPROCESSORS_ONLN
-	int my_proc_tally;
-
-	*procs = 1;
-	my_proc_tally = (int)sysconf(_SC_NPROCESSORS_ONLN);
-	if (my_proc_tally < 1) {
-		error ("get_procs: error running sysconf(_SC_NPROCESSORS_ONLN)");
-		return EINVAL;
-	} 
-
-	*procs = (uint16_t) my_proc_tally;
-#  else
-#    ifdef HAVE_SYSCTLBYNAME
-	int ncpu;
-	size_t len = sizeof(ncpu);
-
-	*procs = 1;
-	if (sysctlbyname("hw.ncpus", &ncpu, &len, NULL, 0) == -1) {
-		error("get_procs: error running sysctl(HW_NCPU)");
-		return EINVAL;
-	}
-	*procs = (uint16_t) ncpu;
-#    else /* !HAVE_SYSCTLBYNAME */
-	*procs = 1;
-#    endif /* HAVE_SYSCTLBYNAME */
-#  endif /* _SC_NPROCESSORS_ONLN */
-#endif /* LPAR_INFO_FORMAT2 */
-
-	return 0;
-}
-
 #ifdef USE_OS_NAME
 /*
- * get_os_name - Return the operating system name and version 
+ * get_os_name - Return the operating system name and version
  * Input: os_name - buffer for the OS name, must be at least MAX_OS_LEN characters
  * Output: os_name - filled in with OS name, "UNKNOWN" if error
  *         return code - 0 if no error, otherwise errno
  */
-extern int 
-get_os_name(char *os_name) 
+extern int
+get_os_name(char *os_name)
 {
 	int error_code;
 	struct utsname sys_info;
@@ -279,13 +219,13 @@ get_os_name(char *os_name)
 	if (error_code != 0) {
 		error ("get_os_name: uname error %d", error_code);
 		return error_code;
-	} 
+	}
 
-	if ((strlen(sys_info.sysname) + strlen(sys_info.release) + 2) >= 
+	if ((strlen(sys_info.sysname) + strlen(sys_info.release) + 2) >=
 		MAX_OS_LEN) {
 		error ("get_os_name: OS name too long");
 		return error_code;
-	} 
+	}
 
 	strcpy(os_name, sys_info.sysname);
 	strcat(os_name, ".");
@@ -296,13 +236,13 @@ get_os_name(char *os_name)
 
 
 /*
- * get_mach_name - Return the name of this node 
+ * get_mach_name - Return the name of this node
  * Input: node_name - buffer for the node name, must be at least MAX_SLURM_NAME characters
  * Output: node_name - filled in with node name
  *         return code - 0 if no error, otherwise errno
  */
-extern int 
-get_mach_name(char *node_name) 
+extern int
+get_mach_name(char *node_name)
 {
     int error_code;
 
@@ -315,7 +255,7 @@ get_mach_name(char *node_name)
 
 
 /*
- * get_memory - Return the count of procs on this system 
+ * get_memory - Return the count of procs on this system
  * Input: real_memory - buffer for the Real Memory size
  * Output: real_memory - the Real Memory size in MB, "1" if error
  *         return code - 0 if no error, otherwise errno
@@ -334,8 +274,8 @@ get_memory(uint32_t *real_memory)
 	if (pages < 1) {
 		error ("get_memory: error running sysconf(_SC_PHYS_PAGES)");
 		return EINVAL;
-	} 
-	*real_memory = (uint32_t)((float)pages * (sysconf(_SC_PAGE_SIZE) / 
+	}
+	*real_memory = (uint32_t)((float)pages * (sysconf(_SC_PAGE_SIZE) /
 			1048576.0)); /* Megabytes of memory */
 #  else  /* !_SC_PHYS_PAGES */
 #    if HAVE_SYSCTLBYNAME
@@ -357,16 +297,16 @@ get_memory(uint32_t *real_memory)
 
 
 /*
- * get_tmp_disk - Return the total size of temporary file system on 
- *    this system 
+ * get_tmp_disk - Return the total size of temporary file system on
+ *    this system
  * Input: tmp_disk - buffer for the disk space size
- *        tmp_fs - pathname of the temporary file system to status, 
- *	           defaults to "/tmp"
+ *        tmp_fs - pathname of the temporary file system to status,
+ *		   defaults to "/tmp"
  * Output: tmp_disk - filled in with disk space size in MB, zero if error
  *         return code - 0 if no error, otherwise errno
  */
-extern int 
-get_tmp_disk(uint32_t *tmp_disk, char *tmp_fs) 
+extern int
+get_tmp_disk(uint32_t *tmp_disk, char *tmp_fs)
 {
 	int error_code = 0;
 #ifdef HAVE_SYS_VFS_H
@@ -390,7 +330,7 @@ get_tmp_disk(uint32_t *tmp_disk, char *tmp_fs)
 	}
 	else if (errno != ENOENT) {
 		error_code = errno;
-		error ("get_tmp_disk: error %d executing statfs on %s", 
+		error ("get_tmp_disk: error %d executing statfs on %s",
 			errno, tmp_fs_name);
 	}
 
@@ -403,7 +343,7 @@ get_tmp_disk(uint32_t *tmp_disk, char *tmp_fs)
 
 extern int get_up_time(uint32_t *up_time)
 {
-#if defined(HAVE_AIX) || defined(__sun) || defined(__APPLE__)
+#if defined(HAVE_AIX) || defined(__sun)	|| defined(__APPLE__)
 	clock_t tm;
 	struct tms buf;
 
@@ -414,10 +354,24 @@ extern int get_up_time(uint32_t *up_time)
 	}
 
 	*up_time = tm / sysconf(_SC_CLK_TCK);
+#elif defined(__CYGWIN__)
+	FILE *uptime_file;
+	char buffer[128];
+	char* _uptime_path = "/proc/uptime";
+
+	if (!(uptime_file = fopen(_uptime_path, "r"))) {
+		error("get_up_time: error %d opening %s", errno, _uptime_path);
+		return errno;
+	}
+
+	if (fgets(buffer, sizeof(buffer), uptime_file))
+		*up_time = atoi(buffer);
+
+	fclose(uptime_file);
 #else
-	/* NOTE for Linux: The return value of times() may overflow the 
-	 * possible range of type clock_t. There is also an offset of 
-	 * 429 million seconds on some implementations. We just use the 
+	/* NOTE for Linux: The return value of times() may overflow the
+	 * possible range of type clock_t. There is also an offset of
+	 * 429 million seconds on some implementations. We just use the
 	 * simpler sysinfo() function instead. */
 	struct sysinfo info;
 
@@ -431,6 +385,7 @@ extern int get_up_time(uint32_t *up_time)
 	return 0;
 }
 
+#ifdef USE_CPU_SPEED
 /* _chk_cpuinfo_str
  *	check a line of cpuinfo data (buffer) for a keyword.  If it
  *	exists, return the string value for that keyword in *valptr.
@@ -446,31 +401,12 @@ static int _chk_cpuinfo_str(char *buffer, char *keyword, char **valptr)
 		return false;
 
 	ptr = strstr(buffer, ":");
-	if (ptr != NULL) 
+	if (ptr != NULL)
 		ptr++;
 	*valptr = ptr;
 	return true;
 }
 
-/* _chk_cpuinfo_uint32
- *	check a line of cpuinfo data (buffer) for a keyword.  If it
- *	exists, return the uint16 value for that keyword in *valptr.
- * Input:  buffer - single line of cpuinfo data
- *	   keyword - keyword to check for
- * Output: valptr - uint32 value corresponding to keyword
- *         return code - true if keyword found, false if not found
- */
-static int _chk_cpuinfo_uint32(char *buffer, char *keyword, uint32_t *val)
-{
-	char *valptr;
-	if (_chk_cpuinfo_str(buffer, keyword, &valptr)) {
-		*val = strtoul(valptr, (char **)NULL, 10);
-		return true;
-	} else {
-		return false;
-	}
-}
-#ifdef USE_CPU_SPEED
 /* _chk_cpuinfo_float
  *	check a line of cpuinfo data (buffer) for a keyword.  If it
  *	exists, return the float value for that keyword in *valptr.
@@ -496,8 +432,8 @@ static int _chk_cpuinfo_float(char *buffer, char *keyword, float *val)
  * Output: procs - filled in with CPU speed, "1.0" if error
  *         return code - 0 if no error, otherwise errno
  */
-extern int 
-get_speed(float *speed) 
+extern int
+get_speed(float *speed)
 {
 #if defined (__sun)
 	kstat_ctl_t   *kc;
@@ -518,511 +454,23 @@ get_speed(float *speed)
 #else
 	FILE *cpu_info_file;
 	char buffer[128];
+	char* _cpuinfo_path = "/proc/cpuinfo";
 
 	*speed = 1.0;
 	cpu_info_file = fopen(_cpuinfo_path, "r");
 	if (cpu_info_file == NULL) {
 		error("get_speed: error %d opening %s", errno, _cpuinfo_path);
 		return errno;
-	} 
-
-	while (fgets(buffer, sizeof(buffer), cpu_info_file) != NULL) {
-		_chk_cpuinfo_float(buffer, "cpu MHz", speed);
-	} 
-
-	fclose(cpu_info_file);
-#endif
-	return 0;
-} 
-
-#endif
-
-/*
- * get_cpuinfo - Return detailed cpuinfo on this system 
- * Input:  numproc - number of processors on the system
- * Output: p_sockets - number of physical processor sockets
- *         p_cores - total number of physical CPU cores
- *         p_threads - total number of hardware execution threads
- *         block_map - asbtract->physical block distribution map 
- *         block_map_inv - physical->abstract block distribution map (inverse)
- *         return code - 0 if no error, otherwise errno
- * NOTE: User must xfree block_map and block_map_inv  
- */
-typedef struct cpuinfo {
-	uint16_t seen;
-	uint32_t cpuid;
-	uint32_t physid;
-	uint16_t physcnt;
-	uint32_t coreid;
-	uint16_t corecnt;
-	uint16_t siblings;
-	uint16_t cores;
-} cpuinfo_t;
-static cpuinfo_t *cpuinfo = NULL; /* array of CPU information for get_cpuinfo */
-				  /* Note: file static for qsort/_compare_cpus*/
-extern int
-get_cpuinfo(uint16_t numproc,
-		uint16_t *p_sockets, uint16_t *p_cores, uint16_t *p_threads,
-		uint16_t *block_map_size,
-		uint16_t **block_map, uint16_t **block_map_inv)
-{
-	int retval;
-	uint16_t numcpu	   = 0;		/* number of cpus seen */
-	uint16_t numphys   = 0;		/* number of unique "physical id"s */
-	uint16_t numcores  = 0;		/* number of unique "cores id"s */
-
-	uint16_t maxsibs   = 0;		/* maximum value of "siblings" */
-	uint16_t maxcores  = 0;		/* maximum value of "cores" */
-	uint16_t minsibs   = 0xffff;	/* minimum value of "siblings" */
-	uint16_t mincores  = 0xffff;	/* minimum value of "cores" */
-
-	uint32_t maxcpuid  = 0;		/* maximum CPU ID ("processor") */
-	uint32_t maxphysid = 0;		/* maximum "physical id" */
-	uint32_t maxcoreid = 0;		/* maximum "core id" */
-	uint32_t mincpuid  = 0xffffffff;/* minimum CPU ID ("processor") */
-	uint32_t minphysid = 0xffffffff;/* minimum "physical id" */
-	uint32_t mincoreid = 0xffffffff;/* minimum "core id" */
-	int i;
-#if defined (__sun)
-#if defined (_LP64)
-	int64_t curcpu, val, sockets, cores, threads;
-#else
-	int32_t curcpu, val, sockets, cores, threads;
-#endif
-	int32_t chip_id, core_id, ncore_per_chip, ncpu_per_chip;
-#else
-	FILE *cpu_info_file;
-	char buffer[128];
-	uint16_t curcpu, sockets, cores, threads;
-#endif
-
-	*p_sockets = numproc;		/* initially all single core/thread */
-	*p_cores   = 1;
-	*p_threads = 1;
-	*block_map_size = 0;
-	*block_map      = NULL;
-	*block_map_inv  = NULL;
-
-#if defined (__sun)
-	kstat_ctl_t   *kc;
-	kstat_t       *ksp;
-	kstat_named_t *knp;
-
-	kc = kstat_open();
-	if (kc == NULL) {
-		error ("get speed: kstat error %d", errno);
-		return errno;
-	}
-#else
-	cpu_info_file = fopen(_cpuinfo_path, "r");
-	if (cpu_info_file == NULL) {
-		error ("get_cpuinfo: error %d opening %s", 
-			errno, _cpuinfo_path);
-		return errno;
-	}
-#endif
-
-	/* Note: assumes all processor IDs are within [0:numproc-1] */
-	/*       treats physical/core IDs as tokens, not indices */
-	if (cpuinfo)
-		memset(cpuinfo, 0, numproc * sizeof(cpuinfo_t));
-	else
-		cpuinfo = xmalloc(numproc * sizeof(cpuinfo_t));
-
-#if defined (__sun)
-	ksp = kstat_lookup(kc, "cpu_info", -1, NULL);
-	for (; ksp != NULL; ksp = ksp->ks_next) {
-		if (strcmp(ksp->ks_module, "cpu_info"))
-			continue;
-
-		numcpu++;
-		kstat_read(kc, ksp, NULL);
-
-		knp = kstat_data_lookup(ksp, "chip_id");
-		chip_id = knp->value.l;
-		knp = kstat_data_lookup(ksp, "core_id");
-		core_id = knp->value.l;
-		knp = kstat_data_lookup(ksp, "ncore_per_chip");
-		ncore_per_chip = knp->value.l;
-		knp = kstat_data_lookup(ksp, "ncpu_per_chip");
-		ncpu_per_chip = knp->value.l;
-
-		if (chip_id >= numproc) {
-			debug("cpuid is %ld (> %d), ignored", curcpu, numproc);
-			continue;
-		}
-
-		cpuinfo[chip_id].seen = 1;
-		cpuinfo[chip_id].cpuid = chip_id;
-
-		maxcpuid = MAX(maxcpuid, chip_id);
-		mincpuid = MIN(mincpuid, chip_id);
-
-		for (i = 0; i < numproc; i++) {
-			if ((cpuinfo[i].coreid == core_id) &&
-			    (cpuinfo[i].corecnt))
-				break;
-		}
-
-		if (i == numproc) {
-			numcores++;
-		} else {
-			cpuinfo[i].corecnt++;
-		}
-
-		if (chip_id < numproc) {
-			cpuinfo[chip_id].corecnt++;
-			cpuinfo[chip_id].coreid = core_id;
-		}
-
-		maxcoreid = MAX(maxcoreid, core_id);
-		mincoreid = MIN(mincoreid, core_id);
-
-		if (ncore_per_chip > numproc) {
-			debug("cores is %u (> %d), ignored",
-			      ncore_per_chip, numproc);
-				continue;
-		}
-
-		if (chip_id < numproc)
-			cpuinfo[chip_id].cores = ncore_per_chip;
-
-		maxcores = MAX(maxcores, ncore_per_chip);
-		mincores = MIN(mincores, ncore_per_chip);
 	}
-#else
 
-	curcpu = 0;
 	while (fgets(buffer, sizeof(buffer), cpu_info_file) != NULL) {
-		uint32_t val;
-		if (_chk_cpuinfo_uint32(buffer, "processor", &val)) {
-			numcpu++;
-			curcpu = val;
-		    	if (val >= numproc) {	/* out of bounds, ignore */
-				debug("cpuid is %u (> %d), ignored", 
-					val, numproc);
-				continue;
-			}
-			cpuinfo[val].seen = 1;
-			cpuinfo[val].cpuid = val;
-			maxcpuid = MAX(maxcpuid, val);
-			mincpuid = MIN(mincpuid, val);
-		} else if (_chk_cpuinfo_uint32(buffer, "physical id", &val)) {
-			/* see if the ID has already been seen */
-			for (i=0; i<numproc; i++) {
-				if ((cpuinfo[i].physid == val)
-				&&  (cpuinfo[i].physcnt))
-					break;
-			}
-
-			if (i == numproc) {		/* new ID... */
-				numphys++;		/* ...increment total */
-			} else {			/* existing ID... */
-				cpuinfo[i].physcnt++;	/* ...update ID cnt */
-			}
-
-			if (curcpu < numproc) {
-				cpuinfo[curcpu].physcnt++;
-				cpuinfo[curcpu].physid = val;
-			}
-
-			maxphysid = MAX(maxphysid, val);
-			minphysid = MIN(minphysid, val);
-		} else if (_chk_cpuinfo_uint32(buffer, "core id", &val)) {
-			/* see if the ID has already been seen */
-			for (i = 0; i < numproc; i++) {
-				if ((cpuinfo[i].coreid == val)
-				&&  (cpuinfo[i].corecnt))
-					break;
-			}
-
-			if (i == numproc) {		/* new ID... */
-				numcores++;		/* ...increment total */
-			} else {			/* existing ID... */
-				cpuinfo[i].corecnt++;	/* ...update ID cnt */
-			}
-
-			if (curcpu < numproc) {
-				cpuinfo[curcpu].corecnt++;
-				cpuinfo[curcpu].coreid = val;
-			}
-
-			maxcoreid = MAX(maxcoreid, val);
-			mincoreid = MIN(mincoreid, val);
-		} else if (_chk_cpuinfo_uint32(buffer, "siblings", &val)) {
-			/* Note: this value is a count, not an index */
-		    	if (val > numproc) {	/* out of bounds, ignore */
-				debug("siblings is %u (> %d), ignored",
-					val, numproc);
-				continue;
-			}
-			if (curcpu < numproc)
-				cpuinfo[curcpu].siblings = val;
-			maxsibs = MAX(maxsibs, val);
-			minsibs = MIN(minsibs, val);
-		} else if (_chk_cpuinfo_uint32(buffer, "cpu cores", &val)) {
-			/* Note: this value is a count, not an index */
-		    	if (val > numproc) {	/* out of bounds, ignore */
-				debug("cores is %u (> %d), ignored",
-					val, numproc);
-				continue;
-			}
-			if (curcpu < numproc)
-				cpuinfo[curcpu].cores = val;
-			maxcores = MAX(maxcores, val);
-			mincores = MIN(mincores, val);
-		}
+		_chk_cpuinfo_float(buffer, "cpu MHz", speed);
 	}
 
 	fclose(cpu_info_file);
 #endif
-
-	/*** Sanity check ***/
-	if (minsibs == 0) minsibs = 1;		/* guaranteee non-zero */
-	if (maxsibs == 0) {
-	    	minsibs = 1;
-	    	maxsibs = 1;
-	}
-	if (maxcores == 0) {			/* no core data */
-	    	mincores = 0;
-	    	maxcores = 0;
-	}
-
-	/*** Compute Sockets/Cores/Threads ***/
-	if ((minsibs == maxsibs) &&		/* homogeneous system */
-	    (mincores == maxcores)) {
-		sockets = numphys; 		/* unique "physical id" */
-		if (sockets <= 1) {		/* verify single socket */
-			sockets = numcpu / maxsibs; /* maximum "siblings" */
-		}
-		if (sockets == 0)
-			sockets = 1;		/* guarantee non-zero */
-	
-		cores = numcores / sockets;	/* unique "core id" */
-		cores = MAX(maxcores, cores);	/* maximum "cpu cores" */
-	
-		if (cores == 0) {
-			cores = numcpu / sockets;	/* assume multi-core */
-			if (cores > 1) {
-				debug3("Warning: cpuinfo missing 'core id' or "
-					"'cpu cores' but assuming multi-core");
-			}
-		}
-		if (cores == 0)
-			cores = 1;	/* guarantee non-zero */
-	
-		threads = numcpu / (sockets * cores); /* solve for threads */
-		if (threads == 0)
-			threads = 1;	/* guarantee non-zero */
-	} else {				/* heterogeneous system */
-		sockets = numcpu;
-		cores   = 1;			/* one core per socket */
-		threads = 1;			/* one core per core */
-	}
-
-	*p_sockets = sockets;		/* update output parameters */
-	*p_cores   = cores;
-	*p_threads = threads;
-
-#if DEBUG_DETAIL
-	/*** Display raw data ***/
-	debug3("");
-	debug3("numcpu:     %u", numcpu);
-	debug3("numphys:    %u", numphys);
-	debug3("numcores:   %u", numcores);
-
-	debug3("cores:      %u->%u", mincores, maxcores);
-	debug3("sibs:       %u->%u", minsibs,  maxsibs);
-
-	debug3("cpuid:      %u->%u", mincpuid,  maxcpuid);
-	debug3("physid:     %u->%u", minphysid, maxphysid);
-	debug3("coreid:     %u->%u", mincoreid, maxcoreid);
-
-	for (i = 0; i <= maxcpuid; i++) {
-		debug3("CPU %d:", i);
-		debug3(" seen:     %u", cpuinfo[i].seen);
-		debug3(" physid:   %u", cpuinfo[i].physid);
-		debug3(" physcnt:  %u", cpuinfo[i].physcnt);
-		debug3(" siblings: %u", cpuinfo[i].siblings);
-		debug3(" cores:    %u", cpuinfo[i].cores);
-		debug3(" coreid:   %u", cpuinfo[i].coreid);
-		debug3(" corecnt:  %u", cpuinfo[i].corecnt);
-		debug3("");
-	}
-
-	debug3("");
-	debug3("Sockets:          %u", sockets);
-	debug3("Cores per socket: %u", cores);
-	debug3("Threads per core: %u", threads);
-#endif
-
-	*block_map_size = numcpu;
-	retval = _compute_block_map(*block_map_size, block_map, block_map_inv);
-
-	xfree(cpuinfo);		/* done with raw cpuinfo data */
-
-	return retval;
-}
-
-/*
- * _compute_block_map - Compute abstract->machine block mapping (and inverse)
- *   allows computation of CPU ID masks for an abstract block distribution
- *   of logical processors which can then be mapped the IDs used in the
- *   actual machine processor ID ordering (which can be BIOS/OS dependendent)
- * Input:  numproc - number of processors on the system
- *	   cpu - array of cpuinfo (file static for qsort/_compare_cpus)
- * Output: block_map, block_map_inv - asbtract->physical block distribution map 
- *         return code - 0 if no error, otherwise errno
- * NOTE: User must free block_map and block_map_inv
- *
- * For example, given a system with 8 logical processors arranged as:
- *
- *	Sockets:          4
- *	Cores per socket: 2
- *	Threads per core: 1
- *
- * and a logical CPU ID assignment of:
- *
- *	Machine logical CPU ID assignment:
- *	Logical CPU ID:        0  1  2  3  4  5  6  7
- *	Physical Socket ID:    0  1  3  2  0  1  3  2
- *
- * The block_map would be:
- *
- *	Abstract -> Machine logical CPU ID block mapping:
- *	Input: (Abstract ID)   0  1  2  3  4  5  6  7
- *	Output: (Machine ID)   0  4  1  5  3  7  2  6  <--- block_map[]
- *	Physical Socket ID:    0  0  1  1  2  2  3  3
- *
- * and it's inverse would be:
- *
- *	Machine -> Abstract logical CPU ID block mapping: (inverse)
- *	Input: (Machine ID)    0  1  2  3  4  5  6  7
- *	Output: (Abstract ID)  0  2  6  4  1  3  7  5  <--- block_map_inv[]
- *	Physical Socket ID:    0  1  3  2  0  1  3  2
- */
-
-/* physical cpu comparison with void * arguments to allow use with
- * libc qsort()
- */
-static int _icmp16(uint16_t a, uint16_t b)
-{
-    	if (a < b) {
-		return -1;
-	} else if (a == b) {
-		return 0;
-	} else {
-		return 1;
-	}
-}
-static int _icmp32(uint32_t a, uint32_t b)
-{
-	if (a < b) {
-		return -1;
-	} else if (a == b) {
-		return 0;
-	} else {
-		return 1;
-	}
-}
-
-static int _compare_cpus(const void *a1, const void *b1) {
-	uint16_t *a = (uint16_t *) a1;
-	uint16_t *b = (uint16_t *) b1;
-	int cmp;
-
-	cmp = -1 * _icmp16(cpuinfo[*a].seen,cpuinfo[*b].seen); /* seen to front */
-	if (cmp != 0)
-		return cmp;
-
-	cmp = _icmp32(cpuinfo[*a].physid, cpuinfo[*b].physid); /* key 1: physid */
-	if (cmp != 0)
-		return cmp;
-
-	cmp = _icmp32(cpuinfo[*a].coreid, cpuinfo[*b].coreid); /* key 2: coreid */
-	if (cmp != 0)
-		return cmp;
-
-	cmp = _icmp32(cpuinfo[*a].cpuid, cpuinfo[*b].cpuid);   /* key 3: cpu id */
-	return cmp;
-}
-
-static int _compute_block_map(uint16_t numproc,
-			      uint16_t **block_map, uint16_t **block_map_inv)
-{
-	uint16_t i;
-	/* Compute abstract->machine block mapping (and inverse) */
-	if (block_map) {
-		*block_map = xmalloc(numproc * sizeof(uint16_t));
-		for (i = 0; i < numproc; i++) {
-			(*block_map)[i] = i;
-		}
-		qsort(*block_map, numproc, sizeof(uint16_t), &_compare_cpus);
-	}
-	if (block_map_inv) {
-		*block_map_inv = xmalloc(numproc * sizeof(uint16_t));
-		for (i = 0; i < numproc; i++) {
-			uint16_t idx = (*block_map)[i];
-			(*block_map_inv)[idx] = i;
-		}
-	}
-
-#if DEBUG_DETAIL
-	/* Display the mapping tables */
-
-	debug3("\nMachine logical CPU ID assignment:");
-	debug3("Logical CPU ID:      ");
-	for (i = 0; i < numproc; i++) {
-		debug3("%3d", i);
-	}
-	debug3("");
-	debug3("Physical Socket ID:  ");
-	for (i = 0; i < numproc; i++) {
-		debug3("%3u", cpuinfo[i].physid);
-	}
-	debug3("");
-
-	if (block_map) {
-		debug3("\nAbstract -> Machine logical CPU ID block mapping:");
-		debug3("Input: (Abstract ID) ");
-		for (i = 0; i < numproc; i++) {
-			debug3("%3d", i);
-		}
-		debug3("");
-		debug3("Output: (Machine ID) ");
-		for (i = 0; i < numproc; i++) {
-			debug3("%3u", (*block_map)[i]);
-		}
-		debug3("");
-		debug3("Physical Socket ID:  ");
-		for (i = 0; i < numproc; i++) {
-			uint16_t id = (*block_map)[i];
-			debug3("%3u", cpuinfo[id].physid);
-		}
-		debug3("");
-	}
-
-	if (block_map_inv) {
-		debug3("\nMachine -> Abstract logical CPU ID block mapping: "
-			"(inverse)");
-		debug3("Input: (Machine ID)  ");
-		for (i = 0; i < numproc; i++) {
-			debug3("%3d", i);
-		}
-		debug3("");
-		debug3("Output: (Abstract ID)");
-		for (i = 0; i < numproc; i++) {
-			debug3("%3u", (*block_map_inv)[i]);
-		}
-		debug3("");
-		debug3("Physical Socket ID:  ");
-		for (i = 0; i < numproc; i++) {
-			debug3("%3u", cpuinfo[i].physid);
-		}
-		debug3("");
-	}
-#endif
 	return 0;
 }
 
+#endif
 
diff --git a/src/slurmd/slurmd/get_mach_stat.h b/src/slurmd/slurmd/get_mach_stat.h
index aca63cdde..5a835395f 100644
--- a/src/slurmd/slurmd/get_mach_stat.h
+++ b/src/slurmd/slurmd/get_mach_stat.h
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -51,11 +51,6 @@
 #  include <inttypes.h>
 #endif  /*  HAVE_CONFIG_H */
 
-extern int get_procs(uint16_t *procs);
-extern int get_cpuinfo(uint16_t numproc,
-		       uint16_t *sockets, uint16_t *cores, uint16_t *threads,
-		       uint16_t *block_map_size,
-		       uint16_t **block_map, uint16_t **block_map_inv);
 extern int get_mach_name(char *node_name);
 extern int get_memory(uint32_t *real_memory);
 extern int get_tmp_disk(uint32_t *tmp_disk, char *tmp_fs);
diff --git a/src/slurmd/slurmd/read_proc.c b/src/slurmd/slurmd/read_proc.c
index 10bec316b..c8d786127 100644
--- a/src/slurmd/slurmd/read_proc.c
+++ b/src/slurmd/slurmd/read_proc.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -221,6 +221,8 @@ parse_proc_stat(char* proc_stat, int *session, unsigned long *time,
 		&start_stack, &kstk_esp, &kstk_eip,
 /*		&signal, &blocked, &sig_ignore, &sig_catch, */ /* can't use */
 		&w_chan, &n_swap, &sn_swap /* , &Exit_signal  */, &l_proc);
+	if (num < 13)
+		error("/proc entry too short (%s)", proc_stat);
 	*time = (utime + stime) / hertz;
 	return 0;
 }
diff --git a/src/slurmd/slurmd/req.c b/src/slurmd/slurmd/req.c
index bfe78687b..c3f4ac825 100644
--- a/src/slurmd/slurmd/req.c
+++ b/src/slurmd/slurmd/req.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -260,6 +260,12 @@ slurmd_req(slurm_msg_t *msg)
 		_rpc_terminate_tasks(msg);
 		slurm_free_kill_tasks_msg(msg->data);
 		break;
+	case REQUEST_KILL_PREEMPTED:
+		debug2("Processing RPC: REQUEST_KILL_PREEMPTED");
+		last_slurmctld_msg = time(NULL);
+		_rpc_timelimit(msg);
+		slurm_free_timelimit_msg(msg->data);
+		break;
 	case REQUEST_KILL_TIMELIMIT:
 		debug2("Processing RPC: REQUEST_KILL_TIMELIMIT");
 		last_slurmctld_msg = time(NULL);
@@ -916,7 +922,9 @@ _rpc_launch_tasks(slurm_msg_t *msg)
 	uid_t    req_uid;
 	launch_tasks_request_msg_t *req = msg->data;
 	bool     super_user = false;
+#ifndef HAVE_FRONT_END
 	bool     first_job_run;
+#endif
 	slurm_addr_t self;
 	slurm_addr_t *cli = &msg->orig_addr;
 	socklen_t adlen;
@@ -947,7 +955,9 @@ _rpc_launch_tasks(slurm_msg_t *msg)
 	env_array_overwrite(&req->env, "SLURM_SRUN_COMM_HOST", host);
 	req->envc = envcount(req->env);
 
+#ifndef HAVE_FRONT_END
 	first_job_run = !slurm_cred_jobid_cached(conf->vctx, req->job_id);
+#endif
 	if (_check_job_credential(req, req_uid, nodeid, &step_hset) < 0) {
 		errnum = errno;
 		error("Invalid job credential from %ld@%s: %m",
@@ -1162,10 +1172,11 @@ _set_batch_job_limits(slurm_msg_t *msg)
 			last_bit = arg.sockets_per_node[0] *
 				   arg.cores_per_socket[0];
 			for (i=0; i<last_bit; i++) {
-				if (bit_test(arg.job_core_bitmap, i)) {
+				if (!bit_test(arg.job_core_bitmap, i))
+					continue;
+				if (cpu_log)
 					info("JobNode[0] CPU[%u] Job alloc",i);
-					alloc_lps++;
-				}
+				alloc_lps++;
 			}
 		}
 		if (cpu_log)
@@ -1252,11 +1263,10 @@ _rpc_batch_job(slurm_msg_t *msg)
 		/*
 	 	 * Run job prolog on this node
 	 	 */
-#ifdef HAVE_BG
+#if defined(HAVE_BG)
 		select_g_select_jobinfo_get(req->select_jobinfo,
 					    SELECT_JOBDATA_BLOCK_ID, &resv_id);
-#endif
-#ifdef HAVE_CRAY
+#elif defined(HAVE_CRAY)
 		resv_id = select_g_select_jobinfo_xstrdup(req->select_jobinfo,
 					    SELECT_PRINT_RESV_ID);
 #endif
@@ -1847,7 +1857,8 @@ _signal_jobstep(uint32_t jobid, uint32_t stepid, uid_t req_uid,
 #  endif
 #endif
 
-	if ((signal == SIG_TIME_LIMIT) || (signal == SIG_DEBUG_WAKE)) {
+	if ((signal == SIG_PREEMPTED) || (signal == SIG_TIME_LIMIT) ||
+	    (signal == SIG_DEBUG_WAKE)) {
 		rc = stepd_signal_container(fd, signal);
 	} else {
 		rc = stepd_signal(fd, signal);
@@ -2262,14 +2273,20 @@ _rpc_timelimit(slurm_msg_t *msg)
 		slurm_ctl_conf_t *cf;
 		int delay;
 		/* A jobstep has timed out:
-		 * - send the container a SIG_TIME_LIMIT to note the occasion
+		 * - send the container a SIG_TIME_LIMIT or SIG_PREEMPTED
+		 *   to log the event
 		 * - send a SIGCONT to resume any suspended tasks
 		 * - send a SIGTERM to begin termination
 		 * - sleep KILL_WAIT
 		 * - send a SIGKILL to clean up
 		 */
-		rc = _signal_jobstep(req->job_id, req->step_id, uid,
-				     SIG_TIME_LIMIT);
+		if (msg->msg_type == REQUEST_KILL_TIMELIMIT) {
+			rc = _signal_jobstep(req->job_id, req->step_id, uid,
+					     SIG_TIME_LIMIT);
+		} else {
+			rc = _signal_jobstep(req->job_id, req->step_id, uid,
+					     SIG_PREEMPTED);
+		}
 		if (rc != SLURM_SUCCESS)
 			return;
 		rc = _signal_jobstep(req->job_id, req->step_id, uid, SIGCONT);
@@ -2286,7 +2303,10 @@ _rpc_timelimit(slurm_msg_t *msg)
 		return;
 	}
 
-	_kill_all_active_steps(req->job_id, SIG_TIME_LIMIT, true);
+	if (msg->msg_type == REQUEST_KILL_TIMELIMIT)
+		_kill_all_active_steps(req->job_id, SIG_TIME_LIMIT, true);
+	else /* (msg->type == REQUEST_KILL_PREEMPTED) */
+		_kill_all_active_steps(req->job_id, SIG_PREEMPTED, true);
 	nsteps = xcpu_signal(SIGTERM, req->nodes) +
 		_kill_all_active_steps(req->job_id, SIGTERM, false);
 	verbose( "Job %u: timeout: sent SIGTERM to %d active steps",
@@ -2844,7 +2864,7 @@ _steps_completed_now(uint32_t jobid)
 }
 
 /*
- *  Send epilog complete message to currently active comtroller.
+ *  Send epilog complete message to currently active controller.
  *   Returns SLURM_SUCCESS if message sent successfully,
  *           SLURM_FAILURE if epilog complete message fails to be sent.
  */
@@ -3237,12 +3257,11 @@ _rpc_abort_job(slurm_msg_t *msg)
 	}
 
 	save_cred_state(conf->vctx);
-#ifdef HAVE_BG
+#if defined(HAVE_BG)
 	select_g_select_jobinfo_get(req->select_jobinfo,
 				    SELECT_JOBDATA_BLOCK_ID,
 				    &resv_id);
-#endif
-#ifdef HAVE_CRAY
+#elif defined(HAVE_CRAY)
 	resv_id = select_g_select_jobinfo_xstrdup(req->select_jobinfo,
 				    SELECT_PRINT_RESV_ID);
 #endif
@@ -3426,12 +3445,11 @@ _rpc_terminate_job(slurm_msg_t *msg)
 
 	save_cred_state(conf->vctx);
 
-#ifdef HAVE_BG
+#if defined(HAVE_BG)
 	select_g_select_jobinfo_get(req->select_jobinfo,
 				    SELECT_JOBDATA_BLOCK_ID,
 				    &resv_id);
-#endif
-#ifdef HAVE_CRAY
+#elif defined(HAVE_CRAY)
 	resv_id = select_g_select_jobinfo_xstrdup(req->select_jobinfo,
 				    SELECT_PRINT_RESV_ID);
 #endif
@@ -3597,9 +3615,10 @@ static bool
 _pause_for_job_completion (uint32_t job_id, char *nodes, int max_time)
 {
 	int sec = 0;
+	int pause = 1;
 	bool rc = false;
 
-	while ((sec++ < max_time) || (max_time == 0)) {
+	while ((sec < max_time) || (max_time == 0)) {
 		rc = (_job_still_running (job_id) ||
 			xcpu_signal(0, nodes));
 		if (!rc)
@@ -3608,12 +3627,15 @@ _pause_for_job_completion (uint32_t job_id, char *nodes, int max_time)
 			xcpu_signal(SIGKILL, nodes);
 			_terminate_all_steps(job_id, true);
 		}
-		if (sec < 10)
-			sleep(1);
-		else {
-			/* Reduce logging about unkillable tasks */
-			sleep(60);
+		if (sec > 10) {
+			/* Reduce logging frequency about unkillable tasks */
+			if (max_time)
+				pause = MIN((max_time - sec), 10);
+			else
+				pause = 10;
 		}
+		sleep(pause);
+		sec += pause;
 	}
 
 	/*
@@ -3675,14 +3697,13 @@ _build_env(uint32_t jobid, uid_t uid, char *resv_id,
 	setenvf(&env, "SLURM_JOBID", "%u", jobid);
 	setenvf(&env, "SLURM_UID",   "%u", uid);
 	if (resv_id) {
-#ifdef HAVE_BG
+#if defined(HAVE_BG)
 		setenvf(&env, "MPIRUN_PARTITION", "%s", resv_id);
 # ifdef HAVE_BGP
 		/* Needed for HTC jobs */
 		setenvf(&env, "SUBMIT_POOL", "%s", resv_id);
 # endif
-#endif
-#ifdef HAVE_CRAY
+#elif defined(HAVE_CRAY)
 		setenvf(&env, "BASIL_RESERVATION_ID", "%s", resv_id);
 #endif
 	}
@@ -3999,7 +4020,7 @@ init_gids_cache(int cache)
 	gids_t *gids;
 #ifdef HAVE_AIX
 	FILE *fp = NULL;
-#elif defined (__APPLE__)
+#elif defined (__APPLE__) || defined (__CYGWIN__)
 #else
 	struct passwd pw;
 	char buf[BUF_SIZE];
@@ -4029,7 +4050,7 @@ init_gids_cache(int cache)
 	setpwent();
 #if defined (__sun)
 	while ((pwd = getpwent_r(&pw, buf, BUF_SIZE)) != NULL) {
-#elif defined (__APPLE__)
+#elif defined (__APPLE__) || defined (__CYGWIN__)
 	while ((pwd = getpwent()) != NULL) {
 #else
 
diff --git a/src/slurmd/slurmd/req.h b/src/slurmd/slurmd/req.h
index e3b12b428..0ec5f45c6 100644
--- a/src/slurmd/slurmd/req.h
+++ b/src/slurmd/slurmd/req.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/slurmd/slurmd/reverse_tree_math.c b/src/slurmd/slurmd/reverse_tree_math.c
index c7d627b97..73f5757b6 100644
--- a/src/slurmd/slurmd/reverse_tree_math.c
+++ b/src/slurmd/slurmd/reverse_tree_math.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/slurmd/slurmd/reverse_tree_math.h b/src/slurmd/slurmd/reverse_tree_math.h
index b65e9537d..454962798 100644
--- a/src/slurmd/slurmd/reverse_tree_math.h
+++ b/src/slurmd/slurmd/reverse_tree_math.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/slurmd/slurmd/slurmd.c b/src/slurmd/slurmd/slurmd.c
index d3c133eef..a17311da9 100644
--- a/src/slurmd/slurmd/slurmd.c
+++ b/src/slurmd/slurmd/slurmd.c
@@ -10,7 +10,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -85,6 +85,7 @@
 #include "src/common/stepd_api.h"
 #include "src/common/switch.h"
 #include "src/slurmd/common/task_plugin.h"
+#include "src/common/xcpuinfo.h"
 #include "src/common/xmalloc.h"
 #include "src/common/xstring.h"
 #include "src/common/xsignal.h"
@@ -173,6 +174,10 @@ main (int argc, char *argv[])
 	uint32_t slurmd_uid = 0;
 	uint32_t curr_uid = 0;
 	char time_stamp[256];
+	log_options_t lopts = LOG_OPTS_INITIALIZER;
+
+	/* NOTE: logfile is NULL at this point */
+	log_init(argv[0], lopts, LOG_DAEMON, NULL);
 
 	/*
 	 * Make sure we have no extra open files which
@@ -202,22 +207,15 @@ main (int argc, char *argv[])
 	conf->argv = &argv;
 	conf->argc = &argc;
 
-	/*
-	 * Process commandline arguments first, since one option may be
-	 * an alternate location for the slurm config file.
-	 */
-	_process_cmdline(*conf->argc, *conf->argv);
-
-	/*
-	 * Read global slurm config file, ovverride necessary values from
-	 * defaults and command line.
-	 */
-	_read_config();
-	/* we should load config file _before_ analyzing SlurmUser below */
+	if (_slurmd_init() < 0) {
+		error( "slurmd initialization failed" );
+		fflush( NULL );
+		exit(1);
+	}
 
 	slurmd_uid = slurm_get_slurmd_user_id();
 	curr_uid = getuid();
-	if(curr_uid != slurmd_uid) {
+	if (curr_uid != slurmd_uid) {
 		struct passwd *pw = NULL;
 		char *slurmd_user = NULL;
 		char *curr_user = NULL;
@@ -241,26 +239,11 @@ main (int argc, char *argv[])
 	}
 	init_setproctitle(argc, argv);
 
-	if (slurm_select_init(1) != SLURM_SUCCESS )
-		fatal( "failed to initialize node selection plugin" );
-
-	log_init(argv[0], conf->log_opts, LOG_DAEMON, conf->logfile);
-
 	xsignal(SIGTERM, &_term_handler);
 	xsignal(SIGINT,  &_term_handler);
 	xsignal(SIGHUP,  &_hup_handler );
 	xsignal_block(blocked_signals);
 
-	/*
-	 * Run slurmd_init() here in order to report early errors
-	 * (with public keyfile)
-	 */
-	if (_slurmd_init() < 0) {
-		error( "slurmd initialization failed" );
-		fflush( NULL );
-		exit(1);
-	}
-
 	debug3("slurmd initialization successful");
 
 	/*
@@ -717,9 +700,10 @@ _read_config(void)
 {
 	char *path_pubkey = NULL;
 	slurm_ctl_conf_t *cf = NULL;
+#ifndef HAVE_FRONT_END
 	bool cr_flag = false, gang_flag = false;
+#endif
 
-	slurm_conf_reinit(conf->conffile);
 	cf = slurm_conf_lock();
 
 	slurm_mutex_lock(&conf->config_mutex);
@@ -736,10 +720,12 @@ _read_config(void)
 	if (!conf->logfile)
 		conf->logfile = xstrdup(cf->slurmd_logfile);
 
+#ifndef HAVE_FRONT_END
 	if (!strcmp(cf->select_type, "select/cons_res"))
 		cr_flag = true;
 	if (cf->preempt_mode & PREEMPT_MODE_GANG)
 		gang_flag = true;
+#endif
 
 	slurm_conf_unlock();
 	/* node_name may already be set from a command line parameter */
@@ -779,7 +765,7 @@ _read_config(void)
 
 	_update_logging();
 	_update_nice();
-		
+
 	get_procs(&conf->actual_cpus);
 	get_cpuinfo(conf->actual_cpus,
 		    &conf->actual_sockets,
@@ -787,8 +773,19 @@ _read_config(void)
 		    &conf->actual_threads,
 		    &conf->block_map_size,
 		    &conf->block_map, &conf->block_map_inv);
-
-	if (((cf->fast_schedule == 0) && !cr_flag && !gang_flag) || 
+#ifdef HAVE_FRONT_END
+	/*
+	 * When running with multiple frontends, the slurmd S:C:T values are not
+	 * relevant, hence ignored by both _register_front_ends (sets all to 1)
+	 * and validate_nodes_via_front_end (uses slurm.conf values).
+	 * Report actual hardware configuration, irrespective of FastSchedule.
+	 */
+	conf->cpus    = conf->actual_cpus;
+	conf->sockets = conf->actual_sockets;
+	conf->cores   = conf->actual_cores;
+	conf->threads = conf->actual_threads;
+#else
+	if (((cf->fast_schedule == 0) && !cr_flag && !gang_flag) ||
 	    ((cf->fast_schedule == 1) &&
 	     (conf->actual_cpus < conf->conf_cpus))) {
 		conf->cpus    = conf->actual_cpus;
@@ -801,12 +798,13 @@ _read_config(void)
 		conf->cores   = conf->conf_cores;
 		conf->threads = conf->conf_threads;
 	}
+#endif
 
-	if(cf->fast_schedule &&
-	   ((conf->cpus    != conf->actual_cpus)    ||
-	    (conf->sockets != conf->actual_sockets) ||
-	    (conf->cores   != conf->actual_cores)   ||
-	    (conf->threads != conf->actual_threads))) {
+	if (cf->fast_schedule &&
+	    ((conf->cpus    != conf->actual_cpus)    ||
+	     (conf->sockets != conf->actual_sockets) ||
+	     (conf->cores   != conf->actual_cores)   ||
+	     (conf->threads != conf->actual_threads))) {
 		info("Node configuration differs from hardware\n"
 		     "   Procs=%u:%u(hw) Sockets=%u:%u(hw)\n"
 		     "   CoresPerSocket=%u:%u(hw) ThreadsPerCore=%u:%u(hw)",
@@ -864,6 +862,7 @@ _reconfigure(void)
 	bool did_change;
 
 	_reconfig = 0;
+	slurm_conf_reinit(conf->conffile);
 	_read_config();
 
 	/*
@@ -872,7 +871,6 @@ _reconfigure(void)
 	slurm_topo_build_config();
 	_set_topo_info();
 
-	/* _update_logging(); */
 	_print_conf();
 
 	/*
@@ -1193,19 +1191,40 @@ _slurmd_init(void)
 	char slurm_stepd_path[MAXPATHLEN];
 	uint32_t cpu_cnt;
 
-	cpu_cnt = MAX(conf->conf_cpus, conf->block_map_size);
-	if ((gres_plugin_init() != SLURM_SUCCESS) ||
-	    (gres_plugin_node_config_load(cpu_cnt) != SLURM_SUCCESS))
-		return SLURM_FAILURE;
-	if (slurm_topo_init() != SLURM_SUCCESS)
-		return SLURM_FAILURE;
+	/*
+	 * Process commandline arguments first, since one option may be
+	 * an alternate location for the slurm config file.
+	 */
+	_process_cmdline(*conf->argc, *conf->argv);
 
 	/*
 	 * Build nodes table like in slurmctld
 	 * This is required by the topology stack
+	 * Node tables setup must preceed _read_config() so that the
+	 * proper hostname is set.
 	 */
+	slurm_conf_init(conf->conffile);
 	init_node_conf();
+	/* slurm_select_init() must be called before
+	 * build_all_nodeline_info() to be called with proper argument. */
+	if (slurm_select_init(1) != SLURM_SUCCESS )
+		return SLURM_FAILURE;
 	build_all_nodeline_info(true);
+	build_all_frontend_info(true);
+
+	/*
+	 * Read global slurm config file, override necessary values from
+	 * defaults and command line.
+	 */
+	_read_config();
+
+	cpu_cnt = MAX(conf->conf_cpus, conf->block_map_size);
+
+	if ((gres_plugin_init() != SLURM_SUCCESS) ||
+	    (gres_plugin_node_config_load(cpu_cnt) != SLURM_SUCCESS))
+		return SLURM_FAILURE;
+	if (slurm_topo_init() != SLURM_SUCCESS)
+		return SLURM_FAILURE;
 
 	/*
 	 * Get and set slurmd topology information
@@ -1213,12 +1232,6 @@ _slurmd_init(void)
 	slurm_topo_build_config();
 	_set_topo_info();
 
-	/*
-	 * Update location of log messages (syslog, stderr, logfile, etc.),
-	 * print current configuration (if in debug mode), and
-	 * load appropriate plugin(s).
-	 */
-	/* _update_logging(); */
 	_print_conf();
 	if (slurm_proctrack_init() != SLURM_SUCCESS)
 		return SLURM_FAILURE;
@@ -1227,11 +1240,19 @@ _slurmd_init(void)
 	if (slurm_auth_init(NULL) != SLURM_SUCCESS)
 		return SLURM_FAILURE;
 
-	if (getrlimit(RLIMIT_NOFILE,&rlim) == 0) {
+	if (getrlimit(RLIMIT_CPU, &rlim) == 0) {
 		rlim.rlim_cur = rlim.rlim_max;
-		setrlimit(RLIMIT_NOFILE,&rlim);
+		setrlimit(RLIMIT_CPU, &rlim);
+		if (rlim.rlim_max != RLIM_INFINITY) {
+			error("Slurmd process CPU time limit is %d seconds",
+			      (int) rlim.rlim_max);
+		}
 	}
 
+	if (getrlimit(RLIMIT_NOFILE, &rlim) == 0) {
+		rlim.rlim_cur = rlim.rlim_max;
+		setrlimit(RLIMIT_NOFILE, &rlim);
+	}
 #ifndef NDEBUG
 	if (getrlimit(RLIMIT_CORE, &rlim) == 0) {
 		rlim.rlim_cur = rlim.rlim_max;
@@ -1363,8 +1384,8 @@ cleanup:
 /**************************************************************************\
  * To test for memory leaks, set MEMORY_LEAK_DEBUG to 1 using
  * "configure --enable-memory-leak-debug" then execute
- * > valgrind --tool=memcheck --leak-check=yes --num-callers=6
- *    --leak-resolution=med slurmd -D
+ * $ valgrind --tool=memcheck --leak-check=yes --num-callers=8 \
+ *   --leak-resolution=med ./slurmd -Dc >valg.slurmd.out 2>&1
  *
  * Then exercise the slurmd functionality before executing
  * > scontrol shutdown
diff --git a/src/slurmd/slurmd/slurmd.h b/src/slurmd/slurmd/slurmd.h
index a13c1e491..1e07aa4e1 100644
--- a/src/slurmd/slurmd/slurmd.h
+++ b/src/slurmd/slurmd/slurmd.h
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/slurmd/slurmd/xcpu.c b/src/slurmd/slurmd/xcpu.c
index 698713d19..db8f419ce 100644
--- a/src/slurmd/slurmd/xcpu.c
+++ b/src/slurmd/slurmd/xcpu.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/slurmd/slurmd/xcpu.h b/src/slurmd/slurmd/xcpu.h
index ec91503ec..d2223395d 100644
--- a/src/slurmd/slurmd/xcpu.h
+++ b/src/slurmd/slurmd/xcpu.h
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/slurmd/slurmstepd/Makefile.am b/src/slurmd/slurmstepd/Makefile.am
index 7d1061d39..96e7bb639 100644
--- a/src/slurmd/slurmstepd/Makefile.am
+++ b/src/slurmd/slurmstepd/Makefile.am
@@ -9,8 +9,9 @@ INCLUDES = -I$(top_srcdir)
 
 slurmstepd_LDADD = 				   \
 	$(top_builddir)/src/common/libdaemonize.la \
-	$(top_builddir)/src/api/libslurm.o -ldl\
-	$(PLPA_LIBS) $(PAM_LIBS) $(UTIL_LIBS)
+	$(top_builddir)/src/api/libslurm.o -ldl \
+	$(PLPA_LIBS) $(PAM_LIBS) $(UTIL_LIBS) \
+	../common/libslurmd_common.la
 
 slurmstepd_SOURCES = 	        	\
 	slurmstepd.c slurmstepd.h	\
@@ -24,20 +25,7 @@ slurmstepd_SOURCES = 	        	\
 	pam_ses.c pam_ses.h		\
 	req.c req.h			\
 	multi_prog.c multi_prog.h	\
-	step_terminate_monitor.c step_terminate_monitor.h \
-	$(top_builddir)/src/slurmd/common/proctrack.c \
-	$(top_builddir)/src/slurmd/common/proctrack.h \
-	$(top_builddir)/src/slurmd/common/setproctitle.c \
-	$(top_builddir)/src/slurmd/common/setproctitle.h \
-	$(top_builddir)/src/slurmd/common/slurmstepd_init.c \
-	$(top_builddir)/src/slurmd/common/slurmstepd_init.h \
-	$(top_builddir)/src/slurmd/common/run_script.c \
-	$(top_builddir)/src/slurmd/common/run_script.h \
-	$(top_builddir)/src/slurmd/common/task_plugin.c \
-	$(top_builddir)/src/slurmd/common/task_plugin.h \
-	$(top_builddir)/src/slurmd/common/set_oomadj.c \
-	$(top_builddir)/src/slurmd/common/set_oomadj.h \
-	$(top_builddir)/src/slurmd/common/reverse_tree.h
+	step_terminate_monitor.c step_terminate_monitor.h
 
 if HAVE_AIX
 # We need to set maxdata back to 0 because this effects the "max memory size"
diff --git a/src/slurmd/slurmstepd/Makefile.in b/src/slurmd/slurmstepd/Makefile.in
index ebea1d39e..7a433c36f 100644
--- a/src/slurmd/slurmstepd/Makefile.in
+++ b/src/slurmd/slurmstepd/Makefile.in
@@ -65,6 +65,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -75,6 +76,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -89,15 +91,12 @@ am_slurmstepd_OBJECTS = slurmstepd.$(OBJEXT) mgr.$(OBJEXT) \
 	task.$(OBJEXT) slurmstepd_job.$(OBJEXT) io.$(OBJEXT) \
 	fname.$(OBJEXT) ulimits.$(OBJEXT) pdebug.$(OBJEXT) \
 	pam_ses.$(OBJEXT) req.$(OBJEXT) multi_prog.$(OBJEXT) \
-	step_terminate_monitor.$(OBJEXT) proctrack.$(OBJEXT) \
-	setproctitle.$(OBJEXT) slurmstepd_init.$(OBJEXT) \
-	run_script.$(OBJEXT) task_plugin.$(OBJEXT) \
-	set_oomadj.$(OBJEXT)
+	step_terminate_monitor.$(OBJEXT)
 slurmstepd_OBJECTS = $(am_slurmstepd_OBJECTS)
 am__DEPENDENCIES_1 =
 slurmstepd_DEPENDENCIES = $(top_builddir)/src/common/libdaemonize.la \
 	$(top_builddir)/src/api/libslurm.o $(am__DEPENDENCIES_1) \
-	$(am__DEPENDENCIES_1)
+	$(am__DEPENDENCIES_1) ../common/libslurmd_common.la
 slurmstepd_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
 	$(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
 	$(slurmstepd_LDFLAGS) $(LDFLAGS) -o $@
@@ -129,7 +128,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -166,6 +168,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -223,6 +226,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -258,6 +262,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
@@ -314,8 +319,9 @@ AUTOMAKE_OPTIONS = foreign
 INCLUDES = -I$(top_srcdir)
 slurmstepd_LDADD = \
 	$(top_builddir)/src/common/libdaemonize.la \
-	$(top_builddir)/src/api/libslurm.o -ldl\
-	$(PLPA_LIBS) $(PAM_LIBS) $(UTIL_LIBS)
+	$(top_builddir)/src/api/libslurm.o -ldl \
+	$(PLPA_LIBS) $(PAM_LIBS) $(UTIL_LIBS) \
+	../common/libslurmd_common.la
 
 slurmstepd_SOURCES = \
 	slurmstepd.c slurmstepd.h	\
@@ -329,20 +335,7 @@ slurmstepd_SOURCES = \
 	pam_ses.c pam_ses.h		\
 	req.c req.h			\
 	multi_prog.c multi_prog.h	\
-	step_terminate_monitor.c step_terminate_monitor.h \
-	$(top_builddir)/src/slurmd/common/proctrack.c \
-	$(top_builddir)/src/slurmd/common/proctrack.h \
-	$(top_builddir)/src/slurmd/common/setproctitle.c \
-	$(top_builddir)/src/slurmd/common/setproctitle.h \
-	$(top_builddir)/src/slurmd/common/slurmstepd_init.c \
-	$(top_builddir)/src/slurmd/common/slurmstepd_init.h \
-	$(top_builddir)/src/slurmd/common/run_script.c \
-	$(top_builddir)/src/slurmd/common/run_script.h \
-	$(top_builddir)/src/slurmd/common/task_plugin.c \
-	$(top_builddir)/src/slurmd/common/task_plugin.h \
-	$(top_builddir)/src/slurmd/common/set_oomadj.c \
-	$(top_builddir)/src/slurmd/common/set_oomadj.h \
-	$(top_builddir)/src/slurmd/common/reverse_tree.h
+	step_terminate_monitor.c step_terminate_monitor.h
 
 @HAVE_AIX_FALSE@slurmstepd_LDFLAGS = -export-dynamic $(CMD_LDFLAGS)
 
@@ -442,17 +435,11 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/multi_prog.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pam_ses.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pdebug.Po@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/proctrack.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/req.Po@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/run_script.Po@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/set_oomadj.Po@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/setproctitle.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/slurmstepd.Po@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/slurmstepd_init.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/slurmstepd_job.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/step_terminate_monitor.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/task.Po@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/task_plugin.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ulimits.Po@am__quote@
 
 .c.o:
@@ -476,90 +463,6 @@ distclean-compile:
 @AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
 @am__fastdepCC_FALSE@	$(LTCOMPILE) -c -o $@ $<
 
-proctrack.o: $(top_builddir)/src/slurmd/common/proctrack.c
-@am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT proctrack.o -MD -MP -MF $(DEPDIR)/proctrack.Tpo -c -o proctrack.o `test -f '$(top_builddir)/src/slurmd/common/proctrack.c' || echo '$(srcdir)/'`$(top_builddir)/src/slurmd/common/proctrack.c
-@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/proctrack.Tpo $(DEPDIR)/proctrack.Po
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$(top_builddir)/src/slurmd/common/proctrack.c' object='proctrack.o' libtool=no @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o proctrack.o `test -f '$(top_builddir)/src/slurmd/common/proctrack.c' || echo '$(srcdir)/'`$(top_builddir)/src/slurmd/common/proctrack.c
-
-proctrack.obj: $(top_builddir)/src/slurmd/common/proctrack.c
-@am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT proctrack.obj -MD -MP -MF $(DEPDIR)/proctrack.Tpo -c -o proctrack.obj `if test -f '$(top_builddir)/src/slurmd/common/proctrack.c'; then $(CYGPATH_W) '$(top_builddir)/src/slurmd/common/proctrack.c'; else $(CYGPATH_W) '$(srcdir)/$(top_builddir)/src/slurmd/common/proctrack.c'; fi`
-@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/proctrack.Tpo $(DEPDIR)/proctrack.Po
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$(top_builddir)/src/slurmd/common/proctrack.c' object='proctrack.obj' libtool=no @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o proctrack.obj `if test -f '$(top_builddir)/src/slurmd/common/proctrack.c'; then $(CYGPATH_W) '$(top_builddir)/src/slurmd/common/proctrack.c'; else $(CYGPATH_W) '$(srcdir)/$(top_builddir)/src/slurmd/common/proctrack.c'; fi`
-
-setproctitle.o: $(top_builddir)/src/slurmd/common/setproctitle.c
-@am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT setproctitle.o -MD -MP -MF $(DEPDIR)/setproctitle.Tpo -c -o setproctitle.o `test -f '$(top_builddir)/src/slurmd/common/setproctitle.c' || echo '$(srcdir)/'`$(top_builddir)/src/slurmd/common/setproctitle.c
-@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/setproctitle.Tpo $(DEPDIR)/setproctitle.Po
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$(top_builddir)/src/slurmd/common/setproctitle.c' object='setproctitle.o' libtool=no @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o setproctitle.o `test -f '$(top_builddir)/src/slurmd/common/setproctitle.c' || echo '$(srcdir)/'`$(top_builddir)/src/slurmd/common/setproctitle.c
-
-setproctitle.obj: $(top_builddir)/src/slurmd/common/setproctitle.c
-@am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT setproctitle.obj -MD -MP -MF $(DEPDIR)/setproctitle.Tpo -c -o setproctitle.obj `if test -f '$(top_builddir)/src/slurmd/common/setproctitle.c'; then $(CYGPATH_W) '$(top_builddir)/src/slurmd/common/setproctitle.c'; else $(CYGPATH_W) '$(srcdir)/$(top_builddir)/src/slurmd/common/setproctitle.c'; fi`
-@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/setproctitle.Tpo $(DEPDIR)/setproctitle.Po
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$(top_builddir)/src/slurmd/common/setproctitle.c' object='setproctitle.obj' libtool=no @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o setproctitle.obj `if test -f '$(top_builddir)/src/slurmd/common/setproctitle.c'; then $(CYGPATH_W) '$(top_builddir)/src/slurmd/common/setproctitle.c'; else $(CYGPATH_W) '$(srcdir)/$(top_builddir)/src/slurmd/common/setproctitle.c'; fi`
-
-slurmstepd_init.o: $(top_builddir)/src/slurmd/common/slurmstepd_init.c
-@am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT slurmstepd_init.o -MD -MP -MF $(DEPDIR)/slurmstepd_init.Tpo -c -o slurmstepd_init.o `test -f '$(top_builddir)/src/slurmd/common/slurmstepd_init.c' || echo '$(srcdir)/'`$(top_builddir)/src/slurmd/common/slurmstepd_init.c
-@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/slurmstepd_init.Tpo $(DEPDIR)/slurmstepd_init.Po
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$(top_builddir)/src/slurmd/common/slurmstepd_init.c' object='slurmstepd_init.o' libtool=no @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o slurmstepd_init.o `test -f '$(top_builddir)/src/slurmd/common/slurmstepd_init.c' || echo '$(srcdir)/'`$(top_builddir)/src/slurmd/common/slurmstepd_init.c
-
-slurmstepd_init.obj: $(top_builddir)/src/slurmd/common/slurmstepd_init.c
-@am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT slurmstepd_init.obj -MD -MP -MF $(DEPDIR)/slurmstepd_init.Tpo -c -o slurmstepd_init.obj `if test -f '$(top_builddir)/src/slurmd/common/slurmstepd_init.c'; then $(CYGPATH_W) '$(top_builddir)/src/slurmd/common/slurmstepd_init.c'; else $(CYGPATH_W) '$(srcdir)/$(top_builddir)/src/slurmd/common/slurmstepd_init.c'; fi`
-@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/slurmstepd_init.Tpo $(DEPDIR)/slurmstepd_init.Po
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$(top_builddir)/src/slurmd/common/slurmstepd_init.c' object='slurmstepd_init.obj' libtool=no @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o slurmstepd_init.obj `if test -f '$(top_builddir)/src/slurmd/common/slurmstepd_init.c'; then $(CYGPATH_W) '$(top_builddir)/src/slurmd/common/slurmstepd_init.c'; else $(CYGPATH_W) '$(srcdir)/$(top_builddir)/src/slurmd/common/slurmstepd_init.c'; fi`
-
-run_script.o: $(top_builddir)/src/slurmd/common/run_script.c
-@am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT run_script.o -MD -MP -MF $(DEPDIR)/run_script.Tpo -c -o run_script.o `test -f '$(top_builddir)/src/slurmd/common/run_script.c' || echo '$(srcdir)/'`$(top_builddir)/src/slurmd/common/run_script.c
-@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/run_script.Tpo $(DEPDIR)/run_script.Po
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$(top_builddir)/src/slurmd/common/run_script.c' object='run_script.o' libtool=no @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o run_script.o `test -f '$(top_builddir)/src/slurmd/common/run_script.c' || echo '$(srcdir)/'`$(top_builddir)/src/slurmd/common/run_script.c
-
-run_script.obj: $(top_builddir)/src/slurmd/common/run_script.c
-@am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT run_script.obj -MD -MP -MF $(DEPDIR)/run_script.Tpo -c -o run_script.obj `if test -f '$(top_builddir)/src/slurmd/common/run_script.c'; then $(CYGPATH_W) '$(top_builddir)/src/slurmd/common/run_script.c'; else $(CYGPATH_W) '$(srcdir)/$(top_builddir)/src/slurmd/common/run_script.c'; fi`
-@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/run_script.Tpo $(DEPDIR)/run_script.Po
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$(top_builddir)/src/slurmd/common/run_script.c' object='run_script.obj' libtool=no @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o run_script.obj `if test -f '$(top_builddir)/src/slurmd/common/run_script.c'; then $(CYGPATH_W) '$(top_builddir)/src/slurmd/common/run_script.c'; else $(CYGPATH_W) '$(srcdir)/$(top_builddir)/src/slurmd/common/run_script.c'; fi`
-
-task_plugin.o: $(top_builddir)/src/slurmd/common/task_plugin.c
-@am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT task_plugin.o -MD -MP -MF $(DEPDIR)/task_plugin.Tpo -c -o task_plugin.o `test -f '$(top_builddir)/src/slurmd/common/task_plugin.c' || echo '$(srcdir)/'`$(top_builddir)/src/slurmd/common/task_plugin.c
-@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/task_plugin.Tpo $(DEPDIR)/task_plugin.Po
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$(top_builddir)/src/slurmd/common/task_plugin.c' object='task_plugin.o' libtool=no @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o task_plugin.o `test -f '$(top_builddir)/src/slurmd/common/task_plugin.c' || echo '$(srcdir)/'`$(top_builddir)/src/slurmd/common/task_plugin.c
-
-task_plugin.obj: $(top_builddir)/src/slurmd/common/task_plugin.c
-@am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT task_plugin.obj -MD -MP -MF $(DEPDIR)/task_plugin.Tpo -c -o task_plugin.obj `if test -f '$(top_builddir)/src/slurmd/common/task_plugin.c'; then $(CYGPATH_W) '$(top_builddir)/src/slurmd/common/task_plugin.c'; else $(CYGPATH_W) '$(srcdir)/$(top_builddir)/src/slurmd/common/task_plugin.c'; fi`
-@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/task_plugin.Tpo $(DEPDIR)/task_plugin.Po
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$(top_builddir)/src/slurmd/common/task_plugin.c' object='task_plugin.obj' libtool=no @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o task_plugin.obj `if test -f '$(top_builddir)/src/slurmd/common/task_plugin.c'; then $(CYGPATH_W) '$(top_builddir)/src/slurmd/common/task_plugin.c'; else $(CYGPATH_W) '$(srcdir)/$(top_builddir)/src/slurmd/common/task_plugin.c'; fi`
-
-set_oomadj.o: $(top_builddir)/src/slurmd/common/set_oomadj.c
-@am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT set_oomadj.o -MD -MP -MF $(DEPDIR)/set_oomadj.Tpo -c -o set_oomadj.o `test -f '$(top_builddir)/src/slurmd/common/set_oomadj.c' || echo '$(srcdir)/'`$(top_builddir)/src/slurmd/common/set_oomadj.c
-@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/set_oomadj.Tpo $(DEPDIR)/set_oomadj.Po
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$(top_builddir)/src/slurmd/common/set_oomadj.c' object='set_oomadj.o' libtool=no @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o set_oomadj.o `test -f '$(top_builddir)/src/slurmd/common/set_oomadj.c' || echo '$(srcdir)/'`$(top_builddir)/src/slurmd/common/set_oomadj.c
-
-set_oomadj.obj: $(top_builddir)/src/slurmd/common/set_oomadj.c
-@am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT set_oomadj.obj -MD -MP -MF $(DEPDIR)/set_oomadj.Tpo -c -o set_oomadj.obj `if test -f '$(top_builddir)/src/slurmd/common/set_oomadj.c'; then $(CYGPATH_W) '$(top_builddir)/src/slurmd/common/set_oomadj.c'; else $(CYGPATH_W) '$(srcdir)/$(top_builddir)/src/slurmd/common/set_oomadj.c'; fi`
-@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/set_oomadj.Tpo $(DEPDIR)/set_oomadj.Po
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$(top_builddir)/src/slurmd/common/set_oomadj.c' object='set_oomadj.obj' libtool=no @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o set_oomadj.obj `if test -f '$(top_builddir)/src/slurmd/common/set_oomadj.c'; then $(CYGPATH_W) '$(top_builddir)/src/slurmd/common/set_oomadj.c'; else $(CYGPATH_W) '$(srcdir)/$(top_builddir)/src/slurmd/common/set_oomadj.c'; fi`
-
 mostlyclean-libtool:
 	-rm -f *.lo
 
diff --git a/src/slurmd/slurmstepd/fname.c b/src/slurmd/slurmstepd/fname.c
index 7a743bcda..83aa32975 100644
--- a/src/slurmd/slurmstepd/fname.c
+++ b/src/slurmd/slurmstepd/fname.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/slurmd/slurmstepd/fname.h b/src/slurmd/slurmstepd/fname.h
index 31c3e7ff1..d4358d1e0 100644
--- a/src/slurmd/slurmstepd/fname.h
+++ b/src/slurmd/slurmstepd/fname.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/slurmd/slurmstepd/io.c b/src/slurmd/slurmstepd/io.c
index 88e3271a0..4c1b2411a 100644
--- a/src/slurmd/slurmstepd/io.c
+++ b/src/slurmd/slurmstepd/io.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -96,10 +96,10 @@ static int  _client_read(eio_obj_t *, List);
 static int  _client_write(eio_obj_t *, List);
 
 struct io_operations client_ops = {
-	readable:	&_client_readable,
-	writable:	&_client_writable,
-	handle_read:	&_client_read,
-	handle_write:	&_client_write,
+	.readable = &_client_readable,
+	.writable = &_client_writable,
+	.handle_read = &_client_read,
+	.handle_write = &_client_write,
 };
 
 struct client_io_info {
@@ -136,8 +136,8 @@ static bool _local_file_writable(eio_obj_t *);
 static int  _local_file_write(eio_obj_t *, List);
 
 struct io_operations local_file_ops = {
-	writable:	&_local_file_writable,
-	handle_write:	&_local_file_write,
+	.writable = &_local_file_writable,
+	.handle_write = &_local_file_write,
 };
 
 
@@ -149,9 +149,9 @@ static int  _task_write(eio_obj_t *, List);
 static int _task_write_error(eio_obj_t *obj, List objs);
 
 struct io_operations task_write_ops = {
-	writable:	&_task_writable,
-	handle_write:	&_task_write,
-	handle_error:   &_task_write_error,
+	.writable = &_task_writable,
+	.handle_write = &_task_write,
+	.handle_error = &_task_write_error,
 };
 
 struct task_write_info {
@@ -173,8 +173,8 @@ static bool _task_readable(eio_obj_t *);
 static int  _task_read(eio_obj_t *, List);
 
 struct io_operations task_read_ops = {
-	readable:	&_task_readable,
-	handle_read:	&_task_read,
+	.readable = &_task_readable,
+	.handle_read = &_task_read,
 };
 
 struct task_read_info {
diff --git a/src/slurmd/slurmstepd/io.h b/src/slurmd/slurmstepd/io.h
index 53026256c..9c5ac04cf 100644
--- a/src/slurmd/slurmstepd/io.h
+++ b/src/slurmd/slurmstepd/io.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/slurmd/slurmstepd/mgr.c b/src/slurmd/slurmstepd/mgr.c
index b9cb32a01..a8c4922f0 100644
--- a/src/slurmd/slurmstepd/mgr.c
+++ b/src/slurmd/slurmstepd/mgr.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -75,9 +75,15 @@
 #  include <stdlib.h>
 #endif
 
-#include <slurm/slurm_errno.h>
+#ifdef HAVE_PTY_H
+#  include <pty.h>
+#  ifdef HAVE_UTMP_H
+#    include <utmp.h>
+#  endif
+#endif
+
+#include "slurm/slurm_errno.h"
 
-#include "src/common/basil_resv_conf.h"
 #include "src/common/cbuf.h"
 #include "src/common/env.h"
 #include "src/common/fd.h"
@@ -192,7 +198,7 @@ static int  _run_script_as_user(const char *name, const char *path,
 				slurmd_job_t *job, int max_wait, char **env);
 
 /*
- * Batch job mangement prototypes:
+ * Batch job management prototypes:
  */
 static char * _make_batch_dir(slurmd_job_t *job);
 static char * _make_batch_script(batch_job_launch_msg_t *msg, char *path);
@@ -234,6 +240,7 @@ mgr_launch_tasks_setup(launch_tasks_request_msg_t *msg, slurm_addr_t *cli,
 
 	job->envtp->cli = cli;
 	job->envtp->self = self;
+	job->envtp->select_jobinfo = msg->select_jobinfo;
 
 	return job;
 }
@@ -258,6 +265,55 @@ static uint32_t _get_exit_code(slurmd_job_t *job)
 	return step_rc;
 }
 
+#ifdef HAVE_CRAY
+/*
+ * Kludge to better inter-operate with ALPS layer:
+ * - CONFIRM method requires the SID of the shell executing the job script,
+ * - RELEASE method is more robustly called from stepdmgr.
+ *
+ * To avoid calling the same select/cray plugin function also in slurmctld,
+ * we use the following convention:
+ * - only job_id, job_state, alloc_sid, and select_jobinfo set to non-NULL,
+ * - batch_flag is 0 (corresponding call in slurmctld uses batch_flag = 1),
+ * - job_state set to the unlikely value of 'NO_VAL'.
+ */
+static int _call_select_plugin_from_stepd(slurmd_job_t *job, uint64_t pagg_id,
+					  int (*select_fn)(struct job_record *))
+{
+	struct job_record fake_job_record = {0};
+	int rc;
+
+	fake_job_record.job_id		= job->jobid;
+	fake_job_record.job_state	= (uint16_t)NO_VAL;
+	fake_job_record.select_jobinfo	= select_g_select_jobinfo_alloc();
+	select_g_select_jobinfo_set(fake_job_record.select_jobinfo,
+				    SELECT_JOBDATA_RESV_ID, &job->resv_id);
+	if (pagg_id)
+		select_g_select_jobinfo_set(fake_job_record.select_jobinfo,
+				    SELECT_JOBDATA_PAGG_ID, &pagg_id);
+	rc = (*select_fn)(&fake_job_record);
+	select_g_select_jobinfo_free(fake_job_record.select_jobinfo);
+	return rc;
+}
+
+static int _select_cray_plugin_job_ready(slurmd_job_t *job)
+{
+	uint64_t pagg_id = slurm_container_find(job->jmgr_pid);
+
+	if (pagg_id == 0) {
+		error("no PAGG ID: job service disabled on this host?");
+		/*
+		 * If this process is not attached to a container, there is no
+		 * sense in trying to use the SID as fallback, since the call to
+		 * slurm_container_add() in _fork_all_tasks() will fail later.
+		 * Hence drain the node until sgi_job returns proper PAGG IDs.
+		 */
+		return READY_JOB_FATAL;
+	}
+	return _call_select_plugin_from_stepd(job, pagg_id, select_g_job_ready);
+}
+#endif
+
 /*
  * Send batch exit code to slurmctld. Non-zero rc will DRAIN the node.
  */
@@ -271,6 +327,11 @@ batch_finish(slurmd_job_t *job, int rc)
 	if (job->batchdir && (rmdir(job->batchdir) < 0))
 		error("rmdir(%s): %m",  job->batchdir);
 	xfree(job->batchdir);
+
+#ifdef HAVE_CRAY
+	_call_select_plugin_from_stepd(job, 0, select_g_job_fini);
+#endif
+
 	if (job->aborted) {
 		if ((job->stepid == NO_VAL) ||
 		    (job->stepid == SLURM_BATCH_SCRIPT)) {
@@ -644,6 +705,8 @@ _one_step_complete_msg(slurmd_job_t *job, int first, int last)
 	int rc = -1;
 	int retcode;
 	int i;
+	uint16_t port = 0;
+	char ip_buf[16];
 	static bool acct_sent = false;
 
 	debug2("_one_step_complete_msg: first=%d, last=%d", first, last);
@@ -656,7 +719,7 @@ _one_step_complete_msg(slurmd_job_t *job, int first, int last)
 	msg.step_rc = step_complete.step_rc;
 	msg.jobacct = jobacct_gather_g_create(NULL);
 	/************* acct stuff ********************/
-	if(!acct_sent) {
+	if (!acct_sent) {
 		jobacct_gather_g_aggregate(step_complete.jobacct, job->jobacct);
 		jobacct_gather_g_getinfo(step_complete.jobacct,
 					 JOBACCT_DATA_TOTAL, msg.jobacct);
@@ -668,40 +731,53 @@ _one_step_complete_msg(slurmd_job_t *job, int first, int last)
 	req.data = &msg;
 	req.address = step_complete.parent_addr;
 
-	/* Do NOT change this check to "step_complete.rank == 0", because
+	/* Do NOT change this check to "step_complete.rank != 0", because
 	 * there are odd situations where SlurmUser or root could
 	 * craft a launch without a valid credential, and no tree information
 	 * can be built with out the hostlist from the credential.
 	 */
-	if (step_complete.parent_rank == -1) {
+	if (step_complete.parent_rank != -1) {
+		debug3("Rank %d sending complete to rank %d, range %d to %d",
+		       step_complete.rank, step_complete.parent_rank,
+		       first, last);
+		/* On error, pause then try sending to parent again.
+		 * The parent slurmstepd may just not have started yet, because
+		 * of the way that the launch message forwarding works.
+		 */
+		for (i = 0; i < REVERSE_TREE_PARENT_RETRY; i++) {
+			if (i)
+				sleep(1);
+			retcode = slurm_send_recv_rc_msg_only_one(&req, &rc, 0);
+			if ((retcode == 0) && (rc == 0))
+				goto finished;
+		}
+		/* on error AGAIN, send to the slurmctld instead */
+		debug3("Rank %d sending complete to slurmctld instead, range "
+		       "%d to %d", step_complete.rank, first, last);
+	} else {
 		/* this is the base of the tree, its parent is slurmctld */
 		debug3("Rank %d sending complete to slurmctld, range %d to %d",
 		       step_complete.rank, first, last);
-		if (slurm_send_recv_controller_rc_msg(&req, &rc) < 0)
-			error("Rank %d failed sending step completion message"
-			      " to slurmctld (parent)", step_complete.rank);
-		goto finished;
 	}
 
-	debug3("Rank %d sending complete to rank %d, range %d to %d",
-	       step_complete.rank, step_complete.parent_rank, first, last);
-	/* On error, pause then try sending to parent again.
-	 * The parent slurmstepd may just not have started yet, because
-	 * of the way that the launch message forwarding works.
-	 */
-	for (i = 0; i < REVERSE_TREE_PARENT_RETRY; i++) {
-		if (i)
-			sleep(1);
-		retcode = slurm_send_recv_rc_msg_only_one(&req, &rc, 0);
-		if (retcode == 0 && rc == 0)
-			goto finished;
-	}
-	/* on error AGAIN, send to the slurmctld instead */
-	debug3("Rank %d sending complete to slurmctld instead, range %d to %d",
-	       step_complete.rank, first, last);
-	if (slurm_send_recv_controller_rc_msg(&req, &rc) < 0)
-		error("Rank %d failed sending step completion message"
-		      " directly to slurmctld", step_complete.rank);
+	/* Retry step complete RPC send to slurmctld indefinitely.
+	 * Prevent orphan job step if slurmctld is down */
+	i = 1;
+	while (slurm_send_recv_controller_rc_msg(&req, &rc) < 0) {
+		if (i++ == 1) {
+			slurm_get_ip_str(&step_complete.parent_addr, &port,
+					 ip_buf, sizeof(ip_buf));
+			error("Rank %d failed sending step completion message "
+			      "directly to slurmctld (%s:%u), retrying",
+			      step_complete.rank, ip_buf, port);
+		}
+		sleep(60);
+	}
+	if (i > 1) {
+		info("Rank %d sent step completion message directly to "
+		     "slurmctld (%s:%u)", step_complete.rank, ip_buf, port);
+	}
+
 finished:
 	jobacct_gather_g_destroy(msg.jobacct);
 }
@@ -837,6 +913,37 @@ job_manager(slurmd_job_t *job)
 		goto fail1;
 	}
 
+#ifdef HAVE_CRAY
+	/*
+	 * We need to call the proctrack/sgi_job container-create function here
+	 * already since the select/cray plugin needs the job container ID in
+	 * order to CONFIRM the ALPS reservation.
+	 * It is not a good idea to perform this setup in _fork_all_tasks(),
+	 * since any transient failure of ALPS (which can happen in practice)
+	 * will then set the frontend node to DRAIN.
+	 */
+	if ((job->cont_id == 0) &&
+	    (slurm_container_create(job) != SLURM_SUCCESS)) {
+		error("failed to create proctrack/sgi_job container: %m");
+		rc = ESLURMD_SETUP_ENVIRONMENT_ERROR;
+		goto fail1;
+	}
+
+	rc = _select_cray_plugin_job_ready(job);
+	if (rc != SLURM_SUCCESS) {
+		/*
+		 * Transient error: slurmctld knows this condition to mean that
+		 * the ALPS (not the SLURM) reservation failed and tries again.
+		 */
+		if (rc == READY_JOB_ERROR)
+			rc = ESLURM_RESERVATION_NOT_USABLE;
+		else
+			rc = ESLURMD_SETUP_ENVIRONMENT_ERROR;
+		error("could not confirm ALPS reservation #%u", job->resv_id);
+		goto fail1;
+	}
+#endif
+
 #ifdef PR_SET_DUMPABLE
 	if (prctl(PR_SET_DUMPABLE, 1) < 0)
 		debug ("Unable to set dumpable to 1");
@@ -950,6 +1057,11 @@ job_manager(slurmd_job_t *job)
 	if (!job->batch && !job->user_managed_io && io_initialized)
 		_wait_for_io(job);
 
+	/*
+	 * Warn task plugin that the user's step have terminated
+	 */
+	post_step(job);
+
 	debug2("Before call to spank_fini()");
 	if (spank_fini (job)  < 0) {
 		error ("spank_fini failed");
@@ -988,6 +1100,110 @@ _spank_task_privileged(slurmd_job_t *job, int taskid, struct priv_state *sp)
 	return(_drop_privileges (job, true, sp));
 }
 
+struct exec_wait_info {
+	int id;
+	pid_t pid;
+	int parentfd;
+	int childfd;
+};
+
+static struct exec_wait_info * exec_wait_info_create (int i)
+{
+	int fdpair[2];
+	struct exec_wait_info * e;
+
+	if (pipe (fdpair) < 0) {
+		error ("exec_wait_info_create: pipe: %m");
+		return NULL;
+	}
+
+	fd_set_close_on_exec(fdpair[0]);
+	fd_set_close_on_exec(fdpair[1]);
+
+	e = xmalloc (sizeof (*e));
+	e->childfd = fdpair[0];
+	e->parentfd = fdpair[1];
+	e->id = i;
+	e->pid = -1;
+
+	return (e);
+}
+
+static void exec_wait_info_destroy (struct exec_wait_info *e)
+{
+	if (e == NULL)
+		return;
+
+	close (e->parentfd);
+	close (e->childfd);
+	e->id = -1;
+	e->pid = -1;
+}
+
+static pid_t exec_wait_get_pid (struct exec_wait_info *e)
+{
+	if (e == NULL)
+		return (-1);
+	return (e->pid);
+}
+
+static struct exec_wait_info * fork_child_with_wait_info (int id)
+{
+	struct exec_wait_info *e;
+
+	if (!(e = exec_wait_info_create (id)))
+		return (NULL);
+
+	if ((e->pid = fork ()) < 0) {
+		exec_wait_info_destroy (e);
+		return (NULL);
+	}
+	else if (e->pid == 0)  /* In child, close parent fd */
+		close (e->parentfd);
+
+	return (e);
+}
+
+static int exec_wait_child_wait_for_parent (struct exec_wait_info *e)
+{
+	char c;
+
+	if (read (e->childfd, &c, sizeof (c)) != 1)
+		return error ("wait_for_parent: failed: %m");
+
+	return (0);
+}
+
+static int exec_wait_signal_child (struct exec_wait_info *e)
+{
+	char c = '\0';
+
+	if (write (e->parentfd, &c, sizeof (c)) != 1)
+		return error ("write to unblock task %d failed: %m", e->id);
+
+	return (0);
+}
+
+static int exec_wait_signal (struct exec_wait_info *e, slurmd_job_t *job)
+{
+	debug3 ("Unblocking %u.%u task %d, writefd = %d",
+	        job->jobid, job->stepid, e->id, e->parentfd);
+	exec_wait_signal_child (e);
+	return (0);
+}
+
+static void prepare_tty (slurmd_job_t *job, slurmd_task_info_t *task)
+{
+#ifdef HAVE_PTY_H
+	if (job->pty && (task->gtid == 0)) {
+		if (login_tty(task->stdin_fd))
+			error("login_tty: %m");
+		else
+			debug3("login_tty good");
+	}
+#endif
+	return;
+}
 
 /* fork and exec N tasks
  */
@@ -996,12 +1212,10 @@ _fork_all_tasks(slurmd_job_t *job)
 {
 	int rc = SLURM_SUCCESS;
 	int i;
-	int *writefds; /* array of write file descriptors */
-	int *readfds; /* array of read file descriptors */
-	int fdpair[2];
 	struct priv_state sprivs;
 	jobacct_id_t jobacct_id;
 	char *oom_value;
+	List exec_wait_list = NULL;
 
 	xassert(job != NULL);
 
@@ -1011,13 +1225,6 @@ _fork_all_tasks(slurmd_job_t *job)
 		return SLURM_ERROR;
 	}
 
-#ifdef HAVE_CRAY
-	if (basil_resv_conf(job->resv_id, job->jobid)) {
-		error("could not confirm reservation");
-		return SLURM_ERROR;
-	}
-#endif
-
 	debug2("Before call to spank_init()");
 	if (spank_init (job) < 0) {
 		error ("Plugin stack initialization failed.");
@@ -1025,36 +1232,6 @@ _fork_all_tasks(slurmd_job_t *job)
 	}
 	debug2("After call to spank_init()");
 
-	/*
-	 * Pre-allocate a pipe for each of the tasks
-	 */
-	debug3("num tasks on this node = %d", job->node_tasks);
-	writefds = (int *) xmalloc (job->node_tasks * sizeof(int));
-	if (!writefds) {
-		error("writefds xmalloc failed!");
-		return SLURM_ERROR;
-	}
-	readfds = (int *) xmalloc (job->node_tasks * sizeof(int));
-	if (!readfds) {
-		error("readfds xmalloc failed!");
-		return SLURM_ERROR;
-	}
-
-
-	for (i = 0; i < job->node_tasks; i++) {
-		fdpair[0] = -1; fdpair[1] = -1;
-		if (pipe (fdpair) < 0) {
-			error ("exec_all_tasks: pipe: %m");
-			return SLURM_ERROR;
-		}
-		debug3("New fdpair[0] = %d, fdpair[1] = %d",
-		       fdpair[0], fdpair[1]);
-		fd_set_close_on_exec(fdpair[0]);
-		fd_set_close_on_exec(fdpair[1]);
-		readfds[i] = fdpair[0];
-		writefds[i] = fdpair[1];
-	}
-
 	set_oom_adj(0);	/* the tasks may be killed by OOM */
 	if (pre_setuid(job)) {
 		error("Failed task affinity setup");
@@ -1092,27 +1269,33 @@ _fork_all_tasks(slurmd_job_t *job)
 		return SLURM_ERROR;
 	}
 
+	exec_wait_list = list_create ((ListDelF) exec_wait_info_destroy);
+	if (!exec_wait_list)
+		return error ("Unable to create exec_wait_list");
+
 	/*
 	 * Fork all of the task processes.
 	 */
 	for (i = 0; i < job->node_tasks; i++) {
 		char time_stamp[256];
 		pid_t pid;
-		if ((pid = fork ()) < 0) {
+		struct exec_wait_info *ei;
+
+		if ((ei = fork_child_with_wait_info (i)) == NULL) {
 			error("child fork: %m");
 			goto fail2;
-		} else if (pid == 0)  { /* child */
-			int j;
+		} else if ((pid = exec_wait_get_pid (ei)) == 0)  { /* child */
+			/*
+			 *  Destroy exec_wait_list in the child.
+			 *   Only exec_wait_info for previous tasks have been
+			 *   added to the list so far, so everything else
+			 *   can be discarded.
+			 */
+			list_destroy (exec_wait_list);
 
 #ifdef HAVE_AIX
 			(void) mkcrid(0);
 #endif
-			/* Close file descriptors not needed by the child */
-			for (j = 0; j < job->node_tasks; j++) {
-				close(writefds[j]);
-				if (j > i)
-					close(readfds[j]);
-			}
 			/* jobacct_gather_g_endpoll();
 			 * closing jobacct files here causes deadlock */
 
@@ -1136,14 +1319,28 @@ _fork_all_tasks(slurmd_job_t *job)
 
 			xsignal_unblock(slurmstepd_blocked_signals);
 
-			exec_task(job, i, readfds[i]);
+			/*
+			 *  Setup tty before any setpgid() calls
+			 */
+			prepare_tty (job, job->task[i]);
+
+			/*
+			 *  Block until parent notifies us that it is ok to
+			 *   proceed. This allows the parent to place all
+			 *   children in any process groups or containers
+			 *   before they make a call to exec(2).
+			 */
+			exec_wait_child_wait_for_parent (ei);
+
+			exec_task(job, i);
 		}
 
 		/*
 		 * Parent continues:
 		 */
 
-		close(readfds[i]);
+		list_append (exec_wait_list, ei);
+
 		LOG_TIMESTAMP(time_stamp);
 		verbose ("task %lu (%lu) started %s",
 			(unsigned long) job->task[i]->gtid,
@@ -1213,16 +1410,10 @@ _fork_all_tasks(slurmd_job_t *job)
 	/*
 	 * Now it's ok to unblock the tasks, so they may call exec.
 	 */
-	for (i = 0; i < job->node_tasks; i++) {
-		char c = '\0';
-
-		debug3("Unblocking %u.%u task %d, writefd = %d",
-		       job->jobid, job->stepid, i, writefds[i]);
-		if (write (writefds[i], &c, sizeof (c)) != 1)
-			error ("write to unblock task %d failed", i);
-
-		close(writefds[i]);
+	list_for_each (exec_wait_list, (ListForF) exec_wait_signal, job);
+	list_destroy (exec_wait_list);
 
+	for (i = 0; i < job->node_tasks; i++) {
 		/*
 		 * Prepare process for attach by parallel debugger
 		 * (if specified and able)
@@ -1231,17 +1422,14 @@ _fork_all_tasks(slurmd_job_t *job)
 				== SLURM_ERROR)
 			rc = SLURM_ERROR;
 	}
-	xfree(writefds);
-	xfree(readfds);
 
 	return rc;
 
 fail2:
 	_reclaim_privileges (&sprivs);
+	if (exec_wait_list)
+		list_destroy (exec_wait_list);
 fail1:
-	xfree(writefds);
-	xfree(readfds);
-
 	pam_finish();
 	return SLURM_ERROR;
 }
@@ -1509,9 +1697,10 @@ _make_batch_dir(slurmd_job_t *job)
 	char path[MAXPATHLEN];
 
 	if (job->stepid == NO_VAL)
-		snprintf(path, 1024, "%s/job%05u", conf->spooldir, job->jobid);
+		snprintf(path, sizeof(path), "%s/job%05u",
+			 conf->spooldir, job->jobid);
 	else {
-		snprintf(path, 1024, "%s/job%05u.%05u",
+		snprintf(path, sizeof(path), "%s/job%05u.%05u",
 			 conf->spooldir, job->jobid, job->stepid);
 	}
 
@@ -1544,7 +1733,12 @@ _make_batch_script(batch_job_launch_msg_t *msg, char *path)
 	FILE *fp = NULL;
 	char  script[MAXPATHLEN];
 
-	snprintf(script, 1024, "%s/%s", path, "slurm_script");
+	if (msg->script == NULL) {
+		error("_make_batch_script: called with NULL script");
+		return NULL;
+	}
+
+	snprintf(script, sizeof(script), "%s/%s", path, "slurm_script");
 
   again:
 	if ((fp = safeopen(script, "w", SAFEOPEN_CREATE_ONLY)) == NULL) {
@@ -1716,7 +1910,7 @@ _send_complete_batch_script_msg(slurmd_job_t *job, int err, int status)
 	req_msg.msg_type= REQUEST_COMPLETE_BATCH_SCRIPT;
 	req_msg.data	= &req;
 
-	info("sending REQUEST_COMPLETE_BATCH_SCRIPT");
+	info("sending REQUEST_COMPLETE_BATCH_SCRIPT, error:%u", err);
 
 	/* Note: these log messages don't go to slurmd.log from here */
 	for (i=0; i<=MAX_RETRY; i++) {
@@ -1840,8 +2034,22 @@ _slurmd_job_log_init(slurmd_job_t *job)
 	log_alter(conf->log_opts, 0, NULL);
 	log_set_argv0(argv0);
 
-	/* Connect slurmd stderr to job's stderr */
-	if (!job->user_managed_io && job->task != NULL) {
+	/*  Connect slurmd stderr to stderr of job, unless we are using
+	 *   user_managed_io or a pty.
+	 *
+	 *  user_managed_io directly connects the client (e.g. poe) to the tasks
+	 *   over a TCP connection, and we fully leave it up to the client
+	 *   to manage the stream with no buffering on slurm's part.
+	 *   We also promise that we will not insert any foreign data into
+	 *   the stream, so here we need to avoid connecting slurmstepd's
+	 *   STDERR_FILENO to the tasks's stderr.
+	 *
+	 *  When pty terminal emulation is used, the pts can potentially
+	 *   cause IO to block, so we need to avoid connecting slurmstepd's
+	 *   STDERR_FILENO to the task's pts on stderr to avoid hangs in
+	 *   the slurmstepd.
+	 */
+	if (!job->user_managed_io && !job->pty && job->task != NULL) {
 		if (dup2(job->task[0]->stderr_fd, STDERR_FILENO) < 0) {
 			error("job_log_init: dup2(stderr): %m");
 			return ESLURMD_IO_ERROR;
@@ -2011,6 +2219,7 @@ _run_script_as_user(const char *name, const char *path, slurmd_job_t *job,
 {
 	int status, rc, opt;
 	pid_t cpid;
+	struct exec_wait_info *ei;
 
 	xassert(env);
 	if (path == NULL || path[0] == '\0')
@@ -2027,11 +2236,11 @@ _run_script_as_user(const char *name, const char *path, slurmd_job_t *job,
 	    (slurm_container_create(job) != SLURM_SUCCESS))
 		error("slurm_container_create: %m");
 
-	if ((cpid = fork()) < 0) {
+	if ((ei = fork_child_with_wait_info(0)) == NULL) {
 		error ("executing %s: fork: %m", name);
 		return -1;
 	}
-	if (cpid == 0) {
+	if ((cpid = exec_wait_get_pid (ei)) == 0) {
 		struct priv_state sprivs;
 		char *argv[2];
 
@@ -2058,6 +2267,11 @@ _run_script_as_user(const char *name, const char *path, slurmd_job_t *job,
 #else
 		setpgrp();
 #endif
+		/*
+		 *  Wait for signal from parent
+		 */
+		exec_wait_child_wait_for_parent (ei);
+
 		execve(path, argv, env);
 		error("execve(): %m");
 		exit(127);
@@ -2065,6 +2279,11 @@ _run_script_as_user(const char *name, const char *path, slurmd_job_t *job,
 
 	if (slurm_container_add(job, cpid) != SLURM_SUCCESS)
 		error("slurm_container_add: %m");
+
+	if (exec_wait_signal_child (ei) < 0)
+		error ("run_script_as_user: Failed to wakeup %s", name);
+	exec_wait_info_destroy (ei);
+
 	if (max_wait < 0)
 		opt = 0;
 	else
diff --git a/src/slurmd/slurmstepd/mgr.h b/src/slurmd/slurmstepd/mgr.h
index 7a474bea4..69d495428 100644
--- a/src/slurmd/slurmstepd/mgr.h
+++ b/src/slurmd/slurmstepd/mgr.h
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/slurmd/slurmstepd/multi_prog.c b/src/slurmd/slurmstepd/multi_prog.c
index 4603d8f1c..ad30329b8 100644
--- a/src/slurmd/slurmstepd/multi_prog.c
+++ b/src/slurmd/slurmstepd/multi_prog.c
@@ -14,7 +14,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/slurmd/slurmstepd/multi_prog.h b/src/slurmd/slurmstepd/multi_prog.h
index 7444002a1..aeb9a4d8b 100644
--- a/src/slurmd/slurmstepd/multi_prog.h
+++ b/src/slurmd/slurmstepd/multi_prog.h
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/slurmd/slurmstepd/pam_ses.c b/src/slurmd/slurmstepd/pam_ses.c
index d4ab3c7ad..57d4f43fe 100644
--- a/src/slurmd/slurmstepd/pam_ses.c
+++ b/src/slurmd/slurmstepd/pam_ses.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -89,7 +89,7 @@ pam_setup (char *user, char *host)
 	 * administrator. PAM's session management library is responsible for
 	 * handling resource limits. When a PAM session is opened on behalf of
 	 * a user, the limits imposed by the sys admin are picked up. Opening
-	 * a PAM session requires a PAM handle, which is obatined when the PAM
+	 * a PAM session requires a PAM handle, which is obtained when the PAM
 	 * interface is intialized. (PAM handles are required with essentially
 	 * all PAM calls.) It's also necessary to have the users PAM credentials
 	 * to open a user session.
diff --git a/src/slurmd/slurmstepd/pam_ses.h b/src/slurmd/slurmstepd/pam_ses.h
index 4a42db309..81160e243 100644
--- a/src/slurmd/slurmstepd/pam_ses.h
+++ b/src/slurmd/slurmstepd/pam_ses.h
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/slurmd/slurmstepd/pdebug.c b/src/slurmd/slurmstepd/pdebug.c
index f5a48ef90..b5dd89350 100644
--- a/src/slurmd/slurmstepd/pdebug.c
+++ b/src/slurmd/slurmstepd/pdebug.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -101,6 +101,10 @@ pdebug_trace_process(slurmd_job_t *job, pid_t pid)
 		if (_PTRACE(PT_DETACH, pid, NULL, 0)) {
 #elif defined(__sun)
 		if (_PTRACE(7, pid, NULL, 0)) {
+#elif defined(__CYGWIN__)
+		if (1) {
+			debug3("No ptrace for cygwin");
+		} else {
 #else
 		if (_PTRACE(PTRACE_DETACH, pid, NULL, 0)) {
 #endif
@@ -127,6 +131,8 @@ pdebug_stop_current(slurmd_job_t *job)
 	     && (_PTRACE(PT_TRACE_ME, 0, NULL, 0) < 0) )
 #elif defined(__sun)
 	     && (_PTRACE(0, 0, NULL, 0) < 0))
+#elif defined(__CYGWIN__)
+	     && 0)
 #else
 	     && (_PTRACE(PTRACE_TRACEME, 0, NULL, 0) < 0) )
 #endif
diff --git a/src/slurmd/slurmstepd/pdebug.h b/src/slurmd/slurmstepd/pdebug.h
index ad92a9ee5..d40a9f7c0 100644
--- a/src/slurmd/slurmstepd/pdebug.h
+++ b/src/slurmd/slurmstepd/pdebug.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/slurmd/slurmstepd/req.c b/src/slurmd/slurmstepd/req.c
index f412a8c51..478279db3 100644
--- a/src/slurmd/slurmstepd/req.c
+++ b/src/slurmd/slurmstepd/req.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -92,8 +92,8 @@ static bool _msg_socket_readable(eio_obj_t *obj);
 static int _msg_socket_accept(eio_obj_t *obj, List objs);
 
 struct io_operations msg_socket_ops = {
-	readable:	&_msg_socket_readable,
-	handle_read:	&_msg_socket_accept
+	.readable = &_msg_socket_readable,
+	.handle_read = &_msg_socket_accept
 };
 
 static char *socket_name;
@@ -170,7 +170,7 @@ _domain_socket_create(const char *dir, const char *nodename,
 	}
 
 	/*
-	 * Now build the the name of socket, and create the socket.
+	 * Now build the name of socket, and create the socket.
 	 */
 	xstrfmtcat(name, "%s/%s_%u.%u", dir, nodename, jobid, stepid);
 
@@ -762,7 +762,7 @@ _handle_signal_container(int fd, slurmd_job_t *job, uid_t uid)
 	 * Sanity checks
 	 */
 	if (job->cont_id == 0) {
-		debug ("step %u.%u invalid container [cont_id:%u]",
+		debug ("step %u.%u invalid container [cont_id:%"PRIu64"]",
 			job->jobid, job->stepid, job->cont_id);
 		rc = -1;
 		errnum = ESLURMD_JOB_NOTRUNNING;
@@ -787,6 +787,10 @@ _handle_signal_container(int fd, slurmd_job_t *job, uid_t uid)
 			error("*** %s CANCELLED AT %s DUE TO TIME LIMIT ***",
 			      entity, time_str);
 			msg_sent = 1;
+		} else if (sig == SIG_PREEMPTED) {
+			error("*** %s CANCELLED AT %s DUE TO PREEMPTION ***",
+			      entity, time_str);
+			msg_sent = 1;
 		} else if (sig == SIG_NODE_FAIL) {
 			error("*** %s CANCELLED AT %s DUE TO NODE FAILURE ***",
 			      entity, time_str);
@@ -801,7 +805,7 @@ _handle_signal_container(int fd, slurmd_job_t *job, uid_t uid)
 		}
 	}
 	if ((sig == SIG_TIME_LIMIT) || (sig == SIG_NODE_FAIL) ||
-	    (sig == SIG_FAILURE))
+	    (sig == SIG_PREEMPTED)  || (sig == SIG_FAILURE))
 		goto done;
 	if (sig == SIG_DEBUG_WAKE) {
 		int i;
@@ -991,7 +995,7 @@ _handle_terminate(int fd, slurmd_job_t *job, uid_t uid)
 	 * Sanity checks
 	 */
 	if (job->cont_id == 0) {
-		debug ("step %u.%u invalid container [cont_id:%u]",
+		debug ("step %u.%u invalid container [cont_id:%"PRIu64"]",
 			job->jobid, job->stepid, job->cont_id);
 		rc = -1;
 		errnum = ESLURMD_JOB_NOTRUNNING;
@@ -1159,7 +1163,7 @@ _handle_suspend(int fd, slurmd_job_t *job, uid_t uid)
 	}
 
 	if (job->cont_id == 0) {
-		debug ("step %u.%u invalid container [cont_id:%u]",
+		debug ("step %u.%u invalid container [cont_id:%"PRIu64"]",
 			job->jobid, job->stepid, job->cont_id);
 		rc = -1;
 		errnum = ESLURMD_JOB_NOTRUNNING;
@@ -1178,13 +1182,20 @@ _handle_suspend(int fd, slurmd_job_t *job, uid_t uid)
 		pthread_mutex_unlock(&suspend_mutex);
 		goto done;
 	} else {
-		/* SIGTSTP is sent first to let MPI daemons stop their
-		 * tasks, then we send SIGSTOP to stop everything else */
+		/* SIGTSTP is sent first to let MPI daemons stop their tasks,
+		 * then wait 2 seconds, then send SIGSTOP to the spawned
+		 * process's container to stop everything else.
+		 *
+		 * In some cases, 1 second has proven insufficient. Longer
+		 * delays may help insure that all MPI tasks have been stopped
+		 * (that depends upon the MPI implementaiton used), but will
+		 * also permit longer time periods when more than one job can
+		 * be running on each resource (not good). */
 		if (slurm_container_signal(job->cont_id, SIGTSTP) < 0) {
 			verbose("Error suspending %u.%u (SIGTSTP): %m",
 				job->jobid, job->stepid);
 		} else
-			sleep(1);
+			sleep(2);
 
 		if (slurm_container_signal(job->cont_id, SIGSTOP) < 0) {
 			verbose("Error suspending %u.%u (SIGSTOP): %m",
@@ -1224,7 +1235,7 @@ _handle_resume(int fd, slurmd_job_t *job, uid_t uid)
 	}
 
 	if (job->cont_id == 0) {
-		debug ("step %u.%u invalid container [cont_id:%u]",
+		debug ("step %u.%u invalid container [cont_id:%"PRIu64"]",
 			job->jobid, job->stepid, job->cont_id);
 		rc = -1;
 		errnum = ESLURMD_JOB_NOTRUNNING;
diff --git a/src/slurmd/slurmstepd/req.h b/src/slurmd/slurmstepd/req.h
index 616917888..af674b754 100644
--- a/src/slurmd/slurmstepd/req.h
+++ b/src/slurmd/slurmstepd/req.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/slurmd/slurmstepd/slurmstepd.c b/src/slurmd/slurmstepd/slurmstepd.c
index 5ef4041b9..d27e85861 100644
--- a/src/slurmd/slurmstepd/slurmstepd.c
+++ b/src/slurmd/slurmstepd/slurmstepd.c
@@ -10,7 +10,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -220,6 +220,9 @@ _init_from_slurmd(int sock, char **argv,
 	gid_t *gids = NULL;
 	uint16_t port;
 	char buf[16];
+	log_options_t lopts = LOG_OPTS_INITIALIZER;
+
+	log_init(argv[0], lopts, LOG_DAEMON, NULL);
 
 	/* receive job type from slurmd */
 	safe_read(sock, &step_type, sizeof(int));
@@ -247,11 +250,10 @@ _init_from_slurmd(int sock, char **argv,
 	}
 	free_buf(buffer);
 
-	debug2("debug level is %d.", conf->debug_level);
 	conf->log_opts.stderr_level = conf->debug_level;
 	conf->log_opts.logfile_level = conf->debug_level;
 	conf->log_opts.syslog_level = conf->debug_level;
-	//log_alter(conf->log_opts, 0, NULL);
+
 	/*
 	 * If daemonizing, turn off stderr logging -- also, if
 	 * logging to a file, turn off syslog.
@@ -265,8 +267,9 @@ _init_from_slurmd(int sock, char **argv,
 			conf->log_opts.syslog_level = LOG_LEVEL_QUIET;
 	} else
 		conf->log_opts.syslog_level  = LOG_LEVEL_QUIET;
+	log_alter(conf->log_opts, 0, conf->logfile);
 
-	log_init(argv[0], conf->log_opts, LOG_DAEMON, conf->logfile);
+	debug2("debug level is %d.", conf->debug_level);
 	/* acct info */
 	jobacct_gather_g_startpoll(conf->job_acct_gather_freq);
 
@@ -407,9 +410,11 @@ _step_setup(slurm_addr_t *cli, slurm_addr_t *self, slurm_msg_t *msg)
 static void
 _step_cleanup(slurmd_job_t *job, slurm_msg_t *msg, int rc)
 {
-	jobacct_gather_g_destroy(job->jobacct);
-	if (!job->batch)
-		job_destroy(job);
+	if (job) {
+		jobacct_gather_g_destroy(job->jobacct);
+		if (!job->batch)
+			job_destroy(job);
+	}
 	/*
 	 * The message cannot be freed until the jobstep is complete
 	 * because the job struct has pointers into the msg, such
diff --git a/src/slurmd/slurmstepd/slurmstepd.h b/src/slurmd/slurmstepd/slurmstepd.h
index 36d3ff4a3..fd107e6b0 100644
--- a/src/slurmd/slurmstepd/slurmstepd.h
+++ b/src/slurmd/slurmstepd/slurmstepd.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/slurmd/slurmstepd/slurmstepd_job.c b/src/slurmd/slurmstepd/slurmstepd_job.c
index 3c97ae778..5b8d59516 100644
--- a/src/slurmd/slurmstepd/slurmstepd_job.c
+++ b/src/slurmd/slurmstepd/slurmstepd_job.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -298,6 +298,17 @@ job_create(launch_tasks_request_msg_t *msg)
 					     job->job_mem);
 	}
 
+#ifdef HAVE_CRAY
+	/* This is only used for Cray emulation mode where slurmd is used to
+	 * launch job steps. On a real Cray system, ALPS is used to launch
+	 * the tasks instead of SLURM. SLURM's task launch RPC does NOT
+	 * contain the reservation ID, so just use some non-zero value here
+	 * for testing purposes. */
+	job->resv_id = 1;
+	select_g_select_jobinfo_set(msg->select_jobinfo, SELECT_JOBDATA_RESV_ID,
+				    &job->resv_id);
+#endif
+
 	get_cred_gres(msg->cred, conf->node_name,
 		      &job->job_gres_list, &job->step_gres_list);
 
diff --git a/src/slurmd/slurmstepd/slurmstepd_job.h b/src/slurmd/slurmstepd/slurmstepd_job.h
index b713155a7..dfcae09ae 100644
--- a/src/slurmd/slurmstepd/slurmstepd_job.h
+++ b/src/slurmd/slurmstepd/slurmstepd_job.h
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -190,7 +190,7 @@ typedef struct slurmd_job {
 	uint16_t       multi_prog;
 	uint16_t       overcommit;
 	env_t          *envtp;
-	uint32_t       cont_id;
+	uint64_t       cont_id;
 
 	char          *batchdir;
 	jobacctinfo_t *jobacct;
diff --git a/src/slurmd/slurmstepd/step_terminate_monitor.c b/src/slurmd/slurmstepd/step_terminate_monitor.c
index 6e46fb75a..56b225474 100644
--- a/src/slurmd/slurmstepd/step_terminate_monitor.c
+++ b/src/slurmd/slurmstepd/step_terminate_monitor.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/slurmd/slurmstepd/step_terminate_monitor.h b/src/slurmd/slurmstepd/step_terminate_monitor.h
index fcda666ae..c358fb892 100644
--- a/src/slurmd/slurmstepd/step_terminate_monitor.h
+++ b/src/slurmd/slurmstepd/step_terminate_monitor.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/slurmd/slurmstepd/task.c b/src/slurmd/slurmstepd/task.c
index e93ee08e8..925e67e4a 100644
--- a/src/slurmd/slurmstepd/task.c
+++ b/src/slurmd/slurmstepd/task.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -65,17 +65,9 @@
 #  include <sys/checkpnt.h>
 #endif
 
-#ifdef HAVE_PTY_H
-#  include <pty.h>
-#endif
-
-#ifdef HAVE_UTMP_H
-#  include <utmp.h>
-#endif
-
 #include <sys/resource.h>
 
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm_errno.h"
 
 #include "src/common/checkpoint.h"
 #include "src/common/env.h"
@@ -337,37 +329,15 @@ _setup_mpi(slurmd_job_t *job, int ltaskid)
  *  Current process is running as the user when this is called.
  */
 void
-exec_task(slurmd_job_t *job, int i, int waitfd)
+exec_task(slurmd_job_t *job, int i)
 {
-	char c;
 	uint32_t *gtids;		/* pointer to arrary of ranks */
 	int fd, j;
-	int rc;
 	slurmd_task_info_t *task = job->task[i];
 
-#ifdef HAVE_PTY_H
-	/* Execute login_tty() before setpgid() calls */
-	if (job->pty && (task->gtid == 0)) {
-		if (login_tty(task->stdin_fd))
-			error("login_tty: %m");
-		else
-			debug3("login_tty good");
-	}
-#endif
-
 	if (i == 0)
 		_make_tmpdir(job);
 
-	/*
-	 * Stall exec until all tasks have joined the same process group
-	 */
-	if ((rc = read (waitfd, &c, sizeof (c))) != 1) {
-		error ("_exec_task read failed, fd = %d, rc=%d: %m", waitfd, rc);
-		log_fini();
-		exit(1);
-	}
-	close(waitfd);
-
 	gtids = xmalloc(job->node_tasks * sizeof(uint32_t));
 	for (j = 0; j < job->node_tasks; j++)
 		gtids[j] = job->task[j]->gtid;
@@ -422,14 +392,7 @@ exec_task(slurmd_job_t *job, int i, int waitfd)
 		}
 	}
 
-#ifdef HAVE_PTY_H
-	if (job->pty && (task->gtid == 0)) {
-		/* Need to perform the login_tty() before all tasks
-		 * register and the process groups are reset, otherwise
-		 * login_tty() gets disabled */
-	} else
-#endif
-		io_dup_stdio(task);
+	io_dup_stdio(task);
 
 	/* task-specific pre-launch activities */
 
@@ -518,10 +481,21 @@ _make_tmpdir(slurmd_job_t *job)
 	char *tmpdir;
 
 	if (!(tmpdir = getenvp(job->env, "TMPDIR")))
-		return;
-
-	if ((mkdir(tmpdir, 0700) < 0) && (errno != EEXIST))
-		error ("Unable to create TMPDIR [%s]: %m", tmpdir);
+		setenvf(&job->env, "TMPDIR", "/tmp"); /* task may want it set */
+	else if (mkdir(tmpdir, 0700) < 0) {
+		if (errno == EEXIST) {
+			struct stat st;
+
+			if (stat(tmpdir, &st) == 0 && /* does user have access? */
+			    S_ISDIR(st.st_mode) && /* is it a directory? */
+			    ((st.st_mode & S_IWOTH) || /* can user write there? */
+			     (st.st_uid == job->uid && (st.st_mode & S_IWUSR))))
+				return;
+		}
+		error("Unable to create TMPDIR [%s]: %m", tmpdir);
+		error("Setting TMPDIR to /tmp");
+		setenvf(&job->env, "TMPDIR", "/tmp");
+	}
 
 	return;
 }
diff --git a/src/slurmd/slurmstepd/task.h b/src/slurmd/slurmstepd/task.h
index ee87d405d..78c0b6058 100644
--- a/src/slurmd/slurmstepd/task.h
+++ b/src/slurmd/slurmstepd/task.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -44,7 +44,7 @@
 #  include "config.h"
 #endif
 
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm_errno.h"
 
 #if HAVE_SYS_TYPES_H
 #  include <sys/types.h>
@@ -52,6 +52,6 @@
 
 #include "src/slurmd/slurmstepd/slurmstepd_job.h"
 
-void exec_task(slurmd_job_t *job, int i, int waitfd);
+void exec_task(slurmd_job_t *job, int i);
 
 #endif /* !_TASK_H */
diff --git a/src/slurmd/slurmstepd/ulimits.c b/src/slurmd/slurmstepd/ulimits.c
index 6dfbb1ad2..bd806a097 100644
--- a/src/slurmd/slurmstepd/ulimits.c
+++ b/src/slurmd/slurmstepd/ulimits.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -89,6 +89,13 @@ int set_user_limits(slurmd_job_t *job)
 	struct rlimit r;
 	rlim_t task_mem_bytes;
 
+	if (getrlimit(RLIMIT_CPU, &r) == 0) {
+		if (r.rlim_max != RLIM_INFINITY) {
+			error("SLURM process CPU time limit is %d seconds",
+			      (int) r.rlim_max);
+		}
+	}
+
 	for (rli = get_slurm_rlimits_info(); rli->name; rli++)
 		_set_limit( job->env, rli );
 
diff --git a/src/slurmd/slurmstepd/ulimits.h b/src/slurmd/slurmstepd/ulimits.h
index 1d35faaa7..82cffa72c 100644
--- a/src/slurmd/slurmstepd/ulimits.h
+++ b/src/slurmd/slurmstepd/ulimits.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/slurmdbd/Makefile.in b/src/slurmdbd/Makefile.in
index 2c667aefc..99e00ee4d 100644
--- a/src/slurmdbd/Makefile.in
+++ b/src/slurmdbd/Makefile.in
@@ -65,6 +65,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -75,6 +76,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -122,7 +124,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -159,6 +164,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -216,6 +222,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -251,6 +258,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/slurmdbd/agent.c b/src/slurmdbd/agent.c
index 1a8370b0a..01141586b 100644
--- a/src/slurmdbd/agent.c
+++ b/src/slurmdbd/agent.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/slurmdbd/agent.h b/src/slurmdbd/agent.h
index fcf19033c..ac68f24a6 100644
--- a/src/slurmdbd/agent.h
+++ b/src/slurmdbd/agent.h
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/slurmdbd/backup.c b/src/slurmdbd/backup.c
index 84eba1621..13e61adb9 100644
--- a/src/slurmdbd/backup.c
+++ b/src/slurmdbd/backup.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/slurmdbd/backup.h b/src/slurmdbd/backup.h
index 8e694345c..f5531005f 100644
--- a/src/slurmdbd/backup.h
+++ b/src/slurmdbd/backup.h
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/slurmdbd/proc_req.c b/src/slurmdbd/proc_req.c
index a0885a838..e374d8236 100644
--- a/src/slurmdbd/proc_req.c
+++ b/src/slurmdbd/proc_req.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -82,8 +82,6 @@ static int   _get_config(slurmdbd_conn_t *slurmdbd_conn,
 			 Buf in_buffer, Buf *out_buffer, uint32_t *uid);
 static int   _get_events(slurmdbd_conn_t *slurmdbd_conn,
 			 Buf in_buffer, Buf *out_buffer, uint32_t *uid);
-static int   _get_jobs(slurmdbd_conn_t *slurmdbd_conn,
-		       Buf in_buffer, Buf *out_buffer, uint32_t *uid);
 static int   _get_jobs_cond(slurmdbd_conn_t *slurmdbd_conn,
 			    Buf in_buffer, Buf *out_buffer, uint32_t *uid);
 static int   _get_probs(slurmdbd_conn_t *slurmdbd_conn,
@@ -267,10 +265,6 @@ proc_req(slurmdbd_conn_t *slurmdbd_conn,
 			rc = _get_events(slurmdbd_conn,
 					 in_buffer, out_buffer, uid);
 			break;
-		case DBD_GET_JOBS:
-			rc = _get_jobs(slurmdbd_conn,
-				       in_buffer, out_buffer, uid);
-			break;
 		case DBD_GET_JOBS_COND:
 			rc = _get_jobs_cond(slurmdbd_conn,
 					    in_buffer, out_buffer, uid);
@@ -424,6 +418,8 @@ proc_req(slurmdbd_conn_t *slurmdbd_conn,
 			rc = _step_start(slurmdbd_conn,
 					 in_buffer, out_buffer, uid);
 			break;
+		case DBD_GET_JOBS:
+			/* Defunct RPC */
 		default:
 			comment = "Invalid RPC";
 			error("CONN:%u %s msg_type=%d",
@@ -1004,6 +1000,15 @@ static int _cluster_cpus(slurmdbd_conn_t *slurmdbd_conn,
 		rc = SLURM_ERROR;
 	}
 end_it:
+	if (rc == SLURM_SUCCESS)
+		slurmdbd_conn->cluster_cpus = cluster_cpus_msg->cpu_count;
+	if (!slurmdbd_conn->ctld_port) {
+		info("DBD_CLUSTER_CPUS: cluster not registered");
+		slurmdbd_conn->ctld_port =
+			clusteracct_storage_g_register_disconn_ctld(
+				slurmdbd_conn->db_conn, slurmdbd_conn->ip);
+	}
+
 	slurmdbd_free_cluster_cpus_msg(cluster_cpus_msg);
 	*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version,
 				      rc, comment, DBD_CLUSTER_CPUS);
@@ -1210,85 +1215,6 @@ static int _get_events(slurmdbd_conn_t *slurmdbd_conn,
 	return rc;
 }
 
-static int _get_jobs(slurmdbd_conn_t *slurmdbd_conn,
-		     Buf in_buffer, Buf *out_buffer, uint32_t *uid)
-{
-	dbd_get_jobs_msg_t *get_jobs_msg = NULL;
-	dbd_list_msg_t list_msg;
-	char *comment = NULL;
-	slurmdb_job_cond_t job_cond;
-	int rc = SLURM_SUCCESS;
-
-	debug2("DBD_GET_JOBS: called");
-	if (slurmdbd_unpack_get_jobs_msg(
-		    &get_jobs_msg, slurmdbd_conn->rpc_version, in_buffer)
-	    != SLURM_SUCCESS) {
-		comment = "Failed to unpack DBD_GET_JOBS message";
-		error("CONN:%u %s", slurmdbd_conn->newsockfd, comment);
-		*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version,
-					      SLURM_ERROR, comment,
-					      DBD_GET_JOBS);
-		return SLURM_ERROR;
-	}
-
-	memset(&job_cond, 0, sizeof(slurmdb_job_cond_t));
-
-	job_cond.acct_list = get_jobs_msg->selected_steps;
-	job_cond.step_list = get_jobs_msg->selected_steps;
-	job_cond.partition_list = get_jobs_msg->selected_parts;
-
-	if (get_jobs_msg->user) {
-		uid_t pw_uid;
-		if (uid_from_string (get_jobs_msg->user, &pw_uid) >= 0) {
-			char *temp = xstrdup_printf("%u", pw_uid);
-			job_cond.userid_list = list_create(slurm_destroy_char);
-			list_append(job_cond.userid_list, temp);
-		}
-	}
-
-	if (get_jobs_msg->gid >=0) {
-		char *temp = xstrdup_printf("%u", get_jobs_msg->gid);
-		job_cond.groupid_list = list_create(slurm_destroy_char);
-		list_append(job_cond.groupid_list, temp);
-	}
-
-	if (get_jobs_msg->cluster_name) {
-		job_cond.cluster_list = list_create(NULL);
-		list_append(job_cond.cluster_list, get_jobs_msg->cluster_name);
-	}
-
-	list_msg.my_list = jobacct_storage_g_get_jobs_cond(
-		slurmdbd_conn->db_conn, *uid, &job_cond);
-
-	if (!errno) {
-		if (!list_msg.my_list)
-			list_msg.my_list = list_create(NULL);
-		*out_buffer = init_buf(1024);
-		pack16((uint16_t) DBD_GOT_JOBS, *out_buffer);
-		slurmdbd_pack_list_msg(&list_msg, slurmdbd_conn->rpc_version,
-				       DBD_GOT_JOBS, *out_buffer);
-	} else {
-		*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version,
-					      errno, slurm_strerror(errno),
-					      DBD_GET_JOBS);
-		rc = SLURM_ERROR;
-	}
-
-	if (job_cond.cluster_list)
-		list_destroy(job_cond.cluster_list);
-	if (job_cond.userid_list)
-		list_destroy(job_cond.userid_list);
-	if (job_cond.groupid_list)
-		list_destroy(job_cond.groupid_list);
-
-	slurmdbd_free_get_jobs_msg(get_jobs_msg);
-
-	if (list_msg.my_list)
-		list_destroy(list_msg.my_list);
-
-	return rc;
-}
-
 static int _get_jobs_cond(slurmdbd_conn_t *slurmdbd_conn,
 			  Buf in_buffer, Buf *out_buffer, uint32_t *uid)
 {
@@ -1848,7 +1774,9 @@ static int  _job_complete(slurmdbd_conn_t *slurmdbd_conn,
 	memset(&details, 0, sizeof(struct job_details));
 
 	job.assoc_id = job_comp_msg->assoc_id;
-	job.db_index = job_comp_msg->db_index;
+	job.comment = job_comp_msg->comment;
+	if (job_comp_msg->db_index != NO_VAL)
+		job.db_index = job_comp_msg->db_index;
 	job.derived_ec = job_comp_msg->derived_ec;
 	job.end_time = job_comp_msg->end_time;
 	job.exit_code = job_comp_msg->exit_code;
@@ -1874,6 +1802,14 @@ static int  _job_complete(slurmdbd_conn_t *slurmdbd_conn,
 
 	/* just incase this gets set we need to clear it */
 	xfree(job.wckey);
+
+	if (!slurmdbd_conn->ctld_port) {
+		info("DBD_JOB_COMPLETE: cluster not registered");
+		slurmdbd_conn->ctld_port =
+			clusteracct_storage_g_register_disconn_ctld(
+				slurmdbd_conn->db_conn, slurmdbd_conn->ip);
+	}
+
 end_it:
 	slurmdbd_free_job_complete_msg(job_comp_msg);
 	*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version,
@@ -1952,7 +1888,8 @@ static int  _job_suspend(slurmdbd_conn_t *slurmdbd_conn,
 	memset(&details, 0, sizeof(struct job_details));
 
 	job.assoc_id = job_suspend_msg->assoc_id;
-	job.db_index = job_suspend_msg->db_index;
+	if (job_suspend_msg->db_index != NO_VAL)
+		job.db_index = job_suspend_msg->db_index;
 	job.job_id = job_suspend_msg->job_id;
 	job.job_state = job_suspend_msg->job_state;
 	details.submit_time = job_suspend_msg->submit_time;
@@ -2645,7 +2582,8 @@ static void _process_job_start(slurmdbd_conn_t *slurmdbd_conn,
 	job.account = _replace_double_quotes(job_start_msg->account);
 	job.assoc_id = job_start_msg->assoc_id;
 	job.comment = job_start_msg->block_id;
-	job.db_index = job_start_msg->db_index;
+	if (job_start_msg->db_index != NO_VAL)
+		job.db_index = job_start_msg->db_index;
 	details.begin_time = job_start_msg->eligible_time;
 	job.user_id = job_start_msg->uid;
 	job.group_id = job_start_msg->gid;
@@ -2688,6 +2626,13 @@ static void _process_job_start(slurmdbd_conn_t *slurmdbd_conn,
 	/* just incase job.wckey was set because we didn't send one */
 	if (!job_start_msg->wckey)
 		xfree(job.wckey);
+
+	if (!slurmdbd_conn->ctld_port) {
+		info("DBD_JOB_START: cluster not registered");
+		slurmdbd_conn->ctld_port =
+			clusteracct_storage_g_register_disconn_ctld(
+				slurmdbd_conn->db_conn, slurmdbd_conn->ip);
+	}
 }
 
 static int   _register_ctld(slurmdbd_conn_t *slurmdbd_conn,
@@ -2784,6 +2729,10 @@ static int   _register_ctld(slurmdbd_conn_t *slurmdbd_conn,
 #endif
 
 end_it:
+
+	if (rc == SLURM_SUCCESS)
+		slurmdbd_conn->ctld_port = register_ctld_msg->port;
+
 	slurmdbd_free_register_ctld_msg(register_ctld_msg);
 	*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version,
 				      rc, comment, DBD_REGISTER_CTLD);
@@ -3493,7 +3442,8 @@ static int  _step_complete(slurmdbd_conn_t *slurmdbd_conn,
 	memset(&details, 0, sizeof(struct job_details));
 
 	job.assoc_id = step_comp_msg->assoc_id;
-	job.db_index = step_comp_msg->db_index;
+	if (step_comp_msg->db_index != NO_VAL)
+		job.db_index = step_comp_msg->db_index;
 	job.end_time = step_comp_msg->end_time;
 	step.exit_code = step_comp_msg->exit_code;
 	step.jobacct = step_comp_msg->jobacct;
@@ -3514,6 +3464,14 @@ static int  _step_complete(slurmdbd_conn_t *slurmdbd_conn,
 		rc = SLURM_SUCCESS;
 	/* just incase this gets set we need to clear it */
 	xfree(job.wckey);
+
+	if (!slurmdbd_conn->ctld_port) {
+		info("DBD_STEP_COMPLETE: cluster not registered");
+		slurmdbd_conn->ctld_port =
+			clusteracct_storage_g_register_disconn_ctld(
+				slurmdbd_conn->db_conn, slurmdbd_conn->ip);
+	}
+
 end_it:
 	slurmdbd_free_step_complete_msg(step_comp_msg);
 	*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version,
@@ -3558,7 +3516,8 @@ static int  _step_start(slurmdbd_conn_t *slurmdbd_conn,
 	memset(&layout, 0, sizeof(slurm_step_layout_t));
 
 	job.assoc_id = step_start_msg->assoc_id;
-	job.db_index = step_start_msg->db_index;
+	if (step_start_msg->db_index != NO_VAL)
+		job.db_index = step_start_msg->db_index;
 	job.job_id = step_start_msg->job_id;
 	step.name = step_start_msg->name;
 	job.nodes = step_start_msg->nodes;
@@ -3583,6 +3542,14 @@ static int  _step_start(slurmdbd_conn_t *slurmdbd_conn,
 
 	/* just incase this gets set we need to clear it */
 	xfree(job.wckey);
+
+	if (!slurmdbd_conn->ctld_port) {
+		info("DBD_STEP_START: cluster not registered");
+		slurmdbd_conn->ctld_port =
+			clusteracct_storage_g_register_disconn_ctld(
+				slurmdbd_conn->db_conn, slurmdbd_conn->ip);
+	}
+
 end_it:
 	slurmdbd_free_step_start_msg(step_start_msg);
 	*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version,
diff --git a/src/slurmdbd/proc_req.h b/src/slurmdbd/proc_req.h
index 582995fea..1800dd7ce 100644
--- a/src/slurmdbd/proc_req.h
+++ b/src/slurmdbd/proc_req.h
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -45,6 +45,8 @@
 
 typedef struct {
 	char *cluster_name;
+	uint32_t cluster_cpus;
+	uint16_t ctld_port; /* slurmctld_port */
 	void *db_conn; /* database connection */
 	char ip[32];
 	slurm_fd_t newsockfd; /* socket connection descriptor */
diff --git a/src/slurmdbd/read_config.c b/src/slurmdbd/read_config.c
index d4fb105ad..388b4a65c 100644
--- a/src/slurmdbd/read_config.c
+++ b/src/slurmdbd/read_config.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -44,7 +44,8 @@
 #include <sys/stat.h>
 #include <sys/types.h>
 #include <unistd.h>
-#include <slurm/slurm_errno.h>
+
+#include "slurm/slurm_errno.h"
 
 #include "src/common/log.h"
 #include "src/common/list.h"
@@ -112,6 +113,7 @@ static void _clear_slurmdbd_conf(void)
 		xfree(slurmdbd_conf->storage_type);
 		xfree(slurmdbd_conf->storage_user);
 		slurmdbd_conf->track_wckey = 0;
+		slurmdbd_conf->track_ctld = 0;
 	}
 }
 
@@ -162,6 +164,7 @@ extern int read_slurmdbd_conf(void)
 		{"StorageType", S_P_STRING},
 		{"StorageUser", S_P_STRING},
 		{"TrackWCKey", S_P_BOOLEAN},
+		{"TrackSlurmctldDown", S_P_BOOLEAN},
 		{NULL} };
 	s_p_hashtbl_t *tbl = NULL;
 	char *conf_path = NULL;
@@ -186,7 +189,8 @@ extern int read_slurmdbd_conf(void)
 		debug("Reading slurmdbd.conf file %s", conf_path);
 
 		tbl = s_p_hashtbl_create(options);
-		if (s_p_parse_file(tbl, NULL, conf_path) == SLURM_ERROR) {
+		if (s_p_parse_file(tbl, NULL, conf_path, false)
+		    == SLURM_ERROR) {
 			fatal("Could not open/read/parse slurmdbd.conf file %s",
 			      conf_path);
 		}
@@ -358,6 +362,10 @@ extern int read_slurmdbd_conf(void)
 				     "TrackWCKey", tbl))
 			slurmdbd_conf->track_wckey = false;
 
+		if (!s_p_get_boolean((bool *)&slurmdbd_conf->track_ctld,
+				     "TrackSlurmctldDown", tbl))
+			slurmdbd_conf->track_ctld = false;
+
 		if (a_events)
 			slurmdbd_conf->purge_event |= SLURMDB_PURGE_ARCHIVE;
 		if (a_jobs)
@@ -536,6 +544,7 @@ extern void log_config(void)
 	debug2("StorageUser       = %s", slurmdbd_conf->storage_user);
 
 	debug2("TrackWCKey        = %u", slurmdbd_conf->track_wckey);
+	debug2("TrackSlurmctldDown= %u", slurmdbd_conf->track_ctld);
 }
 
 /* Return the DbdPort value */
@@ -805,5 +814,11 @@ extern List dump_config(void)
 	snprintf(key_pair->value, 32, "%u", slurmdbd_conf->track_wckey);
 	list_append(my_list, key_pair);
 
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("TrackSlurmctldDown");
+	key_pair->value = xmalloc(32);
+	snprintf(key_pair->value, 32, "%u", slurmdbd_conf->track_ctld);
+	list_append(my_list, key_pair);
+
 	return my_list;
 }
diff --git a/src/slurmdbd/read_config.h b/src/slurmdbd/read_config.h
index 550dbf4e8..f8567460e 100644
--- a/src/slurmdbd/read_config.h
+++ b/src/slurmdbd/read_config.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -105,6 +105,8 @@ typedef struct slurm_dbd_conf {
 	char *		storage_type;	/* DB to be used for storage	*/
 	char *		storage_user;	/* user authorized to write DB	*/
 	uint16_t        track_wckey;    /* Whether or not to track wckey*/
+	uint16_t        track_ctld;     /* Whether or not track when a
+					 * slurmctld goes down or not   */
 } slurm_dbd_conf_t;
 
 extern pthread_mutex_t conf_mutex;
diff --git a/src/slurmdbd/rpc_mgr.c b/src/slurmdbd/rpc_mgr.c
index 14ce1e428..1e538f7a2 100644
--- a/src/slurmdbd/rpc_mgr.c
+++ b/src/slurmdbd/rpc_mgr.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -248,11 +248,23 @@ static void * _service_connection(void *arg)
 		xfree(msg);
 	}
 
+	if (conn->ctld_port && !shutdown_time) {
+		slurmdb_cluster_rec_t cluster_rec;
+		memset(&cluster_rec, 0, sizeof(slurmdb_cluster_rec_t));
+		cluster_rec.name = conn->cluster_name;
+		cluster_rec.control_host = conn->ip;
+		cluster_rec.control_port = conn->ctld_port;
+		cluster_rec.cpu_count = conn->cluster_cpus;
+		debug("cluster %s has disconnected", conn->cluster_name);
+		clusteracct_storage_g_fini_ctld(conn->db_conn, &cluster_rec);
+	}
+
 	acct_storage_g_close_connection(&conn->db_conn);
 	if (slurm_close_accepted_conn(conn->newsockfd) < 0)
 		error("close(%d): %m(%s)",  conn->newsockfd, conn->ip);
 	else
 		debug2("Closed connection %d uid(%d)", conn->newsockfd, uid);
+
 	xfree(conn->cluster_name);
 	xfree(conn);
 	_free_server_thread(pthread_self());
diff --git a/src/slurmdbd/rpc_mgr.h b/src/slurmdbd/rpc_mgr.h
index 6be8ff6f8..308d55390 100644
--- a/src/slurmdbd/rpc_mgr.h
+++ b/src/slurmdbd/rpc_mgr.h
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/slurmdbd/slurmdbd.c b/src/slurmdbd/slurmdbd.c
index 12f663002..dc38fef7c 100644
--- a/src/slurmdbd/slurmdbd.c
+++ b/src/slurmdbd/slurmdbd.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -93,8 +93,10 @@ static void  _init_config(void);
 static void  _init_pidfile(void);
 static void  _kill_old_slurmdbd(void);
 static void  _parse_commandline(int argc, char *argv[]);
+static void  _request_registrations(void *db_conn);
 static void  _rollup_handler_cancel();
 static void *_rollup_handler(void *no_data);
+static int _send_slurmctld_register_req(slurmdb_cluster_rec_t *cluster_rec);
 static void *_signal_handler(void *no_data);
 static void  _update_logging(void);
 static void  _update_nice(void);
@@ -148,8 +150,6 @@ int main(int argc, char *argv[])
 	if (xsignal_block(dbd_sigarray) < 0)
 		error("Unable to block signals");
 
-	db_conn = acct_storage_g_get_connection(NULL, 0, false, NULL);
-
 	/* Create attached thread for signal handling */
 	slurm_attr_init(&thread_attr);
 	if (pthread_create(&signal_handler_thread, &thread_attr,
@@ -165,7 +165,8 @@ int main(int argc, char *argv[])
 	if (slurmdbd_conf->track_wckey)
 		assoc_init_arg.cache_level |= ASSOC_MGR_CACHE_WCKEY;
 
-	if (assoc_mgr_init(db_conn, &assoc_init_arg) == SLURM_ERROR) {
+	db_conn = acct_storage_g_get_connection(NULL, 0, false, NULL);
+	if (assoc_mgr_init(db_conn, &assoc_init_arg, errno) == SLURM_ERROR) {
 		error("Problem getting cache of data");
 		acct_storage_g_close_connection(&db_conn);
 		goto end_it;
@@ -224,6 +225,8 @@ int main(int argc, char *argv[])
 				run_backup();
 		}
 
+		_request_registrations(db_conn);
+
 		/* this is only ran if not backup */
 		if (rollup_handler_thread)
 			pthread_join(rollup_handler_thread, NULL);
@@ -457,6 +460,28 @@ static void _daemonize(void)
 	}
 }
 
+static void _request_registrations(void *db_conn)
+{
+	List cluster_list = acct_storage_g_get_clusters(
+		db_conn, getuid(), NULL);
+	ListIterator itr;
+	slurmdb_cluster_rec_t *cluster_rec = NULL;
+
+	if (!cluster_list)
+		return;
+	itr = list_iterator_create(cluster_list);
+	while ((cluster_rec = list_next(itr))) {
+		if (!cluster_rec->control_port
+		    || (cluster_rec->rpc_version < 9))
+			continue;
+		if (_send_slurmctld_register_req(cluster_rec) != SLURM_SUCCESS)
+			/* mark this cluster as unresponsive */
+			clusteracct_storage_g_fini_ctld(db_conn, cluster_rec);
+	}
+	list_iterator_destroy(itr);
+	list_destroy(cluster_list);
+}
+
 static void _rollup_handler_cancel()
 {
 	if (running_rollup)
@@ -523,6 +548,39 @@ static void *_rollup_handler(void *db_conn)
 	return NULL;
 }
 
+/*
+ * send_slurmctld_register_req - request register from slurmctld
+ * IN host: control host of cluster
+ * IN port: control port of cluster
+ * IN rpc_version: rpc version of cluster
+ * RET:  error code
+ */
+static int _send_slurmctld_register_req(slurmdb_cluster_rec_t *cluster_rec)
+{
+	slurm_addr_t ctld_address;
+	slurm_fd_t fd;
+	int rc = SLURM_SUCCESS;
+
+	slurm_set_addr_char(&ctld_address, cluster_rec->control_port,
+			    cluster_rec->control_host);
+	fd = slurm_open_msg_conn(&ctld_address);
+	if (fd < 0) {
+		rc = SLURM_ERROR;
+	} else {
+		slurm_msg_t out_msg;
+		slurm_msg_t_init(&out_msg);
+		out_msg.msg_type = ACCOUNTING_REGISTER_CTLD;
+		out_msg.flags = SLURM_GLOBAL_AUTH_KEY;
+		slurm_send_node_msg(fd, &out_msg);
+		/* We probably need to add matching recv_msg function
+		 * for an arbitray fd or should these be fire
+		 * and forget?  For this, that we can probably
+		 * forget about it */
+		slurm_close_stream(fd);
+	}
+	return rc;
+}
+
 /* _signal_handler - Process daemon-wide signals */
 static void *_signal_handler(void *no_data)
 {
diff --git a/src/slurmdbd/slurmdbd.h b/src/slurmdbd/slurmdbd.h
index c1138dc30..ad4a3d79c 100644
--- a/src/slurmdbd/slurmdbd.h
+++ b/src/slurmdbd/slurmdbd.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/smap/Makefile.am b/src/smap/Makefile.am
index 18e3f7a41..629963b7a 100644
--- a/src/smap/Makefile.am
+++ b/src/smap/Makefile.am
@@ -13,15 +13,21 @@ if HAVE_SOME_CURSES
 
 bin_PROGRAMS = smap
 
-smap_LDADD = \
-	$(top_builddir)/src/plugins/select/bluegene/block_allocator/libbluegene_block_allocator.la \
-	$(top_builddir)/src/api/libslurm.o -ldl
-
 noinst_HEADERS = smap.h
 smap_SOURCES = smap.c \
 	job_functions.c partition_functions.c \
-	configure_functions.c grid_functions.c \
-	reservation_functions.c opts.c
+	grid_functions.c reservation_functions.c opts.c
+
+smap_LDADD = $(top_builddir)/src/api/libslurm.o -ldl
+
+if BLUEGENE_LOADED
+bg_dir = $(top_builddir)/src/plugins/select/bluegene
+
+smap_LDADD += $(bg_dir)/libconfigure_api.la
+
+smap_SOURCES += configure_functions.c
+
+endif
 
 force:
 $(smap_LDADD) : force
diff --git a/src/smap/Makefile.in b/src/smap/Makefile.in
index 3aed1b7d7..36ab94cd1 100644
--- a/src/smap/Makefile.in
+++ b/src/smap/Makefile.in
@@ -43,6 +43,8 @@ build_triplet = @build@
 host_triplet = @host@
 target_triplet = @target@
 @HAVE_SOME_CURSES_TRUE@bin_PROGRAMS = smap$(EXEEXT)
+@BLUEGENE_LOADED_TRUE@@HAVE_SOME_CURSES_TRUE@am__append_1 = $(bg_dir)/libconfigure_api.la
+@BLUEGENE_LOADED_TRUE@@HAVE_SOME_CURSES_TRUE@am__append_2 = configure_functions.c
 subdir = src/smap
 DIST_COMMON = $(am__noinst_HEADERS_DIST) $(srcdir)/Makefile.am \
 	$(srcdir)/Makefile.in
@@ -70,6 +72,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -80,6 +83,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -91,21 +95,22 @@ CONFIG_CLEAN_VPATH_FILES =
 am__installdirs = "$(DESTDIR)$(bindir)"
 PROGRAMS = $(bin_PROGRAMS)
 am__smap_SOURCES_DIST = smap.c job_functions.c partition_functions.c \
-	configure_functions.c grid_functions.c reservation_functions.c \
-	opts.c
+	grid_functions.c reservation_functions.c opts.c \
+	configure_functions.c
+@BLUEGENE_LOADED_TRUE@@HAVE_SOME_CURSES_TRUE@am__objects_1 = configure_functions.$(OBJEXT)
 @HAVE_SOME_CURSES_TRUE@am_smap_OBJECTS = smap.$(OBJEXT) \
 @HAVE_SOME_CURSES_TRUE@	job_functions.$(OBJEXT) \
 @HAVE_SOME_CURSES_TRUE@	partition_functions.$(OBJEXT) \
-@HAVE_SOME_CURSES_TRUE@	configure_functions.$(OBJEXT) \
 @HAVE_SOME_CURSES_TRUE@	grid_functions.$(OBJEXT) \
 @HAVE_SOME_CURSES_TRUE@	reservation_functions.$(OBJEXT) \
-@HAVE_SOME_CURSES_TRUE@	opts.$(OBJEXT)
+@HAVE_SOME_CURSES_TRUE@	opts.$(OBJEXT) $(am__objects_1)
 am__EXTRA_smap_SOURCES_DIST = smap.h smap.c job_functions.c \
 	partition_functions.c configure_functions.c grid_functions.c \
 	reservation_functions.c opts.c
 smap_OBJECTS = $(am_smap_OBJECTS)
-@HAVE_SOME_CURSES_TRUE@smap_DEPENDENCIES = $(top_builddir)/src/plugins/select/bluegene/block_allocator/libbluegene_block_allocator.la \
-@HAVE_SOME_CURSES_TRUE@	$(top_builddir)/src/api/libslurm.o
+@HAVE_SOME_CURSES_TRUE@smap_DEPENDENCIES =  \
+@HAVE_SOME_CURSES_TRUE@	$(top_builddir)/src/api/libslurm.o \
+@HAVE_SOME_CURSES_TRUE@	$(am__append_1)
 smap_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
 	--mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(smap_LDFLAGS) \
 	$(LDFLAGS) -o $@
@@ -139,7 +144,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -176,6 +184,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -233,6 +242,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -268,6 +278,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
@@ -322,16 +333,15 @@ top_builddir = @top_builddir@
 top_srcdir = @top_srcdir@
 AUTOMAKE_OPTIONS = foreign
 INCLUDES = -I$(top_srcdir) $(BG_INCLUDES)
-@HAVE_SOME_CURSES_TRUE@smap_LDADD = \
-@HAVE_SOME_CURSES_TRUE@	$(top_builddir)/src/plugins/select/bluegene/block_allocator/libbluegene_block_allocator.la \
-@HAVE_SOME_CURSES_TRUE@	$(top_builddir)/src/api/libslurm.o -ldl
-
 @HAVE_SOME_CURSES_TRUE@noinst_HEADERS = smap.h
-@HAVE_SOME_CURSES_TRUE@smap_SOURCES = smap.c \
-@HAVE_SOME_CURSES_TRUE@	job_functions.c partition_functions.c \
-@HAVE_SOME_CURSES_TRUE@	configure_functions.c grid_functions.c \
-@HAVE_SOME_CURSES_TRUE@	reservation_functions.c opts.c
-
+@HAVE_SOME_CURSES_TRUE@smap_SOURCES = smap.c job_functions.c \
+@HAVE_SOME_CURSES_TRUE@	partition_functions.c grid_functions.c \
+@HAVE_SOME_CURSES_TRUE@	reservation_functions.c opts.c \
+@HAVE_SOME_CURSES_TRUE@	$(am__append_2)
+@HAVE_SOME_CURSES_TRUE@smap_LDADD =  \
+@HAVE_SOME_CURSES_TRUE@	$(top_builddir)/src/api/libslurm.o -ldl \
+@HAVE_SOME_CURSES_TRUE@	$(am__append_1)
+@BLUEGENE_LOADED_TRUE@@HAVE_SOME_CURSES_TRUE@bg_dir = $(top_builddir)/src/plugins/select/bluegene
 @HAVE_SOME_CURSES_TRUE@smap_LDFLAGS = -export-dynamic $(CMD_LDFLAGS)
 @HAVE_SOME_CURSES_FALSE@EXTRA_smap_SOURCES = smap.h smap.c \
 @HAVE_SOME_CURSES_FALSE@	job_functions.c partition_functions.c \
diff --git a/src/smap/configure_functions.c b/src/smap/configure_functions.c
index 3f65bf4b2..9f4cc3ea0 100644
--- a/src/smap/configure_functions.c
+++ b/src/smap/configure_functions.c
@@ -2,14 +2,15 @@
  *  configure_functions.c - Functions related to configure mode of smap.
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2011 Lawrence Livermore National Security.
+ *  Copyright (C) 2011 SchedMD LLC.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
- *  Written by Danny Auble <da@llnl.gov>
+ *  Written by Danny Auble <da@schedmd.com>
  *
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -38,115 +39,224 @@
  *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
 \*****************************************************************************/
 
-#include "src/common/xstring.h"
-#include "src/common/uid.h"
 #include "src/smap/smap.h"
+#include "src/common/uid.h"
+#include "src/common/xstring.h"
+#include "src/common/proc_args.h"
+
+///////////////////////////////////////////////////////////////////////
 
 typedef struct {
 	int color;
+	int color_count;
 	char letter;
 	List nodes;
-	ba_request_t *request;
+	select_ba_request_t *request;
 } allocated_block_t;
 
-static void	_delete_allocated_blocks(List allocated_blocks);
-static allocated_block_t *_make_request(ba_request_t *request);
-static int      _set_layout(char *com);
-static int      _set_base_part_cnt(char *com);
-static int      _set_nodecard_cnt(char *com);
-static int	_create_allocation(char *com, List allocated_blocks);
-static int	_resolve(char *com);
+static int	_add_bg_record(select_ba_request_t *blockreq,
+			       List allocated_blocks);
 static int	_change_state_all_bps(char *com, int state);
 static int	_change_state_bps(char *com, int state);
-static int	_remove_allocation(char *com, List allocated_blocks);
-static int	_alter_allocation(char *com, List allocated_blocks);
 static int	_copy_allocation(char *com, List allocated_blocks);
-static int	_save_allocation(char *com, List allocated_blocks);
-static int	_add_bg_record(blockreq_t *blockreq, List allocated_blocks);
+static int	_create_allocation(char *com, List allocated_blocks);
 static int	_load_configuration(char *com, List allocated_blocks);
+static allocated_block_t *_make_request(select_ba_request_t *request);
 static void	_print_header_command(void);
 static void	_print_text_command(allocated_block_t *allocated_block);
+static int	_remove_allocation(char *com, List allocated_blocks);
+static int	_resolve(char *com);
+static int	_save_allocation(char *com, List allocated_blocks);
+static int      _set_layout(char *com);
+static int      _set_base_part_cnt(char *com);
+static int      _set_nodecard_cnt(char *com);
 
+int color_count = 0;
 char error_string[255];
 int base_part_node_cnt = 512;
 int nodecard_node_cnt = 32;
 char *layout_mode = "STATIC";
 
-static void _delete_allocated_blocks(List allocated_blocks)
+static void _set_nodes(List nodes, int color, char letter)
 {
-	allocated_block_t *allocated_block = NULL;
+	ListIterator itr;
+	smap_node_t *smap_node;
+	ba_mp_t *ba_mp;
+
+	if (!nodes || !smap_system_ptr)
+		return;
 
-	while ((allocated_block = list_pop(allocated_blocks)) != NULL) {
-		remove_block(allocated_block->nodes, 0,
-			     allocated_block->request->conn_type);
-		list_destroy(allocated_block->nodes);
-		delete_ba_request(allocated_block->request);
+	itr = list_iterator_create(nodes);
+	while ((ba_mp = list_next(itr))) {
+		if (!ba_mp->used)
+			continue;
+		smap_node = smap_system_ptr->grid[ba_mp->index];
+		smap_node->color = color;
+		smap_node->letter = letter;
+	}
+	list_iterator_destroy(itr);
+	return;
+}
+
+static void _destroy_allocated_block(void *object)
+{
+	allocated_block_t *allocated_block = (allocated_block_t *)object;
+
+	if (allocated_block) {
+		bool is_small = (allocated_block->request->conn_type[0] >=
+				 SELECT_SMALL);
+		if (allocated_block->nodes) {
+			_set_nodes(allocated_block->nodes, 0, '.');
+			bg_configure_remove_block(
+				allocated_block->nodes, is_small);
+			list_destroy(allocated_block->nodes);
+		}
+		destroy_select_ba_request(allocated_block->request);
 		xfree(allocated_block);
 	}
-	list_destroy(allocated_blocks);
 }
 
-static allocated_block_t *_make_request(ba_request_t *request)
+static allocated_block_t *_make_request(select_ba_request_t *request)
 {
 	List results = list_create(NULL);
-	ListIterator results_i;
 	allocated_block_t *allocated_block = NULL;
-	ba_node_t *current = NULL;
 
-	if (!allocate_block(request, results)){
-		memset(error_string,0,255);
-		sprintf(error_string,"allocate failure for %dx%dx%d",
-			  request->geometry[0], request->geometry[1],
-			  request->geometry[2]);
-		return NULL;
-	} else {
-		char *pass = ba_passthroughs_string(request->deny_pass);
-		if(pass) {
+#ifdef HAVE_BGQ
+	results = list_create(bg_configure_destroy_ba_mp);
+#else
+	results = list_create(NULL);
+#endif
+
+	if (bg_configure_allocate_block(request, results)) {
+		char *pass = bg_configure_ba_passthroughs_string(
+			request->deny_pass);
+		if (pass) {
 			sprintf(error_string,"THERE ARE PASSTHROUGHS IN "
 				"THIS ALLOCATION DIM %s!!!!!!!", pass);
 			xfree(pass);
 		}
 
-		allocated_block = (allocated_block_t *)xmalloc(
-			sizeof(allocated_block_t));
+		allocated_block = xmalloc(sizeof(allocated_block_t));
 		allocated_block->request = request;
-		allocated_block->nodes = list_create(NULL);
-		results_i = list_iterator_create(results);
-		while ((current = list_next(results_i)) != NULL) {
-			list_append(allocated_block->nodes,current);
-			allocated_block->color = current->color;
-			allocated_block->letter = current->letter;
+		allocated_block->nodes = results;
+		allocated_block->letter = letters[color_count%62];
+		allocated_block->color  = colors[color_count%6];
+		allocated_block->color_count = color_count++;
+		_set_nodes(allocated_block->nodes,
+			   allocated_block->color,
+			   allocated_block->letter);
+		results = NULL;
+	}
+
+	if (results)
+		list_destroy(results);
+	return allocated_block;
+
+}
+
+static int _full_request(select_ba_request_t *request,
+			 bitstr_t *usable_mp_bitmap,
+			 List allocated_blocks)
+{
+	char *tmp_char = NULL, *tmp_char2 = NULL;
+	allocated_block_t *allocated_block;
+	int rc = 1;
+
+	if (!strcasecmp(layout_mode,"OVERLAP"))
+		bg_configure_reset_ba_system(true);
+
+	if (usable_mp_bitmap)
+		bg_configure_ba_set_removable_mps(usable_mp_bitmap, 1);
+
+	/*
+	 * Here is where we do the allocating of the partition.
+	 * It will send a request back which we will throw into
+	 * a list just incase we change something later.
+	 */
+	if (!bg_configure_new_ba_request(request)) {
+		memset(error_string, 0, 255);
+		if (request->size != -1) {
+			sprintf(error_string,
+				"Problems with request for %d\n"
+				"Either you put in something "
+				"that doesn't work,\n"
+				"or we are unable to process "
+				"your request.",
+				request->size);
+			rc = 0;
+		} else {
+			tmp_char = bg_configure_give_geo(request->geometry,
+							 params.cluster_dims,
+							 1);
+			sprintf(error_string,
+				"Problems with request of size %s\n"
+				"Either you put in something "
+				"that doesn't work,\n"
+				"or we are unable to process "
+				"your request.",
+				tmp_char);
+			xfree(tmp_char);
+			rc = 0;
+		}
+	} else {
+		if ((allocated_block = _make_request(request)) != NULL)
+			list_append(allocated_blocks, allocated_block);
+		else {
+			if (request->geometry[0] != (uint16_t)NO_VAL)
+				tmp_char = bg_configure_give_geo(
+					request->geometry,
+					params.cluster_dims, 1);
+			tmp_char2 = bg_configure_give_geo(request->start,
+							  params.cluster_dims,
+							  1);
+
+			memset(error_string, 0, 255);
+			sprintf(error_string,
+				"allocate failure\nSize requested "
+				"was %d MidPlanes\n",
+				request->size);
+			if (tmp_char) {
+				sprintf(error_string + strlen(error_string),
+					"Geo requested was %s\n", tmp_char);
+				xfree(tmp_char);
+			} else {
+				sprintf(error_string + strlen(error_string),
+					"No geometry could be laid out "
+					"for that size\n");
+			}
+			sprintf(error_string + strlen(error_string),
+				"Start position was %s", tmp_char2);
+			xfree(tmp_char2);
+			rc = 0;
 		}
-		list_iterator_destroy(results_i);
 	}
-	list_destroy(results);
-	return(allocated_block);
 
+	if (usable_mp_bitmap)
+		bg_configure_ba_reset_all_removed_mps();
+
+	return rc;
 }
 
 static int _set_layout(char *com)
 {
-	int i=0;
-	int len = strlen(com);
+	int i;
 
-	while(i<len) {
-		if(!strncasecmp(com+i, "dynamic", 7)) {
+	for (i = 0; com[i]; i++) {
+		if (!strncasecmp(com+i, "dynamic", 7)) {
 			layout_mode = "DYNAMIC";
 			break;
-		} else if(!strncasecmp(com+i, "static", 6)) {
+		} else if (!strncasecmp(com+i, "static", 6)) {
 			layout_mode = "STATIC";
 			break;
-		} else if(!strncasecmp(com+i, "overlap", 7)) {
+		} else if (!strncasecmp(com+i, "overlap", 7)) {
 			layout_mode = "OVERLAP";
 			break;
-		} else {
-			i++;
 		}
 	}
-	if(i>=len) {
+	if (com[i] == '\0') {
 		sprintf(error_string,
 			"You didn't put in a mode that I recognized. \n"
-			"Please use (STATIC, OVERLAP, or DYNAMIC)\n");
+			"Please use STATIC, OVERLAP, or DYNAMIC\n");
 		return 0;
 	}
 	sprintf(error_string,
@@ -156,22 +266,20 @@ static int _set_layout(char *com)
 
 static int _set_base_part_cnt(char *com)
 {
-	int i=0;
-	int len = strlen(com);
+	int i;
 
-	while(i<len) {
-		if(com[i] < 58 && com[i] > 47) {
+	for (i = 0; com[i]; i++) {
+		if ((com[i] >= '0') && (com[i] <= '9'))
 			break;
-		} else {
-			i++;
-		}
 	}
-	if(i>=len) {
+	if (com[i] == '\0') {
 		sprintf(error_string,
 			"I didn't notice the number you typed in\n");
 		return 0;
 	}
+
 	base_part_node_cnt = atoi(&com[i]);
+	memset(error_string, 0, 255);
 	sprintf(error_string,
 		"BasePartitionNodeCnt set to %d\n", base_part_node_cnt);
 
@@ -180,124 +288,151 @@ static int _set_base_part_cnt(char *com)
 
 static int _set_nodecard_cnt(char *com)
 {
-	int i=0;
-	int len = strlen(com);
+	int i;
 
-	while(i<len) {
-		if(com[i] < 58 && com[i] > 47) {
+	for (i = 0; com[i]; i++) {
+		if ((com[i] >= '0') && (com[i] <= '9'))
 			break;
-		} else {
-			i++;
-		}
 	}
-	if(i>=len) {
+	if (com[i] == '\0') {
 		sprintf(error_string,
 			"I didn't notice the number you typed in\n");
 		return 0;
 	}
+
 	nodecard_node_cnt = atoi(&com[i]);
+	memset(error_string, 0, 255);
 	sprintf(error_string,
 		"NodeCardNodeCnt set to %d\n", nodecard_node_cnt);
 
 	return 1;
 }
 
+static int _xlate_coord(char *str, int len)
+{
+	if (len > 1)
+		return xstrntol(str, NULL, len, 10);
+	else
+		return xstrntol(str, NULL, len, params.cluster_base);
+}
+
 static int _create_allocation(char *com, List allocated_blocks)
 {
-	int i=6, geoi=-1, starti=-1, i2=0, small32=-1, small128=-1;
+	int i=6, j, geoi=-1, starti=-1, i2=0, small32=-1, small128=-1;
 	int len = strlen(com);
-	allocated_block_t *allocated_block = NULL;
-	ba_request_t *request = (ba_request_t*) xmalloc(sizeof(ba_request_t));
+	select_ba_request_t *request;
+	char fini_char;
 	int diff=0;
 #ifndef HAVE_BGL
-	int small16=-1, small64=-1, small256=-1;
+#ifdef HAVE_BGP
+	int small16=-1;
 #endif
-	request->geometry[0] = (uint16_t)NO_VAL;
-	request->conn_type=SELECT_TORUS;
+	int small64=-1, small256=-1;
+#endif
+	request = (select_ba_request_t*) xmalloc(sizeof(select_ba_request_t));
 	request->rotate = false;
 	request->elongate = false;
-	request->start_req=0;
+	request->start_req = 0;
 	request->size = 0;
 	request->small32 = 0;
 	request->small128 = 0;
 	request->deny_pass = 0;
-	request->avail_node_bitmap = NULL;
-
-	while(i<len) {
-		if(!strncasecmp(com+i, "mesh", 4)) {
-			request->conn_type=SELECT_MESH;
-			i+=4;
-		} else if(!strncasecmp(com+i, "small", 5)) {
-			request->conn_type = SELECT_SMALL;
-			i+=5;
-		} else if(!strncasecmp(com+i, "deny", 4)) {
-			i+=4;
-			if(strstr(com+i, "X"))
+	request->avail_mp_bitmap = NULL;
+	for (j = 0; j < params.cluster_dims; j++) {
+		request->geometry[j]  = (uint16_t) NO_VAL;
+		request->conn_type[j] = SELECT_TORUS;
+	}
+
+	while (i < len) {
+		if (!strncasecmp(com+i, "mesh", 4)
+		    || !strncasecmp(com+i, "small", 5)
+		    || !strncasecmp(com+i, "torus", 5)) {
+			char conn_type[200];
+			j = i;
+			while (j < len) {
+				if (com[j] == ' ')
+					break;
+				conn_type[j-i] = com[j];
+				j++;
+				if (j >= 200)
+					break;
+			}
+			conn_type[(j-i)+1] = '\0';
+			verify_conn_type(conn_type, request->conn_type);
+			i += j;
+		} else if (!strncasecmp(com+i, "deny", 4)) {
+			i += 4;
+			if (strstr(com+i, "A"))
+				request->deny_pass |= PASS_DENY_A;
+			if (strstr(com+i, "X"))
 				request->deny_pass |= PASS_DENY_X;
-			if(strstr(com+i, "Y"))
+			if (strstr(com+i, "Y"))
 				request->deny_pass |= PASS_DENY_Y;
-			if(strstr(com+i, "Z"))
+			if (strstr(com+i, "Z"))
 				request->deny_pass |= PASS_DENY_Z;
-			if(!strcasecmp(com+i, "ALL"))
+			if (!strcasecmp(com+i, "ALL"))
 				request->deny_pass |= PASS_DENY_ALL;
-		} else if(!strncasecmp(com+i, "nodecard", 8)) {
-			small32=0;
-			i+=8;
-		} else if(!strncasecmp(com+i, "quarter", 7)) {
-			small128=0;
-			i+=7;
-		} else if(!strncasecmp(com+i, "32CN", 4)) {
-			small32=0;
-			i+=4;
-		} else if(!strncasecmp(com+i, "128CN", 5)) {
-			small128=0;
-			i+=5;
-		} else if(!strncasecmp(com+i, "rotate", 6)) {
-			request->rotate=true;
-			i+=6;
-		} else if(!strncasecmp(com+i, "elongate", 8)) {
-			request->elongate=true;
-			i+=8;
-		} else if(!strncasecmp(com+i, "start", 5)) {
-			request->start_req=1;
-			i+=5;
-		} else if(request->start_req
-			  && starti<0
-			  && ((com[i] >= '0' && com[i] <= '9')
-			      || (com[i] >= 'A' && com[i] <= 'Z'))) {
-			starti=i;
+		} else if (!strncasecmp(com+i, "nodecard", 8)) {
+			small32 = 0;
+			i += 8;
+		} else if (!strncasecmp(com+i, "quarter", 7)) {
+			small128 = 0;
+			i += 7;
+		} else if (!strncasecmp(com+i, "32CN", 4)) {
+			small32 = 0;
+			i += 4;
+		} else if (!strncasecmp(com+i, "128CN", 5)) {
+			small128 = 0;
+			i += 5;
+		} else if (!strncasecmp(com+i, "rotate", 6)) {
+			request->rotate = true;
+			i += 6;
+		} else if (!strncasecmp(com+i, "elongate", 8)) {
+			request->elongate = true;
+			i += 8;
+		} else if (!strncasecmp(com+i, "start", 5)) {
+			request->start_req = 1;
+			i += 5;
+		} else if (request->start_req && (starti < 0) &&
+			   (((com[i] >= '0') && (com[i] <= '9')) ||
+			    ((com[i] >= 'A') && (com[i] <= 'Z')))) {
+			starti = i;
 			i++;
-		} else if(small32 == 0 && (com[i] >= '0' && com[i] <= '9')) {
-			small32=i;
+		} else if (small32 == 0 && (com[i] >= '0' && com[i] <= '9')) {
+			small32 = i;
 			i++;
-		} else if(small128 == 0 && (com[i] >= '0' && com[i] <= '9')) {
-			small128=i;
+		} else if (small128 == 0 && (com[i] >= '0' && com[i] <= '9')) {
+			small128 = i;
 			i++;
 		}
-#ifndef HAVE_BGL
-		else if(!strncasecmp(com+i, "16CN", 4)) {
-			small16=0;
-			i+=4;
-		} else if(!strncasecmp(com+i, "64CN", 4)) {
-			small64=0;
-			i+=4;
-		} else if(!strncasecmp(com+i, "256CN", 5)) {
-			small256=0;
-			i+=5;
-		} else if(small16 == 0 && (com[i] >= '0' && com[i] <= '9')) {
-			small16=i;
+#ifdef HAVE_BGP
+		else if (!strncasecmp(com+i, "16CN", 4)) {
+			small16 = 0;
+			i += 4;
+		} else if (small16 == 0 && (com[i] >= '0' && com[i] <= '9')) {
+			small16 = i;
 			i++;
-		} else if(small64 == 0 && (com[i] >= '0' && com[i] <= '9')) {
-			small64=i;
+		}
+#endif
+#ifndef HAVE_BGL
+		else if (!strncasecmp(com+i, "64CN", 4)) {
+			small64 = 0;
+			i += 4;
+		} else if (!strncasecmp(com+i, "256CN", 5)) {
+			small256 = 0;
+			i += 5;
+		} else if (small64 == 0 && (com[i] >= '0' && com[i] <= '9')) {
+			small64 = i;
 			i++;
-		} else if(small256 == 0 && (com[i] >= '0' && com[i] <= '9')) {
-			small256=i;
+		} else if (small256 == 0 && (com[i] >= '0' && com[i] <= '9')) {
+			small256 = i;
 			i++;
 		}
 #endif
-		else if(geoi<0 && ((com[i] >= '0' && com[i] <= '9')
-				     || (com[i] >= 'A' && com[i] <= 'Z'))) {
-			geoi=i;
+		else if ((geoi < 0) &&
+			 (((com[i] >= '0') && (com[i] <= '9')) ||
+			  ((com[i] >= 'A') && (com[i] <= 'Z')))) {
+			geoi = i;
 			i++;
 		} else {
 			i++;
@@ -305,35 +440,36 @@ static int _create_allocation(char *com, List allocated_blocks)
 
 	}
 
-	if(request->conn_type == SELECT_SMALL) {
+	if (request->conn_type[0] >= SELECT_SMALL) {
 		int total = 512;
-#ifndef HAVE_BGL
-		if(small16 > 0) {
+#ifdef HAVE_BGP
+		if (small16 > 0) {
 			request->small16 = atoi(&com[small16]);
 			total -= request->small16 * 16;
 		}
-
-		if(small64 > 0) {
+#endif
+#ifndef HAVE_BGL
+		if (small64 > 0) {
 			request->small64 = atoi(&com[small64]);
 			total -= request->small64 * 64;
 		}
 
-		if(small256 > 0) {
+		if (small256 > 0) {
 			request->small256 = atoi(&com[small256]);
 			total -= request->small256 * 256;
 		}
 #endif
 
-		if(small32 > 0) {
+		if (small32 > 0) {
 			request->small32 = atoi(&com[small32]);
 			total -= request->small32 * 32;
 		}
 
-		if(small128 > 0) {
+		if (small128 > 0) {
 			request->small128 = atoi(&com[small128]);
 			total -= request->small128 * 128;
 		}
-		if(total < 0) {
+		if (total < 0) {
 			sprintf(error_string,
 				"You asked for %d more nodes than "
 				"are in a Midplane\n", total * 2);
@@ -342,31 +478,35 @@ static int _create_allocation(char *com, List allocated_blocks)
 		}
 
 #ifndef HAVE_BGL
-		while(total > 0) {
-			if(total >= 256) {
+		while (total > 0) {
+			if (total >= 256) {
 				request->small256++;
 				total -= 256;
-			} else if(total >= 128) {
+			} else if (total >= 128) {
 				request->small128++;
 				total -= 128;
-			} else if(total >= 64) {
+			} else if (total >= 64) {
 				request->small64++;
 				total -= 64;
-			} else if(total >= 32) {
+			} else if (total >= 32) {
 				request->small32++;
 				total -= 32;
-			} else if(total >= 16) {
+			}
+#ifdef HAVE_BGP
+			else if (total >= 16) {
 				request->small16++;
 				total -= 16;
-			} else
+			}
+#endif
+			else
 				break;
 		}
 #else
-		while(total > 0) {
-			if(total >= 128) {
+		while (total > 0) {
+			if (total >= 128) {
 				request->small128++;
 				total -= 128;
-			} else if(total >= 32) {
+			} else if (total >= 32) {
 				request->small32++;
 				total -= 32;
 			} else
@@ -381,188 +521,83 @@ static int _create_allocation(char *com, List allocated_blocks)
 /* 			request->small256); */
 	}
 
-	if(geoi<0 && !request->size) {
-		memset(error_string,0,255);
+	if ((geoi < 0) && !request->size) {
+		memset(error_string, 0, 255);
 		sprintf(error_string,
 			"No size or dimension specified, please re-enter");
 	} else {
-		i2=geoi;
-		while(i2<len) {
-			if(request->size)
+		i2 = geoi;
+		while (i2 < len) {
+			if (request->size)
 				break;
-			if(com[i2]==' ' || i2==(len-1)) {
+			if ((com[i2] == ' ') || (i2 == (len-1))) {
+				char *p;
 				/* for size */
-				request->size = atoi(&com[geoi]);
+				request->size = strtol(&com[geoi], &p, 10);
+				if (*p == 'k' || *p == 'K') {
+					request->size *= 2; /* (1024 / 512) */
+					p++;
+				}
 				break;
 			}
-			if(com[i2]=='x') {
-				diff = i2-geoi;
+			if (com[i2]=='x') {
+				request->size = -1;
+				diff = i2 - geoi;
 				/* for geometery */
-				if(diff>1) {
-					request->geometry[X] =
-						xstrntol(&com[geoi],
-							 NULL, diff,
-							 10);
-				} else {
-					request->geometry[X] =
-						xstrntol(&com[geoi],
-							 NULL, diff,
-							 params.cluster_base);
+				request->geometry[0] = _xlate_coord(&com[geoi],
+								    diff);
+				for (j = 1; j < params.cluster_dims; j++) {
+					geoi += diff;
+					diff = geoi;
+					while ((com[geoi-1]!='x') && com[geoi])
+						geoi++;
+					if (com[geoi] == '\0')
+						goto geo_error_message;
+					diff = geoi - diff;
+					request->geometry[j] =
+						_xlate_coord(&com[geoi], diff);
 				}
-				geoi += diff;
-				diff = geoi;
-
-				while(com[geoi-1]!='x' && geoi<len)
-					geoi++;
-				if(geoi==len)
-					goto geo_error_message;
-				diff = geoi - diff;
-				if(diff>1) {
-					request->geometry[Y] =
-						xstrntol(&com[geoi],
-							 NULL, diff,
-							 10);
-				} else {
-					request->geometry[Y] =
-						xstrntol(&com[geoi],
-							 NULL, diff,
-							 params.cluster_base);
-				}
-				geoi += diff;
-				diff = geoi;
-				while(com[geoi-1]!='x' && geoi<len)
-					geoi++;
-				if(geoi==len)
-					goto geo_error_message;
-				diff = geoi - diff;
-
-				if(diff>1) {
-					request->geometry[Z] =
-						xstrntol(&com[geoi],
-							 NULL, diff,
-							 10);
-				} else {
-					request->geometry[Z] =
-						xstrntol(&com[geoi],
-							 NULL, diff,
-							 params.cluster_base);
-				}
-				request->size = -1;
 				break;
 			}
 			i2++;
 		}
 
-		if(request->start_req) {
+		if (request->start_req) {
 			i2 = starti;
-			while(com[i2]!='x' && i2<len)
+			while (com[i2]!='x' && i2<len)
 				i2++;
 			diff = i2-starti;
-			if(diff>1) {
-				request->start[X] = xstrntol(&com[starti],
-							     NULL, diff,
-							     10);
-			} else {
-				request->start[X] = xstrntol(&com[starti],
-							     NULL, diff,
-							     params.
-							     cluster_base);
-			}
-			starti += diff;
-			if(starti==len)
-				goto start_request;
-
-			starti++;
-			i2 = starti;
-			while(com[i2]!='x' && i2<len)
-				i2++;
-			diff = i2-starti;
-
-			if(diff>1) {
-				request->start[Y] = xstrntol(&com[starti],
-							     NULL, diff,
-							     10);
-			} else {
-				request->start[Y] = xstrntol(&com[starti],
-							     NULL, diff,
-							     params.cluster_base);
-			}
-			starti += diff;
-			if(starti==len)
-				goto start_request;
-
-			starti++;
-			i2 = starti;
-			while(com[i2]!=' ' && i2<len)
-				i2++;
-			diff = i2-starti;
-
-			if(diff>1) {
-				request->start[Z] = xstrntol(&com[starti],
-							     NULL, diff,
-							     10);
-			} else {
-				request->start[Z] = xstrntol(&com[starti],
-							     NULL, diff,
-							     params.
-							     cluster_base);
+			request->start[0] = _xlate_coord(&com[starti], diff);
+
+			for (j = 1; j < params.cluster_dims; j++) {
+				starti += diff;
+				if (starti == len)
+					goto start_request;
+				starti++;
+				i2 = starti;
+				if (j == (params.cluster_dims - 1))
+					fini_char = ' ';
+				else
+					fini_char = 'x';
+				while ((com[i2] != fini_char) && com[i2])
+					i2++;
+				diff = i2 - starti;
+				request->start[j] = _xlate_coord(&com[starti],
+								 diff);
 			}
 		}
 	start_request:
-		if(!strcasecmp(layout_mode,"OVERLAP"))
-			reset_ba_system(true);
-
-		/*
-		  Here is where we do the allocating of the partition.
-		  It will send a request back which we will throw into
-		  a list just incase we change something later.
-		*/
-		if(!new_ba_request(request)) {
-			memset(error_string,0,255);
-			if(request->size!=-1) {
-				sprintf(error_string,
-					"Problems with request for %d\n"
-					"Either you put in something "
-					"that doesn't work,\n"
-					"or we are unable to process "
-					"your request.",
-					request->size);
-			} else
-				sprintf(error_string,
-					"Problems with request for %dx%dx%d\n"
-					"Either you put in something "
-					"that doesn't work,\n"
-					"or we are unable to process "
-					"your request.",
-					request->geometry[0],
-					request->geometry[1],
-					request->geometry[2]);
-		} else {
-			if((allocated_block = _make_request(request)) != NULL)
-				list_append(allocated_blocks,
-					    allocated_block);
-			else {
-				i2 = strlen(error_string);
-				sprintf(error_string+i2,
-					"\nGeo requested was %d (%dx%dx%d)\n"
-					"Start position was %dx%dx%d",
-					request->size,
-					request->geometry[0],
-					request->geometry[1],
-					request->geometry[2],
-					request->start[0],
-					request->start[1],
-					request->start[2]);
-			}
-		}
+		if(!_full_request(request, NULL, allocated_blocks))
+			destroy_select_ba_request(request);
+
 	}
 	return 1;
 
 geo_error_message:
-	memset(error_string,0,255);
+	destroy_select_ba_request(request);
+	memset(error_string, 0, 255);
 	sprintf(error_string,
-		"Error in geo dimension "
-		"specified, please re-enter");
+		"Error in geo dimension specified, please re-enter");
 
 	return 0;
 }
@@ -570,104 +605,74 @@ geo_error_message:
 static int _resolve(char *com)
 {
 	int i=0;
-#ifdef HAVE_BG_FILES
-	int len=strlen(com);
-	char *rack_mid = NULL;
-	uint16_t *coord = NULL;
-#endif
+	char *ret_str;
 
-	while(com[i] != '\0') {
-		if((i>0) && (com[i-1] != ' '))
+	while (com[i] != '\0') {
+		if ((i>0) && (com[i-1] != ' '))
 			break;
 		i++;
 	}
-	if(com[i] == 'r')
+	if (com[i] == 'r')
 		com[i] = 'R';
-
-	memset(error_string,0,255);
-#ifdef HAVE_BG_FILES
-	if (!have_db2) {
-		sprintf(error_string, "Must be on BG SN to resolve\n");
-		goto resolve_error;
+	ret_str = resolve_mp(com+i);
+	if (ret_str) {
+		snprintf(error_string, sizeof(error_string), "%s", ret_str);
+		xfree(ret_str);
 	}
 
-	if(len-i<3) {
-		sprintf(error_string, "Must enter 3 coords to resolve.\n");
-		goto resolve_error;
-	}
-	if(com[i] != 'R') {
-		rack_mid = find_bp_rack_mid(com+i);
-
-		if(rack_mid)
-			sprintf(error_string,
-				"X=%c Y=%c Z=%c resolves to %s\n",
-				com[X+i],com[Y+i],com[Z+i], rack_mid);
-		else
-			sprintf(error_string,
-				"X=%c Y=%c Z=%c has no resolve\n",
-				com[X+i],com[Y+i],com[Z+i]);
-
-	} else {
-		coord = find_bp_loc(com+i);
-
-		if(coord)
-			sprintf(error_string,
-				"%s resolves to X=%d Y=%d Z=%d\n",
-				com+i,coord[X],coord[Y],coord[Z]);
-		else
-			sprintf(error_string, "%s has no resolve.\n",
-				com+i);
+	if (params.commandline)
+		printf("%s", error_string);
+	else {
+		wnoutrefresh(text_win);
+		doupdate();
 	}
-resolve_error:
-#else
-			sprintf(error_string,
-				"Must be on BG SN to resolve.\n");
-#endif
-	wnoutrefresh(text_win);
-	doupdate();
 
 	return 1;
 }
+
 static int _change_state_all_bps(char *com, int state)
 {
+	char start_loc[32], end_loc[32];
 	char allnodes[50];
-	memset(allnodes,0,50);
+	int i;
 
-	if(params.cluster_dims == 3)
-		sprintf(allnodes, "000x%c%c%c",
-			alpha_num[DIM_SIZE[X]-1], alpha_num[DIM_SIZE[Y]-1],
-			alpha_num[DIM_SIZE[Z]-1]);
-	else
-		sprintf(allnodes, "0-%d",
-			DIM_SIZE[X]);
+	xassert(params.cluster_dims < 31);
+	for (i = 0; i < params.cluster_dims; i++) {
+		start_loc[i] = '0';
+		end_loc[i]   = alpha_num[dim_size[i] - 1];
+	}
+	start_loc[i] = '\0';
+	end_loc[i]   = '\0';
+
+	memset(allnodes, 0, 50);
+	sprintf(allnodes, "%sx%s", start_loc, end_loc);
 
 	return _change_state_bps(allnodes, state);
 
 }
 static int _change_state_bps(char *com, int state)
 {
-	int i=0, x;
-	int len = strlen(com);
-	int start[params.cluster_dims], end[params.cluster_dims];
-	int number=0, y=0, z=0, j=0;
+	char *host;
+	int i = 0;
+	uint16_t pos[params.cluster_dims];
 	char letter = '.';
-	char opposite = '#';
 	bool used = false;
 	char *c_state = "up";
-	char *p = '\0';
+	hostlist_t hl = NULL;
+	int rc = 1;
 
-	if(state == NODE_STATE_DOWN) {
+	if (state == NODE_STATE_DOWN) {
 		letter = '#';
-		opposite = '.';
 		used = true;
 		c_state = "down";
 	}
-	while(i<len
-	      && (com[i] < '0' || com[i] > 'Z'
-		  || (com[i] > '9' && com[i] < 'A')))
+
+	while (com[i] && (com[i] != '[') &&
+	       ((com[i] < '0') || (com[i] > '9')) &&
+	       ((com[i] < 'A') || (com[i] > 'Z')))
 		i++;
-	if(i>(len-1)) {
-		memset(error_string,0,255);
+	if (com[i] == '\0') {
+		memset(error_string, 0, 255);
 		sprintf(error_string,
 			"You didn't specify any nodes to make %s. "
 			"in statement '%s'",
@@ -675,125 +680,37 @@ static int _change_state_bps(char *com, int state)
 		return 0;
 	}
 
-	if(params.cluster_dims == 1) {
-		if ((com[i+3] == 'x')
-		    || (com[i+3] == '-')) {
-			start[X] =  xstrntol(com + i, NULL,
-					     params.cluster_dims,
-					     params.cluster_base);
-			i += 4;
-			end[X] =  xstrntol(com + i, NULL,
-					   params.cluster_dims,
-					   params.cluster_base);
-		} else {
-			start[X] = end[X] =  xstrntol(com + i, NULL,
-						      params.cluster_dims,
-						      params.cluster_base);
-		}
-
-		if((start[X]>end[X])
-		   || (start[X]<0)
-		   || (end[X]>DIM_SIZE[X]-1))
-			goto error_message;
+	if (!(hl = hostlist_create(com+i))) {
+		memset(error_string, 0, 255);
+		sprintf(error_string, "Bad hostlist given '%s'", com+i);
+		return 0;
 
-		for(x=start[X];x<=end[X];x++) {
-			ba_system_ptr->grid[x][0][0].color = 0;
-			ba_system_ptr->grid[x][0][0].letter = letter;
-			ba_system_ptr->grid[x][0][0].used = used;
-		}
-		return 1;
 	}
 
-	if ((com[i+3] == 'x')
-	    || (com[i+3] == '-')) {
-		for(j=0; j<3; j++) {
-			if (((i+j) <= len) &&
-			    (((com[i+j] >= '0') && (com[i+j] <= '9')) ||
-			     ((com[i+j] >= 'A') && (com[i+j] <= 'Z'))))
-				continue;
-			goto error_message2;
+	while ((host = hostlist_shift(hl))) {
+		ba_mp_t *ba_mp;
+		smap_node_t *smap_node;
 
+		for (i = 0; i < params.cluster_dims; i++)
+			pos[i] = select_char2coord(host[i]);
+		if (!(ba_mp = bg_configure_coord2ba_mp(pos))) {
+			memset(error_string, 0, 255);
+			sprintf(error_string, "Bad host given '%s'", host);
+			rc = 0;
+			break;
 		}
-		number = xstrntol(com + i, &p, params.cluster_dims,
-				  params.cluster_base);
-		hostlist_parse_int_to_array(
-			number, start, params.cluster_dims,
-			params.cluster_base);
-
-		i += 4;
-		for(j=0; j<3; j++) {
-			if (((i+j) <= len) &&
-			    (((com[i+j] >= '0') && (com[i+j] <= '9')) ||
-			     ((com[i+j] >= 'A') && (com[i+j] <= 'Z'))))
-				continue;
-			goto error_message2;
-		}
-		number = xstrntol(com + i, &p, params.cluster_dims,
-				  params.cluster_base);
-		hostlist_parse_int_to_array(
-			number, end, params.cluster_dims,
-			params.cluster_base);
-	} else {
-		for(j=0; j<3; j++) {
-			if (((i+j) <= len) &&
-			    (((com[i+j] >= '0') && (com[i+j] <= '9')) ||
-			     ((com[i+j] >= 'A') && (com[i+j] <= 'Z'))))
-				continue;
-			goto error_message2;
-		}
-		number = xstrntol(com + i, &p, params.cluster_dims,
-				  params.cluster_base);
-		hostlist_parse_int_to_array(
-			number, start, params.cluster_dims,
-			params.cluster_base);
-	}
-	if((start[X]>end[X]
-	    || start[Y]>end[Y]
-	    || start[Z]>end[Z])
-	   || (start[X]<0
-	       || start[Y]<0
-	       || start[Z]<0)
-	   || (end[X]>DIM_SIZE[X]-1
-	       || end[Y]>DIM_SIZE[Y]-1
-	       || end[Z]>DIM_SIZE[Z]-1))
-		goto error_message;
-
-	for(x=start[X];x<=end[X];x++) {
-		for(y=start[Y];y<=end[Y];y++) {
-			for(z=start[Z];z<=end[Z];z++) {
-				if(ba_system_ptr->grid[x][y][z].letter
-				   != opposite)
-					continue;
-				ba_system_ptr->grid[x][y][z].color = 0;
-				ba_system_ptr->grid[x][y][z].letter = letter;
-				ba_system_ptr->grid[x][y][z].used = used;
-			}
-		}
+		bg_configure_ba_update_mp_state(ba_mp, state);
+		smap_node = smap_system_ptr->grid[ba_mp->index];
+		smap_node->color = 0;
+		smap_node->letter = letter;
+		smap_node->used = used;
+		free(host);
 	}
-	return 1;
-error_message:
-	memset(error_string,0,255);
-	if(params.cluster_dims == 1) {
-		sprintf(error_string,
-			"Problem with nodes,  specified range was %d-%d",
-			start[X],end[X]);
-	} else {
-		sprintf(error_string,
-			"Problem with base partitions, "
-			"specified range was %d%d%dx%d%d%d",
-			alpha_num[start[X]],alpha_num[start[Y]],
-			alpha_num[start[Z]],
-			alpha_num[end[X]],alpha_num[end[Y]],alpha_num[end[Z]]);
-	}
-	return 0;
-error_message2:
-	memset(error_string,0,255);
-	sprintf(error_string,
-		"There was a problem with '%s'\nIn your request '%s'"
-		"You need to specify XYZ or XYZxXYZ",
-		com+i,com);
-	return 0;
+	hostlist_destroy(hl);
+
+	return rc;
 }
+
 static int _remove_allocation(char *com, List allocated_blocks)
 {
 	ListIterator results_i;
@@ -802,49 +719,33 @@ static int _remove_allocation(char *com, List allocated_blocks)
 	int len = strlen(com);
 	char letter;
 
-	int color_count = 0;
-	while(com[i-1]!=' ' && i<len) {
+	while (com[i-1]!=' ' && i<len) {
 		i++;
 	}
 
-	if(i>(len-1)) {
-		memset(error_string,0,255);
+	if (i>(len-1)) {
+		memset(error_string, 0, 255);
 		sprintf(error_string,
 			"You need to specify which letter to delete.");
 		return 0;
 	} else {
 		letter = com[i];
 		results_i = list_iterator_create(allocated_blocks);
-		while((allocated_block = list_next(results_i)) != NULL) {
-			if(found) {
-				if(redo_block(allocated_block->nodes,
-					      allocated_block->
-					      request->geometry,
-					      allocated_block->
-					      request->conn_type,
-					      color_count) == SLURM_ERROR) {
-					memset(error_string,0,255);
-					sprintf(error_string,
-						"problem redoing the part.");
-					return 0;
-				}
+		while ((allocated_block = list_next(results_i)) != NULL) {
+			if (found) {
 				allocated_block->letter =
 					letters[color_count%62];
 				allocated_block->color =
 					colors[color_count%6];
-
-			} else if(allocated_block->letter == letter) {
-				found=1;
-				remove_block(allocated_block->nodes,
-					     color_count,
-					     allocated_block->request->
-					     conn_type);
-				list_destroy(allocated_block->nodes);
-				delete_ba_request(allocated_block->request);
-				list_remove(results_i);
-				color_count--;
+				allocated_block->color_count = color_count++;
+				_set_nodes(allocated_block->nodes,
+					   allocated_block->color,
+					   allocated_block->letter);
+			} else if (allocated_block->letter == letter) {
+				found = 1;
+				color_count = allocated_block->color_count;
+				list_delete_item(results_i);
 			}
-			color_count++;
 		}
 		list_iterator_destroy(results_i);
 	}
@@ -852,103 +753,70 @@ static int _remove_allocation(char *com, List allocated_blocks)
 	return 1;
 }
 
-static int _alter_allocation(char *com, List allocated_blocks)
-{
-	/* this doesn't do anything yet. */
-
-	/* int torus=SELECT_TORUS, i=5, i2=0; */
-/* 	int len = strlen(com); */
-/* 	bool rotate = false; */
-/* 	bool elongate = false; */
-
-/* 	while(i<len) { */
-
-/* 		while(com[i-1]!=' ' && i<len) { */
-/* 			i++; */
-/* 		} */
-/* 		if(!strncasecmp(com+i, "mesh", 4)) { */
-/* 			torus=SELECT_MESH; */
-/* 			i+=4; */
-/* 		} else if(!strncasecmp(com+i, "rotate", 6)) { */
-/* 			rotate=true; */
-/* 			i+=6; */
-/* 		} else if(!strncasecmp(com+i, "elongate", 8)) { */
-/* 			elongate=true; */
-/* 			i+=8; */
-/* 		} else if(com[i] < 58 && com[i] > 47) { */
-/* 			i2=i; */
-/* 			i++; */
-/* 		} else { */
-/* 			i++; */
-/* 		} */
-
-/* 	} */
-	return 1;
-}
-
 static int _copy_allocation(char *com, List allocated_blocks)
 {
 	ListIterator results_i;
 	allocated_block_t *allocated_block = NULL;
 	allocated_block_t *temp_block = NULL;
-	ba_request_t *request = NULL;
+	select_ba_request_t *request = NULL;
 
-	int i=1;
+	int i = 1, j;
 	int len = strlen(com);
 	char letter = '\0';
 	int count = 1;
 	int *geo = NULL, *geo_ptr = NULL;
 
 	/* look for the space after copy */
-	while(com[i-1]!=' ' && i<len)
+	while ((com[i-1] != ' ') && (i < len))
 		i++;
 
-	if(i<=len) {
+	if (i <= len) {
 		/* Here we are looking for a real number for the count
-		   instead of the params.cluster_base so atoi is ok
-		*/
-		if(com[i]>='0' && com[i]<='9')
+		 * instead of the params.cluster_base so atoi is ok */
+		if ((com[i] >= '0') && (com[i] <= '9'))
 			count = atoi(com+i);
 		else {
 			letter = com[i];
 			i++;
-			if(com[i]!='\n') {
-				while(com[i-1]!=' ' && i<len)
+			if (com[i] != '\n') {
+				while ((com[i-1] != ' ') && (i < len))
 					i++;
 
-				if(com[i]>='0' && com[i]<='9')
+				if ((com[i] >= '0') && (com[i] <= '9'))
 					count = atoi(com+i);
 			}
 		}
 	}
 
 	results_i = list_iterator_create(allocated_blocks);
-	while((allocated_block = list_next(results_i)) != NULL) {
+	while ((allocated_block = list_next(results_i)) != NULL) {
 		temp_block = allocated_block;
-		if(allocated_block->letter != letter)
+		if (allocated_block->letter != letter)
 			continue;
 		break;
 	}
 	list_iterator_destroy(results_i);
 
-	if(!letter)
+	if (!letter)
 		allocated_block = temp_block;
 
-	if(!allocated_block) {
-		memset(error_string,0,255);
+	if (!allocated_block) {
+		memset(error_string, 0, 255);
 		sprintf(error_string,
 			"Could not find requested record to copy");
 		return 0;
 	}
 
-	for(i=0;i<count;i++) {
-		request = (ba_request_t*) xmalloc(sizeof(ba_request_t));
-
-		request->geometry[X] = allocated_block->request->geometry[X];
-		request->geometry[Y] = allocated_block->request->geometry[Y];
-		request->geometry[Z] = allocated_block->request->geometry[Z];
+	for (i = 0; i < count; i++) {
+		request = (select_ba_request_t*)
+			  xmalloc(sizeof(select_ba_request_t));
+		for (j = 0; j < params.cluster_dims; j++) {
+			request->geometry[j] = allocated_block->request->
+					       geometry[j];
+			request->conn_type[j] = allocated_block->request->
+						conn_type[j];
+		}
 		request->size = allocated_block->request->size;
-		request->conn_type=allocated_block->request->conn_type;
 		request->rotate =allocated_block->request->rotate;
 		request->elongate = allocated_block->request->elongate;
 		request->deny_pass = allocated_block->request->deny_pass;
@@ -963,21 +831,19 @@ static int _copy_allocation(char *com, List allocated_blocks)
 		request->rotate_count= 0;
 		request->elongate_count = 0;
 	       	request->elongate_geos = list_create(NULL);
-		request->avail_node_bitmap = NULL;
+		request->avail_mp_bitmap = NULL;
 
 		results_i = list_iterator_create(request->elongate_geos);
 		while ((geo_ptr = list_next(results_i)) != NULL) {
-			geo = xmalloc(sizeof(int)*3);
-			geo[X] = geo_ptr[X];
-			geo[Y] = geo_ptr[Y];
-			geo[Z] = geo_ptr[Z];
-
+			geo = xmalloc(sizeof(int) * params.cluster_dims);
+			for (j = 0; j < params.cluster_dims; j++)
+				geo[j] = geo_ptr[j];
 			list_append(request->elongate_geos, geo);
 		}
 		list_iterator_destroy(results_i);
 
-		if((allocated_block = _make_request(request)) == NULL) {
-			memset(error_string,0,255);
+		if ((allocated_block = _make_request(request)) == NULL) {
+			memset(error_string, 0, 255);
 			sprintf(error_string,
 				"Problem with the copy\n"
 				"Are you sure there is enough room for it?");
@@ -985,7 +851,6 @@ static int _copy_allocation(char *com, List allocated_blocks)
 			return 0;
 		}
 		list_append(allocated_blocks, allocated_block);
-
 	}
 	return 1;
 
@@ -999,25 +864,24 @@ static int _save_allocation(char *com, List allocated_blocks)
 	char filename[50];
 	char *save_string = NULL;
 	FILE *file_ptr = NULL;
-	char *conn_type = NULL;
 	char *extra = NULL;
 
 	ListIterator results_i;
 
-	memset(filename,0,50);
-	if(len>5)
-		while(i<len) {
+	memset(filename, 0, 50);
+	if (len > 5)
+		while (i<len) {
 
-			while(com[i-1]!=' ' && i<len) {
+			while (com[i-1]!=' ' && i<len) {
 				i++;
 			}
-			while(i<len && com[i]!=' ') {
+			while (i<len && com[i]!=' ') {
 				filename[j] = com[i];
 				i++;
 				j++;
 			}
 		}
-	if(filename[0]=='\0') {
+	if (filename[0]=='\0') {
 		time_t now_time = time(NULL);
 		sprintf(filename,"bluegene.conf.%ld",
 			(long int) now_time);
@@ -1026,84 +890,118 @@ static int _save_allocation(char *com, List allocated_blocks)
 	file_ptr = fopen(filename,"w");
 
 	if (file_ptr!=NULL) {
+		char *image_dir = NULL;
+
 		xstrcat(save_string,
 			"#\n# bluegene.conf file generated by smap\n");
 		xstrcat(save_string,
 			"# See the bluegene.conf man page for "
 			"more information\n");
 		xstrcat(save_string, "#\n");
-#ifndef HAVE_BGL
-		xstrcat(save_string, "CnloadImage="
-			"/bgsys/drivers/ppcfloor/boot/cns,"
-			"/bgsys/drivers/ppcfloor/boot/cnk\n");
-		xstrcat(save_string, "MloaderImage="
-			"/bgsys/drivers/ppcfloor/boot/uloader\n");
-		xstrcat(save_string, "IoloadImage="
-			"/bgsys/drivers/ppcfloor/boot/cns,"
-			"/bgsys/drivers/ppcfloor/boot/linux,"
-			"/bgsys/drivers/ppcfloor/boot/ramdisk\n");
-#else
-		xstrcat(save_string, "BlrtsImage="
-		       "/bgl/BlueLight/ppcfloor/bglsys/bin/rts_hw.rts\n");
-		xstrcat(save_string, "LinuxImage="
-		       "/bgl/BlueLight/ppcfloor/bglsys/bin/zImage.elf\n");
-		xstrcat(save_string, "MloaderImage="
-		       "/bgl/BlueLight/ppcfloor/bglsys/bin/mmcs-mloader.rts\n");
-		xstrcat(save_string, "RamDiskImage="
-		       "/bgl/BlueLight/ppcfloor/bglsys/bin/ramdisk.elf\n");
-#endif
-		xstrcat(save_string, "BridgeAPILogFile="
-		       "/var/log/slurm/bridgeapi.log\n");
-#ifndef HAVE_BGL
+#ifdef HAVE_BGL
+		image_dir = "/bgl/BlueLight/ppcfloor/bglsys/bin";
+		xstrfmtcat(save_string, "BlrtsImage=%s/rts_hw.rts\n",
+			   image_dir);
+		xstrfmtcat(save_string, "LinuxImage=%s/zImage.elf\n",
+			   image_dir);
+		xstrfmtcat(save_string,
+			   "MloaderImage=%s/mmcs-mloader.rts\n",
+			   image_dir);
+		xstrfmtcat(save_string,
+			   "RamDiskImage=%s/ramdisk.elf\n",
+			   image_dir);
+
+		xstrcat(save_string, "Numpsets=8 # io poor\n");
+		xstrcat(save_string, "# Numpsets=64 # io rich\n");
+#elif defined HAVE_BGP
+		image_dir = "/bgsys/drivers/ppcfloor/boot";
+		xstrfmtcat(save_string, "CnloadImage=%s/cns,%s/cnk\n",
+			   image_dir, image_dir);
+		xstrfmtcat(save_string, "MloaderImage=%s/uloader\n",
+			   image_dir);
+		xstrfmtcat(save_string,
+			   "IoloadImage=%s/cns,%s/linux,%s/ramdisk\n",
+			   image_dir, image_dir, image_dir);
 		xstrcat(save_string, "Numpsets=4 # io poor\n");
 		xstrcat(save_string, "# Numpsets=32 # io rich\n");
 #else
-		xstrcat(save_string, "Numpsets=8 # io poor\n");
-		xstrcat(save_string, "# Numpsets=64 # io rich\n");
+		image_dir = "/bgsys/drivers/ppcfloor/boot";
+		xstrfmtcat(save_string, "MloaderImage=%s/uloader\n",
+			   image_dir);
+		xstrcat(save_string, "Numpsets=4 # io semi-poor\n");
+		xstrcat(save_string, "# Numpsets=16 # io rich\n");
 #endif
+
+		xstrcat(save_string, "BridgeAPILogFile="
+		       "/var/log/slurm/bridgeapi.log\n");
+
 		xstrcat(save_string, "BridgeAPIVerbose=2\n");
 
 		xstrfmtcat(save_string, "BasePartitionNodeCnt=%d\n",
 			   base_part_node_cnt);
 		xstrfmtcat(save_string, "NodeCardNodeCnt=%d\n",
 			   nodecard_node_cnt);
-		if(!list_count(allocated_blocks))
+		if (!list_count(allocated_blocks))
 			xstrcat(save_string, "LayoutMode=DYNAMIC\n");
 		else {
 			xstrfmtcat(save_string, "LayoutMode=%s\n", layout_mode);
 			xstrfmtcat(save_string, "#\n# Block Layout\n#\n");
 		}
 		results_i = list_iterator_create(allocated_blocks);
-		while((allocated_block = list_next(results_i)) != NULL) {
-			if(allocated_block->request->conn_type == SELECT_TORUS)
-				conn_type = "TORUS";
-			else if(allocated_block->request->conn_type
-				== SELECT_MESH)
-				conn_type = "MESH";
-			else {
-				conn_type = "SMALL";
-#ifndef HAVE_BGL
+		while ((allocated_block = list_next(results_i)) != NULL) {
+			select_ba_request_t *request = allocated_block->request;
+
+			if (request->small16 || request->small32
+			    || request->small64 || request->small128
+			    || request->small256) {
+#ifdef HAVE_BGL
+				xstrfmtcat(extra,
+					   " 32CNBlocks=%d "
+					   "128CNBlocks=%d",
+					   request->small32,
+					   request->small128);
+#elif defined HAVE_BGP
 				xstrfmtcat(extra,
-					   " 16CNBlocks=%d 32CNBlocks=%d "
-					   "64CNBlocks=%d 128CNBlocks=%d "
+					   " 16CNBlocks=%d "
+					   "32CNBlocks=%d "
+					   "64CNBlocks=%d "
+					   "128CNBlocks=%d "
 					   "256CNBlocks=%d",
-					   allocated_block->request->small16,
-					   allocated_block->request->small32,
-					   allocated_block->request->small64,
-					   allocated_block->request->small128,
-					   allocated_block->request->small256);
+					   request->small16,
+					   request->small32,
+					   request->small64,
+					   request->small128,
+					   request->small256);
 #else
 				xstrfmtcat(extra,
-					   " 32CNBlocks=%d 128CNBlocks=%d",
-					   allocated_block->request->small32,
-					   allocated_block->request->small128);
+					   " 32CNBlocks=%d "
+					   "64CNBlocks=%d "
+					   "128CNBlocks=%d "
+					   "256CNBlocks=%d",
+					   request->small32,
+					   request->small64,
+					   request->small128,
+					   request->small256);
+#endif
+			}
 
+			xstrfmtcat(save_string, "BPs=%s", request->save_name);
+
+			for (i=0; i<SYSTEM_DIMENSIONS; i++) {
+				if (request->conn_type[i] == (uint16_t)NO_VAL)
+					break;
+				if (i)
+					xstrcat(save_string, ",");
+				else
+					xstrcat(save_string, " Type=");
+				xstrfmtcat(save_string, "%s", conn_type_string(
+						   request->conn_type[i]));
+#ifdef HAVE_BG_L_P
+				break;
 #endif
 			}
-			xstrfmtcat(save_string, "BPs=%s Type=%s",
-				   allocated_block->request->save_name,
-				   conn_type);
-			if(extra) {
+
+			if (extra) {
 				xstrfmtcat(save_string, "%s\n", extra);
 				xfree(extra);
 			} else
@@ -1119,147 +1017,131 @@ static int _save_allocation(char *com, List allocated_blocks)
 	return 1;
 }
 
-static int _add_bg_record(blockreq_t *blockreq, List allocated_blocks)
+static int _add_bg_record(select_ba_request_t *blockreq, List allocated_blocks)
 {
+	int rc = 1;
 #ifdef HAVE_BG
-	char *nodes = NULL, *conn_type = NULL;
-	int bp_count = 0;
-	int diff=0;
-	int largest_diff=-1;
-	int start[params.cluster_dims];
-	int end[params.cluster_dims];
-	int start1[params.cluster_dims];
-	int end1[params.cluster_dims];
-	int geo[params.cluster_dims];
-	char com[255];
-	int j = 0, number;
-	int len = 0;
-	int x,y,z;
-	char *p = '\0';
-
-	geo[X] = 0;
-	geo[Y] = 0;
-	geo[Z] = 0;
-
-	start1[X] = -1;
-	start1[Y] = -1;
-	start1[Z] = -1;
-
-	end1[X] = -1;
-	end1[Y] = -1;
-	end1[Z] = -1;
-
-	switch(blockreq->conn_type) {
-	case SELECT_MESH:
-		conn_type = "mesh";
-		break;
-	case SELECT_SMALL:
-		conn_type = "small";
-		break;
-	case SELECT_TORUS:
-	default:
-		conn_type = "torus";
-		break;
+	char *nodes = NULL, *host;
+	int diff = 0;
+	int largest_diff = -1;
+	uint16_t start[params.cluster_dims];
+	uint16_t end[params.cluster_dims];
+	uint16_t best_start[params.cluster_dims];
+	int i, j = 0;
+	hostlist_t hl = NULL;
+	bitstr_t *mark_bitmap = NULL;
+	char tmp_char[params.cluster_dims+1],
+		tmp_char2[params.cluster_dims+1];
+
+	memset(tmp_char, 0, sizeof(tmp_char));
+	memset(tmp_char2, 0, sizeof(tmp_char2));
+
+	for (i = 0; i < params.cluster_dims; i++) {
+		best_start[0] = 0;
+		blockreq->geometry[i] = 0;
+		end[i] = (int16_t)-1;
 	}
 
-	nodes = blockreq->block;
-	if(!nodes)
+	nodes = blockreq->save_name;
+	if (!nodes)
 		return SLURM_SUCCESS;
-	len = strlen(nodes);
-	while (nodes[j] != '\0') {
-		if(j > len)
+
+	while (nodes[j] && (nodes[j] != '[') &&
+	       ((nodes[j] < '0') || (nodes[j] > '9')) &&
+	       ((nodes[j] < 'A') || (nodes[j] > 'Z')))
+		j++;
+	if (nodes[j] == '\0') {
+		snprintf(error_string, sizeof(error_string),
+			 "This block '%s' for some reason didn't contain "
+			 "any midplanes.",
+			 nodes);
+		rc = 0;
+		goto fini;
+	}
+
+	if (!(hl = hostlist_create(nodes+j))) {
+		snprintf(error_string, sizeof(error_string),
+			 "Bad hostlist given '%s'", nodes+j);
+		rc = 0;
+		goto fini;
+	}
+	/* figure out the geo and the size */
+	mark_bitmap = bit_alloc(smap_system_ptr->node_cnt);
+	while ((host = hostlist_shift(hl))) {
+		ba_mp_t *ba_mp;
+		uint16_t pos[params.cluster_dims];
+		for (i = 0; i < params.cluster_dims; i++)
+			pos[i] = select_char2coord(host[i]);
+		free(host);
+		if (!(ba_mp = bg_configure_coord2ba_mp(pos))) {
+			memset(error_string, 0, 255);
+			sprintf(error_string, "Bad host given '%s'", host);
+			rc = 0;
 			break;
-		else if ((nodes[j] == '[' || nodes[j] == ',')
-		    && (nodes[j+8] == ']' || nodes[j+8] == ',')
-		    && (nodes[j+4] == 'x' || nodes[j+4] == '-')) {
-			j++;
-			number = xstrntol(nodes + j, &p, params.cluster_dims,
-					  params.cluster_base);
-			hostlist_parse_int_to_array(
-				number, start, params.cluster_dims,
-				params.cluster_base);
-			j += 4;
-			number = xstrntol(nodes + j, &p, params.cluster_dims,
-					  params.cluster_base);
-			hostlist_parse_int_to_array(
-				number, end, params.cluster_dims,
-				params.cluster_base);
-			j += 3;
-			diff = end[X]-start[X];
-			if(diff > largest_diff) {
-				start1[X] = start[X];
-				start1[Y] = start[Y];
-				start1[Z] = start[Z];
+		}
+		bit_set(mark_bitmap, ba_mp->index);
+		for (i = 0; i < params.cluster_dims; i++) {
+			if (ba_mp->coord[i] > (int16_t)end[i]) {
+				blockreq->geometry[i]++;
+				end[i] = ba_mp->coord[i];
 			}
-			for (x = start[X]; x <= end[X]; x++)
-				for (y = start[Y]; y <= end[Y]; y++)
-					for (z = start[Z]; z <= end[Z]; z++) {
-						if(x>end1[X]) {
-						        geo[X]++;
-							end1[X] = x;
-						}
-						if(y>end1[Y]) {
-							geo[Y]++;
-							end1[Y] = y;
-						}
-						if(z>end1[Z]) {
-							geo[Z]++;
-							end1[Z] = z;
-						}
-						bp_count++;
-					}
-			if(nodes[j] != ',')
-				break;
-			j--;
-		} else if((nodes[j] >= '0' && nodes[j] <= '9')
-			  || (nodes[j] >= 'A' && nodes[j] <= 'Z')) {
-			number = xstrntol(nodes + j, &p, params.cluster_dims,
-					  params.cluster_base);
-			hostlist_parse_int_to_array(
-				number, start, params.cluster_dims,
-				params.cluster_base);
-			j+=3;
+		}
+	}
+	hostlist_destroy(hl);
+
+	if (!rc)
+		goto fini;
+
+	/* figure out the start pos */
+	while (nodes[j] != '\0') {
+		int mid = j   + params.cluster_dims + 1;
+		int fin = mid + params.cluster_dims + 1;
+		if (((nodes[j] == '[')   || (nodes[j] == ','))   &&
+		    ((nodes[mid] == 'x') || (nodes[mid] == '-')) &&
+		    ((nodes[fin] == ']') || (nodes[fin] == ','))) {
+			j++;	/* Skip leading '[' or ',' */
+			for (i = 0; i < params.cluster_dims; i++, j++)
+				start[i] = select_char2coord(nodes[j]);
+			j++;	/* Skip middle 'x' or '-' */
+			for (i = 0; i < params.cluster_dims; i++, j++)
+				end[i] = select_char2coord(nodes[j]);
+			diff = end[0] - start[0];
+		} else if (((nodes[j] >= '0') && (nodes[j] <= '9')) ||
+			   ((nodes[j] >= 'A') && (nodes[j] <= 'Z'))) {
+			for (i = 0; i < params.cluster_dims; i++, j++)
+				start[i] = select_char2coord(nodes[j]);
 			diff = 0;
-			if(diff > largest_diff) {
-				start1[X] = start[X];
-				start1[Y] = start[Y];
-				start1[Z] = start[Z];
-			}
-			if(start[X]>end1[X]) {
-				geo[X]++;
-				end1[X] = start[X];
-			}
-			if(start[Y]>end1[Y]) {
-				geo[Y]++;
-				end1[Y] = start[Y];
-			}
-			if(start[Z]>end1[Z]) {
-				geo[Z]++;
-				end1[Z] = start[Z];
-			}
-			bp_count++;
-			if(nodes[j] != ',')
-				break;
-			j--;
+		} else {
+			j++;
+			continue;
 		}
-		j++;
+
+		if (diff > largest_diff) {
+			largest_diff = diff;
+			memcpy(best_start, start, sizeof(best_start));
+		}
+		if (nodes[j] != ',')
+			break;
 	}
-	memset(com,0,255);
-	sprintf(com,"create %dx%dx%d %s start %dx%dx%d "
-		"small32=%d small128=%d",
-		geo[X], geo[Y], geo[Z], conn_type,
-		start1[X], start1[Y], start1[Z],
-		blockreq->small32, blockreq->small128);
-	if(!strcasecmp(layout_mode, "OVERLAP"))
-		reset_ba_system(false);
-
-	set_all_bps_except(nodes);
-	_create_allocation(com, allocated_blocks);
-	reset_all_removed_bps();
+
+	if (largest_diff == -1) {
+		snprintf(error_string, sizeof(error_string),
+			 "No hostnames given here");
+		goto fini;
+	}
+
+	memcpy(blockreq->start, best_start, sizeof(blockreq->start));
+
+
+	if(!_full_request(blockreq, mark_bitmap, allocated_blocks))
+		destroy_select_ba_request(blockreq);
+fini:
+	FREE_NULL_BITMAP(mark_bitmap);
 
 #endif
-	return SLURM_SUCCESS;
+	return rc;
 }
+
 static int _load_configuration(char *com, List allocated_blocks)
 {
 	int len = strlen(com);
@@ -1267,24 +1149,26 @@ static int _load_configuration(char *com, List allocated_blocks)
 	char filename[100];
 	s_p_hashtbl_t *tbl = NULL;
 	char *layout = NULL;
-	blockreq_t **blockreq_array = NULL;
+	select_ba_request_t **blockreq_array = NULL;
 	int count = 0;
 
-	_delete_allocated_blocks(allocated_blocks);
-	allocated_blocks = list_create(NULL);
+	if (allocated_blocks)
+		list_flush(allocated_blocks);
+	else
+		allocated_blocks = list_create(_destroy_allocated_block);
 
-	memset(filename,0,100);
-	if(len>5)
-		while(i<len) {
-			while(com[i-1]!=' ' && i<len) {
+	memset(filename, 0, 100);
+	if (len>5)
+		while (i<len) {
+			while (com[i-1]!=' ' && i<len) {
 				i++;
 			}
-			while(i<len && com[i]!=' ') {
+			while (i<len && com[i]!=' ') {
 				filename[j] = com[i];
 				i++;
 				j++;
-				if(j>100) {
-					memset(error_string,0,255);
+				if (j>100) {
+					memset(error_string, 0, 255);
 					sprintf(error_string,
 						"filename is too long needs "
 						"to be under 100 chars");
@@ -1293,12 +1177,11 @@ static int _load_configuration(char *com, List allocated_blocks)
 			}
 		}
 
-	if(filename[0]=='\0') {
+	if (filename[0]=='\0') {
 		sprintf(filename,"bluegene.conf");
 	}
 
-	tbl = s_p_hashtbl_create(bg_conf_file_options);
-	if(s_p_parse_file(tbl, NULL, filename) == SLURM_ERROR) {
+	if (!(tbl = bg_configure_config_make_tbl(filename))) {
 		memset(error_string,0,255);
 		sprintf(error_string, "ERROR: couldn't open/read %s",
 			filename);
@@ -1315,10 +1198,10 @@ static int _load_configuration(char *com, List allocated_blocks)
 		xfree(layout);
 	}
 
-	if(strcasecmp(layout_mode, "DYNAMIC")) {
+	if (strcasecmp(layout_mode, "DYNAMIC")) {
 		if (!s_p_get_array((void ***)&blockreq_array,
 				   &count, "BPs", tbl)) {
-			memset(error_string,0,255);
+			memset(error_string, 0, 255);
 			sprintf(error_string,
 				"WARNING: no blocks defined in "
 				"bluegene.conf");
@@ -1326,6 +1209,10 @@ static int _load_configuration(char *com, List allocated_blocks)
 
 		for (i = 0; i < count; i++) {
 			_add_bg_record(blockreq_array[i], allocated_blocks);
+			/* The freeing of this will happen when
+			   allocated_blocks gets freed.
+			*/
+			blockreq_array[i] = NULL;
 		}
 	}
 
@@ -1341,7 +1228,7 @@ static void _print_header_command(void)
 	main_xcord += 4;
 	mvwprintw(text_win, main_ycord,
 		  main_xcord, "TYPE");
-	main_xcord += 7;
+	main_xcord += 8;
 	mvwprintw(text_win, main_ycord,
 		  main_xcord, "ROTATE");
 	main_xcord += 7;
@@ -1357,7 +1244,7 @@ static void _print_header_command(void)
 #endif
 	main_xcord += 10;
 
-#ifndef HAVE_BGL
+#ifdef HAVE_BGP
 	mvwprintw(text_win, main_ycord,
 		  main_xcord, "16CN");
 	main_xcord += 5;
@@ -1391,24 +1278,21 @@ static void _print_header_command(void)
 
 static void _print_text_command(allocated_block_t *allocated_block)
 {
+	char *tmp_char = NULL;
+
 	wattron(text_win,
 		COLOR_PAIR(allocated_block->color));
 
 	mvwprintw(text_win, main_ycord,
 		  main_xcord, "%c", allocated_block->letter);
 	main_xcord += 4;
-	if(allocated_block->request->conn_type==SELECT_TORUS)
-		mvwprintw(text_win, main_ycord,
-			  main_xcord, "TORUS");
-	else if (allocated_block->request->conn_type==SELECT_MESH)
-		mvwprintw(text_win, main_ycord,
-			  main_xcord, "MESH");
-	else
-		mvwprintw(text_win, main_ycord,
-			  main_xcord, "SMALL");
-	main_xcord += 7;
 
-	if(allocated_block->request->rotate)
+	tmp_char = conn_type_string_full(allocated_block->request->conn_type);
+	mvwprintw(text_win, main_ycord, main_xcord, tmp_char);
+	xfree(tmp_char);
+	main_xcord += 8;
+
+	if (allocated_block->request->rotate)
 		mvwprintw(text_win, main_ycord,
 			  main_xcord, "Y");
 	else
@@ -1416,7 +1300,7 @@ static void _print_text_command(allocated_block_t *allocated_block)
 			  main_xcord, "N");
 	main_xcord += 7;
 
-	if(allocated_block->request->elongate)
+	if (allocated_block->request->elongate)
 		mvwprintw(text_win, main_ycord,
 			  main_xcord, "Y");
 	else
@@ -1428,41 +1312,46 @@ static void _print_text_command(allocated_block_t *allocated_block)
 		  main_xcord, "%d", allocated_block->request->size);
 	main_xcord += 10;
 
-	if(allocated_block->request->conn_type == SELECT_SMALL) {
-#ifndef HAVE_BGL
+	if (allocated_block->request->conn_type[0] >= SELECT_SMALL) {
+#ifdef HAVE_BGP
 		mvwprintw(text_win, main_ycord,
 			  main_xcord, "%d",
 			  allocated_block->request->small16);
 		main_xcord += 5;
 #endif
+
 		mvwprintw(text_win, main_ycord,
 			  main_xcord, "%d",
 			  allocated_block->request->small32);
 		main_xcord += 5;
+
 #ifndef HAVE_BGL
 		mvwprintw(text_win, main_ycord,
 			  main_xcord, "%d",
 			  allocated_block->request->small64);
 		main_xcord += 5;
 #endif
+
 		mvwprintw(text_win, main_ycord,
 			  main_xcord, "%d",
 			  allocated_block->request->small128);
 		main_xcord += 6;
+
 #ifndef HAVE_BGL
 		mvwprintw(text_win, main_ycord,
 			  main_xcord, "%d",
 			  allocated_block->request->small256);
 		main_xcord += 6;
 #endif
-
-	} else
-#ifndef HAVE_BGL
+	} else {
+#ifdef HAVE_BGL
+		main_xcord += 11;
+#elif defined HAVE_BGP
 		main_xcord += 27;
 #else
-		main_xcord += 11;
+		main_xcord += 22;
 #endif
-
+	}
 	mvwprintw(text_win, main_ycord,
 		  main_xcord, "%s",
 		  allocated_block->request->save_name);
@@ -1479,30 +1368,59 @@ void get_command(void)
 
 	int text_width, text_startx;
 	allocated_block_t *allocated_block = NULL;
-	int i=0;
-	int count=0;
+	int i = 0;
+	int count = 0;
 
-	WINDOW *command_win;
-        List allocated_blocks;
+	WINDOW *command_win = NULL;
+        List allocated_blocks = NULL;
 	ListIterator results_i;
 
-	if(params.commandline) {
+	if (params.commandline && !params.command) {
 		printf("Configure won't work with commandline mode.\n");
 		printf("Please remove the -c from the commandline.\n");
-		ba_fini();
+		bg_configure_ba_fini();
 		exit(0);
 	}
-	init_wires();
-	allocated_blocks = list_create(NULL);
 
-	text_width = text_win->_maxx;
-	text_startx = text_win->_begx;
-	command_win = newwin(3, text_width - 1, LINES - 4, text_startx + 1);
-	echo();
+	if (working_cluster_rec) {
+		char *cluster_name = slurm_get_cluster_name();
+		if (strcmp(working_cluster_rec->name, cluster_name)) {
+			xfree(cluster_name);
+			endwin();
+			printf("To use the configure option you must be on the "
+			       "cluster the configure is for.\nCross cluster "
+			       "support doesn't exist today.\nSorry for the "
+			       "inconvenince.\n");
+			bg_configure_ba_fini();
+			exit(0);
+		}
+		xfree(cluster_name);
+	}
+
+	bg_configure_ba_setup_wires();
+
+	color_count = 0;
+
+	allocated_blocks = list_create(_destroy_allocated_block);
+
+	if (params.commandline) {
+		snprintf(com, sizeof(com), "%s", params.command);
+		goto run_command;
+	} else {
+		/* make sure we don't get any noisy debug */
+		ba_configure_set_ba_debug_flags(0);
+
+		text_width = text_win->_maxx;
+		text_startx = text_win->_begx;
+		command_win = newwin(3, text_width - 1, LINES - 4,
+				     text_startx + 1);
+		curs_set(1);
+		echo();
+	}
 
 	while (strcmp(com, "quit")) {
 		clear_window(grid_win);
-		print_grid(0);
+		print_grid();
 		clear_window(text_win);
 		box(text_win, 0, 0);
 		box(grid_win, 0, 0);
@@ -1510,10 +1428,10 @@ void get_command(void)
 		if (!params.no_header)
 			_print_header_command();
 
-		if(error_string!=NULL) {
-			i=0;
-			while(error_string[i]!='\0') {
-				if(error_string[i]=='\n') {
+		if (error_string != NULL) {
+			i = 0;
+			while (error_string[i] != '\0') {
+				if (error_string[i] == '\n') {
 					main_ycord++;
 					main_xcord=1;
 					i++;
@@ -1526,19 +1444,19 @@ void get_command(void)
 				main_xcord++;
 			}
 			main_ycord++;
-			main_xcord=1;
-			memset(error_string,0,255);
+			main_xcord = 1;
+			memset(error_string, 0, 255);
 		}
 		results_i = list_iterator_create(allocated_blocks);
 
 		count = list_count(allocated_blocks)
 			- (LINES-(main_ycord+5));
 
-		if(count<0)
+		if (count<0)
 			count=0;
 		i=0;
-		while((allocated_block = list_next(results_i)) != NULL) {
-			if(i>=count)
+		while ((allocated_block = list_next(results_i)) != NULL) {
+			if (i >= count)
 				_print_text_command(allocated_block);
 			i++;
 		}
@@ -1558,10 +1476,12 @@ void get_command(void)
 
 		if (!strcmp(com, "exit")) {
 			endwin();
-			_delete_allocated_blocks(allocated_blocks);
-			ba_fini();
+			if (allocated_blocks)
+				list_destroy(allocated_blocks);
+			bg_configure_ba_fini();
 			exit(0);
 		}
+	run_command:
 
 		if (!strcmp(com, "quit") || !strcmp(com, "\\q")) {
 			break;
@@ -1594,8 +1514,6 @@ void get_command(void)
 			|| !strncasecmp(com, "delete", 6)
 			|| !strncasecmp(com, "drop", 4)) {
 			_remove_allocation(com, allocated_blocks);
-		} else if (!strncasecmp(com, "alter", 5)) {
-			_alter_allocation(com, allocated_blocks);
 		} else if (!strncasecmp(com, "create", 6)) {
 			_create_allocation(com, allocated_blocks);
 		} else if (!strncasecmp(com, "copy", 4)
@@ -1608,21 +1526,28 @@ void get_command(void)
 			_load_configuration(com, allocated_blocks);
 		} else if (!strncasecmp(com, "clear all", 9)
 			|| !strncasecmp(com, "clear", 5)) {
-			_delete_allocated_blocks(allocated_blocks);
-			allocated_blocks = list_create(NULL);
+			list_flush(allocated_blocks);
 		} else {
-			memset(error_string,0,255);
+			memset(error_string, 0, 255);
 			sprintf(error_string, "Unknown command '%s'",com);
 		}
+
+		if (params.commandline) {
+			bg_configure_ba_fini();
+			exit(1);
+		}
 	}
-	_delete_allocated_blocks(allocated_blocks);
+	if (allocated_blocks)
+		list_destroy(allocated_blocks);
 	params.display = 0;
 	noecho();
 
 	clear_window(text_win);
 	main_xcord = 1;
 	main_ycord = 1;
+	curs_set(0);
 	print_date();
 	get_job();
 	return;
 }
+
diff --git a/src/smap/grid_functions.c b/src/smap/grid_functions.c
index ea0d68791..667467460 100644
--- a/src/smap/grid_functions.c
+++ b/src/smap/grid_functions.c
@@ -2,14 +2,14 @@
  *  grid_functions.c - Functions related to curses display of smap.
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2011 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -40,156 +40,315 @@
 
 #include "src/smap/smap.h"
 
-static int _coord(char coord)
+static void _calc_coord_3d(int x, int y, int z, int default_y_offset,
+			   int *coord_x, int *coord_y, int *dim_size)
 {
-	if ((coord >= '0') && (coord <= '9'))
-		return (coord - '0');
-	if ((coord >= 'A') && (coord <= 'Z'))
-		return (coord - 'A');
-	return -1;
+	int y_offset;
+
+	*coord_x = (x + (dim_size[2] - 1)) - z + 2;
+	y_offset = default_y_offset - (dim_size[2] * y);
+	*coord_y = (y_offset - y) + z;
 }
 
-extern int set_grid_inx(int start, int end, int count)
+static void _calc_coord_4d(int a, int x, int y, int z, int default_y_offset,
+			   int *coord_x, int *coord_y, int* dim_size)
 {
-	int x, y, z;
-
-	for (y = DIM_SIZE[Y] - 1; y >= 0; y--) {
-		for (z = 0; z < DIM_SIZE[Z]; z++) {
-			for (x = 0; x < DIM_SIZE[X]; x++) {
-				if ((ba_system_ptr->grid[x][y][z].index
-				     < start)
-				||  (ba_system_ptr->grid[x][y][z].index
-				     > end))
-					continue;
-				if ((ba_system_ptr->grid[x][y][z].state
-				     == NODE_STATE_DOWN)
-				    ||  (ba_system_ptr->grid[x][y][z].state
-					 & NODE_STATE_DRAIN))
-					continue;
+	int x_offset, y_offset;
 
-				ba_system_ptr->grid[x][y][z].letter =
-					letters[count%62];
-				ba_system_ptr->grid[x][y][z].color =
-					colors[count%6];
-			}
+	x_offset = (dim_size[1] + dim_size[3]) * a + 2;
+	*coord_x = x_offset + (x + (dim_size[3] - 1)) - z;
+	y_offset = default_y_offset - (dim_size[3] * y);
+	*coord_y = (y_offset - y) + z;
+}
+
+extern int *get_cluster_dims(node_info_msg_t *node_info_ptr)
+{
+	int *dim_size = slurmdb_setup_cluster_dim_size();
+
+	if ((params.cluster_flags & CLUSTER_FLAG_CRAYXT) && dim_size) {
+		static int cray_dim_size[3] = {-1, -1, -1};
+		/* For now, assume one node per coordinate all
+		 * May need to refine. */
+		cray_dim_size[0] = dim_size[0];
+		cray_dim_size[1] = dim_size[1];
+		cray_dim_size[2] = dim_size[2];
+		return cray_dim_size;
+	}
+
+	if (dim_size == NULL) {
+		static int default_dim_size[1];
+		default_dim_size[0] = node_info_ptr->record_count;
+		return default_dim_size;
+	}
+
+	return dim_size;
+}
+
+#ifdef HAVE_BG
+static void _internal_setup_grid(int level, uint16_t *coords)
+{
+	ba_mp_t *ba_mp;
+	smap_node_t *smap_node;
+
+	if (level > params.cluster_dims)
+		return;
+
+	if (level < params.cluster_dims) {
+		for (coords[level] = 0;
+		     coords[level] < dim_size[level];
+		     coords[level]++) {
+			/* handle the outer dims here */
+			_internal_setup_grid(level+1, coords);
 		}
+		return;
 	}
-	return 1;
+	ba_mp = bg_configure_coord2ba_mp(coords);
+
+	if (!ba_mp || ba_mp->index > smap_system_ptr->node_cnt)
+		return;
+	smap_node = xmalloc(sizeof(smap_node_t));
+	smap_node->coord = xmalloc(sizeof(uint16_t) * params.cluster_dims);
+
+	memcpy(smap_node->coord, coords,
+	       sizeof(uint16_t) * params.cluster_dims);
+	smap_node->index = ba_mp->index;
+	smap_system_ptr->grid[smap_node->index] = smap_node;
 }
+#endif
 
-extern int set_grid_inx2(char *node_names, int count)
+extern void set_grid_inx(int start, int end, int count)
 {
-	hostlist_t hl;
-	hostlist_iterator_t hl_iter;
-	char *host;
-	int i, x, y, z;
-
-	hl = hostlist_create(node_names);
-	hl_iter = hostlist_iterator_create(hl);
-	while ((host = hostlist_next(hl_iter))) {
-		i = strlen(host);
-		x = _coord(host[i-3]);
-		y = _coord(host[i-2]);
-		z = _coord(host[i-1]);
-		ba_system_ptr->grid[x][y][z].letter = letters[count%62];
-		ba_system_ptr->grid[x][y][z].color  = colors[count%6];
-		free(host);
+	int i;
+
+	if (!smap_system_ptr || !smap_system_ptr->grid)
+		return;
+
+	for (i = 0; i < smap_system_ptr->node_cnt; i++) {
+		if ((smap_system_ptr->grid[i]->index < start) ||
+		    (smap_system_ptr->grid[i]->index > end))
+			continue;
+		if ((smap_system_ptr->grid[i]->state == NODE_STATE_DOWN) ||
+		    (smap_system_ptr->grid[i]->state & NODE_STATE_DRAIN))
+			continue;
+
+		smap_system_ptr->grid[i]->letter = letters[count%62];
+		smap_system_ptr->grid[i]->color  = colors[count%6];
 	}
-	hostlist_iterator_destroy(hl_iter);
-	return 1;
 }
 
 /* This function is only called when HAVE_BG is set */
 extern int set_grid_bg(int *start, int *end, int count, int set)
 {
-	int x=0, y=0, z=0;
-	int i = 0;
+	int node_cnt = 0, i, j;
+
+	if (!smap_system_ptr || !smap_system_ptr->grid)
+		return 0;
+
+	for (i = 0; i < smap_system_ptr->node_cnt; i++) {
+		for (j = 0; j < params.cluster_dims; j++) {
+			if ((smap_system_ptr->grid[i]->coord[j] < start[j]) ||
+			    (smap_system_ptr->grid[i]->coord[j] > end[j]))
+				break;
+		}
+		if (j < params.cluster_dims)
+			continue;	/* outside of boundary */
+		if (set ||
+		    ((smap_system_ptr->grid[i]->letter == '.') &&
+		     (smap_system_ptr->grid[i]->letter != '#'))) {
+			smap_system_ptr->grid[i]->letter = letters[count%62];
+			smap_system_ptr->grid[i]->color  = colors[count%6];
+		}
+		node_cnt++;
+	}
+	return node_cnt;
+}
+
+/* Build the smap_system_ptr structure from the node records */
+extern void init_grid(node_info_msg_t *node_info_ptr)
+{
+	int i, j, len;
+	int default_y_offset = 0;
+	smap_node_t *smap_node;
+
+	smap_system_ptr = xmalloc(sizeof(smap_system_t));
+
+	if (!node_info_ptr) {
+		if (params.display != COMMANDS)
+			return;
+#ifdef HAVE_BG
+		uint16_t coords[params.cluster_dims];
+
+		smap_system_ptr->node_cnt = 1;
+		for (i=0; i<params.cluster_dims; i++)
+			smap_system_ptr->node_cnt *= dim_size[i];
+		smap_system_ptr->grid = xmalloc(sizeof(smap_node_t *) *
+						smap_system_ptr->node_cnt);
+		/* We need to make sure we set up the wires if we
+		   don't have a node_info_ptr.
+		*/
+		bg_configure_ba_setup_wires();
 
-	assert(end[X] < DIM_SIZE[X]);
-	assert(start[X] >= 0);
-	assert(count >= 0);
-	assert(set >= 0);
-	assert(set <= 2);
-	assert(end[Y] < DIM_SIZE[Y]);
-	assert(start[Y] >= 0);
-	assert(end[Z] < DIM_SIZE[Z]);
-	assert(start[Z] >= 0);
-
-	for (x = start[X]; x <= end[X]; x++) {
-		for (y = start[Y]; y <= end[Y]; y++) {
-			for (z = start[Z]; z <= end[Z]; z++) {
-				/* set the color and letter of the
-				   block if the set flag is specified
-				   or if the letter hasn't been set yet
-				*/
-				if(set
-				   || ((ba_system_ptr->grid[x][y][z].letter
-					== '.')
-				       && (ba_system_ptr->grid[x][y][z].letter
-					   != '#'))) {
-
-						ba_system_ptr->
-							grid[x][y][z].letter =
-							letters[count%62];
-						ba_system_ptr->
-							grid[x][y][z].color =
-							colors[count%6];
+		_internal_setup_grid(0, coords);
+#endif
+	} else {
+		smap_system_ptr->grid = xmalloc(sizeof(smap_node_t *) *
+						node_info_ptr->record_count);
+		for (i = 0; i < node_info_ptr->record_count; i++) {
+			node_info_t *node_ptr = &node_info_ptr->node_array[i];
+
+			if ((node_ptr->name == NULL) ||
+			    (node_ptr->name[0] == '\0'))
+				continue;
+
+			smap_node = xmalloc(sizeof(smap_node_t));
+
+			len = strlen(node_ptr->name);
+			if (params.cluster_dims == 1) {
+				smap_node->coord = xmalloc(sizeof(uint16_t));
+				j = len - 1;
+				while ((node_ptr->name[j] >= '0') &&
+				       (node_ptr->name[j] <= '9')) {
+					smap_node->coord[0] *= 10;
+					smap_node->coord[0] +=
+						node_ptr->name[j] - '0';
+					j++;
+				}
+			} else if (params.cluster_flags & CLUSTER_FLAG_CRAYXT) {
+				int len_a, len_h;
+				len_a = strlen(node_ptr->node_addr);
+				len_h = strlen(node_ptr->node_hostname);
+				if (len_a < params.cluster_dims) {
+					printf("Invalid node addr %s\n",
+					       node_ptr->node_addr);
+					xfree(smap_node);
+					continue;
+				}
+				if (len_h < 1) {
+					printf("Invalid node hostname %s\n",
+					       node_ptr->node_hostname);
+					xfree(smap_node);
+					continue;
+				}
+				smap_node->coord = xmalloc(sizeof(uint16_t) *
+							   params.cluster_dims);
+				len_a -= params.cluster_dims;
+				for (j = 0; j < params.cluster_dims; j++) {
+					smap_node->coord[j] = select_char2coord(
+						node_ptr->node_addr[len_a+j]);
+				}
+			} else {
+				len -= params.cluster_dims;
+				if (len < 0) {
+					printf("Invalid node name: %s.\n",
+					       node_ptr->name);
+					xfree(smap_node);
+					continue;
+				}
+				smap_node->coord = xmalloc(sizeof(uint16_t) *
+							   params.cluster_dims);
+				for (j = 0; j < params.cluster_dims; j++) {
+					smap_node->coord[j] = select_char2coord(
+						node_ptr->name[len+j]);
 				}
-				i++;
 			}
+			smap_node->index = i;
+			smap_system_ptr->grid[i] = smap_node;
+			smap_system_ptr->node_cnt++;
 		}
 	}
 
-	return i;
+	if (params.cluster_dims == 3) {
+		default_y_offset = (dim_size[2] * dim_size[1]) +
+				   (dim_size[1] - dim_size[2]);
+	} else if (params.cluster_dims == 4) {
+		default_y_offset = (dim_size[3] * dim_size[2]) +
+				   (dim_size[2] - dim_size[3]);
+	}
+	for (i = 0; i < smap_system_ptr->node_cnt; i++) {
+		smap_node = smap_system_ptr->grid[i];
+		if (params.cluster_dims == 1) {
+			smap_node->grid_xcord = i + 1;
+			smap_node->grid_ycord = 1;
+		} else if (params.cluster_dims == 2) {
+			smap_node->grid_xcord = smap_node->coord[0] + 1;
+			smap_node->grid_ycord =
+				dim_size[1] - smap_node->coord[1];
+		} else if (params.cluster_dims == 3) {
+			_calc_coord_3d(smap_node->coord[0], smap_node->coord[1],
+				       smap_node->coord[2],
+				       default_y_offset,
+				       &smap_node->grid_xcord,
+				       &smap_node->grid_ycord, dim_size);
+		} else if (params.cluster_dims == 4) {
+			_calc_coord_4d(smap_node->coord[0], smap_node->coord[1],
+				       smap_node->coord[2], smap_node->coord[3],
+				       default_y_offset,
+				       &smap_node->grid_xcord,
+				       &smap_node->grid_ycord, dim_size);
+		}
+	}
 }
 
-/* print_grid - print values of every grid point */
-extern void print_grid(int dir)
+extern void clear_grid(void)
 {
-	int x;
-	int grid_xcord, grid_ycord = 2;
-	int y, z, offset = DIM_SIZE[Z];
-
-	for (y = DIM_SIZE[Y] - 1; y >= 0; y--) {
-		offset = DIM_SIZE[Z] + 1;
-		for (z = 0; z < DIM_SIZE[Z]; z++) {
-			grid_xcord = offset;
-
-			for (x = 0; x < DIM_SIZE[X]; x++) {
-				if (ba_system_ptr->grid[x][y][z].color)
-					init_pair(ba_system_ptr->
-						  grid[x][y][z].color,
-						  ba_system_ptr->
-						  grid[x][y][z].color,
-						  COLOR_BLACK);
-				else
-					init_pair(ba_system_ptr->
-						  grid[x][y][z].color,
-						  ba_system_ptr->
-						  grid[x][y][z].color,
-                                                  7);
-
-				wattron(grid_win,
-					COLOR_PAIR(ba_system_ptr->
-						   grid[x][y][z].color));
-
-				mvwprintw(grid_win,
-					  grid_ycord, grid_xcord, "%c",
-					  ba_system_ptr->grid[x][y][z].letter);
-				wattroff(grid_win,
-					 COLOR_PAIR(ba_system_ptr->
-						    grid[x][y][z].color));
-				grid_xcord++;
-			}
-			grid_ycord++;
-			offset--;
+	smap_node_t *smap_node;
+	int i;
+
+	if (!smap_system_ptr || !smap_system_ptr->grid)
+		return;
+
+	for (i = 0; i < smap_system_ptr->node_cnt; i++) {
+		smap_node = smap_system_ptr->grid[i];
+		smap_node->color = COLOR_WHITE;
+		smap_node->letter = '.';
+	}
+}
+
+extern void free_grid(void)
+{
+	int i;
+
+	if (!smap_system_ptr)
+		return;
+
+	if (smap_system_ptr->grid) {
+		for (i = 0; i < smap_system_ptr->node_cnt; i++) {
+			smap_node_t *smap_node = smap_system_ptr->grid[i];
+			xfree(smap_node->coord);
+			xfree(smap_node);
 		}
-		grid_ycord++;
+		xfree(smap_system_ptr->grid);
+	}
+	xfree(smap_system_ptr);
+}
+
+
+/* print_grid - print values of every grid point */
+extern void print_grid(void)
+{
+	int i;
+
+	if (!smap_system_ptr || !smap_system_ptr->grid)
+		return;
+
+	for (i = 0; i < smap_system_ptr->node_cnt; i++) {
+		if (smap_system_ptr->grid[i]->color)
+			init_pair(smap_system_ptr->grid[i]->color,
+				  smap_system_ptr->grid[i]->color, COLOR_BLACK);
+		else
+			init_pair(smap_system_ptr->grid[i]->color,
+				  smap_system_ptr->grid[i]->color, 7);
+		wattron(grid_win, COLOR_PAIR(smap_system_ptr->grid[i]->color));
+		mvwprintw(grid_win,
+			  smap_system_ptr->grid[i]->grid_ycord,
+			  smap_system_ptr->grid[i]->grid_xcord, "%c",
+			  smap_system_ptr->grid[i]->letter);
+		wattroff(grid_win, COLOR_PAIR(smap_system_ptr->grid[i]->color));
 	}
 	return;
 }
 
-bitstr_t *get_requested_node_bitmap()
+bitstr_t *get_requested_node_bitmap(void)
 {
 	static bitstr_t *bitmap = NULL;
 	static node_info_msg_t *old_node_ptr = NULL, *new_node_ptr;
@@ -197,22 +356,22 @@ bitstr_t *get_requested_node_bitmap()
 	int i = 0;
 	node_info_t *node_ptr = NULL;
 
-	if(!params.hl)
+	if (!params.hl)
 		return NULL;
 
 	if (old_node_ptr) {
-		error_code =
-			slurm_load_node(old_node_ptr->last_update,
-					&new_node_ptr, SHOW_ALL);
+		error_code = slurm_load_node(old_node_ptr->last_update,
+					     &new_node_ptr, SHOW_ALL);
 		if (error_code == SLURM_SUCCESS)
 			slurm_free_node_info_msg(old_node_ptr);
 		else if (slurm_get_errno() == SLURM_NO_CHANGE_IN_DATA)
 			return bitmap;
-	} else
+	} else {
 		error_code = slurm_load_node((time_t) NULL, &new_node_ptr,
 					     SHOW_ALL);
+	}
 
-	if(bitmap)
+	if (bitmap)
 		FREE_NULL_BITMAP(bitmap);
 
 	if (error_code) {
@@ -223,9 +382,9 @@ bitstr_t *get_requested_node_bitmap()
 	old_node_ptr = new_node_ptr;
 
 	bitmap = bit_alloc(old_node_ptr->record_count);
-	for(i=0; i<old_node_ptr->record_count; i++) {
+	for (i = 0; i < old_node_ptr->record_count; i++) {
 		node_ptr = &(old_node_ptr->node_array[i]);
-		if(hostlist_find(params.hl, node_ptr->name) != -1)
+		if (hostlist_find(params.hl, node_ptr->name) != -1)
 			bit_set(bitmap, i);
 	}
 	return bitmap;
diff --git a/src/smap/job_functions.c b/src/smap/job_functions.c
index 9f4c2a438..3a5dd6308 100644
--- a/src/smap/job_functions.c
+++ b/src/smap/job_functions.c
@@ -2,14 +2,14 @@
  *  job_functions.c - Functions related to job display mode of smap.
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
- *  Copyright (C) 2008-2010 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2011 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -60,10 +60,10 @@ extern void get_job(void)
 	bitstr_t *nodes_req = NULL;
 	static uint16_t last_flags = 0;
 
-	if(params.all_flag)
+	if (params.all_flag)
 		show_flags |= SHOW_ALL;
 	if (job_info_ptr) {
-		if(show_flags != last_flags)
+		if (show_flags != last_flags)
 			job_info_ptr->last_update = 0;
 		error_code = slurm_load_jobs(job_info_ptr->last_update,
 					     &new_job_ptr, show_flags);
@@ -80,7 +80,7 @@ extern void get_job(void)
 	last_flags = show_flags;
 	if (error_code) {
 		if (quiet_flag != 1) {
-			if(!params.commandline) {
+			if (!params.commandline) {
 				mvwprintw(text_win,
 					  main_ycord, 1,
 					  "slurm_load_job: %s",
@@ -101,52 +101,44 @@ extern void get_job(void)
 	else
 		recs = 0;
 
-	if(!params.commandline)
-		if((text_line_cnt+printed_jobs) > count)
+	if (!params.commandline)
+		if ((text_line_cnt+printed_jobs) > count)
 			text_line_cnt--;
 	printed_jobs = 0;
 	count = 0;
 
-	if(params.hl)
+	if (params.hl)
 		nodes_req = get_requested_node_bitmap();
 	for (i = 0; i < recs; i++) {
 		job_ptr = &(new_job_ptr->job_array[i]);
-		if(!IS_JOB_PENDING(job_ptr) && !IS_JOB_RUNNING(job_ptr)
-		   && !IS_JOB_SUSPENDED(job_ptr)
-		   && !IS_JOB_COMPLETING(job_ptr))
+		if (!IS_JOB_PENDING(job_ptr)   && !IS_JOB_RUNNING(job_ptr) &&
+		    !IS_JOB_SUSPENDED(job_ptr) && !IS_JOB_COMPLETING(job_ptr))
 			continue;	/* job has completed */
-		if(nodes_req) {
+		if (nodes_req) {
 			int overlap = 0;
 			bitstr_t *loc_bitmap = bit_alloc(bit_size(nodes_req));
 			inx2bitstr(loc_bitmap, job_ptr->node_inx);
 			overlap = bit_overlap(loc_bitmap, nodes_req);
 			FREE_NULL_BITMAP(loc_bitmap);
-			if(!overlap)
+			if (!overlap)
 				continue;
 		}
 
 		if (job_ptr->node_inx[0] != -1) {
-			if (((params.cluster_flags & CLUSTER_FLAG_BG) == 0) &&
-			    (params.cluster_dims == 3)) {
-				set_grid_inx2(job_ptr->nodes, count);
-			} else {
-				int j = 0;
-				job_ptr->num_nodes = 0;
-				while (job_ptr->node_inx[j] >= 0) {
-					job_ptr->num_nodes +=
-						(job_ptr->node_inx[j + 1] + 1) -
-						 job_ptr->node_inx[j];
-					set_grid_inx(job_ptr->node_inx[j],
-						     job_ptr->node_inx[j + 1],
-						     count);
-					j += 2;
-				}
+			int j = 0;
+			job_ptr->num_nodes = 0;
+			while (job_ptr->node_inx[j] >= 0) {
+				job_ptr->num_nodes +=
+					(job_ptr->node_inx[j + 1] + 1) -
+					 job_ptr->node_inx[j];
+				set_grid_inx(job_ptr->node_inx[j],
+					     job_ptr->node_inx[j + 1], count);
+				j += 2;
 			}
 
-			if(!params.commandline) {
-				if((count>=text_line_cnt)
-				   && (printed_jobs
-				       < (text_win->_maxy-3))) {
+			if (!params.commandline) {
+				if ((count >= text_line_cnt) &&
+				    (printed_jobs < (text_win->_maxy-3))) {
 					job_ptr->num_cpus =
 						(int)letters[count%62];
 					wattron(text_win,
@@ -162,8 +154,8 @@ extern void get_job(void)
 			}
 			count++;
 		}
-		if(count==128)
-			count=0;
+		if (count == 128)
+			count = 0;
 	}
 
 	for (i = 0; i < recs; i++) {
@@ -172,10 +164,9 @@ extern void get_job(void)
 		if (!IS_JOB_PENDING(job_ptr))
 			continue;	/* job has completed */
 
-		if(!params.commandline) {
-			if((count>=text_line_cnt)
-			   && (printed_jobs
-			       < (text_win->_maxy-3))) {
+		if (!params.commandline) {
+			if ((count>=text_line_cnt) &&
+			    (printed_jobs < (text_win->_maxy-3))) {
 				xfree(job_ptr->nodes);
 				job_ptr->nodes = xstrdup("waiting...");
 				job_ptr->num_cpus = (int) letters[count%62];
@@ -195,14 +186,14 @@ extern void get_job(void)
 		}
 		count++;
 
-		if(count==128)
-			count=0;
+		if (count == 128)
+			count = 0;
 	}
 
 	if (params.commandline && params.iterate)
 		printf("\n");
 
-	if(!params.commandline)
+	if (!params.commandline)
 		main_ycord++;
 
 	job_info_ptr = new_job_ptr;
@@ -211,7 +202,7 @@ extern void get_job(void)
 
 static void _print_header_job(void)
 {
-	if(!params.commandline) {
+	if (!params.commandline) {
 		mvwprintw(text_win, main_ycord,
 			  main_xcord, "ID");
 		main_xcord += 3;
@@ -221,12 +212,12 @@ static void _print_header_job(void)
 		mvwprintw(text_win, main_ycord,
 			  main_xcord, "PARTITION");
 		main_xcord += 10;
-		if(params.cluster_flags & CLUSTER_FLAG_BG) {
+		if (params.cluster_flags & CLUSTER_FLAG_BG) {
 			mvwprintw(text_win, main_ycord,
 				  main_xcord, "BG_BLOCK");
 			main_xcord += 18;
 		}
-		if(params.cluster_flags & CLUSTER_FLAG_CRAYXT) {
+		if (params.cluster_flags & CLUSTER_FLAG_CRAYXT) {
 			mvwprintw(text_win, main_ycord,
 				  main_xcord, "RESV_ID");
 			main_xcord += 18;
@@ -246,7 +237,7 @@ static void _print_header_job(void)
 		mvwprintw(text_win, main_ycord,
 			  main_xcord, "NODES");
 		main_xcord += 6;
-		if(params.cluster_flags & CLUSTER_FLAG_BG)
+		if (params.cluster_flags & CLUSTER_FLAG_BG)
 			mvwprintw(text_win, main_ycord,
 				  main_xcord, "BP_LIST");
 		else
@@ -257,19 +248,39 @@ static void _print_header_job(void)
 	} else {
 		printf("   JOBID ");
 		printf("PARTITION ");
-		if(params.cluster_flags & CLUSTER_FLAG_BG)
+		if (params.cluster_flags & CLUSTER_FLAG_BG)
 			printf("        BG_BLOCK ");
 		printf("    USER ");
 		printf("  NAME ");
 		printf("ST ");
 		printf("      TIME ");
 		printf("NODES ");
-		if(params.cluster_flags & CLUSTER_FLAG_BG)
+		if (params.cluster_flags & CLUSTER_FLAG_BG)
 			printf("BP_LIST\n");
 		else
 			printf("NODELIST\n");
 	}
 }
+static long _job_time_used(job_info_t * job_ptr)
+{
+	time_t end_time;
+
+	if ((job_ptr->start_time == 0) || IS_JOB_PENDING(job_ptr))
+		return 0L;
+
+	if (IS_JOB_SUSPENDED(job_ptr))
+		return (long) job_ptr->pre_sus_time;
+
+	if (IS_JOB_RUNNING(job_ptr) || (job_ptr->end_time == 0))
+		end_time = time(NULL);
+	else
+		end_time = job_ptr->end_time;
+
+	if (job_ptr->suspend_time)
+		return (long) (difftime(end_time, job_ptr->suspend_time)
+				+ job_ptr->pre_sus_time);
+	return (long) (difftime(end_time, job_ptr->start_time));
+}
 
 static int _print_text_job(job_info_t * job_ptr)
 {
@@ -283,16 +294,15 @@ static int _print_text_job(job_info_t * job_ptr)
 	char tmp_cnt[8];
 	uint32_t node_cnt = 0;
 	char *ionodes = NULL, *uname;
-	time_t now_time = time(NULL);
 
-	if(params.cluster_flags & CLUSTER_FLAG_BG) {
+	if (params.cluster_flags & CLUSTER_FLAG_BG) {
 		select_g_select_jobinfo_get(job_ptr->select_jobinfo,
 					    SELECT_JOBDATA_IONODES,
 					    &ionodes);
 		select_g_select_jobinfo_get(job_ptr->select_jobinfo,
 					    SELECT_JOBDATA_NODE_CNT,
 					    &node_cnt);
-		if(!strcasecmp(job_ptr->nodes,"waiting..."))
+		if (!strcasecmp(job_ptr->nodes,"waiting..."))
 			xfree(ionodes);
 	} else
 		node_cnt = job_ptr->num_nodes;
@@ -300,13 +310,13 @@ static int _print_text_job(job_info_t * job_ptr)
 	if ((node_cnt  == 0) || (node_cnt == NO_VAL))
 		node_cnt = _get_node_cnt(job_ptr);
 
-	if(params.cluster_flags & CLUSTER_FLAG_BG)
+	if (params.cluster_flags & CLUSTER_FLAG_BG)
 		convert_num_unit((float)node_cnt, tmp_cnt,
 				 sizeof(tmp_cnt), UNIT_NONE);
 	else
 		snprintf(tmp_cnt, sizeof(tmp_cnt), "%d", node_cnt);
 
-	if(!params.commandline) {
+	if (!params.commandline) {
 		mvwprintw(text_win, main_ycord,
 			  main_xcord, "%c", job_ptr->num_cpus);
 		main_xcord += 3;
@@ -316,7 +326,7 @@ static int _print_text_job(job_info_t * job_ptr)
 		mvwprintw(text_win, main_ycord,
 			  main_xcord, "%.10s", job_ptr->partition);
 		main_xcord += 10;
-		if(params.cluster_flags & CLUSTER_FLAG_BG) {
+		if (params.cluster_flags & CLUSTER_FLAG_BG) {
 			mvwprintw(text_win, main_ycord,
 				  main_xcord, "%.16s",
 				  select_g_select_jobinfo_sprint(
@@ -326,7 +336,7 @@ static int _print_text_job(job_info_t * job_ptr)
 					  SELECT_PRINT_BG_ID));
 			main_xcord += 18;
 		}
-		if(params.cluster_flags & CLUSTER_FLAG_CRAYXT) {
+		if (params.cluster_flags & CLUSTER_FLAG_CRAYXT) {
 			mvwprintw(text_win, main_ycord,
 				  main_xcord, "%.16s",
 				  select_g_select_jobinfo_sprint(
@@ -347,10 +357,10 @@ static int _print_text_job(job_info_t * job_ptr)
 			  main_xcord, "%.2s",
 			  job_state_string_compact(job_ptr->job_state));
 		main_xcord += 2;
-		if(!strcasecmp(job_ptr->nodes,"waiting...")) {
+		if (!strcasecmp(job_ptr->nodes,"waiting...")) {
 			sprintf(time_buf,"00:00:00");
 		} else {
-			time_diff = now_time - job_ptr->start_time;
+			time_diff = (time_t) _job_time_used(job_ptr);
 			secs2time_str(time_diff, time_buf, sizeof(time_buf));
 		}
 		width = strlen(time_buf);
@@ -388,7 +398,7 @@ static int _print_text_job(job_info_t * job_ptr)
 			}
 			i++;
 		}
-		if(ionodes) {
+		if (ionodes) {
 			mvwprintw(text_win,
 				  main_ycord,
 				  main_xcord, "[%s]",
@@ -402,13 +412,13 @@ static int _print_text_job(job_info_t * job_ptr)
 	} else {
 		printf("%8d ", job_ptr->job_id);
 		printf("%9.9s ", job_ptr->partition);
-		if(params.cluster_flags & CLUSTER_FLAG_BG)
+		if (params.cluster_flags & CLUSTER_FLAG_BG)
 			printf("%16.16s ",
 			       select_g_select_jobinfo_sprint(
 				       job_ptr->select_jobinfo,
 				       time_buf, sizeof(time_buf),
 				       SELECT_PRINT_BG_ID));
-		if(params.cluster_flags & CLUSTER_FLAG_CRAYXT)
+		if (params.cluster_flags & CLUSTER_FLAG_CRAYXT)
 			printf("%16.16s ",
 			       select_g_select_jobinfo_sprint(
 				       job_ptr->select_jobinfo,
@@ -420,10 +430,10 @@ static int _print_text_job(job_info_t * job_ptr)
 		printf("%6.6s ", job_ptr->name);
 		printf("%2.2s ",
 		       job_state_string_compact(job_ptr->job_state));
-		if(!strcasecmp(job_ptr->nodes,"waiting...")) {
+		if (!strcasecmp(job_ptr->nodes,"waiting...")) {
 			sprintf(time_buf,"00:00:00");
 		} else {
-			time_diff = now_time - job_ptr->start_time;
+			time_diff = (time_t) _job_time_used(job_ptr);
 			secs2time_str(time_diff, time_buf, sizeof(time_buf));
 		}
 
@@ -432,7 +442,7 @@ static int _print_text_job(job_info_t * job_ptr)
 		printf("%5s ", tmp_cnt);
 
 		printf("%s", job_ptr->nodes);
-		if(ionodes) {
+		if (ionodes) {
 			printf("[%s]", ionodes);
 			xfree(ionodes);
 		}
diff --git a/src/smap/opts.c b/src/smap/opts.c
index 11aee909e..fb86a4f41 100644
--- a/src/smap/opts.c
+++ b/src/smap/opts.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -55,6 +55,7 @@ extern void parse_command_line(int argc, char *argv[])
 
 	static struct option long_options[] = {
 		{"commandline", no_argument, 0, 'c'},
+		{"command", required_argument, 0, 'C'},
 		{"display", required_argument, 0, 'D'},
 		{"noheader", no_argument, 0, 'h'},
 		{"iterate", required_argument, 0, 'i'},
@@ -75,7 +76,7 @@ extern void parse_command_line(int argc, char *argv[])
 	memset(&params, 0, sizeof(params));
 
 	while ((opt_char =
-		getopt_long(argc, argv, "cD:hi:I:Hn:M:QR:vV",
+		getopt_long(argc, argv, "cC:D:hi:I:Hn:M:QR:vV",
 			    long_options, &option_index)) != -1) {
 		switch (opt_char) {
 		case '?':
@@ -86,6 +87,9 @@ extern void parse_command_line(int argc, char *argv[])
 		case 'c':
 			params.commandline = TRUE;
 			break;
+		case 'C':
+			params.command = xstrdup(optarg);
+			break;
 		case 'D':
 			if (!strcmp(optarg, "j"))
 				tmp = JOBS;
@@ -132,7 +136,10 @@ extern void parse_command_line(int argc, char *argv[])
 				list_destroy(params.clusters);
 			if(!(params.clusters =
 			     slurmdb_get_info_cluster(optarg))) {
-				error("'%s' invalid entry for --cluster",
+				error("'%s' can't be reached now, "
+				      "or it is an invalid entry for "
+				      "--cluster.  Use 'sacctmgr --list "
+				      "cluster' to see available clusters.",
 				      optarg);
 				exit(1);
 			}
@@ -170,12 +177,15 @@ extern void parse_command_line(int argc, char *argv[])
 			exit(0);
 		}
 	}
-	params.cluster_base = hostlist_get_base();
+
 	params.cluster_dims = slurmdb_setup_cluster_dims();
+	if (params.cluster_dims > 4)
+		fatal("smap is unable to support more than four dimensions");
+	params.cluster_base = hostlist_get_base(params.cluster_dims);
 	params.cluster_flags = slurmdb_setup_cluster_flags();
 }
 
-extern void print_date()
+extern void print_date(void)
 {
 	time_t now_time = time(NULL);
 
@@ -200,6 +210,51 @@ extern void clear_window(WINDOW *win)
 	wnoutrefresh(win);
 }
 
+extern char *resolve_mp(char *desc)
+{
+	char *ret_str = NULL;
+#if defined HAVE_BG_FILES
+	ba_mp_t *ba_mp = NULL;
+	int i;
+
+	if (!desc) {
+		ret_str = xstrdup("No Description given.\n");
+		goto fini;
+	}
+
+#ifdef HAVE_BG
+	bg_configure_ba_setup_wires();
+#endif
+	i = strlen(desc) - params.cluster_dims;
+	if (i < 0) {
+		ret_str = xstrdup_printf("Must enter %d coords to resolve.\n",
+					 params.cluster_dims);
+		goto fini;
+	}
+
+	if (desc[0] != 'R') {
+		ba_mp = bg_configure_str2ba_mp(desc+i);
+		if (ba_mp)
+			ret_str = xstrdup_printf("%s resolves to %s\n",
+						 ba_mp->coord_str, ba_mp->loc);
+		else
+			ret_str = xstrdup_printf("%s has no resolve\n", desc+i);
+	} else {
+		ba_mp = bg_configure_loc2ba_mp(desc);
+		if (ba_mp)
+			ret_str = xstrdup_printf("%s resolves to %s\n",
+						 desc, ba_mp->coord_str);
+		else
+			ret_str = xstrdup_printf("%s has no resolve.\n", desc);
+	}
+fini:
+#else
+	ret_str = xstrdup("Must be physically on a BlueGene system for support "
+			  "of resolve option.\n");
+#endif
+	return ret_str;
+}
+
 static void _usage(void)
 {
 #ifdef HAVE_BG
diff --git a/src/smap/partition_functions.c b/src/smap/partition_functions.c
index 6ee360735..604b1abde 100644
--- a/src/smap/partition_functions.c
+++ b/src/smap/partition_functions.c
@@ -3,14 +3,14 @@
  *  mode of smap.
  *****************************************************************************
  *  Copyright (C) 2004-2007 The Regents of the University of California.
- *  Copyright (C) 2008-2010 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2011 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -42,43 +42,42 @@
 #include "src/smap/smap.h"
 #include "src/common/node_select.h"
 #include "src/common/parse_time.h"
-#include "src/plugins/select/bluegene/plugin/bluegene.h"
 
 #define _DEBUG 0
 
 typedef struct {
 	char *bg_block_name;
-	enum connection_type bg_conn_type;
-	enum node_use_type bg_node_use;
+	uint16_t bg_conn_type[HIGHEST_DIMENSIONS];
+	uint16_t bg_node_use;
 	char *bg_user_name;
-	char *ionodes;
+	char *ionode_str;
 	int job_running;
 	int letter_num;
 	List nodelist;
-	char *nodes;
-	int node_cnt;
+	char *mp_str;
+	int cnode_cnt;
 	bool printed;
 	int size;
 	char *slurm_part_name;
-	rm_partition_state_t state;
+	uint16_t state;
 } db2_block_info_t;
 
 static List block_list = NULL;
 
-static int _marknodes(db2_block_info_t *block_ptr, int count);
+static void _block_list_del(void *object);
+static int  _in_slurm_partition(List slurm_nodes, List bg_nodes);
+static int  _list_match_all(void *object, void *key);
+static int  _make_nodelist(char *nodes, List nodelist);
+static void _marknodes(db2_block_info_t *block_ptr, int count);
+static void _nodelist_del(void *object);
 static void _print_header_part(void);
+static int  _print_rest(db2_block_info_t *block_ptr);
 static int  _print_text_part(partition_info_t *part_ptr,
 			     db2_block_info_t *db2_info_ptr);
-static void _block_list_del(void *object);
-static void _nodelist_del(void *object);
-static int _list_match_all(void *object, void *key);
-static int _in_slurm_partition(List slurm_nodes, List bg_nodes);
-static int _print_rest(db2_block_info_t *block_ptr);
-static int _make_nodelist(char *nodes, List nodelist);
 
-extern void get_slurm_part()
+extern void get_slurm_part(void)
 {
-	int error_code, i, recs, count = 0;
+	int error_code, i, j, recs, count = 0;
 	static partition_info_msg_t *part_info_ptr = NULL;
 	static partition_info_msg_t *new_part_ptr = NULL;
 	partition_info_t part;
@@ -86,10 +85,10 @@ extern void get_slurm_part()
 	bitstr_t *nodes_req = NULL;
 	static uint16_t last_flags = 0;
 
-	if(params.all_flag)
+	if (params.all_flag)
 		show_flags |= SHOW_ALL;
 	if (part_info_ptr) {
-		if(show_flags != last_flags)
+		if (show_flags != last_flags)
 			part_info_ptr->last_update = 0;
 		error_code = slurm_load_partitions(part_info_ptr->last_update,
 						   &new_part_ptr, show_flags);
@@ -107,7 +106,7 @@ extern void get_slurm_part()
 	last_flags = show_flags;
 	if (error_code) {
 		if (quiet_flag != 1) {
-			if(!params.commandline) {
+			if (!params.commandline) {
 				mvwprintw(text_win,
 					  main_ycord, 1,
 					  "slurm_load_partitions: %s",
@@ -129,37 +128,32 @@ extern void get_slurm_part()
 	else
 		recs = 0;
 	if (!params.commandline)
-		if((recs - text_line_cnt) < (text_win->_maxy-3))
+		if ((recs - text_line_cnt) < (text_win->_maxy - 3))
 			text_line_cnt--;
 
-	if(params.hl)
+	if (params.hl)
 		nodes_req = get_requested_node_bitmap();
 	for (i = 0; i < recs; i++) {
 		part = new_part_ptr->partition_array[i];
 
-		if(nodes_req) {
+		if (nodes_req) {
 			int overlap = 0;
 			bitstr_t *loc_bitmap = bit_alloc(bit_size(nodes_req));
 			inx2bitstr(loc_bitmap, part.node_inx);
 			overlap = bit_overlap(loc_bitmap, nodes_req);
 			FREE_NULL_BITMAP(loc_bitmap);
-			if(!overlap)
+			if (!overlap)
 				continue;
 		}
-		if (((params.cluster_flags & CLUSTER_FLAG_BG) == 0) &&
-		    (params.cluster_dims == 3)) {
-			set_grid_inx2(part.nodes, count);
-		} else {
-			int j = 0;
-			while (part.node_inx[j] >= 0) {
-				set_grid_inx(part.node_inx[j],
-					     part.node_inx[j + 1], count);
-				j += 2;
-			}
+		j = 0;
+		while (part.node_inx[j] >= 0) {
+			set_grid_inx(part.node_inx[j],
+				     part.node_inx[j + 1], count);
+			j += 2;
 		}
 
-		if(!params.commandline) {
-			if(i>=text_line_cnt) {
+		if (!params.commandline) {
+			if (i >= text_line_cnt) {
 				part.flags = (int) letters[count%62];
 				wattron(text_win,
 					COLOR_PAIR(colors[count%6]));
@@ -174,8 +168,8 @@ extern void get_slurm_part()
 		count++;
 
 	}
-	if(count==128)
-		count=0;
+	if (count == 128)
+		count = 0;
 	if (params.commandline && params.iterate)
 		printf("\n");
 
@@ -183,9 +177,9 @@ extern void get_slurm_part()
 	return;
 }
 
-extern void get_bg_part()
+extern void get_bg_part(void)
 {
-	int error_code, i, j, recs=0, count = 0, last_count = -1;
+	int error_code, i, recs=0, count = 0, last_count = -1;
 	static partition_info_msg_t *part_info_ptr = NULL;
 	static partition_info_msg_t *new_part_ptr = NULL;
 	static block_info_msg_t *bg_info_ptr = NULL;
@@ -198,7 +192,7 @@ extern void get_bg_part()
 	List nodelist = NULL;
 	bitstr_t *nodes_req = NULL;
 
-	if(!(params.cluster_flags & CLUSTER_FLAG_BG))
+	if (!(params.cluster_flags & CLUSTER_FLAG_BG))
 		return;
 
 	if (params.all_flag)
@@ -219,7 +213,7 @@ extern void get_bg_part()
 
 	if (error_code) {
 		if (quiet_flag != 1) {
-			if(!params.commandline) {
+			if (!params.commandline) {
 				mvwprintw(text_win,
 					  main_ycord, 1,
 					  "slurm_load_partitions: %s",
@@ -247,7 +241,7 @@ extern void get_bg_part()
 	}
 	if (error_code) {
 		if (quiet_flag != 1) {
-			if(!params.commandline) {
+			if (!params.commandline) {
 				mvwprintw(text_win,
 					  main_ycord, 1,
 					  "slurm_load_block: %s",
@@ -271,32 +265,31 @@ extern void get_bg_part()
 		}
 	}
 	if (!params.commandline)
-		if((new_bg_ptr->record_count - text_line_cnt)
+		if ((new_bg_ptr->record_count - text_line_cnt)
 		   < (text_win->_maxy-3))
 			text_line_cnt--;
-	if(params.hl)
+	if (params.hl)
 		nodes_req = get_requested_node_bitmap();
-	for (i=0; i<new_bg_ptr->record_count; i++) {
-		if(nodes_req) {
+	for (i = 0; i < new_bg_ptr->record_count; i++) {
+		if (nodes_req) {
 			int overlap = 0;
 			bitstr_t *loc_bitmap = bit_alloc(bit_size(nodes_req));
 			inx2bitstr(loc_bitmap,
-				   new_bg_ptr->block_array[i].bp_inx);
+				   new_bg_ptr->block_array[i].mp_inx);
 			overlap = bit_overlap(loc_bitmap, nodes_req);
 			FREE_NULL_BITMAP(loc_bitmap);
-			if(!overlap)
+			if (!overlap)
 				continue;
 		}
-		if(params.io_bit && new_bg_ptr->block_array[i].ionodes) {
+		if (params.io_bit && new_bg_ptr->block_array[i].ionode_str) {
 			int overlap = 0;
 			bitstr_t *loc_bitmap =
 				bit_alloc(bit_size(params.io_bit));
 			inx2bitstr(loc_bitmap,
 				   new_bg_ptr->block_array[i].ionode_inx);
-			overlap = bit_overlap(loc_bitmap,
-					      params.io_bit);
+			overlap = bit_overlap(loc_bitmap, params.io_bit);
 			FREE_NULL_BITMAP(loc_bitmap);
-			if(!overlap)
+			if (!overlap)
 				continue;
 		}
 
@@ -304,26 +297,29 @@ extern void get_bg_part()
 
 		block_ptr->bg_block_name
 			= xstrdup(new_bg_ptr->block_array[i].bg_block_id);
-		block_ptr->nodes = xstrdup(new_bg_ptr->block_array[i].nodes);
+		block_ptr->mp_str = xstrdup(new_bg_ptr->block_array[i].mp_str);
 		block_ptr->nodelist = list_create(_nodelist_del);
-		_make_nodelist(block_ptr->nodes,block_ptr->nodelist);
+		_make_nodelist(block_ptr->mp_str, block_ptr->nodelist);
 
 		block_ptr->bg_user_name
 			= xstrdup(new_bg_ptr->block_array[i].owner_name);
 		block_ptr->state = new_bg_ptr->block_array[i].state;
-		block_ptr->bg_conn_type	= new_bg_ptr->block_array[i].conn_type;
-		if(params.cluster_flags & CLUSTER_FLAG_BGL)
+
+		memcpy(block_ptr->bg_conn_type,
+		       new_bg_ptr->block_array[i].conn_type,
+		       sizeof(block_ptr->bg_conn_type));
+
+		if (params.cluster_flags & CLUSTER_FLAG_BGL)
 			block_ptr->bg_node_use =
 				new_bg_ptr->block_array[i].node_use;
 
-		block_ptr->ionodes
-			= xstrdup(new_bg_ptr->block_array[i].ionodes);
-		block_ptr->node_cnt = new_bg_ptr->block_array[i].node_cnt;
+		block_ptr->ionode_str
+			= xstrdup(new_bg_ptr->block_array[i].ionode_str);
+		block_ptr->cnode_cnt = new_bg_ptr->block_array[i].cnode_cnt;
 
 		itr = list_iterator_create(block_list);
-		while ((found_block = (db2_block_info_t*)list_next(itr))
-		       != NULL) {
-			if(!strcmp(block_ptr->nodes, found_block->nodes)) {
+		while ((found_block = (db2_block_info_t*)list_next(itr))) {
+			if (!strcmp(block_ptr->mp_str, found_block->mp_str)) {
 				block_ptr->letter_num =
 					found_block->letter_num;
 				break;
@@ -331,13 +327,13 @@ extern void get_bg_part()
 		}
 		list_iterator_destroy(itr);
 
-		if(!found_block) {
+		if (!found_block) {
 			last_count++;
 			_marknodes(block_ptr, last_count);
 		}
 		block_ptr->job_running =
 			new_bg_ptr->block_array[i].job_running;
-		if(block_ptr->bg_conn_type >= SELECT_SMALL)
+		if (block_ptr->bg_conn_type[0] >= SELECT_SMALL)
 			block_ptr->size = 0;
 
 		list_append(block_list, block_ptr);
@@ -352,19 +348,18 @@ extern void get_bg_part()
 		recs = 0;
 
 	for (i = 0; i < recs; i++) {
-		j = 0;
 		part = new_part_ptr->partition_array[i];
 
 		if (!part.nodes || (part.nodes[0] == '\0'))
 			continue;	/* empty partition */
 		nodelist = list_create(_nodelist_del);
-		_make_nodelist(part.nodes,nodelist);
+		_make_nodelist(part.nodes, nodelist);
 
 		if (block_list) {
 			itr = list_iterator_create(block_list);
 			while ((block_ptr = (db2_block_info_t*)
 				list_next(itr)) != NULL) {
-				if(_in_slurm_partition(nodelist,
+				if (_in_slurm_partition(nodelist,
 						       block_ptr->nodelist)) {
 					block_ptr->slurm_part_name
 						= xstrdup(part.name);
@@ -383,13 +378,12 @@ extern void get_bg_part()
 			if (params.commandline)
 				block_ptr->printed = 1;
 			else {
-				if(count>=text_line_cnt)
+				if (count>=text_line_cnt)
 					block_ptr->printed = 1;
 			}
 			_print_rest(block_ptr);
 			count++;
 		}
-
 		list_iterator_destroy(itr);
 	}
 
@@ -402,75 +396,50 @@ extern void get_bg_part()
 	return;
 }
 
-static int _marknodes(db2_block_info_t *block_ptr, int count)
+static void _marknodes(db2_block_info_t *block_ptr, int count)
 {
-	int j=0;
+	int i, j = 0;
 	int start[params.cluster_dims];
 	int end[params.cluster_dims];
-	int number = 0;
-	char *p = '\0';
+	char *nodes = block_ptr->mp_str;
 
 	block_ptr->letter_num = count;
-	while (block_ptr->nodes[j] != '\0') {
-		if ((block_ptr->nodes[j] == '['
-		     || block_ptr->nodes[j] == ',')
-		    && (block_ptr->nodes[j+8] == ']'
-			|| block_ptr->nodes[j+8] == ',')
-		    && (block_ptr->nodes[j+4] == 'x'
-			|| block_ptr->nodes[j+4] == '-')) {
-			j++;
-
-			number = xstrntol(block_ptr->nodes + j, &p,
-					  params.cluster_dims,
-					  params.cluster_base);
-
-			hostlist_parse_int_to_array(
-				number, start, params.cluster_dims,
-				params.cluster_base);
-			j += 4;
-			number = xstrntol(block_ptr->nodes + j, &p,
-					  params.cluster_dims,
-					  params.cluster_base);
-			hostlist_parse_int_to_array(
-				number, end, params.cluster_dims,
-				params.cluster_base);
-			j += 3;
-
-			if(block_ptr->state != RM_PARTITION_FREE)
+	while (nodes[j] != '\0') {
+		int mid = j   + params.cluster_dims + 1;
+		int fin = mid + params.cluster_dims + 1;
+		if (((nodes[j] == '[')   || (nodes[j] == ','))   &&
+		    ((nodes[mid] == 'x') || (nodes[mid] == '-')) &&
+		    ((nodes[fin] == ']') || (nodes[fin] == ','))) {
+			j++;	/* Skip leading '[' or ',' */
+			for (i = 0; i < params.cluster_dims; i++, j++)
+				start[i] = select_char2coord(nodes[j]);
+			j++;	/* Skip middle 'x' or '-' */
+			for (i = 0; i < params.cluster_dims; i++, j++)
+				end[i] = select_char2coord(nodes[j]);
+			if (block_ptr->state != BG_BLOCK_FREE) {
 				block_ptr->size += set_grid_bg(
 					start, end, count, 1);
-			else
+			} else {
 				block_ptr->size += set_grid_bg(
 					start, end, count, 0);
-			if(block_ptr->nodes[j] != ',')
+			}
+			if (nodes[j] != ',')
 				break;
-			j--;
-		} else if((block_ptr->nodes[j] >= '0'
-			   && block_ptr->nodes[j] <= '9')
-			  || (block_ptr->nodes[j] >= 'A'
-			      && block_ptr->nodes[j] <= 'Z')) {
-
-			number = xstrntol(block_ptr->nodes + j, &p,
-					  params.cluster_dims,
-					  params.cluster_base);
-			hostlist_parse_int_to_array(
-				number, start, params.cluster_dims,
-				params.cluster_base);
-			j+=3;
-			block_ptr->size += set_grid_bg(
-				start, start, count, 0);
-			if(block_ptr->nodes[j] != ',')
+		} else if (((nodes[j] >= '0') && (nodes[j] <= '9')) ||
+			   ((nodes[j] >= 'A') && (nodes[j] <= 'Z'))) {
+			for (i = 0; i < params.cluster_dims; i++, j++)
+				start[i] = select_char2coord(nodes[j]);
+			block_ptr->size += set_grid_bg(start, start, count, 1);
+			if (nodes[j] != ',')
 				break;
-			j--;
-		}
-		j++;
+		} else
+			j++;
 	}
-	return SLURM_SUCCESS;
 }
 
 static void _print_header_part(void)
 {
-	if(!params.commandline) {
+	if (!params.commandline) {
 		mvwprintw(text_win, main_ycord,
 			  main_xcord, "ID");
 		main_xcord += 4;
@@ -507,8 +476,8 @@ static void _print_header_part(void)
 			mvwprintw(text_win,
 				  main_ycord,
 				  main_xcord, "CONN");
-			main_xcord += 7;
-			if(params.cluster_flags & CLUSTER_FLAG_BGL) {
+			main_xcord += 8;
+			if (params.cluster_flags & CLUSTER_FLAG_BGL) {
 				mvwprintw(text_win,
 					  main_ycord,
 					  main_xcord, "NODE_USE");
@@ -519,7 +488,7 @@ static void _print_header_part(void)
 		mvwprintw(text_win, main_ycord,
 			  main_xcord, "NODES");
 		main_xcord += 7;
-		if(params.cluster_flags & CLUSTER_FLAG_BG)
+		if (params.cluster_flags & CLUSTER_FLAG_BG)
 			mvwprintw(text_win, main_ycord,
 				  main_xcord, "BP_LIST");
 		else
@@ -537,13 +506,13 @@ static void _print_header_part(void)
 			printf("STATE ");
 			printf("   JOBID ");
 			printf("    USER ");
-			printf(" CONN ");
-			if(params.cluster_flags & CLUSTER_FLAG_BGL)
+			printf("    CONN ");
+			if (params.cluster_flags & CLUSTER_FLAG_BGL)
 				printf(" NODE_USE ");
 		}
 
 		printf("NODES ");
-		if(params.cluster_flags & CLUSTER_FLAG_BG)
+		if (params.cluster_flags & CLUSTER_FLAG_BG)
 			printf("BP_LIST\n");
 		else
 			printf("NODELIST\n");
@@ -558,17 +527,17 @@ static int _print_text_part(partition_info_t *part_ptr,
 	int prefixlen;
 	int i = 0;
 	int width = 0;
-	char *nodes = NULL, time_buf[20];
+	char *nodes = NULL, time_buf[20], *conn_str = NULL;
 	char tmp_cnt[8];
 	char tmp_char[8];
 
-	if(params.cluster_flags & CLUSTER_FLAG_BG)
+	if (params.cluster_flags & CLUSTER_FLAG_BG)
 		convert_num_unit((float)part_ptr->total_nodes, tmp_cnt,
 				 sizeof(tmp_cnt), UNIT_NONE);
 	else
 		snprintf(tmp_cnt, sizeof(tmp_cnt), "%u", part_ptr->total_nodes);
 
-	if(!params.commandline) {
+	if (!params.commandline) {
 		mvwprintw(text_win,
 			  main_ycord,
 			  main_xcord, "%c",
@@ -632,7 +601,7 @@ static int _print_text_part(partition_info_t *part_ptr,
 						  db2_info_ptr->state));
 				main_xcord += 7;
 
-				if(db2_info_ptr->job_running > NO_JOB_RUNNING)
+				if (db2_info_ptr->job_running > NO_JOB_RUNNING)
 					snprintf(tmp_char, sizeof(tmp_char),
 						 "%d",
 						 db2_info_ptr->job_running);
@@ -652,14 +621,16 @@ static int _print_text_part(partition_info_t *part_ptr,
 					  db2_info_ptr->bg_user_name);
 				main_xcord += 9;
 
+				conn_str = conn_type_string_full(
+					db2_info_ptr->bg_conn_type);
 				mvwprintw(text_win,
 					  main_ycord,
-					  main_xcord, "%.5s",
-					  conn_type_string(
-						  db2_info_ptr->
-						  bg_conn_type));
-				main_xcord += 7;
-				if(params.cluster_flags & CLUSTER_FLAG_BGL) {
+					  main_xcord, "%.7s",
+					  conn_str);
+				xfree(conn_str);
+				main_xcord += 8;
+
+				if (params.cluster_flags & CLUSTER_FLAG_BGL) {
 					mvwprintw(text_win,
 						  main_ycord,
 						  main_xcord, "%.9s",
@@ -707,14 +678,14 @@ static int _print_text_part(partition_info_t *part_ptr,
 			nodes = part_ptr->allow_groups;
 		else
 			nodes = part_ptr->nodes;
-		i=0;
+		i = 0;
 		prefixlen = i;
 		while (nodes && nodes[i]) {
 			width = text_win->_maxx
 				- main_xcord;
 
-			if (!prefixlen && nodes[i] == '['
-			    && nodes[i - 1] == ',')
+			if (!prefixlen && (nodes[i] == '[') &&
+			    (nodes[i - 1] == ','))
 				prefixlen = i + 1;
 
 			if (nodes[i - 1] == ',' && (width - 12) <= 0) {
@@ -736,12 +707,12 @@ static int _print_text_part(partition_info_t *part_ptr,
 
 			i++;
 		}
-		if((params.display == BGPART) && db2_info_ptr
-		   && (db2_info_ptr->ionodes)) {
+		if ((params.display == BGPART) && db2_info_ptr &&
+		    (db2_info_ptr->ionode_str)) {
 			mvwprintw(text_win,
 				  main_ycord,
 				  main_xcord, "[%s]",
-				  db2_info_ptr->ionodes);
+				  db2_info_ptr->ionode_str);
 		}
 
 		main_xcord = 1;
@@ -785,7 +756,7 @@ static int _print_text_part(partition_info_t *part_ptr,
 				       bg_block_state_string(
 					       db2_info_ptr->state));
 
-				if(db2_info_ptr->job_running > NO_JOB_RUNNING)
+				if (db2_info_ptr->job_running > NO_JOB_RUNNING)
 					snprintf(tmp_char, sizeof(tmp_char),
 						 "%d",
 						 db2_info_ptr->job_running);
@@ -796,9 +767,12 @@ static int _print_text_part(partition_info_t *part_ptr,
 				printf("%8.8s ", tmp_char);
 				printf("%8.8s ", db2_info_ptr->bg_user_name);
 
-				printf("%5.5s ", conn_type_string(
-					       db2_info_ptr->bg_conn_type));
-				if(params.cluster_flags & CLUSTER_FLAG_BGL)
+				conn_str = conn_type_string_full(
+					db2_info_ptr->bg_conn_type);
+				printf("%8.8s ", conn_str);
+				xfree(conn_str);
+
+				if (params.cluster_flags & CLUSTER_FLAG_BGL)
 					printf("%9.9s ", node_use_string(
 						       db2_info_ptr->
 						       bg_node_use));
@@ -812,9 +786,9 @@ static int _print_text_part(partition_info_t *part_ptr,
 		else
 			nodes = part_ptr->nodes;
 
-		if((params.display == BGPART) && db2_info_ptr
-		   && (db2_info_ptr->ionodes)) {
-			printf("%s[%s]\n", nodes, db2_info_ptr->ionodes);
+		if ((params.display == BGPART) && db2_info_ptr &&
+		    (db2_info_ptr->ionode_str)) {
+			printf("%s[%s]\n", nodes, db2_info_ptr->ionode_str);
 		} else
 			printf("%s\n",nodes);
 	}
@@ -829,9 +803,9 @@ static void _block_list_del(void *object)
 		xfree(block_ptr->bg_user_name);
 		xfree(block_ptr->bg_block_name);
 		xfree(block_ptr->slurm_part_name);
-		xfree(block_ptr->nodes);
-		xfree(block_ptr->ionodes);
-		if(block_ptr->nodelist)
+		xfree(block_ptr->mp_str);
+		xfree(block_ptr->ionode_str);
+		if (block_ptr->nodelist)
 			list_destroy(block_ptr->nodelist);
 
 		xfree(block_ptr);
@@ -848,7 +822,6 @@ static void _nodelist_del(void *object)
 
 static int _list_match_all(void *object, void *key)
 {
-
 	return 1;
 }
 
@@ -858,7 +831,7 @@ static int _in_slurm_partition(List slurm_nodes, List bg_nodes)
 	ListIterator bg_itr;
 	int *coord = NULL;
 	int *slurm_coord = NULL;
-	int found = 0;
+	int found = 0, i;
 
 	bg_itr = list_iterator_create(bg_nodes);
 	slurm_itr = list_iterator_create(slurm_nodes);
@@ -866,21 +839,22 @@ static int _in_slurm_partition(List slurm_nodes, List bg_nodes)
 		list_iterator_reset(slurm_itr);
 		found = 0;
 		while ((slurm_coord = list_next(slurm_itr)) != NULL) {
-			if((coord[X] == slurm_coord[X])
-			   && (coord[Y] == slurm_coord[Y])
-			   && (coord[Z] == slurm_coord[Z])) {
-				found=1;
+			for (i = 0; i < params.cluster_dims; i++) {
+				if (coord[i] != slurm_coord[i])
+					break;
+			}
+			if (i >= params.cluster_dims) {
+				found = 1;
 				break;
 			}
 		}
-		if(!found) {
+		if (!found)
 			break;
-		}
 	}
 	list_iterator_destroy(slurm_itr);
 	list_iterator_destroy(bg_itr);
 
-	if(found)
+	if (found)
 		return 1;
 	else
 		return 0;
@@ -891,19 +865,19 @@ static int _print_rest(db2_block_info_t *block_ptr)
 {
 	partition_info_t part;
 
-	if(block_ptr->node_cnt == 0)
-		block_ptr->node_cnt = block_ptr->size;
-	part.total_nodes = block_ptr->node_cnt;
-	if(block_ptr->slurm_part_name)
+	if (block_ptr->cnode_cnt == 0)
+		block_ptr->cnode_cnt = block_ptr->size;
+	part.total_nodes = block_ptr->cnode_cnt;
+	if (block_ptr->slurm_part_name)
 		part.name = block_ptr->slurm_part_name;
 	else
 		part.name = "no part";
 
 	if (!block_ptr->printed)
 		return SLURM_SUCCESS;
-	part.allow_groups = block_ptr->nodes;
+	part.allow_groups = block_ptr->mp_str;
 	part.flags = (int) letters[block_ptr->letter_num%62];
-	if(!params.commandline) {
+	if (!params.commandline) {
 		wattron(text_win,
 			COLOR_PAIR(colors[block_ptr->letter_num%6]));
 		_print_text_part(&part, block_ptr);
@@ -915,85 +889,90 @@ static int _print_rest(db2_block_info_t *block_ptr)
 	return SLURM_SUCCESS;
 }
 
-static int _addto_nodelist(List nodelist, int *start, int *end)
+static int *_build_coord(int *current)
+{
+	int i;
+	int *coord = NULL;
+
+	coord = xmalloc(sizeof(int) * params.cluster_dims);
+	for (i = 0; i < params.cluster_dims; i++)
+		coord[i] = current[i];
+	return coord;
+}
+
+/* increment an array, return false if can't be incremented (reached limts) */
+static bool _incr_coord(int *start, int *end, int *current)
+{
+	int i;
+
+	for (i = 0; i < params.cluster_dims; i++) {
+		current[i]++;
+		if (current[i] <= end[i])
+			return true;
+		current[i] = start[i];
+	}
+	return false;
+}
+
+static void _addto_nodelist(List nodelist, int *start, int *end)
 {
 	int *coord = NULL;
-	int x,y,z;
-	if(end[X] >= DIM_SIZE[X]
-	   || end[Y] >= DIM_SIZE[Y]
-	   || end[Z] >= DIM_SIZE[Z]) {
+	int i;
+
+	coord = xmalloc(sizeof(int) * params.cluster_dims);
+	for (i = 0; i < params.cluster_dims; i++) {
+		xassert(start[i] >= 0);
+		coord[i] = start[i];
+		if (end[i] < dim_size[i])
+			continue;
 		fatal("It appears the slurm.conf file has changed since "
 		      "the last restart.\nThings are in an incompatible "
 		      "state, please restart the slurmctld.");
 	}
 
-	assert(start[X] >= 0);
-	assert(start[Y] >= 0);
-	assert(start[X] >= 0);
-
-	for (x = start[X]; x <= end[X]; x++) {
-		for (y = start[Y]; y <= end[Y]; y++) {
-			for (z = start[Z]; z <= end[Z]; z++) {
-				coord = xmalloc(sizeof(int)*3);
-				coord[X] = x;
-				coord[Y] = y;
-				coord[Z] = z;
-				list_append(nodelist, coord);
-			}
-		}
-	}
-	return 1;
+	do {
+		list_append(nodelist, _build_coord(coord));
+	} while (_incr_coord(start, end, coord));
+
+	xfree(coord);
 }
 
 static int _make_nodelist(char *nodes, List nodelist)
 {
-	int j = 0;
-	int number;
+	int i, j = 0;
 	int start[params.cluster_dims];
 	int end[params.cluster_dims];
-	char *p = '\0';
 
-	if(!nodelist)
+	if (!nodelist)
 		nodelist = list_create(_nodelist_del);
+
 	while (nodes[j] != '\0') {
-		if ((nodes[j] == '['
-		     || nodes[j] == ',')
-		    && (nodes[j+8] == ']'
-			|| nodes[j+8] == ',')
-		    && (nodes[j+4] == 'x'
-			|| nodes[j+4] == '-')) {
-			j++;
-			number = xstrntol(nodes + j, &p, params.cluster_dims,
-					  params.cluster_base);
-			hostlist_parse_int_to_array(
-				number, start, params.cluster_dims,
-				params.cluster_base);
-			j += 4;
-			number = xstrntol(nodes + j, &p, params.cluster_dims,
-					  params.cluster_base);
-			hostlist_parse_int_to_array(
-				number, end, params.cluster_dims,
-				params.cluster_base);
-			j += 3;
+		int mid = j   + params.cluster_dims + 1;
+		int fin = mid + params.cluster_dims + 1;
+		if (((nodes[j] == '[')   || (nodes[j] == ','))   &&
+		    ((nodes[mid] == 'x') || (nodes[mid] == '-')) &&
+		    ((nodes[fin] == ']') || (nodes[fin] == ','))) {
+			j++;	/* Skip leading '[' or ',' */
+			for (i = 0; i < params.cluster_dims; i++, j++)
+				start[i] = select_char2coord(nodes[j]);
+			j++;	/* Skip middle 'x' or '-' */
+			for (i = 0; i < params.cluster_dims; i++, j++)
+				end[i] = select_char2coord(nodes[j]);
 			_addto_nodelist(nodelist, start, end);
-			if(nodes[j] != ',')
+			if (nodes[j] != ',')
 				break;
 			j--;
-		} else if((nodes[j] >= '0' && nodes[j] <= '9')
-			  || (nodes[j] >= 'A' && nodes[j] <= 'Z')) {
-
-			number = xstrntol(nodes + j, &p, params.cluster_dims,
-					  params.cluster_base);
-			hostlist_parse_int_to_array(
-				number, start, params.cluster_dims,
-				params.cluster_base);
-			j+=3;
+		} else if (((nodes[j] >= '0') && (nodes[j] <= '9')) ||
+			   ((nodes[j] >= 'A') && (nodes[j] <= 'Z'))) {
+			for (i = 0; i < params.cluster_dims; i++, j++)
+				start[i] = select_char2coord(nodes[j]);
 			_addto_nodelist(nodelist, start, start);
-			if(nodes[j] != ',')
+			if (nodes[j] != ',')
 				break;
 			j--;
 		}
 		j++;
 	}
+
 	return 1;
 }
diff --git a/src/smap/reservation_functions.c b/src/smap/reservation_functions.c
index 2b9bf1725..2831ef72c 100644
--- a/src/smap/reservation_functions.c
+++ b/src/smap/reservation_functions.c
@@ -3,13 +3,13 @@
  *  of smap.
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
- *  Copyright (C) 2008-2010 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2011 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -69,7 +69,7 @@ extern void get_reservation(void)
 
 	if (error_code) {
 		if (quiet_flag != 1) {
-			if(!params.commandline) {
+			if (!params.commandline) {
 				mvwprintw(text_win,
 					  main_ycord, 1,
 					  "slurm_load_reservations: %s",
@@ -91,22 +91,22 @@ extern void get_reservation(void)
 		recs = 0;
 
 	if (!params.commandline) {
-		if((text_line_cnt+printed_resv) > count)
+		if ((text_line_cnt + printed_resv) > count)
 			text_line_cnt--;
 	}
 	printed_resv = 0;
 	count = 0;
-	if(params.hl)
+	if (params.hl)
 		nodes_req = get_requested_node_bitmap();
 	for (i = 0; i < recs; i++) {
 		resv = new_resv_ptr->reservation_array[i];
-		if(nodes_req) {
+		if (nodes_req) {
 			int overlap = 0;
 			bitstr_t *loc_bitmap = bit_alloc(bit_size(nodes_req));
 			inx2bitstr(loc_bitmap, resv.node_inx);
 			overlap = bit_overlap(loc_bitmap, nodes_req);
 			FREE_NULL_BITMAP(loc_bitmap);
-			if(!overlap)
+			if (!overlap)
 				continue;
 		}
 
@@ -116,21 +116,16 @@ extern void get_reservation(void)
 			active = 0;
 
 		if (active && (resv.node_inx[0] != -1)) {
-			if (((params.cluster_flags & CLUSTER_FLAG_BG) == 0) &&
-			    (params.cluster_dims == 3)) {
-				set_grid_inx2(resv.node_list, count);
-			} else {
-				int j = 0;
-				resv.node_cnt = 0;
-				while (resv.node_inx[j] >= 0) {
-					resv.node_cnt +=
-						(resv.node_inx[j + 1] + 1) -
-						 resv.node_inx[j];
-					set_grid_inx(resv.node_inx[j],
-						     resv.node_inx[j + 1],
-						     count);
-					j += 2;
-				}
+			int j = 0;
+			resv.node_cnt = 0;
+			while (resv.node_inx[j] >= 0) {
+				resv.node_cnt +=
+					(resv.node_inx[j + 1] + 1) -
+					 resv.node_inx[j];
+				set_grid_inx(resv.node_inx[j],
+					     resv.node_inx[j + 1],
+					     count);
+				j += 2;
 			}
 		}
 
@@ -153,8 +148,8 @@ extern void get_reservation(void)
 			}
 			count++;
 		}
-		if (count==128)
-			count=0;
+		if (count == 128)
+			count = 0;
 	}
 
 	if (params.commandline && params.iterate)
diff --git a/src/smap/smap.c b/src/smap/smap.c
index 146ae939a..b2e0ef04f 100644
--- a/src/smap/smap.c
+++ b/src/smap/smap.c
@@ -2,14 +2,15 @@
  *  smap.c - Report overall state the system
  *****************************************************************************
  *  Copyright (C) 2004-2007 The Regents of the University of California.
- *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2011 Lawrence Livermore National Security.
+ *  Copyright (C) 2011 SchedMD LLC.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
- *  Written by Danny Auble <da@llnl.gov>
+ *  Written by Danny Auble <da@schedmd.com>
  *
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -52,12 +53,16 @@ static int min_screen_width = 72;
 int text_line_cnt = 0;
 
 smap_parameters_t params;
+smap_system_t *smap_system_ptr;
 
 int quiet_flag = 0;
 int grid_line_cnt = 0;
 int max_display;
 int resize_screen = 0;
 
+int *dim_size = NULL;
+char letters[62];
+char colors[6];
 int main_xcord = 1;
 int main_ycord = 1;
 WINDOW *grid_win = NULL;
@@ -67,9 +72,11 @@ WINDOW *text_win = NULL;
  * Functions *
  ************/
 
-static int _get_option();
+static int  _get_option(void);
+static void _init_colors(void);
 static void *_resize_handler(int sig);
-static int _set_pairs();
+static int  _set_pairs(void);
+static void _smap_exit(int rc);
 
 int main(int argc, char *argv[])
 {
@@ -84,8 +91,6 @@ int main(int argc, char *argv[])
 	int end = 0;
 	int i;
 	int rc;
-	int mapset = 0;
-	//char *name;
 
 	log_init(xbasename(argv[0]), opts, SYSLOG_FACILITY_DAEMON, NULL);
 	parse_command_line(argc, argv);
@@ -94,97 +99,69 @@ int main(int argc, char *argv[])
 		log_alter(opts, SYSLOG_FACILITY_USER, NULL);
 	}
 
-	if(params.cluster_dims == 4) {
-		/* FIX ME: smap doesn't do anything correctly with
-		   more than 3 dims yet.
-		*/
-	} else if(params.cluster_dims == 3)
+	if (params.cluster_dims == 4) {
+		min_screen_width = 92;
+	} else if (params.cluster_dims == 3)
 		min_screen_width = 92;
 
-	while (slurm_load_node((time_t) NULL, &new_node_ptr, SHOW_ALL)) {
-		error_code = slurm_get_errno();
-		printf("slurm_load_node: %s\n", slurm_strerror(error_code));
-		if (params.display == COMMANDS) {
-			new_node_ptr = NULL;
-			break;		/* just continue */
+	/* no need for this if you are resolving */
+	if (!params.resolve) {
+		while (slurm_load_node((time_t) NULL,
+				       &new_node_ptr, SHOW_ALL)) {
+			error_code = slurm_get_errno();
+			printf("slurm_load_node: %s\n",
+			       slurm_strerror(error_code));
+			if (params.display == COMMANDS) {
+				new_node_ptr = NULL;
+				break;		/* just continue */
+			}
+			if (params.iterate == 0)
+				exit(1);
+			sleep(10);	/* keep trying to reconnect */
 		}
-		if (params.iterate == 0)
-			exit(1);
-		sleep(10);	/* keep trying to reconnect */
 	}
 
-	ba_init(new_node_ptr, 0);
-
-	if(params.resolve) {
-
-#ifdef HAVE_BG_FILES
-		if (!have_db2) {
-			printf("Required libraries can not be found "
-			       "to access the Bluegene system.\nPlease "
-			       "set your LD_LIBRARY_PATH correctly to "
-			       "point to them.\n");
-			goto part_fini;
-		}
-
-		if(!mapset)
-			mapset = set_bp_map();
-		if(params.resolve[0] != 'R') {
-			i = strlen(params.resolve);
-			i -= 3;
-			if(i<0) {
-				printf("No real block was entered\n");
-				goto part_fini;
-			}
-			char *rack_mid = find_bp_rack_mid(params.resolve+i);
-			if(rack_mid)
-				printf("X=%c Y=%c Z=%c resolves to %s\n",
-				       params.resolve[X+i],
-				       params.resolve[Y+i],
-				       params.resolve[Z+i],
-				       rack_mid);
-			else
-				printf("X=%c Y=%c Z=%c has no resolve\n",
-				       params.resolve[X+i],
-				       params.resolve[Y+i],
-				       params.resolve[Z+i]);
+#ifdef HAVE_BG
+	bg_configure_ba_init(new_node_ptr, 0);
+#endif
+	if (dim_size == NULL) {
+		dim_size = get_cluster_dims(new_node_ptr);
+		if ((dim_size == NULL) || (dim_size[0] < 1))
+			fatal("Invalid system dimensions");
+	}
+	_init_colors();
 
-		} else {
-			uint16_t *coord = find_bp_loc(params.resolve);
-			if(coord)
-				printf("%s resolves to X=%d Y=%d Z=%d\n",
-				       params.resolve,
-				       coord[X], coord[Y], coord[Z]);
-			else
-				printf("%s has no resolve.\n",
-				       params.resolve);
+	if (params.resolve) {
+		char *ret_str = resolve_mp(params.resolve);
+		if (ret_str) {
+			printf("%s", ret_str);
+			xfree(ret_str);
 		}
-part_fini:
-#else
-		printf("Must be physically on a BG System to resolve.\n");
-#endif
-		ba_fini();
-		xfree(params.resolve);
-		exit(0);
+		_smap_exit(0);	/* Calls exit(), no return */
 	}
-	if(!params.commandline) {
+	if (!params.commandline) {
 		int check_width = min_screen_width;
+
+		init_grid(new_node_ptr);
+
 		signal(SIGWINCH, (void (*)(int))_resize_handler);
 		initscr();
 
-		if(params.cluster_dims == 4) {
-			/* FIX ME: smap doesn't do anything correctly with
-			   more than 3 dims yet.
-			*/
-		} else if(params.cluster_dims == 3) {
-			height = DIM_SIZE[Y] * DIM_SIZE[Z] + DIM_SIZE[Y] + 3;
-			width = DIM_SIZE[X] + DIM_SIZE[Z] + 3;
+		if (params.cluster_dims == 4) {
+			height = dim_size[2] * dim_size[3] + dim_size[2] + 3;
+			width = (dim_size[1] + dim_size[3] + 1) * dim_size[0]
+				 + 2;
+			check_width += width;
+		} else if (params.cluster_dims == 3) {
+			height = dim_size[1] * dim_size[2] + dim_size[1] + 3;
+			width = dim_size[0] + dim_size[2] + 3;
 			check_width += width;
 		} else {
 			height = 10;
 			width = COLS;
 		}
 
-	        if (COLS < check_width || LINES < height) {
+	        if ((COLS < check_width) || (LINES < height)) {
 			endwin();
 			error("Screen is too small make sure the screen "
 			      "is at least %dx%d\n"
@@ -193,28 +170,27 @@ part_fini:
 			      height,
 			      COLS,
 			      LINES);
-			ba_fini();
-			exit(1);
+			_smap_exit(1);	/* Calls exit(), no return */
 		}
 
 		raw();
 		keypad(stdscr, TRUE);
 		noecho();
 		cbreak();
-		curs_set(1);
+		curs_set(0);
 		nodelay(stdscr, TRUE);
 		start_color();
 		_set_pairs();
 
 		grid_win = newwin(height, width, starty, startx);
 		max_display = grid_win->_maxy * grid_win->_maxx;
-		//scrollok(grid_win, TRUE);
 
-		if(params.cluster_dims == 4) {
-			/* FIX ME: smap doesn't do anything correctly with
-			   more than 3 dims yet.
-			*/
-		} else if(params.cluster_dims == 3) {
+		if (params.cluster_dims == 4) {
+			startx = width;
+			COLS -= 2;
+			width = COLS - width;
+			height = LINES;
+		} else if (params.cluster_dims == 3) {
 			startx = width;
 			COLS -= 2;
 			width = COLS - width;
@@ -228,22 +204,21 @@ part_fini:
 		text_win = newwin(height, width, starty, startx);
         }
 	while (!end) {
-		if(!params.commandline) {
+		if (!params.commandline) {
 			_get_option();
-		redraw:
-
+redraw:
 			clear_window(text_win);
 			clear_window(grid_win);
-			move(0,0);
+			move(0, 0);
 
-			init_grid(new_node_ptr);
 			main_xcord = 1;
 			main_ycord = 1;
 		}
 
-		if(!params.no_header)
+		if (!params.no_header)
 			print_date();
 
+		clear_grid();
 		switch (params.display) {
 		case JOBS:
 			get_job();
@@ -255,43 +230,35 @@ part_fini:
 			get_slurm_part();
 			break;
 		case COMMANDS:
-			if(params.cluster_flags & CLUSTER_FLAG_BG) {
-				if(!mapset) {
-					mapset = set_bp_map();
-					wclear(text_win);
-					//doupdate();
-					//move(0,0);
-				}
-				get_command();
-			} else {
-				error("Must be on a BG SYSTEM to "
-				      "run this command");
-				if(!params.commandline)
-					endwin();
-				ba_fini();
-				exit(1);
-			}
+#ifdef HAVE_BG
+			wclear(text_win);
+			get_command();
+#else
+			error("Must be on a real BG SYSTEM to "
+			      "run this command");
+			if (!params.commandline)
+				endwin();
+			_smap_exit(1);	/* Calls exit(), no return */
+#endif
 			break;
 		case BGPART:
-			if(params.cluster_flags & CLUSTER_FLAG_BG)
+			if (params.cluster_flags & CLUSTER_FLAG_BG)
 				get_bg_part();
 			else {
 				error("Must be on a BG SYSTEM to "
 				      "run this command");
-				if(!params.commandline)
+				if (!params.commandline)
 					endwin();
-				ba_fini();
-				exit(1);
+				_smap_exit(1);	/* Calls exit(), no return */
 			}
 			break;
 		}
 
-		if(!params.commandline) {
-			//wscrl(grid_win,-1);
+		if (!params.commandline) {
 			box(text_win, 0, 0);
 			wnoutrefresh(text_win);
 
-			print_grid(grid_line_cnt * (grid_win->_maxx-1));
+			print_grid();
 			box(grid_win, 0, 0);
 			wnoutrefresh(grid_win);
 
@@ -316,7 +283,7 @@ part_fini:
 					&new_node_ptr, SHOW_ALL);
 			}
 			if (error_code && (quiet_flag != 1)) {
-				if(!params.commandline) {
+				if (!params.commandline) {
 					mvwprintw(
 						text_win,
 						main_ycord,
@@ -335,9 +302,8 @@ part_fini:
 
 		if (params.iterate) {
 			for (i = 0; i < params.iterate; i++) {
-
 				sleep(1);
-				if(!params.commandline) {
+				if (!params.commandline) {
 					if ((rc = _get_option()) == 1)
 						goto redraw;
 					else if (resize_screen) {
@@ -351,17 +317,58 @@ part_fini:
 
 	}
 
-	if(!params.commandline) {
+	if (!params.commandline) {
 		nodelay(stdscr, FALSE);
 		getch();
 		endwin();
 	}
-	ba_fini();
 
-	exit(0);
+	_smap_exit(0);	/* Calls exit(), no return */
+	exit(0);	/* Redundant, but eliminates compiler warning */
 }
 
-static int _get_option()
+static void _init_colors(void)
+{
+	int x, y, z;
+	/* make the letters array only contain letters upper and lower (62) */
+	y = 'A';
+	for (x = 0; x < 62; x++) {
+		if (y == '[')
+			y = 'a';
+		else if (y == '{')
+			y = '0';
+		else if (y == ':')
+			y = 'A';
+		letters[x] = y;
+		y++;
+	}
+
+	z = 1;
+	for (x = 0; x < 6; x++) {
+		if (z == 4)
+			z++;
+		colors[x] = z;
+		z++;
+	}
+}
+
+/* Variation of exit() that releases memory as needed for memory leak test */
+static void _smap_exit(int rc)
+{
+#ifdef MEMORY_LEAK_DEBUG
+	free_grid();
+
+#ifdef HAVE_BG
+	bg_configure_ba_fini();
+#endif
+
+#endif
+	if (!params.commandline)
+		curs_set(1);
+	exit(rc);
+}
+
+static int _get_option(void)
 {
 	int ch;
 
@@ -371,28 +378,26 @@ static int _get_option()
 	case '-':
 	case '_':
 		text_line_cnt++;
-	return 1;
-	break;
+		return 1;
+		break;
 	case KEY_LEFT:
 	case '=':
 	case '+':
 		text_line_cnt--;
-	if(text_line_cnt<0) {
-		text_line_cnt = 0;
-		return 0;
-
-	}
-	return 1;
-	break;
-
+		if (text_line_cnt < 0) {
+			text_line_cnt = 0;
+			return 0;
+		}
+		return 1;
+		break;
 	case 'H':
 	case 'h':
-		if(params.all_flag)
+		if (params.all_flag)
 			params.all_flag = 0;
 		else
 			params.all_flag = 1;
 	return 1;
-	break;
+		break;
 	case 's':
 		text_line_cnt = 0;
 		grid_line_cnt = 0;
@@ -412,7 +417,7 @@ static int _get_option()
 		return 1;
 		break;
 	case 'b':
-		if(params.cluster_flags & CLUSTER_FLAG_BG) {
+		if (params.cluster_flags & CLUSTER_FLAG_BG) {
 			text_line_cnt = 0;
 			grid_line_cnt = 0;
 			params.display = BGPART;
@@ -420,16 +425,16 @@ static int _get_option()
 		}
 		break;
 	case 'c':
-		if(params.cluster_flags & CLUSTER_FLAG_BG) {
+		if (params.cluster_flags & CLUSTER_FLAG_BG) {
 			params.display = COMMANDS;
 			return 1;
 		}
 		break;
 	case 'u':
 	case KEY_UP:
-		if(!(params.cluster_flags & CLUSTER_FLAG_BG)) {
+		if (!(params.cluster_flags & CLUSTER_FLAG_BG)) {
 			grid_line_cnt--;
-			if(grid_line_cnt<0) {
+			if (grid_line_cnt<0) {
 				grid_line_cnt = 0;
 				return 0;
 			}
@@ -438,29 +443,28 @@ static int _get_option()
 	break;
 	case 'd':
 	case KEY_DOWN:
-		if(!(params.cluster_flags & CLUSTER_FLAG_BG)) {
+		if (!(params.cluster_flags & CLUSTER_FLAG_BG)) {
 			grid_line_cnt++;
-			if((((grid_line_cnt-2) * (grid_win->_maxx-1)) +
-			    max_display) > DIM_SIZE[X]) {
+			if ((((grid_line_cnt-2) * (grid_win->_maxx-1)) +
+			    max_display) > dim_size[0]) {
 				grid_line_cnt--;
 				return 0;
 			}
 			return 1;
 		}
-	break;
+		break;
 	case 'q':
 	case '\n':
 		endwin();
-	ba_fini();
-	exit(0);
-	break;
+		_smap_exit(0);	/* Calls exit(), no return */
+		break;
 	}
 	return 0;
 }
 
 static void *_resize_handler(int sig)
 {
-	int startx=0, starty=0;
+	int startx = 0, starty = 0;
 	int height = 40, width = 100;
 	int check_width = min_screen_width;
 	main_ycord = 1;
@@ -473,19 +477,19 @@ static void *_resize_handler(int sig)
 	delwin(text_win);
 
 	endwin();
-	COLS=0;
-	LINES=0;
+	COLS = 0;
+	LINES = 0;
 	initscr();
 	doupdate();	/* update now to make sure we get the new size */
-	getmaxyx(stdscr,LINES,COLS);
-
-	if(params.cluster_dims == 4) {
-		/* FIX ME: smap doesn't do anything correctly with
-		   more than 3 dims yet.
-		*/
-	} else if(params.cluster_dims == 3) {
-		height = DIM_SIZE[Y] * DIM_SIZE[Z] + DIM_SIZE[Y] + 3;
-		width = DIM_SIZE[X] + DIM_SIZE[Z] + 3;
+	getmaxyx(stdscr, LINES, COLS);
+
+	if (params.cluster_dims == 4) {
+		height = dim_size[2] * dim_size[3] + dim_size[2] + 3;
+		width = (dim_size[1] + dim_size[3] + 1) * dim_size[0];
+		check_width += width;
+	} else if (params.cluster_dims == 3) {
+		height = dim_size[1] * dim_size[2] + dim_size[1] + 3;
+		width = dim_size[0] + dim_size[2] + 3;
 		check_width += width;
 	} else {
 		height = 10;
@@ -497,18 +501,18 @@ static void *_resize_handler(int sig)
 		error("Screen is too small make sure "
 		      "the screen is at least %dx%d\n"
 		      "Right now it is %dx%d\n", width, height, COLS, LINES);
-		ba_fini();
-		exit(0);
+		_smap_exit(0);	/* Calls exit(), no return */
 	}
 
 	grid_win = newwin(height, width, starty, startx);
 	max_display = grid_win->_maxy * grid_win->_maxx;
 
-	if(params.cluster_dims == 4) {
-		/* FIX ME: smap doesn't do anything correctly with
-		   more than 3 dims yet.
-		*/
-	} else if(params.cluster_dims == 3) {
+	if (params.cluster_dims == 4) {
+		startx = width;
+		COLS -= 2;
+		width = COLS - width;
+		height = LINES;
+	} else if (params.cluster_dims == 3) {
 		startx = width;
 		COLS -= 2;
 		width = COLS - width;
@@ -533,16 +537,17 @@ static void *_resize_handler(int sig)
 		get_slurm_part();
 		break;
 	case COMMANDS:
-		if(params.cluster_flags & CLUSTER_FLAG_BG)
-			get_command();
+#ifdef HAVE_BG
+		get_command();
+#endif
 		break;
 	case BGPART:
-		if(params.cluster_flags & CLUSTER_FLAG_BG)
+		if (params.cluster_flags & CLUSTER_FLAG_BG)
 			get_bg_part();
 		break;
 	}
 
-	print_grid(grid_line_cnt * (grid_win->_maxx-1));
+	print_grid();
 	box(text_win, 0, 0);
 	box(grid_win, 0, 0);
 	wnoutrefresh(text_win);
@@ -552,7 +557,7 @@ static void *_resize_handler(int sig)
 	return NULL;
 }
 
-static int _set_pairs()
+static int _set_pairs(void)
 {
 	int x;
 
diff --git a/src/smap/smap.h b/src/smap/smap.h
index c7f99e4e3..35debf01e 100644
--- a/src/smap/smap.h
+++ b/src/smap/smap.h
@@ -2,13 +2,13 @@
  *  smap.h - definitions used for smap data functions
  *****************************************************************************
  *  Copyright (C) 2004-2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2011 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -105,11 +105,15 @@
 #include "src/common/hostlist.h"
 #include "src/common/list.h"
 #include "src/common/macros.h"
-#include "src/common/slurmdb_defs.h"
-#include "src/plugins/select/bluegene/block_allocator/block_allocator.h"
+#include "src/common/node_select.h"
 #include "src/common/slurm_protocol_api.h"
+#include "src/common/slurmdb_defs.h"
+#include "src/common/xmalloc.h"
+#include "src/common/xstring.h"
 
-#include "src/plugins/select/bluegene/wrap_rm_api.h"
+#include "src/plugins/select/bluegene/bg_enums.h"
+#include "src/plugins/select/bluegene/ba_common.h"
+#include "src/plugins/select/bluegene/configure_api.h"
 
 /* getopt_long options, integers but not characters */
 #define OPT_LONG_HELP	0x100
@@ -118,8 +122,6 @@
 
 enum { JOBS, RESERVATIONS, SLURMPART, BGPART, COMMANDS };
 
-//typedef void (*sighandler_t) (int);
-
 /* Input parameters */
 typedef struct {
 	bool all_flag;
@@ -128,6 +130,7 @@ typedef struct {
 	uint16_t cluster_dims;
 	uint32_t cluster_flags;
 	bool commandline;
+	char *command;
 	int display;
 	int iterate;
 	bitstr_t *io_bit;
@@ -137,9 +140,37 @@ typedef struct {
 	int verbose;
 } smap_parameters_t;
 
+/*
+ * smap_node_t: node within the allocation system.
+ */
+typedef struct {
+	/* coordinates of midplane */
+	uint16_t *coord;
+	/* coordinates on display screen */
+	int grid_xcord, grid_ycord;
+	/* color of letter used in smap */
+	int color;
+	/* midplane index used for easy look up of the miplane */
+	int index;
+	/* letter used in smap */
+	char letter;
+	int state;
+	/* set if using this midplane in a block */
+	uint16_t used;
+} smap_node_t;
+
+typedef struct {
+	int node_cnt;
+	smap_node_t **grid;
+} smap_system_t;
+
 extern WINDOW *grid_win;
 extern WINDOW *text_win;
 
+extern int *dim_size;
+extern char letters[62]; /* complete list of letters used in smap */
+extern char colors[6]; /* index into colors used for smap */
+
 extern int main_xcord;
 extern int main_ycord;
 
@@ -148,19 +179,22 @@ extern int text_line_cnt;
 
 extern void parse_command_line(int argc, char *argv[]);
 
-extern ba_system_t *ba_system_ptr;
+extern smap_system_t *smap_system_ptr;
 extern int quiet_flag;
 
 extern void init_grid(node_info_msg_t *node_info_ptr);
-extern int set_grid_inx(int start, int end, int count);
-extern int set_grid_inx2(char *node_names, int count);
+extern void clear_grid(void);
+extern void free_grid(void);
+extern int *get_cluster_dims(node_info_msg_t *node_info_ptr);
+extern void set_grid_inx(int start, int end, int count);
 extern int set_grid_bg(int *start, int *end, int count, int set);
-extern void print_grid(int dir);
-bitstr_t *get_requested_node_bitmap();
+extern void print_grid(void);
+bitstr_t *get_requested_node_bitmap(void);
 
 extern void parse_command_line(int argc, char *argv[]);
 extern void print_date(void);
 extern void clear_window(WINDOW *win);
+extern char *resolve_mp(char *desc);
 
 extern void get_slurm_part(void);
 extern void get_bg_part(void);
diff --git a/src/sprio/Makefile.in b/src/sprio/Makefile.in
index 02e785d33..8425bec24 100644
--- a/src/sprio/Makefile.in
+++ b/src/sprio/Makefile.in
@@ -67,6 +67,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -77,6 +78,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -122,7 +124,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -159,6 +164,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -216,6 +222,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -251,6 +258,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/sprio/opts.c b/src/sprio/opts.c
index ddfffe834..b5afc0a39 100644
--- a/src/sprio/opts.c
+++ b/src/sprio/opts.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -76,6 +76,22 @@ static void  _parse_token( char *token, char *field, int *field_size,
 static void  _print_options( void );
 static void  _usage( void );
 
+static void _opt_env(void)
+{
+	char *env_val;
+
+	if ((env_val = getenv("SLURM_CLUSTERS"))) {
+		if (!(params.clusters = slurmdb_get_info_cluster(env_val))) {
+			error("'%s' can't be reached now, "
+			      "or it is an invalid entry for "
+			      "SLURM_CLUSTERS.  Use 'sacctmgr --list "
+			      "cluster' to see available clusters.",
+			      env_val);
+			exit(1);
+		}
+	}
+}
+
 /*
  * parse_command_line
  */
@@ -102,6 +118,9 @@ parse_command_line( int argc, char* argv[] )
 		{NULL,         0,                 0, 0}
 	};
 
+	/* get defaults from environment */
+	_opt_env();
+
 	while((opt_char = getopt_long(argc, argv, "hj::lM:no:u:vVw",
 				      long_options, &option_index)) != -1) {
 		switch (opt_char) {
@@ -127,11 +146,13 @@ parse_command_line( int argc, char* argv[] )
 				list_destroy(params.clusters);
 			if(!(params.clusters =
 			     slurmdb_get_info_cluster(optarg))) {
-				error("'%s' invalid entry for --cluster",
+				error("'%s' can't be reached now, "
+				      "or it is an invalid entry for "
+				      "--cluster.  Use 'sacctmgr --list "
+				      "cluster' to see available clusters.",
 				      optarg);
 				exit(1);
 			}
-			working_cluster_rec = list_peek(params.clusters);
 			break;
 		case (int) 'n':
 			params.normalized = true;
@@ -175,8 +196,15 @@ parse_command_line( int argc, char* argv[] )
 		}
 	}
 
-	if ( params.verbose )
+	if (params.verbose)
 		_print_options();
+	if (params.clusters) {
+		if (list_count(params.clusters) > 1) {
+			fatal("Only one cluster can be used at a time with "
+			      "sprio");
+		}
+		working_cluster_rec = list_peek(params.clusters);
+	}
 }
 
 /*
diff --git a/src/sprio/print.c b/src/sprio/print.c
index c2aa270d9..75d6ede18 100644
--- a/src/sprio/print.c
+++ b/src/sprio/print.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -132,15 +132,15 @@ static int _print_str(char *str, int width, bool right, bool cut_output)
 	return printed;
 }
 
-int _print_int(int number, int width, bool right, bool cut_output)
+static int _print_int(double number, int width, bool right, bool cut_output)
 {
 	char buf[32];
 
-	snprintf(buf, 32, "%d", number);
+	snprintf(buf, 32, "%.0f", number);
 	return _print_str(buf, width, right, cut_output);
 }
 
-int _print_norm(double number, int width, bool right, bool cut_output)
+static int _print_norm(double number, int width, bool right, bool cut_output)
 {
 	char buf[32];
 
@@ -313,7 +313,7 @@ int _print_job_priority_weighted(priority_factors_object_t * job, int width,
 	else if (job == (priority_factors_object_t *) -1)
 		_print_str("", width, right, true);
 	else {
-		sprintf(temp, "%u", (uint32_t)_get_priority(job));
+		sprintf(temp, "%lld", (long long)_get_priority(job));
 		_print_str(temp, width, right, true);
 	}
 	if (suffix)
diff --git a/src/sprio/print.h b/src/sprio/print.h
index d842fa2fd..49c57cde6 100644
--- a/src/sprio/print.h
+++ b/src/sprio/print.h
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -39,7 +39,7 @@
 #ifndef _SPRIO_PRINT_H_
 #define _SPRIO_PRINT_H_
 
-#include <slurm/slurm.h>
+#include "slurm/slurm.h"
 
 #include "src/common/list.h"
 
diff --git a/src/sprio/sprio.c b/src/sprio/sprio.c
index f176eeaf6..18a34ebaa 100644
--- a/src/sprio/sprio.c
+++ b/src/sprio/sprio.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -75,24 +75,12 @@ static int _get_info(priority_factors_request_msg_t *factors_req,
 
 int main (int argc, char *argv[])
 {
-	char *temp = NULL;
+	char *prio_type = NULL;
 	int error_code = SLURM_SUCCESS;
 	priority_factors_request_msg_t req_msg;
 	priority_factors_response_msg_t *resp_msg = NULL;
 	log_options_t opts = LOG_OPTS_STDERR_ONLY ;
 
-	/* Check to see if we are running a supported accounting plugin */
-	temp = slurm_get_priority_type();
-	if(strcasecmp(temp, "priority/multifactor")) {
-		fprintf (stderr, "You are not running a supported "
-			 "priority plugin\n(%s).\n"
-			 "Only 'priority/multifactor' is supported.\n",
-			temp);
-		xfree(temp);
-		exit(1);
-	}
-	xfree(temp);
-
 	log_init(xbasename(argv[0]), opts, SYSLOG_FACILITY_USER, NULL);
 
 	parse_command_line( argc, argv );
@@ -115,6 +103,7 @@ int main (int argc, char *argv[])
 		weight_js   = slurm_ctl_conf_ptr->priority_weight_js;
 		weight_part = slurm_ctl_conf_ptr->priority_weight_part;
 		weight_qos  = slurm_ctl_conf_ptr->priority_weight_qos;
+		prio_type   = xstrdup(slurm_ctl_conf_ptr->priority_type);
 		slurm_free_ctl_conf(slurm_ctl_conf_ptr);
 	} else {
 		weight_age  = slurm_get_priority_weight_age();
@@ -122,8 +111,20 @@ int main (int argc, char *argv[])
 		weight_js   = slurm_get_priority_weight_job_size();
 		weight_part = slurm_get_priority_weight_partition();
 		weight_qos  = slurm_get_priority_weight_qos();
+		prio_type   = slurm_get_priority_type();
 	}
 
+	/* Check to see if we are running a supported accounting plugin */
+	if (strcasecmp(prio_type, "priority/multifactor")) {
+		fprintf (stderr, "You are not running a supported "
+			 "priority plugin\n(%s).\n"
+			 "Only 'priority/multifactor' is supported.\n",
+			 prio_type);
+		exit(1);
+	}
+	xfree(prio_type);
+
+
 	memset(&req_msg, 0, sizeof(priority_factors_request_msg_t));
 
 	if (params.jobs)
diff --git a/src/sprio/sprio.h b/src/sprio/sprio.h
index dd744c2bc..d8ef2a315 100644
--- a/src/sprio/sprio.h
+++ b/src/sprio/sprio.h
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -59,7 +59,7 @@
 #include <time.h>
 #include <unistd.h>
 
-#include <slurm/slurm.h>
+#include "slurm/slurm.h"
 
 #include "src/common/hostlist.h"
 #include "src/common/list.h"
diff --git a/src/squeue/Makefile.in b/src/squeue/Makefile.in
index a1c32ac81..f398864ce 100644
--- a/src/squeue/Makefile.in
+++ b/src/squeue/Makefile.in
@@ -67,6 +67,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -77,6 +78,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -123,7 +125,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -160,6 +165,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -217,6 +223,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -252,6 +259,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/squeue/opts.c b/src/squeue/opts.c
index f450845e8..2cf932645 100644
--- a/src/squeue/opts.c
+++ b/src/squeue/opts.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -112,6 +112,7 @@ parse_command_line( int argc, char* argv[] )
 		{"noheader",   no_argument,       0, 'h'},
 		{"partitions", required_argument, 0, 'p'},
 		{"qos",        required_argument, 0, 'q'},
+		{"reservation",required_argument, 0, 'R'},
 		{"sort",       required_argument, 0, 'S'},
 		{"start",      no_argument,       0, OPT_LONG_START},
 		{"steps",      optional_argument, 0, 's'},
@@ -130,7 +131,10 @@ parse_command_line( int argc, char* argv[] )
 		params.sort = xstrdup(env_val);
 	if ( ( env_val = getenv("SLURM_CLUSTERS") ) ) {
 		if (!(params.clusters = slurmdb_get_info_cluster(env_val))) {
-			error("'%s' invalid entry for SLURM_CLUSTERS",
+			error("'%s' can't be reached now, "
+			      "or it is an invalid entry for "
+			      "SLURM_CLUSTERS.  Use 'sacctmgr --list "
+			      "cluster' to see available clusters.",
 			      env_val);
 			exit(1);
 		}
@@ -138,7 +142,7 @@ parse_command_line( int argc, char* argv[] )
 	}
 
 	while ((opt_char = getopt_long(argc, argv,
-				       "A:ahi:j::ln:M:o:p:q:s::S:t:u:U:vV",
+				       "A:ahi:j::ln:M:o:p:q:R:s::S:t:u:U:vV",
 				       long_options, &option_index)) != -1) {
 		switch (opt_char) {
 		case (int)'?':
@@ -182,7 +186,10 @@ parse_command_line( int argc, char* argv[] )
 				list_destroy(params.clusters);
 			if (!(params.clusters =
 			    slurmdb_get_info_cluster(optarg))) {
-				error("'%s' invalid entry for --cluster",
+				error("'%s' can't be reached now, "
+				      "or it is an invalid entry for "
+				      "--cluster.  Use 'sacctmgr --list "
+				      "cluster' to see available clusters.",
 				      optarg);
 				exit(1);
 			}
@@ -218,6 +225,10 @@ parse_command_line( int argc, char* argv[] )
 			params.qos_list =
 				_build_str_list( params.qoss );
 			break;
+		case (int) 'R':
+			xfree(params.reservation);
+			params.reservation = xstrdup(optarg);
+			break;
 		case (int) 's':
 			if (optarg) {
 				params.steps = xstrdup(optarg);
@@ -545,6 +556,11 @@ extern int parse_format( char* format )
 				job_format_add_gres( params.format_list,
 						     field_size, right_justify,
 						     suffix );
+			else if (field[0] == 'B')
+				job_format_add_batch_host( params.format_list,
+							   field_size,
+							   right_justify,
+							   suffix );
 			else if (field[0] == 'c')
 				job_format_add_min_cpus( params.format_list,
 							 field_size,
@@ -731,6 +747,10 @@ extern int parse_format( char* format )
 						      field_size,
 						      right_justify,
 						      suffix );
+			else if (field[0] == 'W')
+				job_format_add_licenses( params.format_list,
+						     field_size,
+						     right_justify, suffix );
 			else if (field[0] == 'x')
 				job_format_add_exc_nodes( params.format_list,
 							  field_size,
@@ -819,7 +839,7 @@ _parse_token( char *token, char *field, int *field_size, bool *right_justify,
 
 /* print the parameters specified */
 static void
-_print_options()
+_print_options(void)
 {
 	ListIterator iterator;
 	int i;
@@ -837,21 +857,22 @@ _print_options()
 		hostlist[0] = '\0';
 
 	printf( "-----------------------------\n" );
-	printf( "all        = %s\n", params.all_flag ? "true" : "false");
-	printf( "format     = %s\n", params.format );
-	printf( "iterate    = %d\n", params.iterate );
-	printf( "job_flag   = %d\n", params.job_flag );
-	printf( "jobs       = %s\n", params.jobs );
-	printf( "max_cpus   = %d\n", params.max_cpus ) ;
-	printf( "nodes      = %s\n", hostlist ) ;
-	printf( "partitions = %s\n", params.partitions ) ;
-	printf( "sort       = %s\n", params.sort ) ;
-	printf( "start_flag = %d\n", params.start_flag );
-	printf( "states     = %s\n", params.states ) ;
-	printf( "step_flag  = %d\n", params.step_flag );
-	printf( "steps      = %s\n", params.steps );
-	printf( "users      = %s\n", params.users );
-	printf( "verbose    = %d\n", params.verbose );
+	printf( "all         = %s\n", params.all_flag ? "true" : "false");
+	printf( "format      = %s\n", params.format );
+	printf( "iterate     = %d\n", params.iterate );
+	printf( "job_flag    = %d\n", params.job_flag );
+	printf( "jobs        = %s\n", params.jobs );
+	printf( "max_cpus    = %d\n", params.max_cpus ) ;
+	printf( "nodes       = %s\n", hostlist ) ;
+	printf( "partitions  = %s\n", params.partitions ) ;
+	printf( "reservation = %s\n", params.reservation ) ;
+	printf( "sort        = %s\n", params.sort ) ;
+	printf( "start_flag  = %d\n", params.start_flag );
+	printf( "states      = %s\n", params.states ) ;
+	printf( "step_flag   = %d\n", params.step_flag );
+	printf( "steps       = %s\n", params.steps );
+	printf( "users       = %s\n", params.users );
+	printf( "verbose     = %d\n", params.verbose );
 
 	if ((params.verbose > 1) && params.job_list) {
 		i = 0;
@@ -1100,7 +1121,7 @@ static void _usage(void)
 	printf("\
 Usage: squeue [-i seconds] [-S fields] [--start] [-t states]\n\
 	      [-p partitions] [-n node] [-o format] [-u user_name]\n\
-	      [--usage] [-ahjlsv]\n");
+	      [-R reservation] [--usage] [-ahjlsv]\n");
 }
 
 static void _help(void)
@@ -1126,6 +1147,7 @@ Usage: squeue [OPTIONS]\n\
 				  to view, default is all partitions\n\
   -q, --qos=qos(s)                comma separated list of qos's\n\
 				  to view, default is all qos's\n\
+  -R, --reservation=name          reservation to view, default is all\n\
   -s, --step=step(s)              comma separated list of job steps\n\
 				  to view, default is all\n\
   -S, --sort=fields               comma separated list of fields to sort on\n\
diff --git a/src/squeue/print.c b/src/squeue/print.c
index c74723b24..aa6f6d1da 100644
--- a/src/squeue/print.c
+++ b/src/squeue/print.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -291,6 +291,21 @@ job_format_add_function(List list, int width, bool right, char *suffix,
 	return SLURM_SUCCESS;
 }
 
+int _print_job_batch_host(job_info_t * job, int width, bool right, char* suffix)
+{
+	if (job == NULL)	/* Print the Header instead */
+		_print_str("EXEC_HOST", width, right, true);
+	else {
+		char *eh = job->batch_flag ? job->batch_host : job->alloc_node;
+		char id[FORMAT_STRING_SIZE];
+
+		snprintf(id, FORMAT_STRING_SIZE, "%s", eh ? eh : "n/a");
+		_print_str(id, width, right, true);
+	}
+	if (suffix)
+		printf("%s", suffix);
+	return SLURM_SUCCESS;
+}
 
 int _print_job_job_id(job_info_t * job, int width, bool right, char* suffix)
 {
@@ -357,6 +372,18 @@ int _print_job_name(job_info_t * job, int width, bool right, char* suffix)
 	return SLURM_SUCCESS;
 }
 
+int _print_job_licenses(job_info_t * job, int width, bool right, char* suffix)
+{
+	if (job == NULL)	/* Print the Header instead */
+		_print_str("LICENSES", width, right, true);
+	else
+		_print_str(job->licenses, width, right, true);
+
+	if (suffix)
+		printf("%s", suffix);
+	return SLURM_SUCCESS;
+}
+
 int _print_job_wckey(job_info_t * job, int width, bool right, char* suffix)
 {
 	if (job == NULL)	/* Print the Header instead */
@@ -589,8 +616,23 @@ int _print_job_nodes(job_info_t * job, int width, bool right, char* suffix)
 		if(params.cluster_flags & CLUSTER_FLAG_BG)
 			title = "BP_LIST";
 		_print_str(title, width, right, false);
-	} else
-		_print_nodes(job->nodes, width, right, false);
+	} else {
+		char *nodes = xstrdup(job->nodes);
+		char *ionodes = NULL;
+
+		if (nodes) {
+			select_g_select_jobinfo_get(job->select_jobinfo,
+						    SELECT_JOBDATA_IONODES,
+						    &ionodes);
+		}
+		if (ionodes) {
+			xstrfmtcat(nodes, "[%s]", ionodes);
+			xfree(ionodes);
+			_print_str(nodes, width, right, false);
+		} else
+			_print_nodes(nodes, width, right, false);
+		xfree(nodes);
+	}
 
 	if (suffix)
 		printf("%s", suffix);
@@ -620,10 +662,9 @@ int _print_job_reason_list(job_info_t * job, int width, bool right,
 		char *nodes = xstrdup(job->nodes);
 		char *ionodes = NULL;
 
-		if(params.cluster_flags & CLUSTER_FLAG_BG)
-			select_g_select_jobinfo_get(job->select_jobinfo,
-						    SELECT_JOBDATA_IONODES,
-						    &ionodes);
+		select_g_select_jobinfo_get(job->select_jobinfo,
+					    SELECT_JOBDATA_IONODES,
+					    &ionodes);
 		if(ionodes) {
 			xstrfmtcat(nodes, "[%s]", ionodes);
 			xfree(ionodes);
@@ -1271,8 +1312,24 @@ int _print_step_nodes(job_step_info_t * step, int width, bool right,
 			title = "BP_LIST";
 
 		_print_str(title, width, right, false);
-	} else
-		_print_nodes(step->nodes, width, right, false);
+	} else {
+		char *nodes = xstrdup(step->nodes);
+		char *ionodes = NULL;
+
+		if (nodes) {
+			select_g_select_jobinfo_get(step->select_jobinfo,
+						    SELECT_JOBDATA_IONODES,
+						    &ionodes);
+		}
+		if (ionodes) {
+			xstrfmtcat(nodes, "[%s]", ionodes);
+			xfree(ionodes);
+			_print_str(nodes, width, right, false);
+		} else
+			_print_nodes(nodes, width, right, false);
+		xfree(nodes);
+	}
+
 	if (suffix)
 		printf("%s", suffix);
 	return SLURM_SUCCESS;
@@ -1423,6 +1480,13 @@ static int _filter_job(job_info_t * job)
 			return 6;
 	}
 
+	if (params.reservation) {
+		if ((job->resv_name == NULL) ||
+		    (strcmp(job->resv_name, params.reservation))) {
+			return 7;
+		}
+	}
+
 	return 0;
 }
 
diff --git a/src/squeue/print.h b/src/squeue/print.h
index 3051074b2..1aa3c9d9f 100644
--- a/src/squeue/print.h
+++ b/src/squeue/print.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -40,7 +40,7 @@
 #ifndef _SQUEUE_PRINT_H_
 #define _SQUEUE_PRINT_H_
 
-#include <slurm/slurm.h>
+#include "slurm/slurm.h"
 
 #include "src/common/list.h"
 
@@ -81,6 +81,8 @@ int job_format_add_function(List list, int width, bool right_justify,
 			    char *suffix,
 			    int (*function) (job_info_t *, int, bool, char*));
 
+#define job_format_add_batch_host(list,wid,right,suffix) \
+	job_format_add_function(list,wid,right,suffix,_print_job_batch_host)
 #define job_format_add_job_id(list,wid,right,suffix) \
 	job_format_add_function(list,wid,right,suffix,_print_job_job_id)
 #define job_format_add_partition(list,wid,right,suffix) \
@@ -93,6 +95,8 @@ int job_format_add_function(List list, int width, bool right_justify,
 	job_format_add_function(list,wid,right,prefix,_print_job_reason_list)
 #define job_format_add_name(list,wid,right,suffix) \
 	job_format_add_function(list,wid,right,suffix,_print_job_name)
+#define job_format_add_licenses(list,wid,right,suffix) \
+	job_format_add_function(list,wid,right,suffix,_print_job_licenses)
 #define job_format_add_wckey(list,wid,right,suffix) \
 	job_format_add_function(list,wid,right,suffix,_print_job_wckey)
 #define job_format_add_user_name(list,wid,right,suffix) \
@@ -180,9 +184,9 @@ int job_format_add_function(List list, int width, bool right_justify,
 /*****************************************************************************
  * Job Line Print Functions
  *****************************************************************************/
-int _print_job_job_id(job_info_t * job, int width, bool right_justify,
+int _print_job_batch_host(job_info_t * job, int width, bool right_justify,
 			char* suffix);
-int _print_job_partition(job_info_t * job, int width, bool right_justify,
+int _print_job_job_id(job_info_t * job, int width, bool right_justify,
 			char* suffix);
 int _print_job_prefix(job_info_t * job, int width, bool right_justify,
 			char* suffix);
@@ -192,6 +196,8 @@ int _print_job_reason_list(job_info_t * job, int width, bool right_justify,
 			char* suffix);
 int _print_job_name(job_info_t * job, int width, bool right_justify,
 			char* suffix);
+int _print_job_licenses(job_info_t * job, int width, bool right_justify,
+			char* suffix);
 int _print_job_wckey(job_info_t * job, int width, bool right_justify,
 			char* suffix);
 int _print_job_user_id(job_info_t * job, int width, bool right_justify,
diff --git a/src/squeue/sort.c b/src/squeue/sort.c
index 1eedb2e4a..210753974 100644
--- a/src/squeue/sort.c
+++ b/src/squeue/sort.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -51,6 +51,7 @@
 
 static bool reverse_order;
 
+static int _sort_job_by_batch_host(void *void1, void *void2);
 static int _sort_job_by_gres(void *void1, void *void2);
 static int _sort_job_by_group_id(void *void1, void *void2);
 static int _sort_job_by_group_name(void *void1, void *void2);
@@ -88,6 +89,8 @@ static int _sort_step_by_time_used(void *void1, void *void2);
 static int _sort_step_by_user_id(void *void1, void *void2);
 static int _sort_step_by_user_name(void *void1, void *void2);
 
+static time_t now;
+
 /*****************************************************************************
  * Global Print Functions
  *****************************************************************************/
@@ -95,6 +98,7 @@ static int _sort_step_by_user_name(void *void1, void *void2);
 void sort_job_list(List job_list)
 {
 	int i;
+	now = time(NULL);
 
 	if (params.sort == NULL)
 		params.sort = xstrdup("P,t,-p"); /* Partition,state,priority */
@@ -107,7 +111,9 @@ void sort_job_list(List job_list)
 		if ((i > 0) && (params.sort[i-1] == '-'))
 			reverse_order = true;
 
-		if      (params.sort[i] == 'b')
+		if      (params.sort[i] == 'B')
+			list_sort(job_list, _sort_job_by_batch_host);
+		else if (params.sort[i] == 'b')
 			list_sort(job_list, _sort_job_by_gres);
 		else if (params.sort[i] == 'c')
 			;	/* sort_job_by_min_cpus_per_node */
@@ -187,6 +193,7 @@ void sort_jobs_by_start_time (List jobs)
 void sort_step_list(List step_list)
 {
 	int i;
+	now = time(NULL);
 
 	if (params.sort == NULL)
 		params.sort = xstrdup("P,i");	/* Partition, step id */
@@ -222,6 +229,24 @@ void sort_step_list(List step_list)
 /*****************************************************************************
  * Local Job Sort Functions
  *****************************************************************************/
+static int _sort_job_by_batch_host(void *void1, void *void2)
+{
+	int diff;
+	job_info_t *job1 = (job_info_t *) void1;
+	job_info_t *job2 = (job_info_t *) void2;
+	char *val1 = "", *val2 = "";
+
+	if (job1->batch_host)
+		val1 = job1->batch_host;
+	if (job2->batch_host)
+		val2 = job2->batch_host;
+	diff = strcmp(val1, val2);
+
+	if (reverse_order)
+		diff = -diff;
+	return diff;
+}
+
 static int _sort_job_by_gres(void *void1, void *void2)
 {
 	int diff;
@@ -562,8 +587,6 @@ static int _sort_job_by_time_limit(void *void1, void *void2)
 
 static uint32_t _get_start_time(job_info_t *job)
 {
-	time_t now = time(NULL);
-
 	if (job->start_time == (time_t) 0)
 		return 0xffffffff;
 	if ((job->job_state == JOB_PENDING) && (job->start_time < now))
@@ -830,9 +853,8 @@ static int _sort_step_by_time_used(void *void1, void *void2)
 	int diff;
 	job_step_info_t *step1 = (job_step_info_t *) void1;
 	job_step_info_t *step2 = (job_step_info_t *) void2;
-	time_t now, used1, used2;
+	time_t used1, used2;
 
-	now = time(NULL);
 	used1 = difftime(now, step1->start_time);
 	used2 = difftime(now, step2->start_time);
 	diff = used1 - used2;
diff --git a/src/squeue/squeue.c b/src/squeue/squeue.c
index a51517f67..6f89397ec 100644
--- a/src/squeue/squeue.c
+++ b/src/squeue/squeue.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -117,6 +117,8 @@ static int _multi_cluster(List clusters)
 	int rc = 0, rc2;
 
 	itr = list_iterator_create(clusters);
+	if (!itr)
+		fatal("list_iterator_create: malloc failure");
 	while ((working_cluster_rec = list_next(itr))) {
 		if (first)
 			first = false;
diff --git a/src/squeue/squeue.h b/src/squeue/squeue.h
index 1fe24ac10..0ccf99622 100644
--- a/src/squeue/squeue.h
+++ b/src/squeue/squeue.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -60,7 +60,7 @@
 #include <time.h>
 #include <unistd.h>
 
-#include <slurm/slurm.h>
+#include "slurm/slurm.h"
 
 #include "src/common/hostlist.h"
 #include "src/common/list.h"
@@ -95,6 +95,7 @@ struct squeue_parameters {
 	hostset_t nodes;
 	char* partitions;
 	char* qoss;
+	char* reservation;
 	char* sort;
 	char* states;
 	char* steps;
diff --git a/src/sreport/Makefile.in b/src/sreport/Makefile.in
index c50fd540d..8149ad48f 100644
--- a/src/sreport/Makefile.in
+++ b/src/sreport/Makefile.in
@@ -64,6 +64,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -74,6 +75,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -122,7 +124,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -159,6 +164,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -216,6 +222,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -251,6 +258,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/sreport/assoc_reports.c b/src/sreport/assoc_reports.c
index 550964277..55bd7c148 100644
--- a/src/sreport/assoc_reports.c
+++ b/src/sreport/assoc_reports.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/sreport/assoc_reports.h b/src/sreport/assoc_reports.h
index 472dba46d..67fcf6bfe 100644
--- a/src/sreport/assoc_reports.h
+++ b/src/sreport/assoc_reports.h
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/sreport/cluster_reports.c b/src/sreport/cluster_reports.c
index f58ed54c3..2f4a71358 100644
--- a/src/sreport/cluster_reports.c
+++ b/src/sreport/cluster_reports.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -80,7 +80,6 @@ static int _set_wckey_cond(int *start, int argc, char *argv[],
 	int command_len = 0;
 	int local_cluster_flag = all_clusters_flag;
 	time_t start_time, end_time;
-	int option = 0;
 
 	if(!wckey_cond) {
 		error("No wckey_cond given");
@@ -99,8 +98,7 @@ static int _set_wckey_cond(int *start, int argc, char *argv[],
 			command_len=strlen(argv[i]);
 		else {
 			command_len=end-1;
-			if(argv[i][end] == '=') {
-				option = (int)argv[i][end-1];
+			if (argv[i][end] == '=') {
 				end++;
 			}
 		}
@@ -185,7 +183,6 @@ static int _set_assoc_cond(int *start, int argc, char *argv[],
 	int local_cluster_flag = all_clusters_flag;
 	time_t start_time, end_time;
 	int command_len = 0;
-	int option = 0;
 
 	if(!assoc_cond) {
 		error("We need an slurmdb_association_cond to call this");
@@ -203,8 +200,7 @@ static int _set_assoc_cond(int *start, int argc, char *argv[],
 			command_len=strlen(argv[i]);
 		else {
 			command_len=end-1;
-			if(argv[i][end] == '=') {
-				option = (int)argv[i][end-1];
+			if (argv[i][end] == '=') {
 				end++;
 			}
 		}
@@ -288,7 +284,6 @@ static int _set_cluster_cond(int *start, int argc, char *argv[],
 	int local_cluster_flag = all_clusters_flag;
 	time_t start_time, end_time;
 	int command_len = 0;
-	int option = 0;
 
 	if(!cluster_cond) {
 		error("We need an slurmdb_cluster_cond to call this");
@@ -306,8 +301,7 @@ static int _set_cluster_cond(int *start, int argc, char *argv[],
 			command_len=strlen(argv[i]);
 		else {
 			command_len=end-1;
-			if(argv[i][end] == '=') {
-				option = (int)argv[i][end-1];
+			if (argv[i][end] == '=') {
 				end++;
 			}
 		}
diff --git a/src/sreport/cluster_reports.h b/src/sreport/cluster_reports.h
index 220e75056..dee5ee638 100644
--- a/src/sreport/cluster_reports.h
+++ b/src/sreport/cluster_reports.h
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/sreport/common.c b/src/sreport/common.c
index 88f9ac2df..cc9eaef1a 100644
--- a/src/sreport/common.c
+++ b/src/sreport/common.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/sreport/job_reports.c b/src/sreport/job_reports.c
index 64e2cf1f4..c63246953 100644
--- a/src/sreport/job_reports.c
+++ b/src/sreport/job_reports.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -605,7 +605,6 @@ extern int job_sizes_grouped_by_top_acct(int argc, char *argv[])
 
 	print_field_t *field = NULL;
 	print_field_t total_field;
-	uint32_t total_time = 0;
 	slurmdb_report_time_format_t temp_format;
 
 	List slurmdb_report_cluster_grouping_list = NULL;
@@ -660,7 +659,6 @@ extern int job_sizes_grouped_by_top_acct(int argc, char *argv[])
 		printf("----------------------------------------"
 		       "----------------------------------------\n");
 	}
-	total_time = job_cond->usage_end - job_cond->usage_start;
 
 	header_list = list_create(NULL);
 	list_append_list(header_list, print_fields_list);
@@ -815,7 +813,6 @@ extern int job_sizes_grouped_by_wckey(int argc, char *argv[])
 
 	print_field_t *field = NULL;
 	print_field_t total_field;
-	uint32_t total_time = 0;
 	slurmdb_report_time_format_t temp_format;
 
 	List slurmdb_report_cluster_grouping_list = NULL;
@@ -870,7 +867,6 @@ extern int job_sizes_grouped_by_wckey(int argc, char *argv[])
 		printf("----------------------------------------"
 		       "----------------------------------------\n");
 	}
-	total_time = job_cond->usage_end - job_cond->usage_start;
 
 	header_list = list_create(NULL);
 	list_append_list(header_list, print_fields_list);
@@ -1026,7 +1022,6 @@ extern int job_sizes_grouped_by_top_acct_and_wckey(int argc, char *argv[])
 
 	print_field_t *field = NULL;
 	print_field_t total_field;
-	uint32_t total_time = 0;
 	slurmdb_report_time_format_t temp_format;
 
 	List slurmdb_report_cluster_grouping_list = NULL;
@@ -1081,7 +1076,6 @@ extern int job_sizes_grouped_by_top_acct_and_wckey(int argc, char *argv[])
 		printf("----------------------------------------"
 		       "----------------------------------------\n");
 	}
-	total_time = job_cond->usage_end - job_cond->usage_start;
 
 	header_list = list_create(NULL);
 	list_append_list(header_list, print_fields_list);
diff --git a/src/sreport/job_reports.h b/src/sreport/job_reports.h
index c82936a03..39af5ba50 100644
--- a/src/sreport/job_reports.h
+++ b/src/sreport/job_reports.h
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/sreport/resv_reports.c b/src/sreport/resv_reports.c
index a200a949c..319839d89 100644
--- a/src/sreport/resv_reports.c
+++ b/src/sreport/resv_reports.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -78,7 +78,6 @@ static int _set_resv_cond(int *start, int argc, char *argv[],
 	int local_cluster_flag = all_clusters_flag;
 	time_t start_time, end_time;
 	int command_len = 0;
-	int option = 0;
 
 	if(!resv_cond) {
 		error("We need an slurmdb_reservation_cond to call this");
@@ -95,8 +94,7 @@ static int _set_resv_cond(int *start, int argc, char *argv[],
 			command_len=strlen(argv[i]);
 		else {
 			command_len=end-1;
-			if(argv[i][end] == '=') {
-				option = (int)argv[i][end-1];
+			if (argv[i][end] == '=') {
 				end++;
 			}
 		}
diff --git a/src/sreport/resv_reports.h b/src/sreport/resv_reports.h
index 5668a78d8..8464a9583 100644
--- a/src/sreport/resv_reports.h
+++ b/src/sreport/resv_reports.h
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/sreport/sreport.c b/src/sreport/sreport.c
index db623e373..6089d0d94 100644
--- a/src/sreport/sreport.c
+++ b/src/sreport/sreport.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/sreport/sreport.h b/src/sreport/sreport.h
index 18bbbf1d1..468a633ee 100644
--- a/src/sreport/sreport.h
+++ b/src/sreport/sreport.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -76,9 +76,8 @@
 #  endif
 #endif  /* HAVE_INTTYPES_H */
 
-#include <slurm/slurm.h>
-
-#include <slurm/slurmdb.h>
+#include "slurm/slurm.h"
+#include "slurm/slurmdb.h"
 
 #include "src/common/jobacct_common.h"
 #include "src/common/parse_time.h"
diff --git a/src/sreport/user_reports.c b/src/sreport/user_reports.c
index 410e247a4..cd54a7153 100644
--- a/src/sreport/user_reports.c
+++ b/src/sreport/user_reports.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/sreport/user_reports.h b/src/sreport/user_reports.h
index 1ccd7ddab..d373a95d9 100644
--- a/src/sreport/user_reports.h
+++ b/src/sreport/user_reports.h
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/srun/Makefile.in b/src/srun/Makefile.in
index 51fa56890..bf2924b6d 100644
--- a/src/srun/Makefile.in
+++ b/src/srun/Makefile.in
@@ -64,6 +64,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -74,6 +75,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -122,7 +124,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -159,6 +164,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -216,6 +222,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -251,6 +258,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/srun/allocate.c b/src/srun/allocate.c
index 4b15519bd..2f65c7bc3 100644
--- a/src/srun/allocate.c
+++ b/src/srun/allocate.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -64,16 +64,9 @@
 
 #ifdef HAVE_BG
 #include "src/common/node_select.h"
-#include "src/plugins/select/bluegene/plugin/bg_boot_time.h"
-#include "src/plugins/select/bluegene/wrap_rm_api.h"
+#include "src/plugins/select/bluegene/bg_enums.h"
 #endif
 
-#ifdef HAVE_CRAY
-#include "src/common/node_select.h"
-#include "src/common/basil_resv_conf.h"
-#endif
-
-
 #define MAX_ALLOC_WAIT	60	/* seconds */
 #define MIN_ALLOC_WAIT	5	/* seconds */
 #define MAX_RETRIES	10
@@ -96,7 +89,6 @@ static int sig_array[] = {
 /*
  * Static Prototypes
  */
-static void  _intr_handler(int signo);
 static void _set_pending_job_id(uint32_t job_id);
 static void _signal_while_allocating(int signo);
 
@@ -108,11 +100,6 @@ static int _blocks_dealloc(void);
 static int _wait_nodes_ready(resource_allocation_response_msg_t *alloc);
 #endif
 
-#ifdef HAVE_CRAY
-static int  _claim_reservation(resource_allocation_response_msg_t *alloc);
-#endif
-
-
 static sig_atomic_t destroy_job = 0;
 
 static void _set_pending_job_id(uint32_t job_id)
@@ -220,15 +207,6 @@ static bool _retry(void)
 	return true;
 }
 
-/*
- * SIGINT handler while waiting for resources to become available.
- */
-static void
-_intr_handler(int signo)
-{
-	destroy_job = 1;
-}
-
 #ifdef HAVE_BG
 /* returns 1 if job and nodes are ready for job to begin, 0 otherwise */
 static int _wait_bluegene_block_ready(resource_allocation_response_msg_t *alloc)
@@ -314,8 +292,7 @@ static int _blocks_dealloc(void)
 		return -1;
 	}
 	for (i=0; i<new_bg_ptr->record_count; i++) {
-		if(new_bg_ptr->block_array[i].state
-		   == RM_PARTITION_DEALLOCATING) {
+		if(new_bg_ptr->block_array[i].state == BG_BLOCK_TERM) {
 			rc = 1;
 			break;
 		}
@@ -379,25 +356,6 @@ static int _wait_nodes_ready(resource_allocation_response_msg_t *alloc)
 }
 #endif	/* HAVE_BG */
 
-#ifdef HAVE_CRAY
-/* returns 1 if job and nodes are ready for job to begin, 0 otherwise */
-static int _claim_reservation(resource_allocation_response_msg_t *alloc)
-{
-	int rc = 0;
-	uint32_t resv_id = 0;
-
-	select_g_select_jobinfo_get(alloc->select_jobinfo,
-				    SELECT_JOBDATA_RESV_ID, &resv_id);
-	if (!resv_id)
-		return rc;
-	if (basil_resv_conf(resv_id, alloc->job_id) == SLURM_SUCCESS)
-		rc = 1;
-
-	return rc;
-}
-#endif
-
-
 int
 allocate_test(void)
 {
@@ -434,6 +392,7 @@ allocate_nodes(void)
 	callbacks.ping = _ping_handler;
 	callbacks.timeout = _timeout_handler;
 	callbacks.job_complete = _job_complete_handler;
+	callbacks.job_suspend = NULL;
 	callbacks.user_msg = _user_msg_handler;
 	callbacks.node_fail = _node_fail_handler;
 
@@ -476,14 +435,6 @@ allocate_nodes(void)
 				      "boot of the nodes.");
 			goto relinquish;
 		}
-#endif
-#ifdef HAVE_CRAY
-		if (!_claim_reservation(resp)) {
-			if(!destroy_job)
-				error("Something is wrong with the ALPS "
-				      "resource reservation.");
-			goto relinquish;
-		}
 #endif
 	} else if (destroy_job) {
 		goto relinquish;
@@ -585,6 +536,7 @@ job_desc_msg_create_from_opts (void)
 {
 	job_desc_msg_t *j = xmalloc(sizeof(*j));
 	hostlist_t hl = NULL;
+	int i;
 
 	slurm_init_job_desc_msg(j);
 
@@ -687,15 +639,16 @@ job_desc_msg_create_from_opts (void)
 		j->job_id	= opt.jobid;
 #ifdef HAVE_BG
 	if (opt.geometry[0] > 0) {
-		int i;
 		for (i=0; i<SYSTEM_DIMENSIONS; i++)
 			j->geometry[i] = opt.geometry[i];
 	}
 #endif
 
-	if (opt.conn_type != (uint16_t) NO_VAL)
-		j->conn_type = opt.conn_type;
-
+	for (i=0; i<HIGHEST_DIMENSIONS; i++) {
+		if (opt.conn_type[i] == (uint16_t)NO_VAL)
+			break;
+		j->conn_type[i] = opt.conn_type[i];
+	}
 	if (opt.reboot)
 		j->reboot = 1;
 	if (opt.no_rotate)
@@ -729,8 +682,10 @@ job_desc_msg_create_from_opts (void)
 	if (opt.overcommit) {
 		j->min_cpus    = opt.min_nodes;
 		j->overcommit  = opt.overcommit;
-	} else
+	} else if (opt.cpus_set)
 		j->min_cpus    = opt.ntasks * opt.cpus_per_task;
+	else
+		j->min_cpus    = opt.ntasks;
 	if (opt.ntasks_set)
 		j->num_tasks   = opt.ntasks;
 
@@ -750,6 +705,11 @@ job_desc_msg_create_from_opts (void)
 	if (opt.warn_time)
 		j->warn_time = opt.warn_time;
 
+	if (opt.req_switch >= 0)
+		j->req_switch = opt.req_switch;
+	if (opt.wait4switch >= 0)
+		j->wait4switch = opt.wait4switch;
+
 	/* srun uses the same listening port for the allocation response
 	 * message as all other messages */
 	j->alloc_resp_port = slurmctld_comm_addr.port;
@@ -778,7 +738,6 @@ extern int
 create_job_step(srun_job_t *job, bool use_all_cpus)
 {
 	int i, rc;
-	SigFunc *oquitf = NULL, *ointf = NULL, *otermf = NULL;
 	unsigned long my_sleep = 0;
 	time_t begin_time;
 
@@ -797,7 +756,8 @@ create_job_step(srun_job_t *job, bool use_all_cpus)
 		       opt.min_nodes, opt.max_nodes);
 		return -1;
 	}
-#ifndef HAVE_FRONT_END
+#if !defined HAVE_FRONT_END || (defined HAVE_BGQ)
+//#if !defined HAVE_FRONT_END || (defined HAVE_BGQ && defined HAVE_BG_FILES)
 	if (opt.min_nodes && (opt.min_nodes > job->nhosts)) {
 		error ("Minimum node count > allocated node count (%d > %d)",
 		       opt.min_nodes, job->nhosts);
@@ -817,19 +777,23 @@ create_job_step(srun_job_t *job, bool use_all_cpus)
 
 	if (opt.mem_per_cpu != NO_VAL)
 		job->ctx_params.mem_per_cpu = opt.mem_per_cpu;
-	job->ctx_params.gres = opt.gres;
+	if (opt.gres)
+		job->ctx_params.gres = opt.gres;
+	else
+		job->ctx_params.gres = getenv("SLURM_STEP_GRES");
 
 	if (use_all_cpus)
 		job->ctx_params.cpu_count = job->cpu_count;
 	else if (opt.overcommit)
 		job->ctx_params.cpu_count = job->ctx_params.min_nodes;
+	else if (opt.cpus_set)
+		job->ctx_params.cpu_count = opt.ntasks * opt.cpus_per_task;
 	else
-		job->ctx_params.cpu_count = opt.ntasks*opt.cpus_per_task;
+		job->ctx_params.cpu_count = opt.ntasks;
 
 	job->ctx_params.relative = (uint16_t)opt.relative;
 	job->ctx_params.ckpt_interval = (uint16_t)opt.ckpt_interval;
 	job->ctx_params.ckpt_dir = opt.ckpt_dir;
-	job->ctx_params.gres = opt.gres;
 	job->ctx_params.exclusive = (uint16_t)opt.exclusive;
 	if (opt.immediate == 1)
 		job->ctx_params.immediate = (uint16_t)opt.immediate;
@@ -915,9 +879,10 @@ create_job_step(srun_job_t *job, bool use_all_cpus)
 				info("Job step creation temporarily disabled, "
 				     "retrying");
 			}
-			ointf  = xsignal(SIGINT,  _intr_handler);
-			otermf  = xsignal(SIGTERM, _intr_handler);
-			oquitf  = xsignal(SIGQUIT, _intr_handler);
+			xsignal_unblock(sig_array);
+			for (i = 0; sig_array[i]; i++)
+				xsignal(sig_array[i], _signal_while_allocating);
+
 			my_sleep = (getpid() % 1000) * 100 + 100000;
 		} else {
 			verbose("Job step creation still disabled, retrying");
@@ -925,11 +890,13 @@ create_job_step(srun_job_t *job, bool use_all_cpus)
 		}
 		/* sleep 0.1 to 29 secs with exponential back-off */
 		usleep(my_sleep);
+		if (destroy_job) {
+			/* cancelled by signal */
+			break;
+		}
 	}
 	if (i > 0) {
-		xsignal(SIGINT,  ointf);
-		xsignal(SIGQUIT, oquitf);
-		xsignal(SIGTERM, otermf);
+		xsignal_block(sig_array);
 		if (destroy_job) {
 			info("Cancelled pending job step");
 			return -1;
diff --git a/src/srun/allocate.h b/src/srun/allocate.h
index 8e3a2b2d6..c8002d9d8 100644
--- a/src/srun/allocate.h
+++ b/src/srun/allocate.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -40,7 +40,7 @@
 #ifndef _HAVE_ALLOCATE_H
 #define _HAVE_ALLOCATE_H
 
-#include <slurm/slurm.h>
+#include "slurm/slurm.h"
 
 #include "src/srun/srun_job.h"
 
diff --git a/src/srun/debugger.c b/src/srun/debugger.c
index 4b97fdd89..81ec9d9bc 100644
--- a/src/srun/debugger.c
+++ b/src/srun/debugger.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/srun/debugger.h b/src/srun/debugger.h
index f44ef511a..b6eebd10b 100644
--- a/src/srun/debugger.h
+++ b/src/srun/debugger.h
@@ -48,12 +48,12 @@
  * them, and will be confused if you change them.
  */
 
-#ifdef HAVE_BG_FILES
+#if defined HAVE_BG_FILES
 /* On bluegene systems the below structure is defined here.  So as to
  * not confict with allocate.c including this file we will just use the
  * definition there instead of defining it here.
  */
-# include "src/plugins/select/bluegene/wrap_rm_api.h"
+# include "src/plugins/select/bluegene/bg_enums.h"
 #else
 typedef struct {
   char * host_name;           /* Something we can pass to inet_addr */
diff --git a/src/srun/fname.c b/src/srun/fname.c
index 440935ebd..a25404973 100644
--- a/src/srun/fname.c
+++ b/src/srun/fname.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/srun/fname.h b/src/srun/fname.h
index 0d0023d67..67b1a3dba 100644
--- a/src/srun/fname.h
+++ b/src/srun/fname.h
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/srun/multi_prog.c b/src/srun/multi_prog.c
index b65fe0b6f..8965b714b 100644
--- a/src/srun/multi_prog.c
+++ b/src/srun/multi_prog.c
@@ -14,7 +14,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/srun/multi_prog.h b/src/srun/multi_prog.h
index 14cedbc6b..5ffb58c4d 100644
--- a/src/srun/multi_prog.h
+++ b/src/srun/multi_prog.h
@@ -10,7 +10,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/srun/opt.c b/src/srun/opt.c
index 97d563f79..d3702a3e3 100644
--- a/src/srun/opt.c
+++ b/src/srun/opt.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -181,6 +181,8 @@
 #define LONG_OPT_DEBUG_SLURMD    0x14f
 #define LONG_OPT_TIME_MIN        0x150
 #define LONG_OPT_GRES            0x151
+#define LONG_OPT_ALPS            0x152
+#define LONG_OPT_REQ_SWITCH      0x153
 
 extern char **environ;
 
@@ -191,6 +193,9 @@ int error_exit = 1;
 int immediate_exit = 1;
 
 /*---- forward declarations of static functions  ----*/
+#if defined HAVE_BG_FILES && HAVE_BGQ
+static const char *runjob_loc = "/bgsys/drivers/ppcfloor/hlcs/bin/runjob";
+#endif
 
 typedef struct env_vars env_vars_t;
 
@@ -321,7 +326,7 @@ static void _opt_default()
 
 	opt.ntasks = 1;
 	opt.ntasks_set = false;
-	opt.cpus_per_task = 1;
+	opt.cpus_per_task = 0;
 	opt.cpus_set = false;
 	opt.min_nodes = 1;
 	opt.max_nodes = 0;
@@ -377,7 +382,7 @@ static void _opt_default()
 	opt.shared = (uint16_t)NO_VAL;
 	opt.exclusive = false;
 	opt.no_kill = false;
-	opt.kill_bad_exit = false;
+	opt.kill_bad_exit = NO_VAL;
 
 	opt.immediate	= 0;
 
@@ -411,11 +416,12 @@ static void _opt_default()
 	/* Default launch msg timeout           */
 	opt.msg_timeout     = slurm_get_msg_timeout();
 
-	for (i=0; i<SYSTEM_DIMENSIONS; i++)
+	for (i=0; i<HIGHEST_DIMENSIONS; i++) {
+		opt.conn_type[i]    = (uint16_t) NO_VAL;
 		opt.geometry[i]	    = (uint16_t) NO_VAL;
+	}
 	opt.reboot          = false;
 	opt.no_rotate	    = false;
-	opt.conn_type	    = (uint16_t) NO_VAL;
 	opt.blrtsimage = NULL;
 	opt.linuximage = NULL;
 	opt.mloaderimage = NULL;
@@ -448,6 +454,8 @@ static void _opt_default()
 	opt.acctg_freq = -1;
 	opt.reservation = NULL;
 	opt.wckey = NULL;
+	opt.req_switch = -1;
+	opt.wait4switch = -1;
 }
 
 /*---[ env var processing ]-----------------------------------------------*/
@@ -485,6 +493,7 @@ env_vars_t env_vars[] = {
 {"SLURM_EPILOG",        OPT_STRING,     &opt.epilog,        NULL             },
 {"SLURM_EXCLUSIVE",     OPT_EXCLUSIVE,  NULL,               NULL             },
 {"SLURM_GEOMETRY",      OPT_GEOMETRY,   NULL,               NULL             },
+{"SLURM_GRES",          OPT_STRING,     &opt.gres,          NULL             },
 {"SLURM_IMMEDIATE",     OPT_IMMEDIATE,  NULL,               NULL             },
 {"SLURM_IOLOAD_IMAGE",  OPT_STRING,     &opt.ramdiskimage,  NULL             },
 /* SLURM_JOBID was used in slurm version 1.3 and below, it is now vestigial */
@@ -495,6 +504,8 @@ env_vars_t env_vars[] = {
 {"SLURM_LABELIO",       OPT_INT,        &opt.labelio,       NULL             },
 {"SLURM_LINUX_IMAGE",   OPT_STRING,     &opt.linuximage,    NULL             },
 {"SLURM_MEM_BIND",      OPT_MEM_BIND,   NULL,               NULL             },
+{"SLURM_MEM_PER_CPU",	OPT_INT,	&opt.mem_per_cpu,   NULL             },
+{"SLURM_MEM_PER_NODE",	OPT_INT,	&opt.pn_min_memory, NULL             },
 {"SLURM_MLOADER_IMAGE", OPT_STRING,     &opt.mloaderimage,  NULL             },
 {"SLURM_MPI_TYPE",      OPT_MPI,        NULL,               NULL             },
 {"SLURM_NCORES_PER_SOCKET",OPT_NCORES,  NULL,               NULL             },
@@ -529,6 +540,8 @@ env_vars_t env_vars[] = {
 {"SLURM_WAIT",          OPT_INT,        &opt.max_wait,      NULL             },
 {"SLURM_WCKEY",         OPT_STRING,     &opt.wckey,         NULL             },
 {"SLURM_WORKING_DIR",   OPT_STRING,     &opt.cwd,           &opt.cwd_set     },
+{"SLURM_REQ_SWITCH",    OPT_INT,        &opt.req_switch,    NULL             },
+{"SLURM_WAIT4SWITCH",   OPT_INT,        &opt.wait4switch,   NULL             },
 {NULL, 0, NULL, NULL}
 };
 
@@ -638,7 +651,7 @@ _process_env_var(env_vars_t *e, const char *val)
 		break;
 
 	case OPT_CONN_TYPE:
-		opt.conn_type = verify_conn_type(val);
+		verify_conn_type(val, opt.conn_type);
 		break;
 
 	case OPT_NO_ROTATE:
@@ -709,7 +722,7 @@ _get_int(const char *arg, const char *what, bool positive)
 
 static void set_options(const int argc, char **argv)
 {
-	int opt_char, option_index = 0, max_val = 0;
+	int opt_char, option_index = 0, max_val = 0, tmp_int;
 	struct utsname name;
 	static struct option long_options[] = {
 		{"attach",        no_argument,       0, 'a'},
@@ -730,7 +743,7 @@ static void set_options(const int argc, char **argv)
 		{"join",          no_argument,       0, 'j'},
 		{"job-name",      required_argument, 0, 'J'},
 		{"no-kill",       no_argument,       0, 'k'},
-		{"kill-on-bad-exit", no_argument,    0, 'K'},
+		{"kill-on-bad-exit", optional_argument, 0, 'K'},
 		{"label",         no_argument,       0, 'l'},
 		{"licenses",      required_argument, 0, 'L'},
 		{"distribution",  required_argument, 0, 'm'},
@@ -755,6 +768,7 @@ static void set_options(const int argc, char **argv)
 		{"disable-status", no_argument,      0, 'X'},
 		{"no-allocate",   no_argument,       0, 'Z'},
 		{"acctg-freq",       required_argument, 0, LONG_OPT_ACCTG_FREQ},
+		{"alps",             required_argument, 0, LONG_OPT_ALPS},
 		{"begin",            required_argument, 0, LONG_OPT_BEGIN},
 		{"blrts-image",      required_argument, 0, LONG_OPT_BLRTS_IMAGE},
 		{"checkpoint",       required_argument, 0, LONG_OPT_CHECKPOINT},
@@ -809,6 +823,7 @@ static void set_options(const int argc, char **argv)
 		{"signal",	     required_argument, 0, LONG_OPT_SIGNAL},
 		{"slurmd-debug",     required_argument, 0, LONG_OPT_DEBUG_SLURMD},
 		{"sockets-per-node", required_argument, 0, LONG_OPT_SOCKETSPERNODE},
+		{"switches",         optional_argument, 0, LONG_OPT_REQ_SWITCH},
 		{"task-epilog",      required_argument, 0, LONG_OPT_TASK_EPILOG},
 		{"task-prolog",      required_argument, 0, LONG_OPT_TASK_PROLOG},
 		{"tasks-per-node",   required_argument, 0, LONG_OPT_NTASKSPERNODE},
@@ -821,8 +836,9 @@ static void set_options(const int argc, char **argv)
 		{"wckey",            required_argument, 0, LONG_OPT_WCKEY},
 		{NULL,               0,                 0, 0}
 	};
-	char *opt_string = "+aA:bB:c:C:d:D:e:Eg:hHi:IjJ:kKlL:m:n:N:"
+	char *opt_string = "+aA:bB:c:C:d:D:e:Eg:hHi:I::jJ:kK::lL:m:n:N:"
 		"o:Op:P:qQr:Rst:T:uU:vVw:W:x:XZ";
+	char *pos_delimit;
 #ifdef HAVE_PTY_H
 	char *tmp_str;
 #endif
@@ -833,7 +849,7 @@ static void set_options(const int argc, char **argv)
 		exit(error_exit);
 	}
 
-	if(opt.progname == NULL)
+	if (opt.progname == NULL)
 		opt.progname = xbasename(argv[0]);
 	else
 		error("opt.progname is already set.");
@@ -875,9 +891,14 @@ static void set_options(const int argc, char **argv)
 			}
 			break;
 		case (int)'c':
+			tmp_int = _get_int(optarg, "cpus-per-task", false);
+			if (opt.cpus_set && (tmp_int > opt.cpus_per_task)) {
+				info("Job step's --cpus-per-task value exceeds"
+				     " that of job (%d > %d). Job step may "
+				     "never run.", tmp_int, opt.cpus_per_task);
+			}
 			opt.cpus_set = true;
-			opt.cpus_per_task =
-				_get_int(optarg, "cpus-per-task", false);
+			opt.cpus_per_task = tmp_int;
 			break;
 		case (int)'C':
 			xfree(opt.constraints);
@@ -944,7 +965,10 @@ static void set_options(const int argc, char **argv)
 			opt.no_kill = true;
 			break;
 		case (int)'K':
-			opt.kill_bad_exit = true;
+			if (optarg)
+				opt.kill_bad_exit = strtol(optarg, NULL, 10);
+			else
+				opt.kill_bad_exit = 1;
 			break;
 		case (int)'l':
 			opt.labelio = true;
@@ -1212,7 +1236,7 @@ static void set_options(const int argc, char **argv)
 			_usage();
 			exit(0);
 		case LONG_OPT_CONNTYPE:
-			opt.conn_type = verify_conn_type(optarg);
+			verify_conn_type(optarg, opt.conn_type);
 			break;
 		case LONG_OPT_TEST_ONLY:
 			opt.test_only = true;
@@ -1445,6 +1469,19 @@ static void set_options(const int argc, char **argv)
 			xfree(opt.gres);
 			opt.gres = xstrdup(optarg);
 			break;
+		case LONG_OPT_ALPS:
+			verbose("Not running ALPS. --alps option ignored.");
+			break;
+		case LONG_OPT_REQ_SWITCH:
+			pos_delimit = strstr(optarg,"@");
+			if (pos_delimit != NULL) {
+				pos_delimit[0] = '\0';
+				pos_delimit++;
+				opt.wait4switch = time_str2mins(pos_delimit) * 60;
+			}
+			opt.req_switch = _get_int(optarg, "switches",
+				true);
+			break;
 		default:
 			if (spank_process_option (opt_char, optarg) < 0) {
 				exit(error_exit);
@@ -1501,7 +1538,7 @@ static void _load_multi(int *argc, char **argv)
  */
 static void _opt_args(int argc, char **argv)
 {
-	int i;
+	int i, command_pos = 0;
 	char **rest = NULL;
 
 	set_options(argc, argv);
@@ -1565,10 +1602,136 @@ static void _opt_args(int argc, char **argv)
 		while (rest[opt.argc] != NULL)
 			opt.argc++;
 	}
+#if defined HAVE_BGQ
+	/* A bit of setup for IBM's runjob.  runjob only has so many
+	   options, so it isn't that bad.
+	*/
+	int32_t node_cnt;
+	if (opt.max_nodes)
+		node_cnt = opt.max_nodes;
+	else
+		node_cnt = opt.min_nodes;
+
+	if (!opt.ntasks_set) {
+		if (opt.ntasks_per_node != NO_VAL)
+			opt.ntasks = node_cnt * opt.ntasks_per_node;
+		else
+			opt.ntasks = node_cnt;
+		opt.ntasks_set = true;
+	} else {
+		if (opt.nodes_set) {
+			if (node_cnt > opt.ntasks) {
+				info("You asked for %d nodes, but only "
+				     "%d tasks, resetting node count to %u",
+				     node_cnt, opt.ntasks, opt.ntasks);
+				opt.max_nodes = opt.min_nodes = node_cnt
+					= opt.ntasks;
+			}
+		} else if (node_cnt > opt.ntasks)
+			opt.max_nodes = opt.min_nodes = node_cnt = opt.ntasks;
+
+		if (!opt.ntasks_per_node || (opt.ntasks_per_node == NO_VAL))
+			opt.ntasks_per_node = opt.ntasks / node_cnt;
+		else if ((opt.ntasks / opt.ntasks_per_node) != node_cnt)
+			fatal("You are requesting for %d tasks, but are "
+			      "also asking for %d tasks per node and %d nodes.",
+			      opt.ntasks, opt.ntasks_per_node, node_cnt);
+	}
+
+#if defined HAVE_BG_FILES
+	if (!opt.test_only) {
+	 	/* Since we need the opt.argc to allocate the opt.argv array
+		 * we need to do this before actually messing with
+		 * things. All the extra options added to argv will be
+		 * handled after the allocation. */
+
+		/* Default location of the actual command to be ran. We always
+		 * have to add 3 options no matter what. */
+		command_pos = 3;
+
+		if (opt.ntasks_per_node != NO_VAL)
+			command_pos += 2;
+		if (opt.ntasks_set)
+			command_pos += 2;
+		if (opt.cwd_set)
+			command_pos += 2;
+		if (opt.labelio)
+			command_pos += 2;
+		opt.argc += command_pos;
+	}
+#endif
+
+#endif
+
 	opt.argv = (char **) xmalloc((opt.argc + 1) * sizeof(char *));
-	for (i = 0; i < opt.argc; i++)
-		opt.argv[i] = xstrdup(rest[i]);
-	opt.argv[i] = NULL;	/* End of argv's (for possible execv) */
+
+#if defined HAVE_BGQ
+#if defined HAVE_BG_FILES
+	if (!opt.test_only) {
+		i = 0;
+		/* Instead of running the actual job, the slurmstepd will be
+		   running runjob to run the job.  srun is just wrapping it
+		   making things all kosher.
+		*/
+		opt.argv[i++] = xstrdup(runjob_loc);
+		if (opt.ntasks_per_node != NO_VAL) {
+			opt.argv[i++]  = xstrdup("-p");
+			opt.argv[i++]  = xstrdup_printf("%d",
+							opt.ntasks_per_node);
+		}
+
+		if (opt.ntasks_set) {
+			opt.argv[i++]  = xstrdup("--np");
+			opt.argv[i++]  = xstrdup_printf("%d", opt.ntasks);
+		}
+
+		if (opt.cwd_set) {
+			opt.argv[i++]  = xstrdup("--cwd");
+			opt.argv[i++]  = xstrdup(opt.cwd);
+		}
+
+		if (opt.labelio) {
+			opt.argv[i++]  = xstrdup("--label");
+			opt.argv[i++]  = xstrdup("short");
+			/* Since we are getting labels from runjob. and we
+			 * don't want 2 sets (slurm's will always be 000)
+			 * remove it case. */
+			opt.labelio = 0;
+		}
+
+		/* Export all the environment so the runjob_mux will get the
+		 * correct info about the job, namely the block. */
+		opt.argv[i++] = xstrdup("--env_all");
+
+		/* With runjob anything after a ':' is treated as the actual
+		 * job, which in this case is exactly what it is.  So, very
+		 * sweet. */
+		opt.argv[i++] = xstrdup(":");
+
+		/* Sanity check to make sure we set it up correctly. */
+		if (i != command_pos) {
+			fatal ("command_pos is set to %d but we are going to "
+			       "put it at %d, please update src/srun/opt.c",
+			       command_pos, i);
+		}
+
+		/* Set default job name to the executable name rather than
+		 * "runjob" */
+		if (!opt.job_name_set_cmd && (command_pos < opt.argc)) {
+			opt.job_name_set_cmd = true;
+			opt.job_name = xstrdup(rest[0]);
+		}
+	}
+#endif
+	if (opt.test_only && !opt.jobid_set && (opt.jobid != NO_VAL)) {
+		/* Do not perform allocate test, only disable use of "runjob" */
+		opt.test_only = false;
+	}
+
+#endif
+	for (i = command_pos; i < opt.argc; i++)
+		opt.argv[i] = xstrdup(rest[i-command_pos]);
+	opt.argv[opt.argc] = NULL;	/* End of argv's (for possible execv) */
 
 	if (opt.multi_prog) {
 		if (opt.argc < 1) {
@@ -1576,17 +1739,21 @@ static void _opt_args(int argc, char **argv)
 			exit(error_exit);
 		}
 		_load_multi(&opt.argc, opt.argv);
-	}
-	else if (opt.argc > 0) {
+	} else if (opt.argc > command_pos) {
 		char *fullpath;
 
-		if ((fullpath = search_path(opt.cwd, opt.argv[0], false, X_OK))) {
-			xfree(opt.argv[0]);
-			opt.argv[0] = fullpath;
+		if ((fullpath = search_path(opt.cwd,
+					    opt.argv[command_pos],
+					    false, X_OK))) {
+			xfree(opt.argv[command_pos]);
+			opt.argv[command_pos] = fullpath;
 		}
 	}
+	/* for (i=0; i<opt.argc; i++) */
+	/* 	info("%d is '%s'", i, opt.argv[i]); */
 
-	if (opt.multi_prog && verify_multi_name(opt.argv[0], opt.ntasks))
+	if (opt.multi_prog && verify_multi_name(opt.argv[command_pos],
+						opt.ntasks))
 		exit(error_exit);
 }
 
@@ -1609,7 +1776,8 @@ static bool _opt_verify(void)
 	 */
 	if (opt.slurmd_debug + LOG_LEVEL_ERROR > LOG_LEVEL_DEBUG2) {
 		opt.slurmd_debug = LOG_LEVEL_DEBUG2 - LOG_LEVEL_ERROR;
-		info("Using srun's max debug increment of %d", opt.slurmd_debug);
+		info("Using srun's max debug increment of %d",
+		     opt.slurmd_debug);
 	}
 
 	if (opt.quiet && _verbose) {
@@ -1638,13 +1806,13 @@ static bool _opt_verify(void)
 		verified = false;
 	}
 
-	if (opt.pn_min_cpus < opt.cpus_per_task)
+	if (opt.cpus_set && (opt.pn_min_cpus < opt.cpus_per_task))
 		opt.pn_min_cpus = opt.cpus_per_task;
 
 	if (opt.argc > 0)
 		opt.cmd_name = base_name(opt.argv[0]);
 
-	if(!opt.nodelist) {
+	if (!opt.nodelist) {
 		if((opt.nodelist = xstrdup(getenv("SLURM_HOSTFILE")))) {
 			/* make sure the file being read in has a / in
 			   it to make sure it is a file in the
@@ -1729,7 +1897,7 @@ static bool _opt_verify(void)
 		verified = false;
 	}
 
-	if (opt.cpus_per_task < 0) {
+	if (opt.cpus_set && (opt.cpus_per_task <= 0)) {
 		error("invalid number of cpus per task (-c %d)",
 		      opt.cpus_per_task);
 		verified = false;
@@ -1742,7 +1910,7 @@ static bool _opt_verify(void)
 		verified = false;
 	}
 
-#ifdef HAVE_BGL
+#if defined(HAVE_BGL)
 	if (opt.blrtsimage && strchr(opt.blrtsimage, ' ')) {
 		error("invalid BlrtsImage given '%s'", opt.blrtsimage);
 		verified = false;
@@ -1953,7 +2121,7 @@ static bool _opt_verify(void)
 	return verified;
 }
 
-/* Initialize the the spank_job_env based upon environment variables set
+/* Initialize the spank_job_env based upon environment variables set
  *	via salloc or sbatch commands */
 extern void init_spank_env(void)
 {
@@ -2109,8 +2277,9 @@ static char *print_constraints()
 
 #define tf_(b) (b == true) ? "true" : "false"
 
-static void _opt_list()
+static void _opt_list(void)
 {
+	int i;
 	char *str;
 
 	info("defined options for program `%s'", opt.progname);
@@ -2122,8 +2291,8 @@ static void _opt_list()
 	info("cwd            : %s", opt.cwd);
 	info("ntasks         : %d %s", opt.ntasks,
 	     opt.ntasks_set ? "(set)" : "(default)");
-	info("cpus_per_task  : %d %s", opt.cpus_per_task,
-	     opt.cpus_set ? "(set)" : "(default)");
+	if (opt.cpus_set)
+		info("cpus_per_task  : %d", opt.cpus_per_task);
 	if (opt.max_nodes)
 		info("nodes          : %d-%d", opt.min_nodes, opt.max_nodes);
 	else {
@@ -2137,6 +2306,8 @@ static void _opt_list()
 	info("job name       : `%s'", opt.job_name);
 	info("reservation    : `%s'", opt.reservation);
 	info("wckey          : `%s'", opt.wckey);
+	info("switch         : %d", opt.req_switch);
+	info("wait-for-switch: %d", opt.wait4switch);
 	info("distribution   : %s", format_task_dist_states(opt.distribution));
 	if(opt.distribution == SLURM_DIST_PLANE)
 		info("plane size   : %u", opt.plane_size);
@@ -2181,8 +2352,11 @@ static void _opt_list()
 	str = print_constraints();
 	info("constraints    : %s", str);
 	xfree(str);
-	if (opt.conn_type != (uint16_t) NO_VAL)
-		info("conn_type      : %u", opt.conn_type);
+	for (i = 0; i < HIGHEST_DIMENSIONS; i++) {
+		if (opt.conn_type[i] == (uint16_t) NO_VAL)
+			break;
+		info("conn_type[%d]    : %u", i, opt.conn_type[i]);
+	}
 	str = print_geometry(opt.geometry);
 	info("geometry       : %s", str);
 	xfree(str);
@@ -2277,6 +2451,7 @@ static void _usage(void)
 "            [--prolog=fname] [--epilog=fname]\n"
 "            [--task-prolog=fname] [--task-epilog=fname]\n"
 "            [--ctrl-comm-ifhn=addr] [--multi-prog]\n"
+"            [--switch=max-switches{@max-time-to-wait}]\n"
 "            [-w hosts...] [-x hosts...] executable [args...]\n");
 }
 
@@ -2322,7 +2497,7 @@ static void _help(void)
 "      --multi-prog            if set the program name specified is the\n"
 "                              configuration specification for multiple programs\n"
 "  -n, --ntasks=ntasks         number of tasks to run\n"
-"      --nice[=value]          decrease secheduling priority by value\n"
+"      --nice[=value]          decrease scheduling priority by value\n"
 "      --ntasks-per-node=n     number of tasks to invoke on each node\n"
 "  -N, --nodes=N               number of nodes on which to run (N = min[-max])\n"
 "  -o, --output=out            location of stdout redirection\n"
@@ -2351,6 +2526,8 @@ static void _help(void)
 "  -W, --wait=sec              seconds to wait after first task exits\n"
 "                              before killing job\n"
 "  -X, --disable-status        Disable Ctrl-C status feature\n"
+"      --switch=max-switches{@max-time-to-wait}\n"
+"                              Optimum switches and max time to wait for optimum\n"
 "\n"
 "Constraint options:\n"
 "      --contiguous            demand a contiguous range of nodes\n"
diff --git a/src/srun/opt.h b/src/srun/opt.h
index b637d72bb..f0d0676af 100644
--- a/src/srun/opt.h
+++ b/src/srun/opt.h
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -162,7 +162,7 @@ typedef struct srun_options {
 	bool noshell;		/* --no-shell                   */
 	bool overcommit;	/* --overcommit,   -O		*/
 	bool no_kill;		/* --no-kill, -k		*/
-	bool kill_bad_exit;	/* --kill-on-bad-exit, -K	*/
+	int32_t kill_bad_exit;	/* --kill-on-bad-exit, -K	*/
 	uint16_t shared;	/* --share,   -s		*/
 	int  max_wait;		/* --wait,    -W		*/
 	bool quit_on_intr;      /* --quit-on-interrupt, -q      */
@@ -197,10 +197,10 @@ typedef struct srun_options {
 	char *network;		/* --network=			*/
 
 	/* BLUEGENE SPECIFIC */
-	uint16_t geometry[SYSTEM_DIMENSIONS]; /* --geometry, -g	*/
+	uint16_t geometry[HIGHEST_DIMENSIONS]; /* --geometry, -g */
 	bool reboot;		/* --reboot			*/
 	bool no_rotate;		/* --no_rotate, -R		*/
-	uint16_t conn_type;	/* --conn-type 			*/
+	uint16_t conn_type[HIGHEST_DIMENSIONS];	/* --conn-type 	*/
 	char *blrtsimage;       /* --blrtsimage BlrtsImage for block */
 	char *linuximage;       /* --linuximage LinuxImage for block */
 	char *mloaderimage;     /* --mloaderimage mloaderImage for block */
@@ -223,6 +223,8 @@ typedef struct srun_options {
 	char **spank_job_env;	/* SPANK controlled environment for job
 				 * Prolog and Epilog		*/
 	int spank_job_env_size;	/* size of spank_job_env	*/
+	int req_switch;		/* Minimum number of switches	*/
+	int wait4switch;	/* Maximum time to wait for minimum switches */
 } opt_t;
 
 extern opt_t opt;
@@ -258,7 +260,7 @@ extern int   spank_set_job_env(const char *name, const char *value,
 			       int overwrite);
 extern int   spank_unset_job_env(const char *name);
 
-/* Initialize the the spank_job_env based upon environment variables set
+/* Initialize the spank_job_env based upon environment variables set
  *	via salloc or sbatch commands */
 extern void init_spank_env(void);
 
diff --git a/src/srun/srun.c b/src/srun/srun.c
index 75bfa251c..ce567e8f5 100644
--- a/src/srun/srun.c
+++ b/src/srun/srun.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -189,7 +189,6 @@ int srun(int ac, char **av)
 	resource_allocation_response_msg_t *resp;
 	int debug_level;
 	env_t *env = xmalloc(sizeof(env_t));
-	uint32_t job_id = 0;
 	log_options_t logopt = LOG_OPTS_STDERR_ONLY;
 	slurm_step_launch_params_t launch_params;
 	slurm_step_launch_callbacks_t callbacks;
@@ -214,7 +213,12 @@ int srun(int ac, char **av)
 	 * which are not designed to handle them */
 	if (xsignal_block(sig_array) < 0)
 		error("Unable to block signals");
-
+#ifndef HAVE_CRAY_EMULATION
+	if (is_cray_system() || is_cray_select_type()) {
+		error("operation not supported on Cray systems - use aprun(1)");
+		exit(error_exit);
+	}
+#endif
 	/* Initialize plugin stack, read options from plugins, etc.
 	 */
 	init_spank_env();
@@ -283,8 +287,8 @@ int srun(int ac, char **av)
 			exit(error_exit);
 		}
 	} else if ((resp = existing_allocation())) {
-
-		job_id = resp->job_id;
+		select_g_alter_node_cnt(SELECT_APPLY_NODE_MAX_OFFSET,
+					&resp->node_cnt);
 		if (opt.nodes_set_env && !opt.nodes_set_opt &&
 		    (opt.min_nodes > resp->node_cnt)) {
 			/* This signifies the job used the --no-kill option
@@ -319,7 +323,7 @@ int srun(int ac, char **av)
 			exit(error_exit);
 	} else {
 		/* Combined job allocation and job step launch */
-#ifdef HAVE_FRONT_END
+#if defined HAVE_FRONT_END && (!defined HAVE_BGQ || !defined HAVE_BG_FILES)
 		uid_t my_uid = getuid();
 		if ((my_uid != 0) &&
 		    (my_uid != slurm_get_slurm_user_id())) {
@@ -332,6 +336,11 @@ int srun(int ac, char **av)
 			      "request");
 		}
 
+		if (!opt.job_name_set_env && opt.job_name_set_cmd)
+			setenvfs("SLURM_JOB_NAME=%s", opt.job_name);
+		else if (!opt.job_name_set_env && opt.argc)
+			setenvfs("SLURM_JOB_NAME=%s", opt.argv[0]);
+
 		if ( !(resp = allocate_nodes()) )
 			exit(error_exit);
 		got_alloc = 1;
@@ -373,7 +382,8 @@ int srun(int ac, char **av)
 	/*
 	 *  Enhance environment for job
 	 */
-	env->cpus_per_task = opt.cpus_per_task;
+	if (opt.cpus_set)
+		env->cpus_per_task = opt.cpus_per_task;
 	if (opt.ntasks_per_node != NO_VAL)
 		env->ntasks_per_node = opt.ntasks_per_node;
 	if (opt.ntasks_per_socket != NO_VAL)
@@ -431,7 +441,12 @@ int srun(int ac, char **av)
 	xfree(env);
 
  re_launch:
+#if defined HAVE_BGQ
+//#if defined HAVE_BGQ && defined HAVE_BG_FILES
+	task_state = task_state_create(1);
+#else
 	task_state = task_state_create(opt.ntasks);
+#endif
 	slurm_step_launch_params_t_init(&launch_params);
 	launch_params.gid = opt.gid;
 	launch_params.argc = opt.argc;
@@ -454,7 +469,10 @@ int srun(int ac, char **av)
 	if (opt.acctg_freq >= 0)
 		launch_params.acctg_freq = opt.acctg_freq;
 	launch_params.pty = opt.pty;
-	launch_params.cpus_per_task	= opt.cpus_per_task;
+	if (opt.cpus_set)
+		launch_params.cpus_per_task	= opt.cpus_per_task;
+	else
+		launch_params.cpus_per_task	= 1;
 	launch_params.task_dist         = opt.distribution;
 	launch_params.ckpt_dir		= opt.ckpt_dir;
 	launch_params.restart_dir       = opt.restart_dir;
@@ -1220,7 +1238,9 @@ _update_task_exit_state(uint32_t ntasks, uint32_t taskids[], int abnormal)
 
 static int _kill_on_bad_exit(void)
 {
-	return (opt.kill_bad_exit || slurm_get_kill_on_bad_exit());
+	if (opt.kill_bad_exit == NO_VAL)
+		return slurm_get_kill_on_bad_exit();
+	return opt.kill_bad_exit;
 }
 
 static void _setup_max_wait_timer(void)
@@ -1345,8 +1365,9 @@ static void _handle_intr(void)
 {
 	static time_t last_intr      = 0;
 	static time_t last_intr_sent = 0;
+	time_t now = time(NULL);
 
-	if (!opt.quit_on_intr && ((time(NULL) - last_intr) > 1)) {
+	if (!opt.quit_on_intr && ((now - last_intr) > 1)) {
 		if  (opt.disable_status) {
 			info("sending Ctrl-C to job %u.%u",
 			     job->jobid, job->stepid);
@@ -1363,7 +1384,7 @@ static void _handle_intr(void)
 		update_job_state(job, SRUN_JOB_CANCELLED);
 		/* terminate job */
 		if (job->state < SRUN_JOB_FORCETERM) {
-			if ((time(NULL) - last_intr_sent) < 1) {
+			if ((now - last_intr_sent) < 1) {
 				job_force_termination(job);
 				slurm_step_launch_abort(job->step_ctx);
 				return;
@@ -1371,7 +1392,7 @@ static void _handle_intr(void)
 
 			info("sending Ctrl-C to job %u.%u",
 			     job->jobid, job->stepid);
-			last_intr_sent = time(NULL);
+			last_intr_sent = now;
 			slurm_step_launch_fwd_signal(job->step_ctx, SIGINT);
 			slurm_step_launch_abort(job->step_ctx);
 		} else {
diff --git a/src/srun/srun.h b/src/srun/srun.h
index 6f130d34f..135b631b6 100644
--- a/src/srun/srun.h
+++ b/src/srun/srun.h
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/srun/srun_job.c b/src/srun/srun_job.c
index 80650b5f0..30d0051f7 100644
--- a/src/srun/srun_job.c
+++ b/src/srun/srun_job.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -421,21 +421,25 @@ static int
 _compute_task_count(allocation_info_t *ainfo)
 {
 	int i, cnt = 0;
-
+#if defined HAVE_BGQ
+//#if defined HAVE_BGQ && HAVE_BG_FILES
+	/* always return the ntasks here for Q */
+	info("returning %d", opt.ntasks);
+	return opt.ntasks;
+#endif
 	if (opt.cpus_set) {
 		for (i = 0; i < ainfo->num_cpu_groups; i++)
 			cnt += ( ainfo->cpu_count_reps[i] *
 				 (ainfo->cpus_per_node[i]/opt.cpus_per_task));
 	}
-
 	return (cnt < ainfo->nnodes) ? ainfo->nnodes : cnt;
 }
 
 static void
-_set_ntasks(allocation_info_t *info)
+_set_ntasks(allocation_info_t *ai)
 {
 	if (!opt.ntasks_set) {
-		opt.ntasks = _compute_task_count(info);
+		opt.ntasks = _compute_task_count(ai);
 		if (opt.cpus_set)
 			opt.ntasks_set = true;	/* implicit */
 	}
@@ -460,14 +464,19 @@ _job_create_structure(allocation_info_t *ainfo)
  	job->nodelist = xstrdup(ainfo->nodelist);
 	job->stepid  = ainfo->stepid;
 
-#ifdef HAVE_FRONT_END	/* Limited job step support */
+#if defined HAVE_BGQ
+//#if defined HAVE_BGQ && defined HAVE_BG_FILES
+	job->nhosts   = ainfo->nnodes;
+	select_g_alter_node_cnt(SELECT_APPLY_NODE_MAX_OFFSET, &job->nhosts);
+#elif defined HAVE_FRONT_END	/* Limited job step support */
 	opt.overcommit = true;
 	job->nhosts = 1;
 #else
 	job->nhosts   = ainfo->nnodes;
 #endif
 
-#ifndef HAVE_FRONT_END
+#if !defined HAVE_FRONT_END || (defined HAVE_BGQ)
+//#if !defined HAVE_FRONT_END || (defined HAVE_BGQ && defined HAVE_BG_FILES)
 	if(opt.min_nodes > job->nhosts) {
 		error("Only allocated %d nodes asked for %d",
 		      job->nhosts, opt.min_nodes);
diff --git a/src/srun/srun_job.h b/src/srun/srun_job.h
index ae24c2142..675da90d6 100644
--- a/src/srun/srun_job.h
+++ b/src/srun/srun_job.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -45,7 +45,7 @@
 
 #include <netinet/in.h>
 
-#include <slurm/slurm.h>
+#include "slurm/slurm.h"
 
 #include "src/common/eio.h"
 #include "src/common/cbuf.h"
diff --git a/src/srun/srun_pty.c b/src/srun/srun_pty.c
index ec638c20f..1034f9c92 100644
--- a/src/srun/srun_pty.c
+++ b/src/srun/srun_pty.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -54,7 +54,7 @@
 #include <sys/ioctl.h>
 #include <sys/poll.h>
 
-#include <slurm/slurm_errno.h>
+#include "slurm/slurm_errno.h"
 
 #include "src/common/log.h"
 #include "src/common/macros.h"
diff --git a/src/srun/srun_pty.h b/src/srun/srun_pty.h
index 428ae4eac..41382afd5 100644
--- a/src/srun/srun_pty.h
+++ b/src/srun/srun_pty.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/srun/task_state.c b/src/srun/task_state.c
index 255a37087..bede86ad5 100644
--- a/src/srun/task_state.c
+++ b/src/srun/task_state.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/srun/task_state.h b/src/srun/task_state.h
index 7309fc9dd..ac7120601 100644
--- a/src/srun/task_state.h
+++ b/src/srun/task_state.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/srun_cr/Makefile.in b/src/srun_cr/Makefile.in
index 45e68abe8..4b88b2a27 100644
--- a/src/srun_cr/Makefile.in
+++ b/src/srun_cr/Makefile.in
@@ -64,6 +64,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -74,6 +75,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -120,7 +122,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -157,6 +162,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -214,6 +220,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -249,6 +256,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/srun_cr/srun_cr.c b/src/srun_cr/srun_cr.c
index c17528ab0..707350a2d 100644
--- a/src/srun_cr/srun_cr.c
+++ b/src/srun_cr/srun_cr.c
@@ -6,7 +6,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -54,9 +54,9 @@
 #include <sys/time.h>
 #include <sys/resource.h>
 #include <sys/wait.h>
-
 #include <libcr.h>
-#include <slurm/slurm.h>
+
+#include "slurm/slurm.h"
 
 #include "src/common/fd.h"
 #include "src/common/log.h"
diff --git a/src/sshare/Makefile.in b/src/sshare/Makefile.in
index f35bce20e..0edbdc8ca 100644
--- a/src/sshare/Makefile.in
+++ b/src/sshare/Makefile.in
@@ -64,6 +64,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -74,6 +75,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -120,7 +122,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -157,6 +162,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -214,6 +220,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -249,6 +256,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/sshare/process.c b/src/sshare/process.c
index 62520cdd4..1172ec5a8 100644
--- a/src/sshare/process.c
+++ b/src/sshare/process.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/sshare/sshare.c b/src/sshare/sshare.c
index d599b9bd6..a917b3ca6 100644
--- a/src/sshare/sshare.c
+++ b/src/sshare/sshare.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -125,7 +125,10 @@ main (int argc, char *argv[])
 				list_destroy(clusters);
 			if(!(clusters =
 			     slurmdb_get_info_cluster(optarg))) {
-				error("'%s' invalid entry for --cluster",
+				error("'%s' can't be reached now, "
+				      "or it is an invalid entry for "
+				      "--cluster.  Use 'sacctmgr --list "
+				      "cluster' to see available clusters.",
 				      optarg);
 				exit(1);
 			}
diff --git a/src/sshare/sshare.h b/src/sshare/sshare.h
index 08ef1df2d..d5fe65650 100644
--- a/src/sshare/sshare.h
+++ b/src/sshare/sshare.h
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -76,7 +76,7 @@
 #  endif
 #endif  /* HAVE_INTTYPES_H */
 
-#include <slurm/slurm.h>
+#include "slurm/slurm.h"
 
 #include "src/common/parse_time.h"
 #include "src/common/slurm_accounting_storage.h"
diff --git a/src/sstat/Makefile.in b/src/sstat/Makefile.in
index d28b626b2..d5e025609 100644
--- a/src/sstat/Makefile.in
+++ b/src/sstat/Makefile.in
@@ -66,6 +66,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -76,6 +77,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -122,7 +124,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -159,6 +164,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -216,6 +222,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -251,6 +258,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/sstat/options.c b/src/sstat/options.c
index fb9cf1899..9e3a5f26a 100644
--- a/src/sstat/options.c
+++ b/src/sstat/options.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -273,6 +273,8 @@ int decode_state_char(char *state)
 		return JOB_TIMEOUT;
 	else if (!strcasecmp(state, "nf"))
 		return JOB_NODE_FAIL;
+	else if (!strcasecmp(state, "pr"))
+		return JOB_PREEMPTED;
 	else
 		return -1; // unknown
 }
diff --git a/src/sstat/print.c b/src/sstat/print.c
index 55e0cf5d4..7374a131d 100644
--- a/src/sstat/print.c
+++ b/src/sstat/print.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/sstat/process.c b/src/sstat/process.c
index 5513a7db4..960ae8b48 100644
--- a/src/sstat/process.c
+++ b/src/sstat/process.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/sstat/sstat.c b/src/sstat/sstat.c
index 469de5590..16b08d6d4 100644
--- a/src/sstat/sstat.c
+++ b/src/sstat/sstat.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/sstat/sstat.h b/src/sstat/sstat.h
index aaf2b526c..c664473eb 100644
--- a/src/sstat/sstat.h
+++ b/src/sstat/sstat.h
@@ -10,7 +10,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/strigger/Makefile.in b/src/strigger/Makefile.in
index 6e4853bb8..c48fa5219 100644
--- a/src/strigger/Makefile.in
+++ b/src/strigger/Makefile.in
@@ -67,6 +67,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -77,6 +78,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -122,7 +124,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -159,6 +164,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -216,6 +222,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -251,6 +258,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/src/strigger/opts.c b/src/strigger/opts.c
index 4a4e94637..fb105e65c 100644
--- a/src/strigger/opts.c
+++ b/src/strigger/opts.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -72,6 +72,7 @@
 #define OPT_LONG_CLEAR     0x104
 #define OPT_LONG_USER      0x105
 #define OPT_LONG_BLOCK_ERR 0x106
+#define OPT_LONG_FRONT_END 0x107
 
 /* getopt_long options, integers but not characters */
 
@@ -126,6 +127,7 @@ extern void parse_command_line(int argc, char *argv[])
 		{"version",                             no_argument, 0, 'V'},
 		{"block_err", no_argument,       0, OPT_LONG_BLOCK_ERR},
 		{"clear",     no_argument,       0, OPT_LONG_CLEAR},
+		{"front_end", no_argument,       0, OPT_LONG_FRONT_END},
 		{"get",       no_argument,       0, OPT_LONG_GET},
 		{"help",      no_argument,       0, OPT_LONG_HELP},
 		{"set",       no_argument,       0, OPT_LONG_SET},
@@ -213,7 +215,10 @@ extern void parse_command_line(int argc, char *argv[])
 				list_destroy(params.clusters);
 			if (!(params.clusters =
 			      slurmdb_get_info_cluster(optarg))) {
-				error("'%s' invalid entry for --cluster",
+				error("'%s' can't be reached now, "
+				      "or it is an invalid entry for "
+				      "--cluster.  Use 'sacctmgr --list "
+				      "cluster' to see available clusters.",
 				      optarg);
 				exit(1);
 			}
@@ -263,6 +268,9 @@ extern void parse_command_line(int argc, char *argv[])
 		case (int) OPT_LONG_CLEAR:
 			params.mode_clear = true;
 			break;
+		case (int) OPT_LONG_FRONT_END:
+			params.front_end = true;
+			break;
 		case (int) OPT_LONG_GET:
 			params.mode_get = true;
 			break;
@@ -299,6 +307,7 @@ static void _init_options( void )
 	params.bu_ctld_fail = false;
 	params.bu_ctld_res_op = false;
 	params.bu_ctld_as_ctrl = false;
+	params.front_end    = false;
 	params.node_down    = false;
 	params.node_drained = false;
 	params.node_fail    = false;
@@ -329,6 +338,7 @@ static void _print_options( void )
 	verbose("get          = %s", params.mode_get ? "true" : "false");
 	verbose("clear        = %s", params.mode_clear ? "true" : "false");
 	verbose("block_err    = %s", params.block_err ? "true" : "false");
+	verbose("front_end    = %s", params.front_end ? "true" : "false");
 	verbose("job_id       = %u", params.job_id);
 	verbose("job_fini     = %s", params.job_fini ? "true" : "false");
 	verbose("node_down    = %s", params.node_down ? "true" : "false");
@@ -458,6 +468,7 @@ Usage: strigger [--set | --get | --clear] [OPTIONS]\n\
       --get           get trigger information\n\
       --clear         delete a trigger\n\n\
       --block_err     trigger event on BlueGene block error\n\
+      --front_end     trigger event on FrontEnd node state changes\n\
   -a, --primary_slurmctld_failure\n\
                       trigger event when primary slurmctld fails\n\
   -A, --primary_slurmctld_resumed_operation\n\
diff --git a/src/strigger/strigger.c b/src/strigger/strigger.c
index 4b0ca6add..4c0b8766c 100644
--- a/src/strigger/strigger.c
+++ b/src/strigger/strigger.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -48,13 +48,13 @@
 #include <stdlib.h>
 #include <time.h>
 #include <unistd.h>
-#include <slurm/slurm_errno.h>
-#include <slurm/slurm.h>
 #include <sys/types.h>
 #include <sys/stat.h>
 #ifdef HAVE_STRINGS_H
 #  include <strings.h>
 #endif
+#include "slurm/slurm_errno.h"
+#include "slurm/slurm.h"
 
 #include "src/common/xmalloc.h"
 #include "src/common/xstring.h"
@@ -136,6 +136,8 @@ static int _set_trigger(void)
 			ti.trig_type |= TRIGGER_TYPE_FINI;
 		if (params.time_limit)
 			ti.trig_type |= TRIGGER_TYPE_TIME;
+	} else if (params.front_end) {
+		ti.res_type = TRIGGER_RES_TYPE_FRONT_END;
 	} else {
 		ti.res_type = TRIGGER_RES_TYPE_NODE;
 		if (params.node_id)
@@ -250,8 +252,10 @@ static int _get_trigger(void)
 				continue;
 		}
 		if (params.node_down) {
-			if ((trig_msg->trigger_array[i].res_type
-					!= TRIGGER_RES_TYPE_NODE) ||
+			if (((trig_msg->trigger_array[i].res_type
+					!= TRIGGER_RES_TYPE_NODE) &&
+			     (trig_msg->trigger_array[i].res_type
+					!= TRIGGER_RES_TYPE_FRONT_END)) ||
 			    (trig_msg->trigger_array[i].trig_type
 					!= TRIGGER_TYPE_DOWN))
 				continue;
@@ -283,8 +287,10 @@ static int _get_trigger(void)
 				continue;
 		}
 		if (params.node_up) {
-			if ((trig_msg->trigger_array[i].res_type
-					!= TRIGGER_RES_TYPE_NODE) ||
+			if (((trig_msg->trigger_array[i].res_type
+					!= TRIGGER_RES_TYPE_NODE) &&
+			     (trig_msg->trigger_array[i].res_type
+					!= TRIGGER_RES_TYPE_FRONT_END)) ||
 			    (trig_msg->trigger_array[i].trig_type
 					!= TRIGGER_TYPE_UP))
 				continue;
diff --git a/src/strigger/strigger.h b/src/strigger/strigger.h
index 61d7aa230..0c31f1741 100644
--- a/src/strigger/strigger.h
+++ b/src/strigger/strigger.h
@@ -2,13 +2,13 @@
  *  strigger.h - definitions used for strigger functions
  *****************************************************************************
  *  Copyright (C) 2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2010 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -44,14 +44,15 @@
 #  include "config.h"
 #endif
 
-#include <slurm/slurm.h>
-#include <src/common/macros.h>
-#include <src/common/slurm_protocol_defs.h>
+#include "slurm/slurm.h"
+#include "src/common/macros.h"
+#include "src/common/slurm_protocol_defs.h"
 #include "src/common/slurmdb_defs.h"
 
 struct strigger_parameters {
 	bool     block_err;
 	List     clusters;
+	bool     front_end;
 	bool     job_fini;
 	uint32_t job_id;
 	bool     mode_set;
diff --git a/src/sview/Makefile.am b/src/sview/Makefile.am
index 515a152f2..e000a472d 100644
--- a/src/sview/Makefile.am
+++ b/src/sview/Makefile.am
@@ -9,13 +9,11 @@ if HAVE_GTK
 
 bin_PROGRAMS = sview
 
-sview_LDADD =					  \
-	$(top_builddir)/src/plugins/select/bluegene/block_allocator/libbluegene_block_allocator.la  \
-	$(top_builddir)/src/db_api/libslurmdb.o -ldl
+sview_LDADD = $(top_builddir)/src/db_api/libslurmdb.o -ldl
 
 noinst_HEADERS = sview.h
 sview_SOURCES = sview.c popups.c grid.c part_info.c job_info.c \
-	block_info.c node_info.c resv_info.c \
+	block_info.c front_end_info.c node_info.c resv_info.c \
 	submit_info.c admin_info.c common.c \
 	config_info.c defaults.c
 
@@ -29,7 +27,7 @@ sview_CFLAGS = $(GTK_CFLAGS)
 else
 
 EXTRA_sview_SOURCES = sview.h sview.c popups.c grid.c part_info.c job_info.c \
-	block_info.c node_info.c resv_info.c \
+	block_info.c front_end_info.c node_info.c resv_info.c \
 	submit_info.c admin_info.c common.c config_info.c defaults.c
 
 endif
diff --git a/src/sview/Makefile.in b/src/sview/Makefile.in
index 199f90a65..8e86f957f 100644
--- a/src/sview/Makefile.in
+++ b/src/sview/Makefile.in
@@ -68,6 +68,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -78,6 +79,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -89,13 +91,15 @@ CONFIG_CLEAN_VPATH_FILES =
 am__installdirs = "$(DESTDIR)$(bindir)"
 PROGRAMS = $(bin_PROGRAMS)
 am__sview_SOURCES_DIST = sview.c popups.c grid.c part_info.c \
-	job_info.c block_info.c node_info.c resv_info.c submit_info.c \
-	admin_info.c common.c config_info.c defaults.c
+	job_info.c block_info.c front_end_info.c node_info.c \
+	resv_info.c submit_info.c admin_info.c common.c config_info.c \
+	defaults.c
 @HAVE_GTK_TRUE@am_sview_OBJECTS = sview-sview.$(OBJEXT) \
 @HAVE_GTK_TRUE@	sview-popups.$(OBJEXT) sview-grid.$(OBJEXT) \
 @HAVE_GTK_TRUE@	sview-part_info.$(OBJEXT) \
 @HAVE_GTK_TRUE@	sview-job_info.$(OBJEXT) \
 @HAVE_GTK_TRUE@	sview-block_info.$(OBJEXT) \
+@HAVE_GTK_TRUE@	sview-front_end_info.$(OBJEXT) \
 @HAVE_GTK_TRUE@	sview-node_info.$(OBJEXT) \
 @HAVE_GTK_TRUE@	sview-resv_info.$(OBJEXT) \
 @HAVE_GTK_TRUE@	sview-submit_info.$(OBJEXT) \
@@ -104,10 +108,11 @@ am__sview_SOURCES_DIST = sview.c popups.c grid.c part_info.c \
 @HAVE_GTK_TRUE@	sview-config_info.$(OBJEXT) \
 @HAVE_GTK_TRUE@	sview-defaults.$(OBJEXT)
 am__EXTRA_sview_SOURCES_DIST = sview.h sview.c popups.c grid.c \
-	part_info.c job_info.c block_info.c node_info.c resv_info.c \
-	submit_info.c admin_info.c common.c config_info.c defaults.c
+	part_info.c job_info.c block_info.c front_end_info.c \
+	node_info.c resv_info.c submit_info.c admin_info.c common.c \
+	config_info.c defaults.c
 sview_OBJECTS = $(am_sview_OBJECTS)
-@HAVE_GTK_TRUE@sview_DEPENDENCIES = $(top_builddir)/src/plugins/select/bluegene/block_allocator/libbluegene_block_allocator.la \
+@HAVE_GTK_TRUE@sview_DEPENDENCIES =  \
 @HAVE_GTK_TRUE@	$(top_builddir)/src/db_api/libslurmdb.o
 sview_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
 	--mode=link $(CCLD) $(sview_CFLAGS) $(CFLAGS) $(sview_LDFLAGS) \
@@ -143,7 +148,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -180,6 +188,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -237,6 +246,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -272,6 +282,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
@@ -326,20 +337,17 @@ top_builddir = @top_builddir@
 top_srcdir = @top_srcdir@
 AUTOMAKE_OPTIONS = foreign
 INCLUDES = -I$(top_srcdir) $(BG_INCLUDES)
-@HAVE_GTK_TRUE@sview_LDADD = \
-@HAVE_GTK_TRUE@	$(top_builddir)/src/plugins/select/bluegene/block_allocator/libbluegene_block_allocator.la  \
-@HAVE_GTK_TRUE@	$(top_builddir)/src/db_api/libslurmdb.o -ldl
-
+@HAVE_GTK_TRUE@sview_LDADD = $(top_builddir)/src/db_api/libslurmdb.o -ldl
 @HAVE_GTK_TRUE@noinst_HEADERS = sview.h
 @HAVE_GTK_TRUE@sview_SOURCES = sview.c popups.c grid.c part_info.c job_info.c \
-@HAVE_GTK_TRUE@	block_info.c node_info.c resv_info.c \
+@HAVE_GTK_TRUE@	block_info.c front_end_info.c node_info.c resv_info.c \
 @HAVE_GTK_TRUE@	submit_info.c admin_info.c common.c \
 @HAVE_GTK_TRUE@	config_info.c defaults.c
 
 @HAVE_GTK_TRUE@sview_LDFLAGS = -export-dynamic $(CMD_LDFLAGS) $(BG_LDFLAGS) $(GTK_LIBS)
 @HAVE_GTK_TRUE@sview_CFLAGS = $(GTK_CFLAGS)
 @HAVE_GTK_FALSE@EXTRA_sview_SOURCES = sview.h sview.c popups.c grid.c part_info.c job_info.c \
-@HAVE_GTK_FALSE@	block_info.c node_info.c resv_info.c \
+@HAVE_GTK_FALSE@	block_info.c front_end_info.c node_info.c resv_info.c \
 @HAVE_GTK_FALSE@	submit_info.c admin_info.c common.c config_info.c defaults.c
 
 all: all-am
@@ -434,6 +442,7 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sview-common.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sview-config_info.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sview-defaults.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sview-front_end_info.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sview-grid.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sview-job_info.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sview-node_info.Po@am__quote@
@@ -548,6 +557,20 @@ sview-block_info.obj: block_info.c
 @AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
 @am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(sview_CFLAGS) $(CFLAGS) -c -o sview-block_info.obj `if test -f 'block_info.c'; then $(CYGPATH_W) 'block_info.c'; else $(CYGPATH_W) '$(srcdir)/block_info.c'; fi`
 
+sview-front_end_info.o: front_end_info.c
+@am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(sview_CFLAGS) $(CFLAGS) -MT sview-front_end_info.o -MD -MP -MF $(DEPDIR)/sview-front_end_info.Tpo -c -o sview-front_end_info.o `test -f 'front_end_info.c' || echo '$(srcdir)/'`front_end_info.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/sview-front_end_info.Tpo $(DEPDIR)/sview-front_end_info.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='front_end_info.c' object='sview-front_end_info.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(sview_CFLAGS) $(CFLAGS) -c -o sview-front_end_info.o `test -f 'front_end_info.c' || echo '$(srcdir)/'`front_end_info.c
+
+sview-front_end_info.obj: front_end_info.c
+@am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(sview_CFLAGS) $(CFLAGS) -MT sview-front_end_info.obj -MD -MP -MF $(DEPDIR)/sview-front_end_info.Tpo -c -o sview-front_end_info.obj `if test -f 'front_end_info.c'; then $(CYGPATH_W) 'front_end_info.c'; else $(CYGPATH_W) '$(srcdir)/front_end_info.c'; fi`
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/sview-front_end_info.Tpo $(DEPDIR)/sview-front_end_info.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='front_end_info.c' object='sview-front_end_info.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(sview_CFLAGS) $(CFLAGS) -c -o sview-front_end_info.obj `if test -f 'front_end_info.c'; then $(CYGPATH_W) 'front_end_info.c'; else $(CYGPATH_W) '$(srcdir)/front_end_info.c'; fi`
+
 sview-node_info.o: node_info.c
 @am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(sview_CFLAGS) $(CFLAGS) -MT sview-node_info.o -MD -MP -MF $(DEPDIR)/sview-node_info.Tpo -c -o sview-node_info.o `test -f 'node_info.c' || echo '$(srcdir)/'`node_info.c
 @am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/sview-node_info.Tpo $(DEPDIR)/sview-node_info.Po
diff --git a/src/sview/admin_info.c b/src/sview/admin_info.c
index f59e6cceb..02b47de8a 100644
--- a/src/sview/admin_info.c
+++ b/src/sview/admin_info.c
@@ -10,7 +10,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/sview/block_info.c b/src/sview/block_info.c
index 8db14a741..d7e3d340d 100644
--- a/src/sview/block_info.c
+++ b/src/sview/block_info.c
@@ -10,7 +10,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -37,13 +37,13 @@ typedef struct {
 	char *bg_user_name;
 	char *bg_block_name;
 	char *slurm_part_name;
-	char *nodes;
-	enum connection_type bg_conn_type;
-	enum node_use_type bg_node_use;
-	rm_partition_state_t state;
+	char *mp_str;
+	uint16_t bg_conn_type[HIGHEST_DIMENSIONS];
+	uint16_t bg_node_use;
+	uint16_t state;
 	int size;
-	int node_cnt;
-	int *bp_inx;            /* list index pairs into node_table for *nodes:
+	int cnode_cnt;
+	int *bp_inx;            /* list index pairs into node_table for *mp_str:
 				 * start_range_1, end_range_1,
 				 * start_range_2, .., -1  */
 	int color_inx;
@@ -74,7 +74,7 @@ enum {
 	SORTID_IMAGERAMDISK,
 	SORTID_IMAGEMLOADER,
 #endif
-	SORTID_NODES,
+	SORTID_MP_STR,
 	SORTID_PARTITION,
 	SORTID_STATE,
 	SORTID_UPDATED,
@@ -104,7 +104,7 @@ static display_data_t display_data_block[] = {
 	 create_model_block, admin_edit_block},
 	{G_TYPE_STRING, SORTID_USER, "User", FALSE, EDIT_NONE, refresh_block,
 	 create_model_block, admin_edit_block},
-	{G_TYPE_STRING, SORTID_NODES, "Node Count",
+	{G_TYPE_STRING, SORTID_MP_STR, "Node Count",
 	 FALSE, EDIT_NONE, refresh_block, create_model_block, admin_edit_block},
 	{G_TYPE_STRING, SORTID_CONN, "Connection Type",
 	 FALSE, EDIT_NONE, refresh_block,
@@ -136,9 +136,9 @@ static display_data_t display_data_block[] = {
 	{G_TYPE_STRING, SORTID_IMAGEMLOADER, "Image Mloader",
 	 FALSE, EDIT_NONE, refresh_block, create_model_block, admin_edit_block},
 	{G_TYPE_POINTER, SORTID_NODE_INX, NULL, FALSE, EDIT_NONE,
-	 refresh_resv, create_model_resv, admin_edit_resv},
+	 refresh_block, create_model_resv, admin_edit_resv},
 	{G_TYPE_INT, SORTID_COLOR_INX, NULL, FALSE, EDIT_NONE,
-	 refresh_resv, create_model_resv, admin_edit_resv},
+	 refresh_block, create_model_resv, admin_edit_resv},
 	{G_TYPE_INT, SORTID_SMALL_BLOCK, NULL, FALSE, EDIT_NONE, refresh_block,
 	 create_model_block, admin_edit_block},
 	{G_TYPE_INT, SORTID_UPDATED, NULL, FALSE, EDIT_NONE, refresh_block,
@@ -185,7 +185,7 @@ static void _block_list_del(void *object)
 		xfree(block_ptr->bg_user_name);
 		xfree(block_ptr->bg_block_name);
 		xfree(block_ptr->slurm_part_name);
-		xfree(block_ptr->nodes);
+		xfree(block_ptr->mp_str);
 		xfree(block_ptr->imageblrts);
 		xfree(block_ptr->imagelinux);
 		xfree(block_ptr->imagemloader);
@@ -227,6 +227,7 @@ static void _layout_block_record(GtkTreeView *treeview,
 				 int update)
 {
 	char tmp_cnt[18];
+	char *tmp_char = NULL;
 	GtkTreeIter iter;
 	GtkTreeStore *treestore =
 		GTK_TREE_STORE(gtk_tree_view_get_model(treeview));
@@ -234,13 +235,14 @@ static void _layout_block_record(GtkTreeView *treeview,
 	add_display_treestore_line(update, treestore, &iter,
 				   find_col_name(display_data_block,
 						 SORTID_NODELIST),
-				   block_ptr->nodes);
-
+				   block_ptr->mp_str);
+	tmp_char = conn_type_string_full(block_ptr->bg_conn_type);
 	add_display_treestore_line(update, treestore, &iter,
 				   find_col_name(display_data_block,
 						 SORTID_CONN),
-				   conn_type_string(
-					   block_ptr->bg_conn_type));
+				   tmp_char);
+	xfree(tmp_char);
+
 	if (cluster_flags & CLUSTER_FLAG_BGL) {
 		add_display_treestore_line(update, treestore, &iter,
 					   find_col_name(display_data_block,
@@ -290,11 +292,11 @@ static void _layout_block_record(GtkTreeView *treeview,
 					   node_use_string(
 						   block_ptr->bg_node_use));
 	}
-	convert_num_unit((float)block_ptr->node_cnt, tmp_cnt, sizeof(tmp_cnt),
+	convert_num_unit((float)block_ptr->cnode_cnt, tmp_cnt, sizeof(tmp_cnt),
 			 UNIT_NONE);
 	add_display_treestore_line(update, treestore, &iter,
 				   find_col_name(display_data_block,
-						 SORTID_NODES),
+						 SORTID_MP_STR),
 				   tmp_cnt);
 
 	add_display_treestore_line(update, treestore, &iter,
@@ -314,58 +316,50 @@ static void _layout_block_record(GtkTreeView *treeview,
 static void _update_block_record(sview_block_info_t *block_ptr,
 				 GtkTreeStore *treestore, GtkTreeIter *iter)
 {
-	char tmp_cnt[18];
+	char job_running[20], cnode_cnt[20];
+	char *tmp_char = NULL;
 
-	gtk_tree_store_set(treestore, iter, SORTID_COLOR,
-			   sview_colors[block_ptr->color_inx], -1);
-	gtk_tree_store_set(treestore, iter, SORTID_COLOR_INX,
-			   block_ptr->color_inx, -1);
-	gtk_tree_store_set(treestore, iter, SORTID_BLOCK,
-			   block_ptr->bg_block_name, -1);
-	gtk_tree_store_set(treestore, iter, SORTID_PARTITION,
-			   block_ptr->slurm_part_name, -1);
-	gtk_tree_store_set(treestore, iter, SORTID_STATE,
-			   bg_block_state_string(block_ptr->state), -1);
-	gtk_tree_store_set(treestore, iter, SORTID_USER,
-			   block_ptr->bg_user_name, -1);
 	if (block_ptr->job_running > NO_JOB_RUNNING)
-		snprintf(tmp_cnt, sizeof(tmp_cnt),
+		snprintf(job_running, sizeof(job_running),
 			 "%d", block_ptr->job_running);
 	else
-		snprintf(tmp_cnt, sizeof(tmp_cnt), "-");
+		snprintf(job_running, sizeof(job_running), "-");
 
-	gtk_tree_store_set(treestore, iter, SORTID_JOB, tmp_cnt, -1);
-
-	gtk_tree_store_set(treestore, iter, SORTID_CONN,
-			   conn_type_string(block_ptr->bg_conn_type), -1);
-	if (cluster_flags & CLUSTER_FLAG_BGL)
-		gtk_tree_store_set(treestore, iter, SORTID_USE,
-				   node_use_string(block_ptr->bg_node_use), -1);
-
-	convert_num_unit((float)block_ptr->node_cnt, tmp_cnt, sizeof(tmp_cnt),
+	convert_num_unit((float)block_ptr->cnode_cnt, cnode_cnt, sizeof(cnode_cnt),
 			 UNIT_NONE);
-	gtk_tree_store_set(treestore, iter, SORTID_NODES, tmp_cnt, -1);
 
-	gtk_tree_store_set(treestore, iter, SORTID_NODELIST,
-			   block_ptr->nodes, -1);
+	tmp_char = conn_type_string_full(block_ptr->bg_conn_type);
 
+	/* Combining these records provides a slight performance improvement */
 	gtk_tree_store_set(treestore, iter,
-			   SORTID_NODE_INX, block_ptr->bp_inx, -1);
-
-	if (cluster_flags & CLUSTER_FLAG_BGL)
-		gtk_tree_store_set(treestore, iter, SORTID_IMAGEBLRTS,
-				   block_ptr->imageblrts, -1);
-
-	gtk_tree_store_set(treestore, iter, SORTID_IMAGELINUX,
-			   block_ptr->imagelinux, -1);
-	gtk_tree_store_set(treestore, iter, SORTID_IMAGEMLOADER,
-			   block_ptr->imagemloader, -1);
-	gtk_tree_store_set(treestore, iter, SORTID_IMAGERAMDISK,
-			   block_ptr->imageramdisk, -1);
+			   SORTID_BLOCK,        block_ptr->bg_block_name,
+			   SORTID_COLOR,
+				sview_colors[block_ptr->color_inx],
+			   SORTID_COLOR_INX,    block_ptr->color_inx,
+			   SORTID_CONN,		tmp_char,
+			   SORTID_IMAGERAMDISK, block_ptr->imageramdisk,
+			   SORTID_IMAGELINUX,   block_ptr->imagelinux,
+			   SORTID_IMAGEMLOADER, block_ptr->imagemloader,
+			   SORTID_JOB,          job_running,
+			   SORTID_NODE_INX,     block_ptr->bp_inx,
+			   SORTID_MP_STR,        cnode_cnt,
+			   SORTID_NODELIST,     block_ptr->mp_str,
+			   SORTID_PARTITION,    block_ptr->slurm_part_name,
+			   SORTID_SMALL_BLOCK,  block_ptr->small_block,
+			   SORTID_STATE,
+				bg_block_state_string(block_ptr->state),
+			   SORTID_USER,         block_ptr->bg_user_name,
+			   SORTID_UPDATED,      1,
+			   -1);
+	xfree(tmp_char);
 
-	gtk_tree_store_set(treestore, iter, SORTID_SMALL_BLOCK,
-			   block_ptr->small_block, -1);
-	gtk_tree_store_set(treestore, iter, SORTID_UPDATED, 1, -1);
+	if (cluster_flags & CLUSTER_FLAG_BGL) {
+		gtk_tree_store_set(treestore, iter,
+				   SORTID_IMAGEBLRTS,   block_ptr->imageblrts,
+				   SORTID_USE,
+					node_use_string(block_ptr->bg_node_use),
+				   -1);
+	}
 
 	return;
 }
@@ -413,8 +407,8 @@ static void _update_info_block(List block_list,
 
 	itr = list_iterator_create(block_list);
 	while ((block_ptr = (sview_block_info_t*) list_next(itr))) {
-		if (block_ptr->node_cnt == 0)
-			block_ptr->node_cnt = block_ptr->size;
+		if (block_ptr->cnode_cnt == 0)
+			block_ptr->cnode_cnt = block_ptr->size;
 		if (!block_ptr->slurm_part_name)
 			block_ptr->slurm_part_name = xstrdup("no part");
 
@@ -462,8 +456,8 @@ static void _update_info_block(List block_list,
 static int _sview_block_sort_aval_dec(sview_block_info_t* rec_a,
 				      sview_block_info_t* rec_b)
 {
-	int size_a = rec_a->node_cnt;
-	int size_b = rec_b->node_cnt;
+	int size_a = rec_a->cnode_cnt;
+	int size_b = rec_b->cnode_cnt;
 
 	if ((rec_a->job_running == NO_JOB_RUNNING)
 	    && (rec_b->job_running != NO_JOB_RUNNING))
@@ -472,20 +466,19 @@ static int _sview_block_sort_aval_dec(sview_block_info_t* rec_a,
 		 && (rec_b->job_running == NO_JOB_RUNNING))
 		return -1;
 
-	if ((rec_a->state == RM_PARTITION_FREE)
-	    && (rec_b->state != RM_PARTITION_FREE))
+	if ((rec_a->state == BG_BLOCK_FREE) && (rec_b->state != BG_BLOCK_FREE))
 		return 1;
-	else if ((rec_a->state != RM_PARTITION_FREE)
-		 && (rec_b->state == RM_PARTITION_FREE))
-		return -1;
+	else if ((rec_a->state != BG_BLOCK_FREE) &&
+		 (rec_b->state == BG_BLOCK_FREE))
+			return -1;
 
 	if (size_a < size_b)
 		return -1;
 	else if (size_a > size_b)
 		return 1;
 
-	if (rec_a->nodes && rec_b->nodes) {
-		size_a = strcmp(rec_a->nodes, rec_b->nodes);
+	if (rec_a->mp_str && rec_b->mp_str) {
+		size_a = strcmp(rec_a->mp_str, rec_b->mp_str);
 		if (size_a < 0)
 			return -1;
 		else if (size_a > 0)
@@ -502,7 +495,7 @@ static List _create_block_list(partition_info_msg_t *part_info_ptr,
 	static List block_list = NULL;
 	partition_info_t part;
 	sview_block_info_t *block_ptr = NULL;
-	char tmp_nodes[50];
+	char tmp_mp_str[50];
 
 	if (!changed && block_list) {
 		return block_list;
@@ -528,22 +521,9 @@ static List _create_block_list(partition_info_msg_t *part_info_ptr,
 		if (!block_ptr->bg_block_name)
 			continue;
 
-#ifdef HAVE_BG_FILES
 		block_ptr->color_inx =
 			atoi(block_ptr->bg_block_name+7);
-#else
-		/* If on a non-bluegene system and looking at one
-		   check for strlen, if it is more than 7 go with
-		   that, or you could get everone being the same
-		   color.
-		*/
-		if (strlen(block_ptr->bg_block_name) >= 7)
-			block_ptr->color_inx =
-				atoi(block_ptr->bg_block_name+7);
-		else
-			block_ptr->color_inx =
-				atoi(block_ptr->bg_block_name+3);
-#endif
+
 		/* on some systems they make there own blocks named
 		   whatever they want, so doing this fixes what could
 		   be a negative number.
@@ -553,16 +533,16 @@ static List _create_block_list(partition_info_msg_t *part_info_ptr,
 
 		block_ptr->color_inx %= sview_colors_cnt;
 
-		block_ptr->nodes
-			= xstrdup(block_info_ptr->block_array[i].nodes);
-		if (block_info_ptr->block_array[i].ionodes) {
+		block_ptr->mp_str
+			= xstrdup(block_info_ptr->block_array[i].mp_str);
+		if (block_info_ptr->block_array[i].ionode_str) {
 			block_ptr->small_block = 1;
-			snprintf(tmp_nodes, sizeof(tmp_nodes),
+			snprintf(tmp_mp_str, sizeof(tmp_mp_str),
 				 "%s[%s]",
-				 block_ptr->nodes,
-				 block_info_ptr->block_array[i].ionodes);
-			xfree(block_ptr->nodes);
-			block_ptr->nodes = xstrdup(tmp_nodes);
+				 block_ptr->mp_str,
+				 block_info_ptr->block_array[i].ionode_str);
+			xfree(block_ptr->mp_str);
+			block_ptr->mp_str = xstrdup(tmp_mp_str);
 		}
 
 		block_ptr->bg_user_name
@@ -581,17 +561,18 @@ static List _create_block_list(partition_info_msg_t *part_info_ptr,
 
 		block_ptr->state
 			= block_info_ptr->block_array[i].state;
-		block_ptr->bg_conn_type
-			= block_info_ptr->block_array[i].conn_type;
+		memcpy(block_ptr->bg_conn_type,
+		       block_info_ptr->block_array[i].conn_type,
+		       sizeof(block_ptr->bg_conn_type));
 
 		if (cluster_flags & CLUSTER_FLAG_BGL)
 			block_ptr->bg_node_use
 				= block_info_ptr->block_array[i].node_use;
 
-		block_ptr->node_cnt
-			= block_info_ptr->block_array[i].node_cnt;
+		block_ptr->cnode_cnt
+			= block_info_ptr->block_array[i].cnode_cnt;
 		block_ptr->bp_inx
-			= block_info_ptr->block_array[i].bp_inx;
+			= block_info_ptr->block_array[i].mp_inx;
 		for(j = 0; j < part_info_ptr->record_count; j++) {
 			part = part_info_ptr->partition_array[j];
 			if (_in_slurm_partition(part.node_inx,
@@ -603,7 +584,7 @@ static List _create_block_list(partition_info_msg_t *part_info_ptr,
 		}
 		block_ptr->job_running =
 			block_info_ptr->block_array[i].job_running;
-		if (block_ptr->bg_conn_type >= SELECT_SMALL)
+		if (block_ptr->bg_conn_type[0] >= SELECT_SMALL)
 			block_ptr->size = 0;
 
 		list_append(block_list, block_ptr);
@@ -623,15 +604,12 @@ void _display_info_block(List block_list,
 	char *name = (char *)spec_info->search_info->gchar_data;
 	int j = 0, found = 0;
 	sview_block_info_t *block_ptr = NULL;
-	char *info = NULL;
 	int update = 0;
 	GtkTreeView *treeview = NULL;
 	ListIterator itr = NULL;
 
-	if (!spec_info->search_info->gchar_data) {
-		info = xstrdup("No pointer given!");
+	if (!spec_info->search_info->gchar_data)
 		goto finished;
-	}
 
 need_refresh:
 	if (!spec_info->display_widget) {
@@ -647,12 +625,12 @@ need_refresh:
 	itr = list_iterator_create(block_list);
 	while ((block_ptr = (sview_block_info_t*) list_next(itr))) {
 		if (!strcmp(block_ptr->bg_block_name, name)
-		    || !strcmp(block_ptr->nodes, name)) {
+		    || !strcmp(block_ptr->mp_str, name)) {
 			/* we want to over ride any subgrp in error
 			   state */
 			enum node_states state = NODE_STATE_UNKNOWN;
 
-			if (block_ptr->state == RM_PARTITION_ERROR)
+			if (block_ptr->state & BG_BLOCK_ERROR_FLAG)
 				state = NODE_STATE_ERROR;
 			else if (block_ptr->job_running > NO_JOB_RUNNING)
 				state = NODE_STATE_ALLOCATED;
@@ -666,7 +644,7 @@ need_refresh:
 					block_ptr->bp_inx[j],
 					block_ptr->bp_inx[j+1],
 					block_ptr->color_inx, true,
-					0);
+					state);
 				j += 2;
 			}
 			_layout_block_record(treeview, block_ptr, update);
@@ -798,34 +776,34 @@ extern int update_state_block(GtkDialog *dialog,
 	gtk_dialog_add_button(dialog,
 			      GTK_STOCK_CANCEL, GTK_RESPONSE_CANCEL);
 
-	if (!strcasecmp("Error", type)
-	    || !strcasecmp("Put block in error state", type)) {
+	if (!strcasecmp("Error", type) ||
+	    !strcasecmp("Put block in error state", type)) {
 		snprintf(tmp_char, sizeof(tmp_char),
 			 "Are you sure you want to put block %s "
 			 "in an error state?",
 			 blockid);
-		block_msg.state = RM_PARTITION_ERROR;
+		block_msg.state = BG_BLOCK_ERROR_FLAG;
 	} else if (!strcasecmp("Recreate block", type)) {
 		snprintf(tmp_char, sizeof(tmp_char),
 			 "Are you sure you want to recreate block %s?",
 			 blockid);
-		block_msg.state = RM_PARTITION_CONFIGURING;
+		block_msg.state = BG_BLOCK_BOOTING;
 	} else if (!strcasecmp("Remove block", type)) {
 		snprintf(tmp_char, sizeof(tmp_char),
 			 "Are you sure you want to remove block %s?",
 			 blockid);
-		block_msg.state = RM_PARTITION_NAV;
+		block_msg.state = BG_BLOCK_NAV;
 	} else if (!strcasecmp("Resume block", type)) {
 		snprintf(tmp_char, sizeof(tmp_char),
 			 "Are you sure you want to resume block %s?",
 			 blockid);
-		block_msg.state = RM_PARTITION_DEALLOCATING;
+		block_msg.state = BG_BLOCK_TERM;
 	} else {
 		snprintf(tmp_char, sizeof(tmp_char),
 			 "Are you sure you want to put block %s "
 			 "in a free state?",
 			 blockid);
-		block_msg.state = RM_PARTITION_FREE;
+		block_msg.state = BG_BLOCK_FREE;
 	}
 
 	label = gtk_label_new(tmp_char);
@@ -1007,8 +985,19 @@ extern void get_info_block(GtkTable *table, display_data_t *display_data)
 	}
 
 display_it:
-
-	if (!part_info_ptr || !block_ptr)
+	if (!block_ptr) {
+		view = ERROR_VIEW;
+		if (display_widget)
+			gtk_widget_destroy(display_widget);
+		label = gtk_label_new("No blocks on non-Bluegene systems");
+		gtk_table_attach_defaults(GTK_TABLE(table),
+					  label,
+					  0, 1, 0, 1);
+		gtk_widget_show(label);
+		display_widget = gtk_widget_ref(label);
+		goto end_it;
+	}
+	if (!part_info_ptr)
 		goto reset_curs;
 
 	block_list = _create_block_list(part_info_ptr, block_ptr,
@@ -1022,7 +1011,7 @@ display_it:
 		    gtk_tree_view_get_selection(
 			    GTK_TREE_VIEW(display_widget)))) {
 		GtkTreeViewColumn *focus_column = NULL;
-		/* highlight the correct nodes from the last selection */
+		/* highlight the correct mp_str from the last selection */
 		gtk_tree_view_get_cursor(GTK_TREE_VIEW(display_widget),
 					 &path, &focus_column);
 	}
@@ -1048,11 +1037,6 @@ display_it:
 			       SORTID_NODE_INX, SORTID_COLOR_INX,
 			       grid_button_list);
 
-	if (working_sview_config.grid_speedup) {
-		gtk_widget_set_sensitive(GTK_WIDGET(main_grid_table), 0);
-		gtk_widget_set_sensitive(GTK_WIDGET(main_grid_table), 1);
-	}
-
 	if (view == ERROR_VIEW && display_widget) {
 		gtk_widget_destroy(display_widget);
 		display_widget = NULL;
@@ -1218,12 +1202,12 @@ display_it:
 			break;
 		case RESV_PAGE:
 		case NODE_PAGE:
-			if (!block_ptr->nodes)
+			if (!block_ptr->mp_str)
 				continue;
 			if (!(hostset = hostset_create(
 				      search_info->gchar_data)))
 				continue;
-			name = block_ptr->nodes;
+			name = block_ptr->mp_str;
 			if (block_ptr->small_block) {
 				int j=0;
 				/* strip off the ionodes part */
@@ -1255,7 +1239,7 @@ display_it:
 			case SEARCH_BLOCK_SIZE:
 				if (search_info->int_data == NO_VAL)
 					continue;
-				if (block_ptr->node_cnt
+				if (block_ptr->cnode_cnt
 				    != search_info->int_data)
 					continue;
 				break;
@@ -1282,7 +1266,7 @@ display_it:
 		}
 		list_push(send_block_list, block_ptr);
 
-		if (block_ptr->state == RM_PARTITION_ERROR)
+		if (block_ptr->state & BG_BLOCK_ERROR_FLAG)
 			state = NODE_STATE_ERROR;
 		else if (block_ptr->job_running > NO_JOB_RUNNING)
 			state = NODE_STATE_ALLOCATED;
@@ -1506,7 +1490,7 @@ static void _admin_block(GtkTreeModel *model, GtkTreeIter *iter, char *type)
 	return;
 }
 
-extern void cluster_change_block()
+extern void cluster_change_block(void)
 {
 	display_data_t *display_data = display_data_block;
 	while (display_data++) {
diff --git a/src/sview/common.c b/src/sview/common.c
index 0cad36f59..513967868 100644
--- a/src/sview/common.c
+++ b/src/sview/common.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -57,7 +57,7 @@ static int _find_node_inx (char *name)
 	int i;
 
 	if ((name == NULL) || (name[0] == '\0')) {
-		info("find_node_record passed NULL name");
+		info("_find_node_inx passed NULL name");
 		return -1;
 	}
 
@@ -74,7 +74,7 @@ static int _find_node_inx (char *name)
 
 static void _display_topology(void)
 {
-	int i, match, match_cnt = 0;
+	int i, match = 0, match_cnt = 0;
 	hostset_t hs;
 	int one_liner = 1;
 
@@ -353,37 +353,50 @@ cleanup:
 	return ret;
 }
 
+/* Translate a three-digit alpha-numeric value into it's
+ * base 36 equivalent number */
+static int _xlate_bp_coord(const char *name)
+{
+	int i, rc = 0;
+
+	for (i=0; i<cluster_dims; i++) {
+		rc *= 36;
+		rc += select_char2coord(name[i]);
+	}
+	return rc;
+}
+
 /* Make a BlueGene node name into a numeric representation of
  * its location.
- * Value is low_coordinate * 1,000,000 +
- *          high_coordinate * 1,000 + I/O node (999 if none)
- * (e.g. bg123[4] -> 123,123,004, bg[234x235] -> 234,235,999)
+ * Value is low_node_coordinate * 1,000 + I/O node (999 if none)
+ * with use of base 36 for the node coordinate:
+ * (e.g. bg123[4]    ->  1,371,004
+ *       bg[234x235] ->  2,704,999
+ *       bglZZZ      -> 46,655,999
  */
 static int _bp_coordinate(const char *name)
 {
-	int i, io_val = 999, low_val = -1, high_val = -1;
+	int i, io_val = 999, low_val = -1;
 
 	for (i=0; name[i]; i++) {
 		if (name[i] == '[') {
 			i++;
-			if (low_val < 0) {
-				char *end_ptr;
-				low_val = strtol(name+i, &end_ptr, 10);
-				if ((end_ptr[0] != '\0') &&
-				    (isdigit(end_ptr[1])))
-					high_val = atoi(end_ptr + 1);
-				else
-					high_val = low_val;
-			} else
+			if (low_val < 0)
+				low_val = _xlate_bp_coord(name+i);
+			else
 				io_val = atoi(name+i);
 			break;
-		} else if ((low_val < 0) && (isdigit(name[i])))
-			low_val = high_val = atoi(name+i);
+		} else if ((low_val < 0) &&
+			   ((name[i] >= '0' && (name[i] <= '9')) ||
+			    (name[i] >= 'A' && (name[i] <= 'Z')))) {
+			low_val = _xlate_bp_coord(name+i);
+			i += 2;
+		}
 	}
 
 	if (low_val < 0)
 		return low_val;
-	return ((low_val * 1000000) + (high_val * 1000) + io_val);
+	return ((low_val * 1000) + io_val);
 }
 
 static int _sort_iter_compare_func_bp_list(GtkTreeModel *model,
@@ -595,6 +608,9 @@ static void _selected_page(GtkMenuItem *menuitem, display_data_t *display_data)
 	case RESV_PAGE:
 		each.pfunc = &popup_all_resv;
 		break;
+	case FRONT_END_PAGE:
+		each.pfunc = &popup_all_front_end;
+		break;
 	case ADMIN_PAGE:
 		switch(display_data->id) {
 		case JOB_PAGE:
@@ -611,6 +627,12 @@ static void _selected_page(GtkMenuItem *menuitem, display_data_t *display_data)
 			select_admin_block(treedata->model, &treedata->iter,
 					   display_data, treedata->treeview);
 			break;
+		case FRONT_END_PAGE:
+			select_admin_front_end(treedata->model,
+					       &treedata->iter,
+					       display_data,
+					       treedata->treeview);
+			break;
 		case RESV_PAGE:
 			select_admin_resv(treedata->model, &treedata->iter,
 					  display_data, treedata->treeview);
@@ -701,7 +723,7 @@ extern int build_nodes_bitmap(char *node_names, bitstr_t **bitmap)
 	int node_inx = -1;
 
 	if (TOPO_DEBUG)
-		g_print("..............._node_names2bitmap............%s\n",
+		g_print("...............build_nodes_bitmap............%s\n",
 			node_names);
 	my_bitmap = (bitstr_t *) bit_alloc(g_node_info_ptr->record_count);
 	if (!my_bitmap) {
@@ -710,12 +732,12 @@ extern int build_nodes_bitmap(char *node_names, bitstr_t **bitmap)
 	*bitmap = my_bitmap;
 
 	if (!node_names) {
-		error("_node_name2bitmap: node_names is NULL");
+		error("build_nodes_bitmap: node_names is NULL");
 		return EINVAL;
 	}
 
 	if (!(host_list = hostlist_create(node_names))) {
-		error("_node_name2bitmap: hostlist_create(%s) error",
+		error("build_nodes_bitmap: hostlist_create(%s) error",
 		      node_names);
 		return EINVAL;
 	}
@@ -741,6 +763,7 @@ extern int get_topo_conf(void)
 	int i;
 	switch_record_bitmaps_t sw_nodes_bitmaps;
 	switch_record_bitmaps_t *sw_nodes_bitmaps_ptr;
+
 	if (TOPO_DEBUG)
 		g_print("get_topo_conf\n");
 
@@ -767,28 +790,20 @@ extern int get_topo_conf(void)
 	if (TOPO_DEBUG)
 		g_print("_display_topology,  record_count = %d\n",
 			g_topo_info_msg_ptr->record_count);
-	for (i=0; i < g_topo_info_msg_ptr->record_count;
+	for (i = 0; i < g_topo_info_msg_ptr->record_count;
 	     i++, sw_nodes_bitmaps_ptr++) {
-		if (g_topo_info_msg_ptr->topo_array[i].nodes) {
-			if (TOPO_DEBUG)
-				g_print("ptr->nodes =  %s \n",
-					g_topo_info_msg_ptr->
-					topo_array[i].nodes);
-			if (build_nodes_bitmap(
-				    g_topo_info_msg_ptr->topo_array[i].nodes,
-				    &sw_nodes_bitmaps_ptr->node_bitmap)) {
-				fatal("Invalid node name (%s) in switch "
-				      "config (%s)",
-				      g_topo_info_msg_ptr->topo_array[i].nodes,
-				      g_topo_info_msg_ptr->topo_array[i].name);
-				if (TOPO_DEBUG)
-					g_print("Invalid node name (%s) "
-						"in switch  %s \n",
-						g_topo_info_msg_ptr->
-						topo_array[i].nodes,
-						g_topo_info_msg_ptr->
-						topo_array[i].name);
-			}
+		if (!g_topo_info_msg_ptr->topo_array[i].nodes)
+			continue;
+		if (TOPO_DEBUG)  {
+			g_print("ptr->nodes =  %s \n",
+				g_topo_info_msg_ptr->topo_array[i].nodes);
+		}
+		if (build_nodes_bitmap(
+			    g_topo_info_msg_ptr->topo_array[i].nodes,
+			    &sw_nodes_bitmaps_ptr->node_bitmap)) {
+			g_print("Invalid node name (%s) in switch %s\n",
+				g_topo_info_msg_ptr->topo_array[i].nodes,
+				g_topo_info_msg_ptr->topo_array[i].name);
 		}
 	}
 
@@ -937,6 +952,29 @@ extern void set_page_opts(int page, display_data_t *display_data,
 	itr = list_iterator_create(page_opts->col_list);
 	while ((col_name = list_next(itr))) {
 		replus(col_name);
+		if (strstr(col_name, "list")) {
+			char *orig_ptr = col_name;
+			if (cluster_flags & CLUSTER_FLAG_BG) {
+				xstrsubstitute(col_name, "node", "bp ");
+				xstrsubstitute(col_name, "midplane", "bp ");
+			} else {
+				xstrsubstitute(col_name, "bp ", "node");
+				xstrsubstitute(col_name, "midplane", "node");
+			}
+
+			/* Make sure we have the correct pointer here
+			   since xstrsubstitute() could of changed it
+			   on us.
+			*/
+			if (col_name != orig_ptr) {
+				list_insert(itr, col_name);
+				/* Don't use list_delete_item().
+				   xstrsubstitute() has already
+				   deleted it for us.
+				*/
+				list_remove(itr);
+			}
+		}
 		while (display_data++) {
 			if (display_data->id == -1)
 				break;
@@ -998,7 +1036,7 @@ extern void make_options_menu(GtkTreeView *tree_view, GtkTreePath *path,
 	}
 }
 
-extern GtkScrolledWindow *create_scrolled_window()
+extern GtkScrolledWindow *create_scrolled_window(void)
 {
 	GtkScrolledWindow *scrolled_window = NULL;
 	GtkWidget *table = NULL;
@@ -1019,7 +1057,7 @@ extern GtkScrolledWindow *create_scrolled_window()
 	return scrolled_window;
 }
 
-extern GtkWidget *create_entry()
+extern GtkWidget *create_entry(void)
 {
 	GtkWidget *entry = gtk_entry_new();
 
@@ -1238,9 +1276,13 @@ extern GtkTreeStore *create_treestore(GtkTreeView *tree_view,
 				(int)display_data[i].type);
 		}
 	}
-	gtk_tree_sortable_set_sort_column_id(GTK_TREE_SORTABLE(treestore),
-					     sort_column,
-					     GTK_SORT_ASCENDING);
+
+	if (sort_column >= 0) {
+		gtk_tree_sortable_set_sort_column_id(
+					GTK_TREE_SORTABLE(treestore),
+					sort_column,
+					GTK_SORT_ASCENDING);
+	}
 
 	g_object_unref(treestore);
 
@@ -1351,7 +1393,6 @@ extern gboolean key_pressed(GtkTreeView *tree_view,
 {
 	GtkTreePath *path = NULL;
 	GtkTreeViewColumn *column;
-	GtkTreeSelection *selection = NULL;
 
 	control_key_in_effect = FALSE;
 	enter_key_in_effect = FALSE;
@@ -1360,22 +1401,23 @@ extern gboolean key_pressed(GtkTreeView *tree_view,
 	    (event->keyval == GDK_Control_R))
 		control_key_in_effect = TRUE;
 	else if (event->keyval == GDK_Return) {
+		each_t each;
+		GtkTreeSelection *selection = NULL;
+
 		gtk_tree_view_get_cursor(GTK_TREE_VIEW(tree_view),
 					 &path, &column);
 		selection = gtk_tree_view_get_selection(tree_view);
-		each_t each;
 		memset(&each, 0, sizeof(each_t));
 		each.tree_view = tree_view;
 		each.display_data = signal_params->display_data;
-		global_row_count = gtk_tree_selection_count_selected_rows(
-			gtk_tree_view_get_selection(tree_view));
+		global_row_count =
+			gtk_tree_selection_count_selected_rows(selection);
 		popup_pos.x = 10;
 		popup_pos.x = 10;
 		popup_pos.cntr = 1;
 		popup_pos.slider = 0;
 		gtk_tree_selection_selected_foreach(
-			gtk_tree_view_get_selection(tree_view),
-			_foreach_full_info, &each);
+			selection, _foreach_full_info, &each);
 		/*prevent row_activation from
 		 * performing a redundant 'full info'*/
 		enter_key_in_effect = TRUE;
@@ -1771,7 +1813,7 @@ extern gboolean delete_popup(GtkWidget *widget, GtkWidget *event, char *title)
 	return FALSE;
 }
 
-extern gboolean delete_popups()
+extern gboolean delete_popups(void)
 {
 	ListIterator itr = list_iterator_create(popup_list);
 	popup_info_t *popup_win = NULL;
@@ -1808,6 +1850,9 @@ extern void *popup_thr(popup_info_t *popup_win)
 	case RESV_PAGE:
 		specifc_info = specific_info_resv;
 		break;
+	case FRONT_END_PAGE:
+		specifc_info = specific_info_front_end;
+		break;
 	case SUBMIT_PAGE:
 	default:
 		g_print("thread got unknown type %d\n", popup_win->type);
@@ -1900,7 +1945,7 @@ extern char *str_tolower(char *upper_str)
 	return lower_str;
 }
 
-extern char *get_reason()
+extern char *get_reason(void)
 {
 	char *reason_str = NULL;
 	int len = 0;
@@ -2007,9 +2052,11 @@ extern void display_admin_edit(GtkTable *table, void *type_msg, int *row,
 		char *temp_char = NULL;
 		/* other edittable items that are unknown */
 		entry = create_entry();
-		gtk_tree_model_get(model, iter,
-				   display_data->id,
-				   &temp_char, -1);
+		if (model) {
+			gtk_tree_model_get(model, iter,
+					   display_data->id,
+					   &temp_char, -1);
+		}
 		gtk_entry_set_max_length(GTK_ENTRY(entry),
 					 (DEFAULT_ENTRY_LENGTH +
 					  display_data->id));
@@ -2031,7 +2078,7 @@ extern void display_admin_edit(GtkTable *table, void *type_msg, int *row,
 		return;
 	label = gtk_label_new(display_data->name);
 	/* left justify */
-	gtk_misc_set_alignment(GTK_MISC(label),0.0,0.5);
+	gtk_misc_set_alignment(GTK_MISC(label), 0.0, 0.5);
 	gtk_table_attach(table, label, 0, 1, *row, (*row)+1,
 			 GTK_FILL | GTK_EXPAND, GTK_SHRINK,
 			 0, 0);
@@ -2100,7 +2147,7 @@ extern void add_display_treestore_line(int update,
 	}
 found:
 	gtk_tree_store_set(treestore, iter,
-			   DISPLAY_NAME, name,
+			   DISPLAY_NAME,  name,
 			   DISPLAY_VALUE, value,
 			   -1);
 
@@ -2148,9 +2195,9 @@ extern void add_display_treestore_line_with_font(
 	}
 found:
 	gtk_tree_store_set(treestore, iter,
-			   DISPLAY_NAME, name,
+			   DISPLAY_NAME,  name,
 			   DISPLAY_VALUE, value,
-			   DISPLAY_FONT, font,
+			   DISPLAY_FONT,  font,
 			   -1);
 
 	return;
@@ -2159,24 +2206,7 @@ found:
 extern void sview_widget_modify_bg(GtkWidget *widget, GtkStateType state,
 				   const GdkColor color)
 {
-	if (working_sview_config.grid_speedup) {
-		/* For some reason, QT Themes have a very slow call to for
-		 * gtk_widget_modify_bg as of 7-6-09.
-		 * Here we only take around 40 microsecs where
-		 * gtk_widget_modify_bg takes around 2500.  This isn't
-		 * that big of a deal on most systems, but if you have
-		 * like 10000 nodes this makes an outrageous
-		 * difference.  You must follow this up by doing a
-		 * gtk_widget_set_sensitive 0, and then 1 on the
-		 * parent container to make the color stick.
-		 */
-		GtkRcStyle *rc_style = gtk_widget_get_modifier_style (widget);
-		widget->style->bg[state] = color;
-		rc_style->bg[state] = color;
-		rc_style->color_flags[state] |= GTK_RC_BG;
-		gtk_widget_reset_rc_styles (widget);
-	} else
-		gtk_widget_modify_bg(widget, state, &color);
+	gtk_widget_modify_bg(widget, state, &color);
 }
 
 extern void sview_radio_action_set_current_value(GtkRadioAction *action,
@@ -2232,6 +2262,8 @@ extern char *page_to_str(int page)
 		return "Block";
 	case RESV_PAGE:
 		return "Reservation";
+	case FRONT_END_PAGE:
+		return "Frontend";
 	default:
 		return NULL;
 	}
@@ -2258,13 +2290,15 @@ extern char *tab_pos_to_str(int pos)
 extern char *visible_to_str(sview_config_t *sview_config)
 {
 	char *ret = NULL;
-	int i = 0;
-	for(i=0; i<PAGE_CNT; i++)
-		if (sview_config->page_visible[i]) {
+	int i;
+
+	for (i = 0; i < PAGE_CNT; i++) {
+		if (sview_config->page_visible[i] && (i != TAB_PAGE)) {
 			if (ret)
 				xstrcat(ret, ",");
 			xstrcat(ret, page_to_str(i));
 		}
+	}
 
 	return ret;
 }
diff --git a/src/sview/config_info.c b/src/sview/config_info.c
index 68b3f382e..df6302f46 100644
--- a/src/sview/config_info.c
+++ b/src/sview/config_info.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/sview/defaults.c b/src/sview/defaults.c
index 576068ae3..afd5cd194 100644
--- a/src/sview/defaults.c
+++ b/src/sview/defaults.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -41,12 +41,12 @@
 #include "sview.h"
 #include "src/common/parse_config.h"
 #include "src/common/slurm_strcasestr.h"
-#include "src/common/parse_time.h"
 
 /* These need to be in alpha order (except POS and CNT) */
 enum {
 	SORTID_POS = POS_LOC,
 	SORTID_ADMIN,
+	SORTID_BUTTON_SIZE,
 	SORTID_DEFAULT_PAGE,
 	SORTID_GRID_HORI,
 	SORTID_GRID_VERT,
@@ -71,6 +71,8 @@ static display_data_t display_data_defaults[] = {
 	{G_TYPE_INT, SORTID_POS, NULL, FALSE, EDIT_NONE, NULL},
 	{G_TYPE_STRING, SORTID_ADMIN, "Start in Admin Mode",
 	 TRUE, EDIT_MODEL, NULL, create_model_defaults, NULL},
+	{G_TYPE_STRING, SORTID_BUTTON_SIZE, "Node Button Size in Pixels",
+	 TRUE, EDIT_TEXTBOX, NULL, create_model_defaults, NULL},
 	{G_TYPE_STRING, SORTID_DEFAULT_PAGE, "Default Page",
 	 TRUE, EDIT_MODEL, NULL, create_model_defaults, NULL},
 	{G_TYPE_STRING, SORTID_GRID_HORI, "Grid: Nodes before Horizontal break",
@@ -176,6 +178,14 @@ static const char *_set_sview_config(sview_config_t *sview_config,
 		else
 			sview_config->admin_mode = 0;
 		break;
+	case SORTID_BUTTON_SIZE:
+		type = "Button Size";
+		temp_int = strtol(new_text, (char **)NULL, 10);
+		if ((temp_int <= 0) && (temp_int != INFINITE))
+			goto return_error;
+		sview_config->button_size = temp_int;
+		sview_config->gap_size = MIN(temp_int/2, 2);
+		break;
 	case SORTID_DEFAULT_PAGE:
 		if (!strcasecmp(new_text, "job"))
 			sview_config->default_page = JOB_PAGE;
@@ -187,6 +197,8 @@ static const char *_set_sview_config(sview_config_t *sview_config,
 			sview_config->default_page = BLOCK_PAGE;
 		else if (!strcasecmp(new_text, "node"))
 			sview_config->default_page = NODE_PAGE;
+		else if (!strcasecmp(new_text, "frontend"))
+			sview_config->default_page = FRONT_END_PAGE;
 		else
 			sview_config->default_page = JOB_PAGE;
 		break;
@@ -213,7 +225,6 @@ static const char *_set_sview_config(sview_config_t *sview_config,
 	case SORTID_REFRESH_DELAY:
 		type = "Refresh Delay";
 		temp_int = strtol(new_text, (char **)NULL, 10);
-		//temp_int = time_str2secs((char *)new_text);
 		if ((temp_int <= 0) && (temp_int != INFINITE))
 			goto return_error;
 		sview_config->refresh_delay = temp_int;
@@ -391,6 +402,10 @@ static void _local_display_admin_edit(GtkTable *table,
 			temp_char = xstrdup_printf("%u",
 						   sview_config->grid_x_width);
 			break;
+		case SORTID_BUTTON_SIZE:
+			temp_char = xstrdup_printf("%u",
+						   sview_config->button_size);
+			break;
 		case SORTID_REFRESH_DELAY:
 			temp_char = xstrdup_printf("%u",
 						   sview_config->refresh_delay);
@@ -495,21 +510,24 @@ static void _init_sview_conf()
 {
 	int i;
 
+	default_sview_config.main_width = 1000;
+	default_sview_config.main_height = 500;
+	default_sview_config.fi_popup_width = 800;
+	default_sview_config.fi_popup_height = 500;
+	default_sview_config.button_size = 10;
+	default_sview_config.gap_size = 5;
 	default_sview_config.refresh_delay = 5;
 	default_sview_config.grid_x_width = 0;
 	default_sview_config.grid_hori = 10;
 	default_sview_config.grid_vert = 10;
 	default_sview_config.show_hidden = 0;
 	default_sview_config.admin_mode = FALSE;
-	default_sview_config.grid_speedup = 0;
 	default_sview_config.grid_topological = FALSE;
 	default_sview_config.ruled_treeview = FALSE;
 	default_sview_config.show_grid = TRUE;
 	default_sview_config.default_page = JOB_PAGE;
 	default_sview_config.tab_pos = GTK_POS_TOP;
 
-	if (getenv("SVIEW_GRID_SPEEDUP"))
-		default_sview_config.grid_speedup = 1;
 	for(i=0; i<PAGE_CNT; i++) {
 		memset(&default_sview_config.page_opts[i],
 		       0, sizeof(page_opts_t));
@@ -521,11 +539,12 @@ static void _init_sview_conf()
 	}
 }
 
-extern int load_defaults()
+extern int load_defaults(void)
 {
 	s_p_hashtbl_t *hashtbl = NULL;
 	s_p_options_t sview_conf_options[] = {
 		{"AdminMode", S_P_BOOLEAN},
+		{"ButtonSize", S_P_UINT16},
 		{"DefaultPage", S_P_STRING},
 		{"ExcludedPartitions", S_P_STRING},	/* Vestigial */
 		{"FullInfoPopupWidth", S_P_UINT32},
@@ -542,6 +561,7 @@ extern int load_defaults()
 		{"PageOptsNode", S_P_STRING},
 		{"PageOptsPartition", S_P_STRING},
 		{"PageOptsReservation", S_P_STRING},
+		{"PageOptsFrontend", S_P_STRING},
 		{"RefreshDelay", S_P_UINT16},
 		{"RuledTables", S_P_BOOLEAN},
 		{"SavePageSettings", S_P_BOOLEAN},
@@ -577,10 +597,15 @@ extern int load_defaults()
 
 	hashtbl = s_p_hashtbl_create(sview_conf_options);
 
-	if (s_p_parse_file(hashtbl, &hash_val, pathname) == SLURM_ERROR)
+	if (s_p_parse_file(hashtbl, &hash_val, pathname, true) == SLURM_ERROR)
 		error("something wrong with opening/reading conf file");
 
 	s_p_get_boolean(&default_sview_config.admin_mode, "AdminMode", hashtbl);
+	if (s_p_get_uint16(&default_sview_config.button_size, "ButtonSize",
+			   hashtbl)) {
+		default_sview_config.gap_size =
+			MAX(default_sview_config.button_size/2, 2);
+	}
 	if (s_p_get_string(&tmp_str, "DefaultPage", hashtbl)) {
 		if (slurm_strcasestr(tmp_str, "job"))
 			default_sview_config.default_page = JOB_PAGE;
@@ -592,12 +617,12 @@ extern int load_defaults()
 			default_sview_config.default_page = BLOCK_PAGE;
 		else if (slurm_strcasestr(tmp_str, "node"))
 			default_sview_config.default_page = NODE_PAGE;
+		else if (slurm_strcasestr(tmp_str, "frontend"))
+			default_sview_config.default_page = FRONT_END_PAGE;
 		xfree(tmp_str);
 	}
 	s_p_get_uint32(&default_sview_config.grid_hori,
 		       "GridHorizontal", hashtbl);
-	s_p_get_boolean(&default_sview_config.grid_speedup,
-			"GridSpeedup", hashtbl);
 	s_p_get_boolean(&default_sview_config.grid_topological,
 			"GridTopo", hashtbl);
 	if (default_sview_config.grid_topological == 0)
@@ -624,13 +649,6 @@ extern int load_defaults()
 		       "FullInfoPopupWidth", hashtbl);
 	s_p_get_uint32(&default_sview_config.fi_popup_height,
 		       "FullInfoPopupHeight", hashtbl);
-
-	if (default_sview_config.main_width == 0) {
-		default_sview_config.main_width=1000;
-		default_sview_config.main_height=450;
-		default_sview_config.fi_popup_width=600;
-		default_sview_config.fi_popup_height=400;
-	}
 	if (s_p_get_string(&tmp_str, "TabPosition", hashtbl)) {
 		if (slurm_strcasestr(tmp_str, "top"))
 			default_sview_config.tab_pos = GTK_POS_TOP;
@@ -644,7 +662,7 @@ extern int load_defaults()
 	}
 	if (s_p_get_string(&tmp_str, "VisiblePages", hashtbl)) {
 		int i = 0;
-		for(i=0; i<PAGE_CNT; i++)
+		for (i=0; i<PAGE_CNT; i++)
 			default_sview_config.page_visible[i] = FALSE;
 
 		if (slurm_strcasestr(tmp_str, "job"))
@@ -657,6 +675,8 @@ extern int load_defaults()
 			default_sview_config.page_visible[BLOCK_PAGE] = 1;
 		if (slurm_strcasestr(tmp_str, "node"))
 			default_sview_config.page_visible[NODE_PAGE] = 1;
+		if (slurm_strcasestr(tmp_str, "frontend"))
+			default_sview_config.page_visible[FRONT_END_PAGE] = 1;
 		xfree(tmp_str);
 	}
 
@@ -689,7 +709,7 @@ end_it:
 	       sizeof(sview_config_t));
 
 	xfree(pathname);
-	return SLURM_SUCCESS;
+	return rc;
 }
 
 extern int save_defaults(bool final_save)
@@ -740,13 +760,6 @@ extern int save_defaults(bool final_save)
 				 default_sview_config.grid_hori);
 	rc = _write_to_file(fd, tmp_str);
 	xfree(tmp_str);
-	if (rc != SLURM_SUCCESS)
-		goto end_it;
-	tmp_str = xstrdup_printf("GridSpeedup=%s\n",
-				 default_sview_config.grid_speedup ?
-				 "YES" : "NO");
-	rc = _write_to_file(fd, tmp_str);
-	xfree(tmp_str);
 	if (rc != SLURM_SUCCESS)
 		goto end_it;
 	tmp_str = xstrdup_printf("GridTopo=%s\n",
@@ -766,6 +779,12 @@ extern int save_defaults(bool final_save)
 				 default_sview_config.grid_x_width);
 	rc = _write_to_file(fd, tmp_str);
 	xfree(tmp_str);
+	if (rc != SLURM_SUCCESS)
+		goto end_it;
+	tmp_str = xstrdup_printf("ButtonSize=%u\n",
+				 default_sview_config.button_size);
+	rc = _write_to_file(fd, tmp_str);
+	xfree(tmp_str);
 	if (rc != SLURM_SUCCESS)
 		goto end_it;
 	tmp_str = xstrdup_printf("RefreshDelay=%u\n",
@@ -840,9 +859,6 @@ extern int save_defaults(bool final_save)
 	if (rc != SLURM_SUCCESS)
 		goto end_it;
 
-	if (!final_save)
-		goto end_it;
-
 	/* save all current page options */
 	for (i=0; i<PAGE_CNT; i++) {
 		page_opts_t *page_opts =
@@ -878,11 +894,6 @@ extern int save_defaults(bool final_save)
 			list_iterator_destroy(itr);
 		}
 
-		if (page_opts->col_list) {
-			list_destroy(page_opts->col_list);
-			page_opts->col_list = NULL;
-		}
-
 		if (tmp_str2) {
 			replspace(tmp_str2);
 			tmp_str = xstrdup_printf("PageOpts%s=%s\n",
@@ -903,11 +914,14 @@ end_it:
 	if (rc)
 		(void) unlink(new_file);
 	else {			/* file shuffle */
-		int ign;	/* avoid warning */
 		(void) unlink(old_file);
-		ign =  link(reg_file, old_file);
+		if (link(reg_file, old_file))
+			debug4("unable to create link for %s -> %s: %m",
+			       reg_file, old_file);
 		(void) unlink(reg_file);
-		ign =  link(new_file, reg_file);
+		if (link(new_file, reg_file))
+			debug4("unable to create link for %s -> %s: %m",
+			       new_file, reg_file);
 		(void) unlink(new_file);
 	}
 
@@ -968,6 +982,11 @@ extern GtkListStore *create_model_defaults(int type)
 				   0, "node",
 				   1, type,
 				   -1);
+		gtk_list_store_append(model, &iter);
+		gtk_list_store_set(model, &iter,
+				   0, "frontend",
+				   1, type,
+				   -1);
 		break;
 	case SORTID_TAB_POS:
 		model = gtk_list_store_new(2, G_TYPE_STRING, G_TYPE_INT);
@@ -998,7 +1017,7 @@ extern GtkListStore *create_model_defaults(int type)
 	return model;
 }
 
-extern int configure_defaults()
+extern int configure_defaults(void)
 {
 	GtkScrolledWindow *window = create_scrolled_window();
 	GtkWidget *popup = gtk_dialog_new_with_buttons(
@@ -1101,6 +1120,7 @@ extern int configure_defaults()
 				cluster_change_part();
 				cluster_change_job();
 				cluster_change_node();
+				cluster_change_front_end();
 			} else if (tmp_config.grid_topological !=
 				   working_sview_config.grid_topological) {
 				apply_hidden_change = FALSE;
diff --git a/src/sview/front_end_info.c b/src/sview/front_end_info.c
new file mode 100644
index 000000000..73c12c1c6
--- /dev/null
+++ b/src/sview/front_end_info.c
@@ -0,0 +1,1087 @@
+/*****************************************************************************\
+ *  front_end_info.c - Functions related to front end node display
+ *  mode of sview.
+ *****************************************************************************
+ *  Copyright (C) 2004-2007 The Regents of the University of California.
+ *  Copyright (C) 2008-2011 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Morris Jette <jette@llnl.gov>
+ *  CODE-OCEC-09-009. All rights reserved.
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+\*****************************************************************************/
+
+#include "src/common/uid.h"
+#include "src/sview/sview.h"
+#include "src/common/parse_time.h"
+
+#define _DEBUG 0
+
+/* Collection of data for printing reports. Like data is combined here */
+typedef struct {
+	int color_inx;
+	front_end_info_t *front_end_ptr;
+	char *boot_time;
+	int node_inx[3];
+	char *reason;
+	char *slurmd_start_time;
+	char *state;
+} sview_front_end_info_t;
+
+typedef struct {
+	char *node_list;
+} front_end_user_data_t;
+
+enum {
+	EDIT_REMOVE = 1,
+	EDIT_EDIT
+};
+
+/* These need to be in alpha order (except POS and CNT) */
+enum {
+	SORTID_POS = POS_LOC,
+	SORTID_BOOT_TIME,
+	SORTID_COLOR,
+	SORTID_COLOR_INX,
+	SORTID_NAME,
+	SORTID_NODE_INX,
+	SORTID_REASON,
+	SORTID_SLURMD_START_TIME,
+	SORTID_STATE,
+	SORTID_CNT
+};
+
+/* extra field here is for choosing the type of edit that will
+ * take place.  If you choose EDIT_MODEL (means only display a set of
+ * known options) create it in function create_model_*.
+ */
+
+/* These are the settings applied for the user
+ * on the first startup after a fresh slurm install.
+ * (Should probably be a const.) */
+static char *_initial_page_opts = "Name,State";
+
+static display_data_t display_data_front_end[] = {
+	{G_TYPE_INT, SORTID_POS, NULL, FALSE, EDIT_NONE,
+	 refresh_front_end, create_model_front_end, admin_edit_front_end},
+	{G_TYPE_STRING, SORTID_NAME, "Name", FALSE, EDIT_NONE,
+	 refresh_front_end, create_model_front_end, admin_edit_front_end},
+	{G_TYPE_STRING, SORTID_COLOR,  NULL, TRUE, EDIT_COLOR,
+	 refresh_front_end, create_model_front_end, admin_edit_front_end},
+	{G_TYPE_STRING, SORTID_STATE, "State", FALSE, EDIT_MODEL,
+	 refresh_front_end, create_model_front_end, admin_edit_front_end},
+	{G_TYPE_STRING, SORTID_BOOT_TIME, "BootTime", FALSE, EDIT_NONE,
+	 refresh_front_end, create_model_front_end, admin_edit_front_end},
+	{G_TYPE_STRING, SORTID_SLURMD_START_TIME, "SlurmdStartTime",
+	 FALSE, EDIT_NONE,
+	 refresh_front_end, create_model_front_end, admin_edit_front_end},
+	{G_TYPE_STRING, SORTID_REASON, "Reason", FALSE, EDIT_NONE,
+	 refresh_front_end, create_model_front_end, admin_edit_front_end},
+	{G_TYPE_INT, SORTID_COLOR_INX,  NULL, FALSE, EDIT_NONE,
+	 refresh_front_end, create_model_front_end, admin_edit_front_end},
+	{G_TYPE_POINTER, SORTID_NODE_INX,  NULL, FALSE, EDIT_NONE,
+	 refresh_front_end, create_model_front_end, admin_edit_front_end},
+	{G_TYPE_NONE, -1, NULL, FALSE, EDIT_NONE}
+};
+
+static display_data_t options_data_front_end[] = {
+	{G_TYPE_INT, SORTID_POS, NULL, FALSE, EDIT_NONE},
+	{G_TYPE_STRING, INFO_PAGE, "Full Info", TRUE, FRONT_END_PAGE},
+	{G_TYPE_STRING, FRONT_END_PAGE, "Drain Front End Node", TRUE,
+	 ADMIN_PAGE},
+	{G_TYPE_STRING, FRONT_END_PAGE, "Resume Front End Node", TRUE,
+	 ADMIN_PAGE},
+	{G_TYPE_NONE, -1, NULL, FALSE, EDIT_NONE}
+};
+
+
+static display_data_t *local_display_data = NULL;
+
+static char *got_edit_signal = NULL;
+
+static void _admin_front_end(GtkTreeModel *model, GtkTreeIter *iter, char *type,
+			     char *node_list);
+static void _process_each_front_end(GtkTreeModel *model, GtkTreePath *path,
+				    GtkTreeIter*iter, gpointer userdata);
+
+static void _front_end_info_list_del(void *object)
+{
+	sview_front_end_info_t *sview_front_end_info;
+
+	sview_front_end_info = (sview_front_end_info_t *)object;
+	if (sview_front_end_info) {
+		xfree(sview_front_end_info->boot_time);
+		xfree(sview_front_end_info->reason);
+		xfree(sview_front_end_info->slurmd_start_time);
+		xfree(sview_front_end_info->state);
+		xfree(sview_front_end_info);
+	}
+}
+
+static void _layout_front_end_record(GtkTreeView *treeview,
+				     sview_front_end_info_t *
+				     sview_front_end_info,
+				     int update)
+{
+	GtkTreeIter iter;
+	front_end_info_t *front_end_ptr =
+		sview_front_end_info->front_end_ptr;
+	GtkTreeStore *treestore =
+		GTK_TREE_STORE(gtk_tree_view_get_model(treeview));
+
+	if (!treestore)
+		return;
+
+	add_display_treestore_line(update, treestore, &iter,
+				   find_col_name(display_data_front_end,
+						 SORTID_NAME),
+				   front_end_ptr->name);
+
+	add_display_treestore_line(update, treestore, &iter,
+				   find_col_name(display_data_front_end,
+						 SORTID_STATE),
+				   sview_front_end_info->state);
+
+	add_display_treestore_line(update, treestore, &iter,
+				   find_col_name(display_data_front_end,
+						 SORTID_BOOT_TIME),
+				   sview_front_end_info->boot_time);
+
+	add_display_treestore_line(update, treestore, &iter,
+				   find_col_name(display_data_front_end,
+						 SORTID_SLURMD_START_TIME),
+				   sview_front_end_info->slurmd_start_time);
+
+	add_display_treestore_line(update, treestore, &iter,
+				   find_col_name(display_data_front_end,
+						 SORTID_REASON),
+				   sview_front_end_info->reason);
+}
+
+static void _update_front_end_record(
+			sview_front_end_info_t *sview_front_end_info_ptr,
+			GtkTreeStore *treestore,
+			GtkTreeIter *iter)
+{
+	front_end_info_t *front_end_ptr;
+
+	front_end_ptr = sview_front_end_info_ptr->front_end_ptr;
+
+	/* Combining these records provides a slight performance improvement */
+	gtk_tree_store_set(treestore, iter,
+			   SORTID_BOOT_TIME,
+				sview_front_end_info_ptr->boot_time,
+			   SORTID_COLOR,
+				sview_colors[sview_front_end_info_ptr->color_inx],
+			   SORTID_COLOR_INX,
+				sview_front_end_info_ptr->color_inx,
+			   SORTID_NODE_INX,
+				sview_front_end_info_ptr->node_inx, 
+			   SORTID_NAME,    front_end_ptr->name,
+			   SORTID_REASON,  sview_front_end_info_ptr->reason,
+			   SORTID_SLURMD_START_TIME,
+				sview_front_end_info_ptr->slurmd_start_time,
+			   SORTID_STATE,   sview_front_end_info_ptr->state,
+			   -1);
+
+	return;
+}
+
+static void _append_front_end_record(
+			sview_front_end_info_t *sview_front_end_info_ptr,
+			GtkTreeStore *treestore, GtkTreeIter *iter,
+			int line)
+{
+	gtk_tree_store_append(treestore, iter, NULL);
+	gtk_tree_store_set(treestore, iter, SORTID_POS, line, -1);
+	_update_front_end_record(sview_front_end_info_ptr, treestore, iter);
+}
+
+static void _update_info_front_end(List info_list, GtkTreeView *tree_view)
+{
+	GtkTreePath *path = gtk_tree_path_new_first();
+	GtkTreeModel *model = gtk_tree_view_get_model(tree_view);
+	GtkTreeIter iter;
+	front_end_info_t *front_end_ptr = NULL;
+	int line = 0;
+	char *host = NULL, *front_end_name = NULL;
+	ListIterator itr = NULL;
+	sview_front_end_info_t *sview_front_end_info = NULL;
+
+	/* get the iter, or find out the list is empty goto add */
+	if (gtk_tree_model_get_iter(model, &iter, path)) {
+		/* make sure all the front end nodes are still here */
+		while (1) {
+			if (!gtk_tree_model_iter_next(model, &iter)) {
+				break;
+			}
+		}
+	}
+
+	itr = list_iterator_create(info_list);
+	while ((sview_front_end_info = (sview_front_end_info_t*) list_next(itr))) {
+		front_end_ptr = sview_front_end_info->front_end_ptr;
+		/* get the iter, or find out the list is empty goto add */
+		if (!gtk_tree_model_get_iter(model, &iter, path)) {
+			goto adding;
+		}
+		line = 0;
+		while (1) {
+			/* search for the front end name and check
+			   to see if it is in the list */
+			gtk_tree_model_get(model, &iter, SORTID_NAME,
+					   &front_end_name, -1);
+			if (!strcmp(front_end_name, front_end_ptr->name)) {
+				/* update with new info */
+				g_free(front_end_name);
+				_update_front_end_record(sview_front_end_info,
+						    GTK_TREE_STORE(model),
+						    &iter);
+				goto found;
+			}
+			g_free(front_end_name);
+
+			line++;
+			if (!gtk_tree_model_iter_next(model, &iter)) {
+				break;
+			}
+		}
+	adding:
+		_append_front_end_record(sview_front_end_info, GTK_TREE_STORE(model),
+				    &iter, line);
+	found:
+		;
+	}
+	list_iterator_destroy(itr);
+	if (host)
+		free(host);
+
+	gtk_tree_path_free(path);
+
+	return;
+}
+
+static List _create_front_end_info_list(front_end_info_msg_t *front_end_info_ptr,
+					int changed)
+{
+	char *upper = NULL;
+	char user[32], time_str[32];
+	static List info_list = NULL;
+	int i = 0;
+	sview_front_end_info_t *sview_front_end_info_ptr = NULL;
+	front_end_info_t *front_end_ptr = NULL;
+
+	if (!changed && info_list)
+		goto update_color;
+
+	if (info_list)
+		list_flush(info_list);
+	else
+		info_list = list_create(_front_end_info_list_del);
+
+	if (!info_list) {
+		g_print("malloc error\n");
+		return NULL;
+	}
+
+	for (i = 0; i < front_end_info_ptr->record_count; i++) {
+		front_end_ptr = &(front_end_info_ptr->front_end_array[i]);
+		sview_front_end_info_ptr =
+			xmalloc(sizeof(sview_front_end_info_t));
+		sview_front_end_info_ptr->front_end_ptr = front_end_ptr;
+		sview_front_end_info_ptr->color_inx = i % sview_colors_cnt;
+		if (g_node_info_ptr) {
+			sview_front_end_info_ptr->node_inx[0] = 0;
+			sview_front_end_info_ptr->node_inx[1] =
+				g_node_info_ptr->record_count - 1;
+			sview_front_end_info_ptr->node_inx[2] = -1;
+		} else
+			sview_front_end_info_ptr->node_inx[0] = -1;
+		if (front_end_ptr->boot_time) {
+			slurm_make_time_str(&front_end_ptr->boot_time,
+					    time_str, sizeof(time_str));
+			sview_front_end_info_ptr->boot_time =
+				xstrdup(time_str);
+		}
+		if (front_end_ptr->slurmd_start_time) {
+			slurm_make_time_str(&front_end_ptr->slurmd_start_time,
+					    time_str, sizeof(time_str));
+			sview_front_end_info_ptr->slurmd_start_time =
+				xstrdup(time_str);
+		}
+		upper = node_state_string(front_end_ptr->node_state);
+		sview_front_end_info_ptr->state = str_tolower(upper);
+
+		if (front_end_ptr->reason && front_end_ptr->reason_time &&
+		    (front_end_ptr->reason_uid != NO_VAL)) {
+			struct passwd *pw = NULL;
+
+			if ((pw=getpwuid(front_end_ptr->reason_uid)))
+				snprintf(user, sizeof(user), "%s", pw->pw_name);
+			else
+				snprintf(user, sizeof(user), "Unk(%u)",
+					 front_end_ptr->reason_uid);
+			slurm_make_time_str(&front_end_ptr->reason_time,
+					    time_str, sizeof(time_str));
+			sview_front_end_info_ptr->reason =
+				xstrdup_printf("%s [%s@%s]",
+					       front_end_ptr->reason, user,
+					       time_str);
+		} else {
+			sview_front_end_info_ptr->reason =
+				xstrdup(front_end_ptr->reason);
+		}
+
+		list_append(info_list, sview_front_end_info_ptr);
+	}
+
+update_color:
+	return info_list;
+}
+
+static void _display_info_front_end(List info_list, popup_info_t *popup_win)
+{
+	specific_info_t *spec_info = popup_win->spec_info;
+	char *name = (char *)spec_info->search_info->gchar_data;
+	int found = 0, j;
+	front_end_info_t *front_end_ptr = NULL;
+	GtkTreeView *treeview = NULL;
+	ListIterator itr = NULL;
+	sview_front_end_info_t *sview_fe_info = NULL;
+	int update = 0;
+
+	if (!spec_info->search_info->gchar_data) {
+		//info = xstrdup("No pointer given!");
+		goto finished;
+	}
+
+need_refresh:
+	if (!spec_info->display_widget) {
+		treeview = create_treeview_2cols_attach_to_table(
+			popup_win->table);
+		spec_info->display_widget =
+			gtk_widget_ref(GTK_WIDGET(treeview));
+	} else {
+		treeview = GTK_TREE_VIEW(spec_info->display_widget);
+		update = 1;
+	}
+
+	itr = list_iterator_create(info_list);
+	while ((sview_fe_info = (sview_front_end_info_t*) list_next(itr))) {
+		front_end_ptr = sview_fe_info->front_end_ptr;
+		if (strcmp(front_end_ptr->name, name) == 0) {
+			j = 0;
+			while (sview_fe_info->node_inx[j] >= 0) {
+				change_grid_color(popup_win->grid_button_list,
+						  sview_fe_info->node_inx[j],
+						  sview_fe_info->node_inx[j + 1],
+						  sview_fe_info->color_inx,
+						  true, 0);
+				j += 2;
+			}
+			_layout_front_end_record(treeview, sview_fe_info,
+						 update);
+			found = 1;
+			break;
+		}
+	}
+	list_iterator_destroy(itr);
+	post_setup_popup_grid_list(popup_win);
+
+	if (!found) {
+		if (!popup_win->not_found) {
+			char *temp = "FRONT END DOESN'T EXSIST\n";
+			GtkTreeIter iter;
+			GtkTreeModel *model = NULL;
+
+			/* only time this will be run so no update */
+			model = gtk_tree_view_get_model(treeview);
+			add_display_treestore_line(0,
+						   GTK_TREE_STORE(model),
+						   &iter,
+						   temp, "");
+		}
+		popup_win->not_found = true;
+	} else {
+		if (popup_win->not_found) {
+			popup_win->not_found = false;
+			gtk_widget_destroy(spec_info->display_widget);
+
+			goto need_refresh;
+		}
+	}
+	gtk_widget_show(spec_info->display_widget);
+
+finished:
+
+	return;
+}
+
+extern void refresh_front_end(GtkAction *action, gpointer user_data)
+{
+	popup_info_t *popup_win = (popup_info_t *)user_data;
+	xassert(popup_win);
+	xassert(popup_win->spec_info);
+	xassert(popup_win->spec_info->title);
+	popup_win->force_refresh = 1;
+	specific_info_front_end(popup_win);
+}
+
+extern int get_new_info_front_end(front_end_info_msg_t **info_ptr, int force)
+{
+	static front_end_info_msg_t *new_front_end_ptr = NULL;
+	int error_code = SLURM_NO_CHANGE_IN_DATA;
+	time_t now = time(NULL);
+	static time_t last;
+	static bool changed = 0;
+
+	if (g_front_end_info_ptr && !force &&
+	    ((now - last) < working_sview_config.refresh_delay)) {
+		if (*info_ptr != g_front_end_info_ptr)
+			error_code = SLURM_SUCCESS;
+		*info_ptr = g_front_end_info_ptr;
+		if (changed)
+			error_code = SLURM_SUCCESS;
+		goto end_it;
+	}
+	last = now;
+	if (g_front_end_info_ptr) {
+		error_code = slurm_load_front_end(
+			g_front_end_info_ptr->last_update, &new_front_end_ptr);
+		if (error_code == SLURM_SUCCESS) {
+			slurm_free_front_end_info_msg(g_front_end_info_ptr);
+			changed = 1;
+		} else if (slurm_get_errno() == SLURM_NO_CHANGE_IN_DATA) {
+			error_code = SLURM_NO_CHANGE_IN_DATA;
+			new_front_end_ptr = g_front_end_info_ptr;
+			changed = 0;
+		}
+	} else {
+		new_front_end_ptr = NULL;
+		error_code = slurm_load_front_end((time_t) NULL,
+						  &new_front_end_ptr);
+		changed = 1;
+	}
+
+	g_front_end_info_ptr = new_front_end_ptr;
+
+	if (g_front_end_info_ptr && (*info_ptr != g_front_end_info_ptr))
+		error_code = SLURM_SUCCESS;
+
+	*info_ptr = g_front_end_info_ptr;
+end_it:
+	return error_code;
+}
+
+extern GtkListStore *create_model_front_end(int type)
+{
+	GtkListStore *model = NULL;
+	GtkTreeIter iter;
+	int i = 0;
+
+	switch(type) {
+	case SORTID_STATE:
+		model = gtk_list_store_new(2, G_TYPE_STRING,
+					   G_TYPE_INT);
+		gtk_list_store_append(model, &iter);
+		gtk_list_store_set(model, &iter,
+				   0, "Drain",
+				   1, i,
+				   -1);
+		gtk_list_store_append(model, &iter);
+
+		gtk_list_store_set(model, &iter,
+				   0, "Resume",
+				   1, i,
+				   -1);
+		break;
+
+	}
+	return model;
+}
+
+extern void admin_edit_front_end(GtkCellRendererText *cell,
+				 const char *path_string,
+				 const char *new_text, gpointer data)
+{
+	GtkTreeStore *treestore = GTK_TREE_STORE(data);
+	GtkTreePath *path = gtk_tree_path_new_from_string(path_string);
+	GtkTreeIter iter;
+	char *node_list = NULL;
+	int column = GPOINTER_TO_INT(g_object_get_data(G_OBJECT(cell),
+						       "column"));
+	if (!new_text || !strcmp(new_text, ""))
+		goto no_input;
+
+	gtk_tree_model_get_iter(GTK_TREE_MODEL(treestore), &iter, path);
+	switch(column) {
+	case SORTID_STATE:
+		gtk_tree_model_get(GTK_TREE_MODEL(treestore), &iter,
+				   SORTID_NAME,
+				   &node_list, -1);
+		_admin_front_end(GTK_TREE_MODEL(treestore), &iter,
+				 (char *)new_text, node_list);
+		g_free(node_list);
+	default:
+		break;
+	}
+no_input:
+	gtk_tree_path_free(path);
+	g_static_mutex_unlock(&sview_mutex);
+}
+
+extern void get_info_front_end(GtkTable *table, display_data_t *display_data)
+{
+	int error_code = SLURM_SUCCESS;
+	List info_list = NULL;
+	static int view = -1;
+	static front_end_info_msg_t *front_end_info_ptr = NULL;
+	char error_char[100];
+	GtkWidget *label = NULL;
+	GtkTreeView *tree_view = NULL;
+	static GtkWidget *display_widget = NULL;
+	int changed = 1, j;
+	ListIterator itr = NULL;
+	GtkTreePath *path = NULL;
+	static bool set_opts = FALSE;
+
+	if (!set_opts)
+		set_page_opts(FRONT_END_PAGE, display_data_front_end,
+			      SORTID_CNT, _initial_page_opts);
+	set_opts = TRUE;
+
+	/* reset */
+	if (!table && !display_data) {
+		if (display_widget)
+			gtk_widget_destroy(display_widget);
+		display_widget = NULL;
+		front_end_info_ptr = NULL;
+		goto reset_curs;
+	}
+
+	if (display_data)
+		local_display_data = display_data;
+	if (!table) {
+		display_data_front_end->set_menu = local_display_data->set_menu;
+		goto reset_curs;
+	}
+	if (display_widget && toggled) {
+		gtk_widget_destroy(display_widget);
+		display_widget = NULL;
+		goto display_it;
+	}
+
+	error_code = get_new_info_front_end(&front_end_info_ptr, force_refresh);
+	if (error_code == SLURM_NO_CHANGE_IN_DATA) {
+		changed = 0;
+	} else if (error_code != SLURM_SUCCESS) {
+		if (view == ERROR_VIEW)
+			goto end_it;
+		if (display_widget)
+			gtk_widget_destroy(display_widget);
+		view = ERROR_VIEW;
+		sprintf(error_char, "slurm_load_front_end: %s",
+			slurm_strerror(slurm_get_errno()));
+		label = gtk_label_new(error_char);
+		gtk_table_attach_defaults(table, label, 0, 1, 0, 1);
+		gtk_widget_show(label);
+		display_widget = gtk_widget_ref(GTK_WIDGET(label));
+		goto end_it;
+	}
+
+display_it:
+	info_list = _create_front_end_info_list(front_end_info_ptr, changed);
+	if (!info_list)
+		goto reset_curs;
+	/* set up the grid */
+	if (display_widget && GTK_IS_TREE_VIEW(display_widget) &&
+	    gtk_tree_selection_count_selected_rows(
+		   gtk_tree_view_get_selection(
+			   GTK_TREE_VIEW(display_widget)))) {
+		GtkTreeViewColumn *focus_column = NULL;
+		/* highlight the correct nodes from the last selection */
+		gtk_tree_view_get_cursor(GTK_TREE_VIEW(display_widget),
+					 &path, &focus_column);
+	}
+	if (!path) {
+		sview_front_end_info_t *fe_ptr;
+		itr = list_iterator_create(info_list);
+		while ((fe_ptr = list_next(itr))) {
+			j = 0;
+			while (fe_ptr->node_inx[j] >= 0) {
+				change_grid_color(grid_button_list,
+						  fe_ptr->node_inx[j],
+						  fe_ptr->node_inx[j+1],
+						  fe_ptr->color_inx,
+						  true, 0);
+				j += 2;
+			}
+		}
+		list_iterator_destroy(itr);
+		change_grid_color(grid_button_list, -1, -1,
+				  MAKE_WHITE, true, 0);
+	} else {
+		highlight_grid(GTK_TREE_VIEW(display_widget),
+			       SORTID_NODE_INX, SORTID_COLOR_INX,
+			       grid_button_list);
+	}
+
+	if (view == ERROR_VIEW && display_widget) {
+		gtk_widget_destroy(display_widget);
+		display_widget = NULL;
+	}
+	if (!display_widget) {
+		tree_view = create_treeview(local_display_data,
+					    &grid_button_list);
+		gtk_tree_selection_set_mode(
+			gtk_tree_view_get_selection(tree_view),
+			GTK_SELECTION_MULTIPLE);
+		display_widget = gtk_widget_ref(GTK_WIDGET(tree_view));
+		gtk_table_attach_defaults(table,
+					  GTK_WIDGET(tree_view),
+					  0, 1, 0, 1);
+		/* since this function sets the model of the tree_view
+		   to the treestore we don't really care about
+		   the return value */
+		create_treestore(tree_view, display_data_front_end,
+				 SORTID_CNT, SORTID_NAME, SORTID_COLOR);
+	}
+
+	view = INFO_VIEW;
+	_update_info_front_end(info_list, GTK_TREE_VIEW(display_widget));
+end_it:
+	toggled = FALSE;
+	force_refresh = FALSE;
+reset_curs:
+	if (main_window && main_window->window)
+		gdk_window_set_cursor(main_window->window, NULL);
+	return;
+}
+
+extern void specific_info_front_end(popup_info_t *popup_win)
+{
+	int resv_error_code = SLURM_SUCCESS;
+	static front_end_info_msg_t *front_end_info_ptr = NULL;
+	static front_end_info_t *front_end_ptr = NULL;
+	specific_info_t *spec_info = popup_win->spec_info;
+	sview_search_info_t *search_info = spec_info->search_info;
+	char error_char[100];
+	GtkWidget *label = NULL;
+	GtkTreeView *tree_view = NULL;
+	List resv_list = NULL;
+	List send_resv_list = NULL;
+	int changed = 1;
+	sview_front_end_info_t *sview_front_end_info_ptr = NULL;
+	int i = -1;
+	ListIterator itr = NULL;
+
+	if (!spec_info->display_widget) {
+		setup_popup_info(popup_win, display_data_front_end, SORTID_CNT);
+	}
+
+	if (spec_info->display_widget && popup_win->toggled) {
+		gtk_widget_destroy(spec_info->display_widget);
+		spec_info->display_widget = NULL;
+		goto display_it;
+	}
+
+	resv_error_code = get_new_info_front_end(&front_end_info_ptr,
+						 popup_win->force_refresh);
+	if (resv_error_code == SLURM_NO_CHANGE_IN_DATA) {
+		if (!spec_info->display_widget || spec_info->view == ERROR_VIEW)
+			goto display_it;
+		changed = 0;
+	} else if (resv_error_code != SLURM_SUCCESS) {
+		if (spec_info->view == ERROR_VIEW)
+			goto end_it;
+		spec_info->view = ERROR_VIEW;
+		if (spec_info->display_widget)
+			gtk_widget_destroy(spec_info->display_widget);
+		sprintf(error_char, "get_new_info_front_end: %s",
+			slurm_strerror(slurm_get_errno()));
+		label = gtk_label_new(error_char);
+		gtk_table_attach_defaults(popup_win->table,
+					  label,
+					  0, 1, 0, 1);
+		gtk_widget_show(label);
+		spec_info->display_widget = gtk_widget_ref(label);
+		goto end_it;
+	}
+
+display_it:
+
+	resv_list = _create_front_end_info_list(front_end_info_ptr, changed);
+
+	if (!resv_list)
+		return;
+
+	if (spec_info->view == ERROR_VIEW && spec_info->display_widget) {
+		gtk_widget_destroy(spec_info->display_widget);
+		spec_info->display_widget = NULL;
+	}
+	if (spec_info->type != INFO_PAGE && !spec_info->display_widget) {
+		tree_view = create_treeview(local_display_data,
+					    &popup_win->grid_button_list);
+		gtk_tree_selection_set_mode(
+			gtk_tree_view_get_selection(tree_view),
+			GTK_SELECTION_MULTIPLE);
+		spec_info->display_widget =
+			gtk_widget_ref(GTK_WIDGET(tree_view));
+		gtk_table_attach_defaults(popup_win->table,
+					  GTK_WIDGET(tree_view),
+					  0, 1, 0, 1);
+		/* since this function sets the model of the tree_view
+		   to the treestore we don't really care about
+		   the return value */
+		create_treestore(tree_view, popup_win->display_data,
+				 SORTID_CNT, SORTID_NAME, SORTID_COLOR);
+	}
+
+	setup_popup_grid_list(popup_win);
+
+	spec_info->view = INFO_VIEW;
+	if (spec_info->type == INFO_PAGE) {
+		_display_info_front_end(resv_list, popup_win);
+		goto end_it;
+	}
+
+	/* just linking to another list, don't free the inside, just
+	   the list */
+	send_resv_list = list_create(NULL);
+	itr = list_iterator_create(resv_list);
+	i = -1;
+	while ((sview_front_end_info_ptr = list_next(itr))) {
+		i++;
+		front_end_ptr = sview_front_end_info_ptr->front_end_ptr;
+		switch (spec_info->type) {
+		case PART_PAGE:
+		case BLOCK_PAGE:
+		case NODE_PAGE:
+			break;
+		case JOB_PAGE:
+			if (strcmp(front_end_ptr->name,
+				   search_info->gchar_data))
+				continue;
+			break;
+		case RESV_PAGE:
+			switch (search_info->search_type) {
+			case SEARCH_RESERVATION_NAME:
+				if (!search_info->gchar_data)
+					continue;
+
+				if (strcmp(front_end_ptr->name,
+					   search_info->gchar_data))
+					continue;
+				break;
+			default:
+				continue;
+			}
+			break;
+		default:
+			g_print("Unknown type %d\n", spec_info->type);
+			continue;
+		}
+		list_push(send_resv_list, sview_front_end_info_ptr);
+	}
+	list_iterator_destroy(itr);
+	post_setup_popup_grid_list(popup_win);
+
+	_update_info_front_end(send_resv_list,
+			  GTK_TREE_VIEW(spec_info->display_widget));
+	list_destroy(send_resv_list);
+end_it:
+	popup_win->toggled = 0;
+	popup_win->force_refresh = 0;
+
+	return;
+}
+
+extern void set_menus_front_end(void *arg, void *arg2, GtkTreePath *path,
+				int type)
+{
+	GtkTreeView *tree_view = (GtkTreeView *)arg;
+	popup_info_t *popup_win = (popup_info_t *)arg;
+	GtkMenu *menu = (GtkMenu *)arg2;
+	List button_list = (List)arg2;
+
+	switch (type) {
+	case TAB_CLICKED:
+		make_fields_menu(NULL, menu, display_data_front_end, SORTID_CNT);
+		break;
+	case ROW_CLICKED:
+		make_options_menu(tree_view, path, menu, options_data_front_end);
+		break;
+	case ROW_LEFT_CLICKED:
+		highlight_grid(tree_view, SORTID_NODE_INX,
+			       SORTID_COLOR_INX, button_list);
+		break;
+	case FULL_CLICKED:
+	{
+		GtkTreeModel *model = gtk_tree_view_get_model(tree_view);
+		GtkTreeIter iter;
+		if (!gtk_tree_model_get_iter(model, &iter, path)) {
+			g_error("error getting iter from model\n");
+			break;
+		}
+
+		popup_all_front_end(model, &iter, INFO_PAGE);
+
+		break;
+	}
+	case POPUP_CLICKED:
+		make_fields_menu(popup_win, menu,
+				 popup_win->display_data, SORTID_CNT);
+		break;
+	default:
+		g_error("UNKNOWN type %d given to set_fields\n", type);
+	}
+}
+
+extern void popup_all_front_end(GtkTreeModel *model, GtkTreeIter *iter, int id)
+{
+	char *name = NULL;
+	char title[100];
+	ListIterator itr = NULL;
+	popup_info_t *popup_win = NULL;
+	GError *error = NULL;
+
+	gtk_tree_model_get(model, iter, SORTID_NAME, &name, -1);
+
+	switch (id) {
+	case INFO_PAGE:
+		snprintf(title, 100, "Full info for front end node %s", name);
+		break;
+	default:
+		g_print("front end got %d\n", id);
+	}
+
+	itr = list_iterator_create(popup_list);
+	while ((popup_win = list_next(itr))) {
+		if (popup_win->spec_info)
+			if (!strcmp(popup_win->spec_info->title, title)) {
+				break;
+			}
+	}
+	list_iterator_destroy(itr);
+
+	if (!popup_win) {
+		if (id == INFO_PAGE)
+			popup_win = create_popup_info(id, FRONT_END_PAGE,
+						      title);
+		else {
+			popup_win = create_popup_info(FRONT_END_PAGE, id,
+						      title);
+		}
+	} else {
+		g_free(name);
+		gtk_window_present(GTK_WINDOW(popup_win->popup));
+		return;
+	}
+
+	/* Pass the model and the structs from the iter so we can always get
+	 * the current node_inx.
+	 */
+	popup_win->model = model;
+	popup_win->iter = *iter;
+	popup_win->node_inx_id = SORTID_NODE_INX;
+
+	switch (id) {
+	case INFO_PAGE:
+		popup_win->spec_info->search_info->gchar_data = name;
+		break;
+	default:
+		g_print("resv got unknown type %d\n", id);
+	}
+	if (!g_thread_create((gpointer)popup_thr, popup_win, FALSE, &error)) {
+		g_printerr ("Failed to create resv popup thread: %s\n",
+			    error->message);
+		return;
+	}
+}
+
+static void _process_each_front_end(GtkTreeModel *model, GtkTreePath *path,
+				    GtkTreeIter*iter, gpointer user_data)
+{
+	char *name = NULL;
+	front_end_user_data_t *fe_data = user_data;
+
+	gtk_tree_model_get(model, iter, SORTID_NAME, &name, -1);
+	if (fe_data->node_list)
+		xstrfmtcat(fe_data->node_list, ",%s", name);
+	else
+		fe_data->node_list = xstrdup(name);
+	g_free(name);
+}
+
+extern void select_admin_front_end(GtkTreeModel *model, GtkTreeIter *iter,
+				   display_data_t *display_data,
+				   GtkTreeView *treeview)
+{
+	if (treeview) {
+		char *node_list;
+		hostlist_t hl = NULL;
+		front_end_user_data_t user_data;
+
+		memset(&user_data, 0, sizeof(front_end_user_data_t));
+		gtk_tree_selection_selected_foreach(
+			gtk_tree_view_get_selection(treeview),
+			_process_each_front_end, &user_data);
+
+		hl = hostlist_create(user_data.node_list);
+		hostlist_uniq(hl);
+		hostlist_sort(hl);
+		xfree(user_data.node_list);
+		node_list = hostlist_ranged_string_xmalloc(hl);
+		hostlist_destroy(hl);
+
+		_admin_front_end(model, iter, display_data->name, node_list);
+		xfree(node_list);
+	}
+}
+
+static void _admin_front_end(GtkTreeModel *model, GtkTreeIter *iter, char *type,
+			     char *node_list)
+{
+	uint16_t state = (uint16_t) NO_VAL;
+	update_front_end_msg_t front_end_update_msg;
+	char *new_type = NULL, *reason = NULL;
+	char tmp_char[100];
+	char *lower;
+	int rc;
+	GtkWidget *label = NULL;
+	GtkWidget *entry = NULL;
+	GtkWidget *popup = gtk_dialog_new_with_buttons(
+		type,
+		GTK_WINDOW(main_window),
+		GTK_DIALOG_MODAL | GTK_DIALOG_DESTROY_WITH_PARENT,
+		NULL);
+
+	gtk_window_set_transient_for(GTK_WINDOW(popup), NULL);
+
+	label = gtk_dialog_add_button(GTK_DIALOG(popup),
+				      GTK_STOCK_YES, GTK_RESPONSE_OK);
+	gtk_window_set_default(GTK_WINDOW(popup), label);
+	gtk_dialog_add_button(GTK_DIALOG(popup),
+			      GTK_STOCK_CANCEL, GTK_RESPONSE_CANCEL);
+
+	if (!strncasecmp("Drain", type, 5)) {
+		new_type = "DRAIN";
+		reason = "\n\nPlease enter reason.";
+		state = NODE_STATE_DRAIN;
+		entry = create_entry();
+	} else if (!strncasecmp("Resume", type, 6)) {
+		new_type = "RESUME";
+		reason = "";
+		state = NODE_RESUME;
+	}
+	snprintf(tmp_char, sizeof(tmp_char),
+		 "Are you sure you want to set state of front end node %s "
+		 "to %s?%s", node_list, new_type, reason);
+	label = gtk_label_new(tmp_char);
+
+	gtk_box_pack_start(GTK_BOX(GTK_DIALOG(popup)->vbox),
+			   label, FALSE, FALSE, 0);
+	if (entry)
+		gtk_box_pack_start(GTK_BOX(GTK_DIALOG(popup)->vbox),
+				   entry, TRUE, TRUE, 0);
+	gtk_widget_show_all(popup);
+	rc = gtk_dialog_run (GTK_DIALOG(popup));
+
+	slurm_init_update_front_end_msg(&front_end_update_msg);
+
+	if (rc == GTK_RESPONSE_OK) {
+		front_end_update_msg.name = node_list;
+		front_end_update_msg.node_state = state;
+		if (entry) {
+			front_end_update_msg.reason = xstrdup(
+				gtk_entry_get_text(GTK_ENTRY(entry)));
+			if (!front_end_update_msg.reason ||
+			    !strlen(front_end_update_msg.reason)) {
+				lower = g_strdup_printf(
+					"You need a reason to do that.");
+				display_edit_note(lower);
+				g_free(lower);
+				goto end_it;
+			}
+			rc = uid_from_string(getlogin(),
+					     &front_end_update_msg.reason_uid);
+			if (rc < 0)
+				front_end_update_msg.reason_uid = getuid();
+		}
+
+		rc = slurm_update_front_end(&front_end_update_msg);
+		if (rc == SLURM_SUCCESS) {
+			lower = g_strdup_printf(
+				"Nodes %s updated successfully.",
+				node_list);
+			display_edit_note(lower);
+			g_free(lower);
+		} else {
+			lower = g_strdup_printf(
+				"Problem updating nodes %s: %s",
+				node_list, slurm_strerror(rc));
+			display_edit_note(lower);
+			g_free(lower);
+		}
+	}
+
+end_it:
+	global_entry_changed = 0;
+	xfree(front_end_update_msg.reason);
+	gtk_widget_destroy(popup);
+	if (got_edit_signal) {
+		type = got_edit_signal;
+		got_edit_signal = NULL;
+		_admin_front_end(model, iter, type, node_list);
+		xfree(type);
+	}
+	return;
+}
+
+extern void cluster_change_front_end(void)
+{
+	display_data_t *display_data = display_data_front_end;
+
+	display_data = options_data_front_end;
+	while (display_data++) {
+		if (display_data->id == -1)
+			break;
+
+		if (cluster_flags & CLUSTER_FLAG_BG) {
+			switch (display_data->id) {
+			case BLOCK_PAGE:
+				display_data->name = "Blocks";
+				break;
+			case NODE_PAGE:
+				display_data->name = "Base Partitions";
+				break;
+			}
+		} else {
+			switch (display_data->id) {
+			case BLOCK_PAGE:
+				display_data->name = NULL;
+				break;
+			case NODE_PAGE:
+				display_data->name = "Nodes";
+				break;
+			}
+		}
+	}
+	get_info_front_end(NULL, NULL);
+}
diff --git a/src/sview/grid.c b/src/sview/grid.c
index 81dca9e86..3b7608c40 100644
--- a/src/sview/grid.c
+++ b/src/sview/grid.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -37,9 +37,10 @@
  *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
 \*****************************************************************************/
 #include "sview.h"
-#include "src/plugins/select/bluegene/plugin/bluegene.h"
-#define TOPO_DEBUG 0
+
 #define RESET_GRID -2
+#define TOPO_DEBUG  0
+
 List grid_button_list = NULL;
 List blinking_button_list = NULL;
 List multi_button_list = NULL;
@@ -76,15 +77,6 @@ typedef struct {
 
 GStaticMutex blinking_mutex = G_STATIC_MUTEX_INIT;
 
-static int _coord(char coord)
-{
-	if ((coord >= '0') && (coord <= '9'))
-		return (coord - '0');
-	if ((coord >= 'A') && (coord <= 'Z'))
-		return ((coord - 'A') + 10);
-	return -1;
-}
-
 static gboolean _mouseover_node(GtkWidget *widget, GdkEventButton *event,
 				grid_button_t *grid_button)
 {
@@ -232,7 +224,9 @@ void _put_button_as_down(grid_button_t *grid_button, int state)
 	grid_button->color = NULL;
 	grid_button->color_inx = MAKE_DOWN;
 	grid_button->button = gtk_event_box_new();
-	gtk_widget_set_size_request(grid_button->button, 10, 10);
+	gtk_widget_set_size_request(grid_button->button,
+				    working_sview_config.button_size,
+				    working_sview_config.button_size);
 	gtk_event_box_set_above_child(GTK_EVENT_BOX(grid_button->button),
 				      FALSE);
 	_add_button_signals(grid_button);
@@ -272,7 +266,9 @@ void _put_button_as_up(grid_button_t *grid_button)
 	}
 	gtk_widget_destroy(grid_button->button);
 	grid_button->button = gtk_button_new();
-	gtk_widget_set_size_request(grid_button->button, 10, 10);
+	gtk_widget_set_size_request(grid_button->button,
+				    working_sview_config.button_size,
+				    working_sview_config.button_size);
 	_add_button_signals(grid_button);
 
 /* 	if (grid_button->frame) */
@@ -298,7 +294,9 @@ void _put_button_as_inactive(grid_button_t *grid_button)
 	}
 	gtk_widget_destroy(grid_button->button);
 	grid_button->button = gtk_button_new();
-	gtk_widget_set_size_request(grid_button->button, 10, 10);
+	gtk_widget_set_size_request(grid_button->button,
+				    working_sview_config.button_size,
+				    working_sview_config.button_size);
 	//gtk_widget_set_sensitive (grid_button->button, FALSE);
 
 	_add_button_signals(grid_button);
@@ -391,7 +389,6 @@ static void _each_highlightd(GtkTreeModel *model,
 
 	int j=0;
 	GdkColor color;
-	bool changed = 0;
 
 	grid_foreach_t *grid_foreach = userdata;
 
@@ -422,10 +419,9 @@ static void _each_highlightd(GtkTreeModel *model,
 		    || (grid_button->inx > node_inx[j+1]))
 			continue;
 
-		if (_change_button_color(grid_button, color_inx,
-					 sview_colors[color_inx],
-					 color, 0, 0))
-			changed = 1;
+		(void)_change_button_color(grid_button, color_inx,
+				     sview_colors[color_inx],
+				     color, 0, 0);
 
 		if (GTK_WIDGET_STATE(grid_button->button) != GTK_STATE_NORMAL)
 			gtk_widget_set_state(grid_button->button,
@@ -437,15 +433,9 @@ static void _each_highlightd(GtkTreeModel *model,
 	}
 
 	list_iterator_destroy(itr);
-	if (changed && working_sview_config.grid_speedup) {
-		gtk_widget_set_sensitive(GTK_WIDGET(main_grid_table), 0);
-		gtk_widget_set_sensitive(GTK_WIDGET(main_grid_table), 1);
-	}
 	return;
 }
 
-
-
 static void _each_highlight_selected(GtkTreeModel *model,
 				     GtkTreePath *path,
 				     GtkTreeIter *iter,
@@ -469,9 +459,8 @@ static void _each_highlight_selected(GtkTreeModel *model,
 		return;
 	itr = list_iterator_create(grid_foreach->button_list);
 	while ((grid_button = list_next(itr))) {
-		/*For multiple selections, need to retain all selected.
-		 *(previously this assumed only one selected).
-		 */
+		/* For multiple selections, need to retain all selected.
+		 * (previously this assumed only one selected). */
 		if (grid_button->inx != node_inx)
 			continue;
 		else if (GTK_WIDGET_STATE(grid_button->button)
@@ -507,15 +496,13 @@ static int _block_in_node(int *bp_inx, int inx)
  * in the system (e.g. there is a gap in the 3-D torus for a service or login
  * node.
  */
-static void _build_empty_node(int x, int y, int z,
+static void _build_empty_node(int coord_x, int coord_y,
 			      button_processor_t *button_processor)
 {
 	grid_button_t *grid_button = button_processor->grid_button;
-	int y_offset;
 
-	(*button_processor->coord_x) = (x + (DIM_SIZE[Z] - 1)) - z;
-	y_offset = button_processor->default_y_offset - (DIM_SIZE[Z] * y);
-	(*button_processor->coord_y) = (y_offset - y) + z;
+	(*button_processor->coord_x) = coord_x;
+	(*button_processor->coord_y) = coord_y;
 	grid_button = xmalloc(sizeof(grid_button_t));
 	grid_button->color_inx = MAKE_BLACK;
 	grid_button->inx = (*button_processor->inx);
@@ -537,61 +524,168 @@ static void _build_empty_node(int x, int y, int z,
 			 GTK_SHRINK, GTK_SHRINK, 1, 1);
 }
 
+static void _calc_coord_3d(int x, int y, int z, int default_y_offset,
+			   int *coord_x, int *coord_y, int *dim_size)
+{
+	int y_offset;
+
+	*coord_x = (x + (dim_size[2] - 1)) - z;
+	y_offset = default_y_offset - (dim_size[2] * y);
+	*coord_y = (y_offset - y) + z;
+}
+
+static void _calc_coord_4d(int a, int x, int y, int z, int default_y_offset,
+			   int *coord_x, int *coord_y, int* dim_size)
+{
+	int x_offset, y_offset;
+
+	x_offset = (dim_size[1] + dim_size[3]) * a;
+	*coord_x = x_offset + (x + (dim_size[3] - 1)) - z;
+	y_offset = default_y_offset - (dim_size[3] * y);
+	*coord_y = (y_offset - y) + z;
+}
+
+static int *_get_cluster_dims(void)
+{
+	int *my_dim_size = slurmdb_setup_cluster_dim_size();
+
+	if ((cluster_flags & CLUSTER_FLAG_CRAYXT) && my_dim_size) {
+		static int cray_dim_size[3] = {-1, -1, -1};
+		/* For now, assume four nodes per coordinate all in
+		 * the same cage. Need to refine. */
+		cray_dim_size[0] = my_dim_size[0];
+		cray_dim_size[1] = my_dim_size[1];
+		cray_dim_size[2] = my_dim_size[2];
+		return cray_dim_size;
+	}
+
+	return my_dim_size;
+}
+
 /* Add a button for a given node. If node_ptr == NULL then fill in any gaps
  * in the grid just for a clean look. Always call with node_ptr == NULL for
  * the last call in the sequence. */
 static int _add_button_to_list(node_info_t *node_ptr,
 			       button_processor_t *button_processor)
 {
+	static bool *node_exists = NULL;
+	static int node_exists_cnt = 1;
 	grid_button_t *grid_button = button_processor->grid_button;
+	int *dim_size = NULL, i, coord_x = 0, coord_y = 0;
+	int len = 0, len_a = 0;
+
+	if (cluster_dims > 1) {
+		dim_size = _get_cluster_dims();
+		if (dim_size == NULL) {
+			g_error("Could not read dim_size\n");
+			return SLURM_ERROR;
+		}
+		if ((dim_size[0] < 1) || (cluster_dims < 1)) {
+			g_error("Invalid dim_size %d or cluster_dims %d\n",
+				dim_size[0], cluster_dims);
+			return SLURM_ERROR;
+		}
 
-	if (cluster_dims == 4) {
-		/* FIXME: */
-		return SLURM_ERROR;
-	} else if (cluster_dims == 3) {
-		static bool *node_exists = NULL;
-		int i, x=0, y=0, z=0, y_offset=0;
-		/* On 3D system we need to translate a
-		   3D space to a 2D space and make it
-		   appear 3D.  So we get the coords of
-		   each node in xyz format and apply
-		   an x and y offset to get a coord_x
-		   and coord_y.  This is not needed
-		   for linear systems since they can
-		   be laid out in any fashion
-		*/
-
+		/* Translate a 3D or 4D space into a 2D space to the extent
+		 * possible. */
 		if (node_exists == NULL) {
-			node_exists = xmalloc(sizeof(bool) * DIM_SIZE[X] *
-					      DIM_SIZE[Y] * DIM_SIZE[Z]);
+			node_exists_cnt = 1;
+			for (i = 0; i < cluster_dims; i++)
+				node_exists_cnt *= dim_size[i];
+			node_exists = xmalloc(sizeof(bool) * node_exists_cnt);
 		}
 		if (node_ptr) {
-			i = strlen(node_ptr->name);
-			if (i < 4) {
+			len = strlen(node_ptr->name);
+			if (len < cluster_dims) {
 				g_error("bad node name %s\n", node_ptr->name);
 				return SLURM_ERROR;
+			}
+			if (cluster_flags & CLUSTER_FLAG_CRAYXT) {
+				len_a = strlen(node_ptr->node_addr);
+				if (len_a < cluster_dims) {
+					g_error("bad node addr %s\n",
+						node_ptr->node_addr);
+					return SLURM_ERROR;
+				}
+			}
+		}
+	}
+
+	if (cluster_dims == 4) {
+		int a, x, y, z;
+		if (node_ptr) {
+			a = select_char2coord(node_ptr->name[len-4]);
+			x = select_char2coord(node_ptr->name[len-3]);
+			y = select_char2coord(node_ptr->name[len-2]);
+			z = select_char2coord(node_ptr->name[len-1]);
+			/* Ignore "b" dimension for BlueGene/Q */
+			i = ((a * dim_size[1] + x) * dim_size[2] + y) *
+			    dim_size[3] + z;
+			node_exists[i] = true;
+			_calc_coord_4d(a, x, y, z,
+				       button_processor->default_y_offset,
+				       &coord_x, &coord_y, dim_size);
+		} else {
+			for (i = -1, a = 0; a < dim_size[0]; a++) {
+				for (x = 0; x < dim_size[1]; x++) {
+					for (y = 0; y < dim_size[2]; y++) {
+						for (z = 0; z < dim_size[3];
+						     z++) {
+							i++;
+							if (node_exists[i])
+								continue;
+							_calc_coord_4d(a,x,y,z,
+				      				button_processor->
+								default_y_offset,
+								&coord_x,
+								&coord_y,
+								dim_size);
+							_build_empty_node(
+								coord_x,
+								coord_y,
+								button_processor);
+						}
+					}
+				}
+			}
+			xfree(node_exists);
+			return SLURM_SUCCESS;
+		}
+	} else if (cluster_dims == 3) {
+		int x, y, z;
+		if (node_ptr) {
+			if (cluster_flags & CLUSTER_FLAG_CRAYXT) {
+				x = select_char2coord(
+					node_ptr->node_addr[len_a-3]);
+				y = select_char2coord(
+					node_ptr->node_addr[len_a-2]);
+				z = select_char2coord(
+					node_ptr->node_addr[len_a-1]);
 			} else {
-				x = _coord(node_ptr->name[i-3]);
-				y = _coord(node_ptr->name[i-2]);
-				z = _coord(node_ptr->name[i-1]);
-				i = (x * DIM_SIZE[Y] + y) * DIM_SIZE[Z] + z;
-				node_exists[i] = true;
+				x = select_char2coord(node_ptr->name[len-3]);
+				y = select_char2coord(node_ptr->name[len-2]);
+				z = select_char2coord(node_ptr->name[len-1]);
 			}
-			(*button_processor->coord_x) = (x + (DIM_SIZE[Z] - 1))
-				- z;
-			y_offset = button_processor->default_y_offset
-				- (DIM_SIZE[Z] * y);
-			(*button_processor->coord_y) = (y_offset - y) + z;
+			i = (x * dim_size[1] + y) * dim_size[2] + z;
+			node_exists[i] = true;
+			_calc_coord_3d(x, y, z,
+				       button_processor->default_y_offset,
+				       &coord_x, &coord_y, dim_size);
 		} else {
-			for (x = 0; x < DIM_SIZE[X]; x++) {
-				for (y = 0; y < DIM_SIZE[Y]; y++) {
-					for (z = 0; z < DIM_SIZE[Z]; z++) {
-						i = (x * DIM_SIZE[Y] + y) *
-							DIM_SIZE[Z] + z;
+			for (x = 0; x < dim_size[0]; x++) {
+				for (y = 0; y < dim_size[1]; y++) {
+					for (z = 0; z < dim_size[2]; z++) {
+						i = (x * dim_size[1] + y) *
+							dim_size[2] + z;
 						if (node_exists[i])
 							continue;
+						_calc_coord_3d(x, y, z,
+				      			button_processor->
+							default_y_offset,
+							&coord_x, &coord_y,
+							dim_size);
 						_build_empty_node(
-							x, y, z,
+							coord_x, coord_y,
 							button_processor);
 					}
 				}
@@ -603,6 +697,14 @@ static int _add_button_to_list(node_info_t *node_ptr,
 	if (node_ptr == NULL)
 		return SLURM_SUCCESS;
 
+	if (cluster_dims > 1) {
+		(*button_processor->coord_x) = coord_x;
+		(*button_processor->coord_y) = coord_y;
+#if 0
+		g_print("%s %d:%d\n", node_ptr->name, coord_x, coord_y);
+#endif
+	}
+
 	if (!grid_button) {
 		grid_button = xmalloc(sizeof(grid_button_t));
 		grid_button->color_inx = MAKE_INIT;
@@ -613,7 +715,9 @@ static int _add_button_to_list(node_info_t *node_ptr,
 		grid_button->button = gtk_button_new();
 		grid_button->node_name = xstrdup(node_ptr->name);
 
-		gtk_widget_set_size_request(grid_button->button, 10, 10);
+		gtk_widget_set_size_request(grid_button->button,
+					    working_sview_config.button_size,
+					    working_sview_config.button_size);
 		_add_button_signals(grid_button);
 		list_append(button_processor->button_list, grid_button);
 
@@ -641,13 +745,21 @@ static int _add_button_to_list(node_info_t *node_ptr,
 /* 		gtk_frame_set_shadow_type(GTK_FRAME(grid_button->frame), */
 /* 					  GTK_SHADOW_ETCHED_OUT); */
 	if (cluster_dims < 3) {
-		/* On linear systems we just up the
-		   x_coord until we hit the side of
-		   the table and then increment the
-		   coord_y.  We add space inbetween
-		   each 10th row.
-		*/
+		/* On linear systems we just up the x_coord until we hit the
+		 * side of the table and then increment the coord_y.  We add
+		 * space between each tenth row. */
 		(*button_processor->coord_x)++;
+
+		if (button_processor->force_row_break) {
+			(*button_processor->coord_x) = 0;
+			(*button_processor->coord_y)++;
+			gtk_table_set_row_spacing(
+				button_processor->table,
+				(*button_processor->coord_y)-1,
+				working_sview_config.gap_size);
+			return SLURM_SUCCESS;
+		}
+
 		if ((*button_processor->coord_x)
 		    == working_sview_config.grid_x_width) {
 			(*button_processor->coord_x) = 0;
@@ -656,16 +768,8 @@ static int _add_button_to_list(node_info_t *node_ptr,
 			      % working_sview_config.grid_vert))
 				gtk_table_set_row_spacing(
 					button_processor->table,
-					(*button_processor->coord_y)-1, 5);
-		}
-
-		if (button_processor->force_row_break) {
-			(*button_processor->coord_x) = 0;
-			(*button_processor->coord_y)+= 2;
-			gtk_table_set_row_spacing(
-				button_processor->table,
-				(*button_processor->coord_y)-1,	5);
-			return SLURM_SUCCESS;
+					(*button_processor->coord_y)-1,
+					working_sview_config.gap_size);
 		}
 
 		if ((*button_processor->coord_y) == button_processor->table_y)
@@ -676,7 +780,8 @@ static int _add_button_to_list(node_info_t *node_ptr,
 		      % working_sview_config.grid_hori))
 			gtk_table_set_col_spacing(
 				button_processor->table,
-				(*button_processor->coord_x)-1, 5);
+				(*button_processor->coord_x)-1,
+				working_sview_config.gap_size);
 	}
 	return SLURM_SUCCESS;
 }
@@ -687,14 +792,12 @@ static int _grid_table_by_switch(button_processor_t *button_processor,
 	int rc = SLURM_SUCCESS;
 	int inx = 0, ii = 0;
 	switch_record_bitmaps_t *sw_nodes_bitmaps_ptr = g_switch_nodes_maps;
-
+#if TOPO_DEBUG
+	/* engage if want original display below switched */
+	ListIterator itr = list_iterator_create(node_list);
+	sview_node_info_t *sview_node_info_ptr = NULL;
+#endif
 	button_processor->inx = &inx;
-
-	/* engage if want original
-	   ListIterator itr = list_iterator_create(node_list);
-	   sview_node_info_t *sview_node_info_ptr = NULL;
-	   * display below switched
-	   */
 	for (ii=0; ii<g_topo_info_msg_ptr->record_count;
 	     ii++, sw_nodes_bitmaps_ptr++) {
 		int j = 0, first, last;
@@ -728,19 +831,19 @@ static int _grid_table_by_switch(button_processor_t *button_processor,
 		rc = _add_button_to_list(NULL, button_processor);
 	}
 
+#if TOPO_DEBUG
 	/* engage this if want original display below
-	 * switched grid
+	 * switched grid */
 	 button_processor->inx = &inx;
 	 while ((sview_node_info_ptr = list_next(itr))) {
-	 if ((rc = _add_button_to_list(
-	 sview_node_info_ptr->node_ptr,
-	 button_processor)) != SLURM_SUCCESS)
-	 break;
-	 inx++;
+		 if ((rc = _add_button_to_list(
+				sview_node_info_ptr->node_ptr,
+	 			button_processor)) != SLURM_SUCCESS)
+			 break;
+	 	inx++;
 	 }
 	 list_iterator_destroy(itr);
-
-	*/
+#endif
 
 	/* This is needed to get the correct width of the grid window.
 	 * If it is not given then we get a really narrow window. */
@@ -788,6 +891,8 @@ static int _grid_table_by_list(button_processor_t *button_processor,
 static int _init_button_processor(button_processor_t *button_processor,
 				  int node_count)
 {
+	int *dim_size = NULL;
+
 	if (node_count == 0) {
 		g_print("_init_button_processor: no nodes selected\n");
 		return SLURM_ERROR;
@@ -795,15 +900,26 @@ static int _init_button_processor(button_processor_t *button_processor,
 
 	memset(button_processor, 0, sizeof(button_processor_t));
 
+	if (cluster_dims > 1) {
+		dim_size = _get_cluster_dims();
+		if (dim_size == NULL) {
+			g_error("could not read dim_size\n");
+			return SLURM_ERROR;
+		}
+	}
 	if (cluster_dims == 4) {
-		/* FIXME: */
-		return SLURM_ERROR;
+		button_processor->default_y_offset = (dim_size[3] * dim_size[2])
+					+ (dim_size[2] - dim_size[3]);
+		working_sview_config.grid_x_width = (dim_size[1] + dim_size[3])
+						    * dim_size[0];
+		button_processor->table_y = (dim_size[3] * dim_size[2])
+					    + dim_size[2];
 	} else if (cluster_dims == 3) {
-		button_processor->default_y_offset = (DIM_SIZE[Z] * DIM_SIZE[Y])
-			+ (DIM_SIZE[Y] - DIM_SIZE[Z]);
-		working_sview_config.grid_x_width = DIM_SIZE[X] + DIM_SIZE[Z];
-		button_processor->table_y =
-			(DIM_SIZE[Z] * DIM_SIZE[Y]) + DIM_SIZE[Y];
+		button_processor->default_y_offset = (dim_size[2] * dim_size[1])
+			+ (dim_size[1] - dim_size[2]);
+		working_sview_config.grid_x_width = dim_size[0] + dim_size[2];
+		button_processor->table_y = (dim_size[2] * dim_size[1])
+					    + dim_size[1];
 	} else {
 		if (!working_sview_config.grid_x_width) {
 			if (node_count < 50) {
@@ -932,7 +1048,9 @@ extern grid_button_t *create_grid_button_from_another(
 /* 		sview_widget_modify_bg(send_grid_button->button,  */
 /* 				       GTK_STATE_ACTIVE, color); */
 	}
-	gtk_widget_set_size_request(send_grid_button->button, 10, 10);
+	gtk_widget_set_size_request(send_grid_button->button,
+				    working_sview_config.button_size,
+				    working_sview_config.button_size);
 
 	send_grid_button->node_name = xstrdup(name);
 
@@ -968,10 +1086,9 @@ extern char *change_grid_color(List button_list, int start, int end,
 
 	itr = list_iterator_create(button_list);
 	while ((grid_button = list_next(itr))) {
-		if (start != -1)
-			if ((grid_button->inx < start)
-			    ||  (grid_button->inx > end))
-				continue;
+		if ((start != -1) &&
+		    ((grid_button->inx < start) || (grid_button->inx > end)))
+			continue;
 		_change_button_color(grid_button, color_inx, new_col,
 				     color, only_change_unused, state_override);
 	}
@@ -980,6 +1097,50 @@ extern char *change_grid_color(List button_list, int start, int end,
 	return sview_colors[color_inx];
 }
 
+/* This variation of change_grid_color() is faster when changing many
+ * button colors at the same time since we can issue a single call to
+ * _change_button_color() and eliminate a nested loop. */
+extern void change_grid_color_array(List button_list, int array_len,
+				    int *color_inx, bool *color_set_flag,
+				    bool only_change_unused,
+				    enum node_states state_override)
+{
+	ListIterator itr = NULL;
+	grid_button_t *grid_button = NULL;
+	GdkColor color;
+	char *new_col = NULL;
+
+	if (!button_list)
+		return;
+
+	itr = list_iterator_create(button_list);
+	while ((grid_button = list_next(itr))) {
+		if ((grid_button->inx < 0) || (grid_button->inx >= array_len))
+			continue;
+		if (!color_set_flag[grid_button->inx])
+			continue;
+
+		if (color_inx[grid_button->inx] >= 0) {
+			color_inx[grid_button->inx] %= sview_colors_cnt;
+			new_col = sview_colors[color_inx[grid_button->inx]];
+		} else if (color_inx[grid_button->inx] == MAKE_BLACK) {
+			new_col = blank_color;
+		} else if (color_inx[grid_button->inx] == MAKE_TOPO_1) {
+			new_col = topo1_color;
+		} else if (color_inx[grid_button->inx] == MAKE_TOPO_2) {
+			new_col = topo2_color;
+		} else
+			new_col = white_color;
+		gdk_color_parse(new_col, &color);
+
+		_change_button_color(grid_button, color_inx[grid_button->inx],
+				     new_col, color, only_change_unused,
+				     state_override);
+	}
+	list_iterator_destroy(itr);
+	return;
+}
+
 extern void highlight_grid(GtkTreeView *tree_view,
 			   int node_inx_id, int color_inx_id, List button_list)
 {
@@ -1016,10 +1177,6 @@ extern void highlight_grid(GtkTreeView *tree_view,
 		gtk_tree_selection_selected_foreach(
 			gtk_tree_view_get_selection(tree_view),
 			_each_highlight_selected, &grid_foreach);
-	if (working_sview_config.grid_speedup) {
-		gtk_widget_set_sensitive(GTK_WIDGET(main_grid_table), 0);
-		gtk_widget_set_sensitive(GTK_WIDGET(main_grid_table), 1);
-	}
 
 	return;
 }
@@ -1155,7 +1312,7 @@ extern void add_extra_bluegene_buttons(List *button_list, int inx,
 	grid_button_t *grid_button = NULL;
 	grid_button_t *send_grid_button = NULL;
 	int i=0;
-	char *nodes = NULL;
+	char *mp_str = NULL;
 	char tmp_nodes[256];
 	int found = 0;
 	int coord_y=0;
@@ -1194,23 +1351,23 @@ extern void add_extra_bluegene_buttons(List *button_list, int inx,
 
 	for (i=0; i < block_ptr->record_count; i++) {
 		bg_info_ptr = &block_ptr->block_array[i];
-		if (!_block_in_node(bg_info_ptr->bp_inx, inx))
+		if (!_block_in_node(bg_info_ptr->mp_inx, inx))
 			continue;
 		found = 1;
-		nodes = bg_info_ptr->nodes;
-		if (bg_info_ptr->ionodes) {
-			sprintf(tmp_nodes, "%s[%s]", nodes,
-				bg_info_ptr->ionodes);
-			nodes = tmp_nodes;
+		mp_str = bg_info_ptr->mp_str;
+		if (bg_info_ptr->ionode_str) {
+			sprintf(tmp_nodes, "%s[%s]", mp_str,
+				bg_info_ptr->ionode_str);
+			mp_str = tmp_nodes;
 		}
-		if (bg_info_ptr->state == RM_PARTITION_ERROR)
+		if (bg_info_ptr->state & BG_BLOCK_ERROR_FLAG)
 			grid_button->state = NODE_STATE_ERROR;
 		else if (bg_info_ptr->job_running > NO_JOB_RUNNING)
 			grid_button->state = NODE_STATE_ALLOCATED;
 		else
 			grid_button->state = NODE_STATE_IDLE;
 		send_grid_button = create_grid_button_from_another(
-			grid_button, nodes, *color_inx);
+			grid_button, mp_str, *color_inx);
 		grid_button->state = orig_state;
 		if (send_grid_button) {
 			send_grid_button->button_list = *button_list;
@@ -1286,8 +1443,19 @@ extern void put_buttons_in_table(GtkTable *table, List button_list)
 	itr = list_iterator_create(button_list);
 	while ((grid_button = list_next(itr))) {
 		if (cluster_dims == 4) {
-			/* FIXME: */
-			return;
+			grid_button->table = table;
+			gtk_table_attach(table, grid_button->button,
+					 grid_button->table_x,
+					 (grid_button->table_x+1),
+					 grid_button->table_y,
+					 (grid_button->table_y+1),
+					 GTK_SHRINK, GTK_SHRINK,
+					 1, 1);
+			if (!grid_button->table_x) {
+				gtk_table_set_row_spacing(table,
+						grid_button->table_y,
+						working_sview_config.gap_size);
+			}
 		} else if (cluster_dims == 3) {
 			grid_button->table = table;
 			gtk_table_attach(table, grid_button->button,
@@ -1297,10 +1465,11 @@ extern void put_buttons_in_table(GtkTable *table, List button_list)
 					 (grid_button->table_y+1),
 					 GTK_SHRINK, GTK_SHRINK,
 					 1, 1);
-			if (!grid_button->table_x)
+			if (!grid_button->table_x) {
 				gtk_table_set_row_spacing(table,
-							  grid_button->table_y,
-							  5);
+						grid_button->table_y,
+						working_sview_config.gap_size);
+			}
 		} else {
 			grid_button->table = table;
 			grid_button->table_x = coord_x;
@@ -1316,7 +1485,8 @@ extern void put_buttons_in_table(GtkTable *table, List button_list)
 				coord_y++;
 				if (!(coord_y % working_sview_config.grid_vert))
 					gtk_table_set_row_spacing(
-						table, coord_y-1, 5);
+						table, coord_y-1,
+						working_sview_config.gap_size);
 			}
 
 			if (coord_y == button_processor.table_y)
@@ -1329,12 +1499,11 @@ extern void put_buttons_in_table(GtkTable *table, List button_list)
 	}
 	list_iterator_destroy(itr);
 
-#ifndef HAVE_3D
-	/* This is needed to get the correct width of the grid
-	   window.  If it is not given then we get a really narrow
-	   window. */
-	gtk_table_set_row_spacing(table, coord_y-1, 1);
-#endif
+	if (cluster_dims == 0) {
+		/* This is needed to get the correct width of the grid window.
+		 * If it is not given then we get a really narrow window. */
+		gtk_table_set_row_spacing(table, coord_y-1, 1);
+	}
 	gtk_widget_show_all(GTK_WIDGET(table));
 }
 
@@ -1421,7 +1590,8 @@ extern int get_system_stats(GtkTable *table)
 		changed = 0;
 	} else if (rc != SLURM_SUCCESS)
 		return SLURM_ERROR;
-	ba_init(node_info_ptr, 0);
+
+	select_g_ba_init(node_info_ptr, 0);
 
 	node_list = create_node_info_list(node_info_ptr,
 					  changed, FALSE);
@@ -1487,8 +1657,8 @@ extern void sview_init_grid(bool reset_highlight)
 	ListIterator itr = NULL;
 	grid_button_t *grid_button = NULL;
 
-	if ((rc = get_new_info_node(&node_info_ptr, force_refresh))
-	    == SLURM_NO_CHANGE_IN_DATA) {
+	rc = get_new_info_node(&node_info_ptr, force_refresh);
+	if (rc == SLURM_NO_CHANGE_IN_DATA) {
 		/* need to clear out old data */
 		set_grid_used(grid_button_list, -1, -1, false, reset_highlight);
 		return;
@@ -1502,7 +1672,7 @@ extern void sview_init_grid(bool reset_highlight)
 	}
 
 	itr = list_iterator_create(grid_button_list);
-	for(i=0; i<node_info_ptr->record_count; i++) {
+	for (i = 0; i < node_info_ptr->record_count; i++) {
 		int tried_again = 0;
 		node_ptr = &node_info_ptr->node_array[i];
 	try_again:
@@ -1512,7 +1682,6 @@ extern void sview_init_grid(bool reset_highlight)
 			grid_button->state = node_ptr->node_state;
 			gtk_widget_set_state(grid_button->button,
 					     GTK_STATE_NORMAL);
-			change_grid_color(grid_button_list, i, i, i, true, 0);
 			grid_button->used = false;
 			break;
 		}
@@ -1571,8 +1740,4 @@ extern void post_setup_popup_grid_list(popup_info_t *popup_win)
 
 	change_grid_color(popup_win->grid_button_list, -1, -1,
 			  MAKE_BLACK, true, NODE_STATE_IDLE);
-	if (working_sview_config.grid_speedup) {
-		gtk_widget_set_sensitive(GTK_WIDGET(popup_win->grid_table), 0);
-		gtk_widget_set_sensitive(GTK_WIDGET(popup_win->grid_table), 1);
-	}
 }
diff --git a/src/sview/job_info.c b/src/sview/job_info.c
index a285ba65f..05bf16436 100644
--- a/src/sview/job_info.c
+++ b/src/sview/job_info.c
@@ -1,16 +1,15 @@
 /*****************************************************************************\
- *  job_info.c - Functions related to job display
- *  mode of sview.
+ *  job_info.c - Functions related to job display mode of sview.
  *****************************************************************************
  *  Copyright (C) 2004-2007 The Regents of the University of California.
- *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2011 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
  *
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -28,16 +27,29 @@
  *  59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
 \*****************************************************************************/
 
+#include <fcntl.h>
+#include <grp.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
 #include "src/common/uid.h"
 #include "src/common/node_select.h"
 #include "src/sview/sview.h"
 #include "src/common/parse_time.h"
-#include <grp.h>
+#include "src/common/proc_args.h"
+#include "src/common/slurm_strcasestr.h"
 
 #define _DEBUG 0
 #define MAX_CANCEL_RETRY 10
 #define SIZE(a) (sizeof(a)/sizeof(a[0]))
 
+/* We do not read the node table here, but allocate space for up to
+ * SVIEW_MAX_NODE_SPACE nodes and generate a fatal error if we go higher.
+ * Increase this value if needed. */
+#ifndef SVIEW_MAX_NODE_SPACE
+#define SVIEW_MAX_NODE_SPACE (24 * 1024)
+#endif
+
 /* Collection of data for printing reports. Like data is combined here */
 typedef struct {
 	int color_inx;
@@ -83,6 +95,7 @@ enum {
 	SORTID_ALLOC_NODE,
 	SORTID_ALPS_RESV_ID,
 	SORTID_BATCH,
+	SORTID_BATCH_HOST,
 #ifdef HAVE_BG
 	SORTID_NODELIST,
 	SORTID_NODELIST_EXC,
@@ -139,6 +152,7 @@ enum {
 /* 	SORTID_NTASKS_PER_NODE, */
 /* 	SORTID_NTASKS_PER_SOCKET, */
 	SORTID_PARTITION,
+	SORTID_PREEMPT_TIME,
 	SORTID_PRIORITY,
 	SORTID_QOS,
 	SORTID_REASON,
@@ -151,6 +165,7 @@ enum {
 /* 	SORTID_SOCKETS_MIN, */
 	SORTID_STATE,
 	SORTID_STATE_NUM,
+	SORTID_SWITCHES,
 	SORTID_TASKS,
 /* 	SORTID_THREADS_MAX, */
 /* 	SORTID_THREADS_MIN, */
@@ -254,17 +269,17 @@ static display_data_t display_data_job[] = {
 	 create_model_job, admin_edit_job},
 	{G_TYPE_INT, SORTID_STATE_NUM, NULL, FALSE, EDIT_NONE, refresh_job,
 	 create_model_job, admin_edit_job},
+	{G_TYPE_STRING, SORTID_PREEMPT_TIME, "Preempt Time", FALSE,
+	 EDIT_NONE, refresh_job, create_model_job, admin_edit_job},
 	{G_TYPE_STRING, SORTID_TIME_RESIZE, "Time Resize", FALSE,
-	 EDIT_NONE, refresh_job,
-	 create_model_job, admin_edit_job},
+	 EDIT_NONE, refresh_job, create_model_job, admin_edit_job},
 	{G_TYPE_STRING, SORTID_TIME_RUNNING, "Time Running", FALSE,
 	 EDIT_NONE, refresh_job, create_model_job, admin_edit_job},
 	{G_TYPE_STRING, SORTID_TIME_SUBMIT, "Time Submit", FALSE,
 	 EDIT_NONE, refresh_job,
 	 create_model_job, admin_edit_job},
 	{G_TYPE_STRING, SORTID_TIME_ELIGIBLE, "Time Eligible", FALSE,
-	 EDIT_TEXTBOX, refresh_job,
-	 create_model_job, admin_edit_job},
+	 EDIT_TEXTBOX, refresh_job, create_model_job, admin_edit_job},
 	{G_TYPE_STRING, SORTID_TIME_START, "Time Start", FALSE,
 	 EDIT_TEXTBOX, refresh_job, create_model_job, admin_edit_job},
 	{G_TYPE_STRING, SORTID_TIME_END, "Time End", FALSE,
@@ -298,6 +313,7 @@ static display_data_t display_data_job[] = {
 	 refresh_job, create_model_job, admin_edit_job},
 	{G_TYPE_INT, SORTID_RESTARTS, "Restart Count", FALSE, EDIT_NONE,
 	 refresh_job, create_model_job, admin_edit_job},
+	/* Priority is a string so we can edit using a text box */
 	{G_TYPE_STRING, SORTID_PRIORITY, "Priority", FALSE,
 	 EDIT_TEXTBOX, refresh_job, create_model_job, admin_edit_job},
 	{G_TYPE_STRING, SORTID_DERIVED_EC, "Derived Exit Code", FALSE,
@@ -306,6 +322,8 @@ static display_data_t display_data_job[] = {
 	 EDIT_NONE, refresh_job, create_model_job, admin_edit_job},
 	{G_TYPE_STRING, SORTID_BATCH, "Batch Flag", FALSE,
 	 EDIT_NONE, refresh_job, create_model_job, admin_edit_job},
+	{G_TYPE_STRING, SORTID_BATCH_HOST, "Batch Host", FALSE,
+	 EDIT_NONE, refresh_job, create_model_job, admin_edit_job},
 	{G_TYPE_STRING, SORTID_CPU_MIN, "CPUs Min",
 	 FALSE, EDIT_NONE, refresh_job, create_model_job, admin_edit_job},
 	{G_TYPE_STRING, SORTID_CPU_MAX, "CPUs Max",
@@ -322,32 +340,23 @@ static display_data_t display_data_job[] = {
 	 FALSE, EDIT_TEXTBOX, refresh_job, create_model_job, admin_edit_job},
 	{G_TYPE_STRING, SORTID_NODES_MAX, "Nodes Max",
 	 FALSE, EDIT_TEXTBOX, refresh_job, create_model_job, admin_edit_job},
-/* 	{G_TYPE_STRING, SORTID_SOCKETS_MIN, "Min Sockets",  */
-/* 	 FALSE, EDIT_TEXTBOX, refresh_job, create_model_job, admin_edit_job}, */
-/* 	{G_TYPE_STRING, SORTID_SOCKETS_MAX, "Max Sockets",  */
-/* 	 FALSE, EDIT_NONE, refresh_job, create_model_job, admin_edit_job}, */
-/* 	{G_TYPE_STRING, SORTID_CORES_MIN, "Min Cores",  */
-/* 	 FALSE, EDIT_TEXTBOX, refresh_job, create_model_job, admin_edit_job}, */
-/* 	{G_TYPE_STRING, SORTID_CORES_MAX, "Max Cores",  */
-/* 	 FALSE, EDIT_NONE, refresh_job, create_model_job, admin_edit_job}, */
-/* 	{G_TYPE_STRING, SORTID_THREADS_MIN, "Min Threads",  */
-/* 	 FALSE, EDIT_TEXTBOX, refresh_job, create_model_job, admin_edit_job}, */
-/* 	{G_TYPE_STRING, SORTID_THREADS_MAX, "Max Threads",  */
-/* 	 FALSE, EDIT_NONE, refresh_job, create_model_job, admin_edit_job}, */
 	{G_TYPE_STRING, SORTID_CPU_REQ, "Min CPUs Per Node",
 	 FALSE, EDIT_TEXTBOX, refresh_job, create_model_job, admin_edit_job},
-	{G_TYPE_STRING, SORTID_MEM_MIN, "Min Memory Per Node",
+	{G_TYPE_STRING, SORTID_MEM_MIN, "Min Memory",
 	 FALSE, EDIT_TEXTBOX, refresh_job, create_model_job, admin_edit_job},
 	{G_TYPE_STRING, SORTID_TMP_DISK, "Min Tmp Disk Per Node",
 	 FALSE, EDIT_TEXTBOX, refresh_job, create_model_job, admin_edit_job},
-	{G_TYPE_STRING, SORTID_NICE, "Nice",
-	 FALSE, EDIT_TEXTBOX, refresh_job, create_model_job, admin_edit_job},
-	{G_TYPE_STRING, SORTID_ACCOUNT, "Account",
-	 FALSE, EDIT_TEXTBOX, refresh_job, create_model_job, admin_edit_job},
+	/* Nice is a string so we can edit using a text box */
+	{G_TYPE_STRING, SORTID_NICE, "Nice", FALSE,
+	 EDIT_TEXTBOX, refresh_job, create_model_job, admin_edit_job},
+	{G_TYPE_STRING, SORTID_ACCOUNT, "Account", FALSE,
+	 EDIT_TEXTBOX, refresh_job, create_model_job, admin_edit_job},
 	{G_TYPE_STRING, SORTID_QOS, "QOS", FALSE,
 	 EDIT_TEXTBOX, refresh_job, create_model_job, admin_edit_job},
 	{G_TYPE_STRING, SORTID_REASON, "Reason Waiting",
 	 FALSE, EDIT_NONE, refresh_job, create_model_job, admin_edit_job},
+	{G_TYPE_STRING, SORTID_SWITCHES, "Switches",
+	 FALSE, EDIT_TEXTBOX, refresh_job, create_model_job, admin_edit_job},
 	{G_TYPE_STRING, SORTID_FEATURES, "Features",
 	 FALSE, EDIT_TEXTBOX, refresh_job, create_model_job, admin_edit_job},
 	{G_TYPE_STRING, SORTID_GRES, "Gres",
@@ -358,12 +367,6 @@ static display_data_t display_data_job[] = {
 	 FALSE, EDIT_TEXTBOX, refresh_job, create_model_job, admin_edit_job},
 	{G_TYPE_STRING, SORTID_ALLOC_NODE, "Alloc Node : Sid",
 	 FALSE, EDIT_NONE, refresh_job, create_model_job, admin_edit_job},
-/* 	{G_TYPE_STRING, SORTID_NTASKS_PER_NODE, "Num tasks per Node",  */
-/* 	 FALSE, EDIT_NONE, refresh_job, create_model_job, admin_edit_job}, */
-/* 	{G_TYPE_STRING, SORTID_NTASKS_PER_SOCKET, "Num tasks per Socket",  */
-/* 	 FALSE, EDIT_NONE, refresh_job, create_model_job, admin_edit_job}, */
-/* 	{G_TYPE_STRING, SORTID_NTASKS_PER_CORE, "Num tasks per Core",  */
-/* 	 FALSE, EDIT_NONE, refresh_job, create_model_job, admin_edit_job}, */
 #ifdef HAVE_AIX
 	{G_TYPE_STRING, SORTID_NETWORK, "Network",
 	 FALSE, EDIT_NONE, refresh_job, create_model_job, admin_edit_job},
@@ -388,6 +391,20 @@ static display_data_t display_data_job[] = {
 	{G_TYPE_NONE, -1, NULL, FALSE, EDIT_NONE}
 };
 
+static display_data_t create_data_job[] = {
+	{G_TYPE_INT, SORTID_POS, NULL, FALSE, EDIT_NONE,
+	 refresh_job, create_model_job, admin_edit_job},
+	{G_TYPE_STRING, SORTID_COMMAND, "Script File", FALSE, EDIT_TEXTBOX,
+	 refresh_job, create_model_job, admin_edit_job},
+	{G_TYPE_STRING, SORTID_TIMELIMIT, "Time Limit", FALSE, EDIT_TEXTBOX,
+	 refresh_job, create_model_job, admin_edit_job},
+	{G_TYPE_STRING, SORTID_NODES_MIN, "Nodes Min", FALSE, EDIT_TEXTBOX,
+	 refresh_job, create_model_job, admin_edit_job},
+	{G_TYPE_STRING, SORTID_TASKS, "Task Count", FALSE, EDIT_TEXTBOX,
+	 refresh_job, create_model_job, admin_edit_job},
+	{G_TYPE_NONE, -1, NULL, FALSE, EDIT_NONE}
+};
+
 static display_data_t options_data_job[] = {
 	{G_TYPE_INT, SORTID_POS, NULL, FALSE, EDIT_NONE},
 	{G_TYPE_STRING, INFO_PAGE, "Full Info", TRUE, JOB_PAGE},
@@ -451,6 +468,35 @@ static void _update_info_step(sview_job_info_t *sview_job_info_ptr,
 			      GtkTreeIter *step_iter,
 			      GtkTreeIter *iter);
 
+static char *_read_file(const char *f_name)
+{
+	int fd, f_size, offset = 0;
+	ssize_t rd_size;
+	struct stat f_stat;
+	char *buf;
+
+	fd = open(f_name, 0);
+	if (fd < 0)
+		return NULL;
+	if (fstat(fd, &f_stat)) {
+		close(fd);
+		return NULL;
+	}
+	f_size = f_stat.st_size;
+	buf = xmalloc(f_size);
+	while (offset < f_size) {
+		rd_size = read(fd, buf+offset, f_size-offset);
+		if (rd_size < 0) {
+			if ((errno == EAGAIN) || (errno == EINTR))
+				continue;
+			xfree(buf);
+			break;
+		}
+		offset += rd_size;
+	}
+	close(fd);
+	return buf;
+}
 
 static void _job_info_list_del(void *object)
 {
@@ -588,7 +634,8 @@ static void _set_active_combo_job(GtkComboBox *combo,
 	char *temp_char = NULL;
 	int action = 0;
 
-	gtk_tree_model_get(model, iter, type, &temp_char, -1);
+	if (model)
+		gtk_tree_model_get(model, iter, type, &temp_char, -1);
 	if (!temp_char)
 		goto end_it;
 	switch(type) {
@@ -657,10 +704,11 @@ static const char *_set_job_msg(job_desc_msg_t *job_msg, const char *new_text,
 	int temp_int = 0;
 	char *p;
 	uint16_t rotate;
-	uint16_t conn_type;
+	uint16_t conn_type[cluster_dims];
 	char* token, *delimiter = ",x", *next_ptr;
+	char *sep_char;
 	int j;
-	uint16_t geo[SYSTEM_DIMENSIONS];
+	uint16_t geo[cluster_dims];
 	char* geometry_tmp = xstrdup(new_text);
 	char* original_ptr = geometry_tmp;
 
@@ -788,11 +836,17 @@ static const char *_set_job_msg(job_desc_msg_t *job_msg, const char *new_text,
 			temp_int *= 1024;
 		else if (*p == 'm' || *p == 'M')
 			temp_int *= 1048576;
+		p = slurm_strcasestr((char *)new_text, "cpu");
+		if (p)
+			type = "min memory per cpu";
+		else
+			type = "min memory per node";
 
-		type = "min memory per node";
 		if (temp_int <= 0)
 			goto return_error;
 		job_msg->pn_min_memory = (uint32_t)temp_int;
+		if (p)
+			job_msg->pn_min_memory |= MEM_PER_CPU;
 		break;
 	case SORTID_TMP_DISK:
 		temp_int = strtol(new_text, (char **)NULL, 10);
@@ -870,6 +924,34 @@ static const char *_set_job_msg(job_desc_msg_t *job_msg, const char *new_text,
 		job_msg->qos = xstrdup(new_text);
 		type = "qos";
 		break;
+	case SORTID_COMMAND:
+		type = "script_file";
+		xfree(job_msg->script);
+		job_msg->script = _read_file(new_text);
+		if (job_msg->script == NULL)
+			goto return_error;
+		if (job_msg->argc) {
+			for (j = 0; j < job_msg->argc; j++)
+				xfree(job_msg->argv[j]);
+		}
+		xfree(job_msg->argv);
+		xfree(job_msg->name);
+		job_msg->argc = 1;
+		job_msg->argv = xmalloc(sizeof(char *) * job_msg->argc);
+		if (new_text[0] == '/') {
+			job_msg->argv[0] = xstrdup(new_text);
+			token = strrchr(new_text, (int) '/');
+			if (token)
+				job_msg->name = xstrdup(token + 1);
+		} else {
+			job_msg->argv[0] = xmalloc(1024);
+			if (!getcwd(job_msg->argv[0], 1024))
+				goto return_error;
+			xstrcat(job_msg->argv[0], "/");
+			xstrcat(job_msg->argv[0], new_text);
+			job_msg->name = xstrdup(new_text);
+		}
+		break;
 	case SORTID_DEPENDENCY:
 		job_msg->dependency = xstrdup(new_text);
 		type = "dependency";
@@ -877,9 +959,9 @@ static const char *_set_job_msg(job_desc_msg_t *job_msg, const char *new_text,
 	case SORTID_GEOMETRY:
 		type = "geometry";
 		token = strtok_r(geometry_tmp, delimiter, &next_ptr);
-		for (j=0; j<SYSTEM_DIMENSIONS; j++)
+		for (j=0; j<cluster_dims; j++)
 			geo[j] = (uint16_t) NO_VAL;
-		for (j=0; j<SYSTEM_DIMENSIONS; j++) {
+		for (j=0; j<cluster_dims; j++) {
 			if (!token) {
 				//error("insufficient dimensions in "
 				//      "Geometry");
@@ -924,22 +1006,9 @@ static const char *_set_job_msg(job_desc_msg_t *job_msg, const char *new_text,
 					    (void *) &rotate);
 		break;
 	case SORTID_CONNECTION:
+		verify_conn_type(new_text, conn_type);
+
 		type = "connection";
-		if (!strcasecmp(new_text, "torus")) {
-			conn_type = SELECT_TORUS;
-		} else if (!strcasecmp(new_text, "mesh")) {
-			conn_type = SELECT_MESH;
-		} else if (!strcasecmp(new_text, "htc smp")) {
-			conn_type = SELECT_HTC_S;
-		} else if (!strcasecmp(new_text, "htc dual")) {
-			conn_type = SELECT_HTC_D;
-		} else if (!strcasecmp(new_text, "htc virtual")) {
-			conn_type = SELECT_HTC_V;
-		} else if (!strcasecmp(new_text, "htc linux")) {
-			conn_type = SELECT_HTC_L;
-		} else {
-			conn_type = SELECT_NAV;
-		}
 
 		if (!job_msg->select_jobinfo)
 			job_msg->select_jobinfo
@@ -1002,6 +1071,15 @@ static const char *_set_job_msg(job_desc_msg_t *job_msg, const char *new_text,
 		if (job_msg->begin_time < time(NULL))
 			job_msg->begin_time = time(NULL);
 		break;
+	case SORTID_SWITCHES:
+		type = "switches";
+		job_msg->req_switch =
+			(uint32_t) strtol(new_text, &sep_char, 10);
+		if (sep_char && sep_char[0] == '@') {
+			job_msg->wait4switch = time_str2mins(sep_char+1) * 60;
+		}
+
+		break;
 	default:
 		type = "unknown";
 		break;
@@ -1166,12 +1244,14 @@ static void _layout_job_record(GtkTreeView *treeview,
 {
 	char *nodes = NULL, *reason = NULL, *uname = NULL;
 	char tmp_char[50];
+	char time_buf[32];
 	char running_char[50];
 	time_t now_time = time(NULL);
 	int suspend_secs = 0;
 	job_info_t *job_ptr = sview_job_info_ptr->job_ptr;
 	struct group *group_info = NULL;
 	uint16_t term_sig = 0;
+	uint32_t min_mem = 0;
 
 	GtkTreeIter iter;
 	GtkTreeStore *treestore =
@@ -1235,6 +1315,11 @@ static void _layout_job_record(GtkTreeView *treeview,
 						 SORTID_BATCH),
 				   tmp_char);
 
+	add_display_treestore_line(update, treestore, &iter,
+				   find_col_name(display_data_job,
+						 SORTID_BATCH_HOST),
+				   job_ptr->batch_host);
+
 	if (cluster_flags & CLUSTER_FLAG_BG) {
 		add_display_treestore_line(update, treestore, &iter,
 					   find_col_name(display_data_job,
@@ -1441,15 +1526,26 @@ static void _layout_job_record(GtkTreeView *treeview,
 						 SORTID_CPU_REQ),
 				   tmp_char);
 
-	if (job_ptr->pn_min_memory > 0)
-		convert_num_unit((float)job_ptr->pn_min_memory,
+	min_mem = job_ptr->pn_min_memory;
+	if (min_mem & MEM_PER_CPU)
+		min_mem &= (~MEM_PER_CPU);
+
+	if (min_mem > 0) {
+		int len;
+		convert_num_unit((float)min_mem,
 				 tmp_char, sizeof(tmp_char), UNIT_MEGA);
-	else
+		len = strlen(tmp_char);
+		if (job_ptr->pn_min_memory & MEM_PER_CPU)
+			sprintf(tmp_char+len, " Per CPU");
+		else
+			sprintf(tmp_char+len, " Per Node");
+	} else
 		sprintf(tmp_char, " ");
 	add_display_treestore_line(update, treestore, &iter,
 				   find_col_name(display_data_job,
 						 SORTID_MEM_MIN),
 				   tmp_char);
+
 	if (job_ptr->pn_min_tmp_disk > 0)
 		convert_num_unit((float)job_ptr->pn_min_tmp_disk,
 				 tmp_char, sizeof(tmp_char), UNIT_MEGA);
@@ -1471,7 +1567,7 @@ static void _layout_job_record(GtkTreeView *treeview,
 							 SORTID_NETWORK),
 					   job_ptr->network);
 
-	if (job_ptr->pn_min_memory > 0)
+	if (job_ptr->nice > 0)
 		sprintf(tmp_char, "%u", job_ptr->nice - NICE_OFFSET);
 	else
 		sprintf(tmp_char, " ");
@@ -1627,6 +1723,16 @@ static void _layout_job_record(GtkTreeView *treeview,
 						 SORTID_TIMELIMIT),
 				   tmp_char);
 
+	if (job_ptr->preempt_time) {
+		slurm_make_time_str((time_t *)&job_ptr->preempt_time, tmp_char,
+				    sizeof(tmp_char));
+	} else
+		sprintf(tmp_char, "N/A");
+	add_display_treestore_line(update, treestore, &iter,
+				   find_col_name(display_data_job,
+						 SORTID_PREEMPT_TIME),
+				   tmp_char);
+
 	if (job_ptr->resize_time) {
 		slurm_make_time_str((time_t *)&job_ptr->resize_time, tmp_char,
 				    sizeof(tmp_char));
@@ -1660,6 +1766,15 @@ static void _layout_job_record(GtkTreeView *treeview,
 						 SORTID_TIME_SUSPEND),
 				   tmp_char);
 
+	secs2time_str((time_t) job_ptr->wait4switch, time_buf,
+			sizeof(time_buf));
+	snprintf(tmp_char, sizeof(tmp_char), "%u@%s\n",
+			job_ptr->req_switch, time_buf);
+	add_display_treestore_line(update, treestore, &iter,
+				   find_col_name(display_data_job,
+						 SORTID_SWITCHES),
+				   tmp_char);
+
 	uname = uid_to_string((uid_t)job_ptr->user_id);
 	add_display_treestore_line(update, treestore, &iter,
 				   find_col_name(display_data_job,
@@ -1681,24 +1796,133 @@ static void _update_job_record(sview_job_info_t *sview_job_info_ptr,
 			       GtkTreeStore *treestore,
 			       GtkTreeIter *iter)
 {
-	char *nodes = NULL, *reason = NULL, *uname = NULL;
-	char tmp_char[50];
+	char tmp_time_run[40],  tmp_time_resize[40], tmp_time_submit[40];
+	char tmp_time_elig[40], tmp_time_start[40],  tmp_time_end[40];
+	char tmp_time_sus[40],  tmp_time_limit[40],  tmp_alloc_node[40];
+	char tmp_exit[40],      tmp_group_id[40],    tmp_derived_ec[40];
+	char tmp_cpu_cnt[40],   tmp_node_cnt[40],    tmp_disk[40];
+	char tmp_cpus_max[40],  tmp_mem_min[40],     tmp_cpu_req[40];
+	char tmp_nodes_min[40], tmp_nodes_max[40],   tmp_cpus_per_task[40];
+	char tmp_prio[40],      tmp_nice[40],        tmp_preempt_time[40];
+	char tmp_rqswitch[40];
+	char *tmp_batch,  *tmp_cont, *tmp_shared, *tmp_requeue, *tmp_uname;
+	char *tmp_reason, *tmp_nodes;
+	char time_buf[32];
 	time_t now_time = time(NULL);
 	int suspend_secs = 0;
 	GtkTreeIter step_iter;
-	int childern = 0;
 	job_info_t *job_ptr = sview_job_info_ptr->job_ptr;
 	struct group *group_info = NULL;
 	uint16_t term_sig = 0;
+	uint32_t min_mem = 0;
+
+	snprintf(tmp_alloc_node, sizeof(tmp_alloc_node), "%s:%u",
+		 job_ptr->alloc_node, job_ptr->alloc_sid);
+
+	if (job_ptr->batch_flag)
+		tmp_batch = "yes";
+	else
+		tmp_batch = "no";
+
+	if (job_ptr->contiguous)
+		tmp_cont = "yes";
+	else
+		tmp_cont = "no";
+
+	if (job_ptr->cpus_per_task > 0)
+		sprintf(tmp_cpus_per_task, "%u", job_ptr->cpus_per_task);
+	else
+		tmp_cpus_per_task[0] = '\0';
+
+	if (cluster_flags & CLUSTER_FLAG_BG) {
+		convert_num_unit((float)job_ptr->num_cpus,
+				 tmp_cpu_cnt, sizeof(tmp_cpu_cnt),
+				 UNIT_NONE);
+	} else {
+		snprintf(tmp_cpu_cnt, sizeof(tmp_cpu_cnt), "%u",
+			 job_ptr->num_cpus);
+	}
+
+	convert_num_unit((float)job_ptr->pn_min_cpus,
+			 tmp_cpu_req, sizeof(tmp_cpu_req), UNIT_NONE);
+
+	if (cluster_flags & CLUSTER_FLAG_BG) {
+		convert_num_unit((float)job_ptr->max_cpus,
+				 tmp_cpus_max, sizeof(tmp_cpus_max),
+				 UNIT_NONE);
+	} else {
+		snprintf(tmp_cpus_max, sizeof(tmp_cpus_max), "%u",
+			 job_ptr->max_cpus);
+	}
+
+	convert_num_unit((float)job_ptr->pn_min_tmp_disk,
+			 tmp_disk, sizeof(tmp_disk), UNIT_MEGA);
+
+	if (WIFSIGNALED(job_ptr->derived_ec))
+		term_sig = WTERMSIG(job_ptr->derived_ec);
+	snprintf(tmp_derived_ec, sizeof(tmp_derived_ec), "%u:%u",
+		 WEXITSTATUS(job_ptr->derived_ec), term_sig);
+
+	if (WIFSIGNALED(job_ptr->exit_code))
+		term_sig = WTERMSIG(job_ptr->exit_code);
+	else
+		term_sig = 0;
+	snprintf(tmp_exit, sizeof(tmp_exit), "%u:%u",
+		 WEXITSTATUS(job_ptr->exit_code), term_sig);
+
+	group_info = getgrgid((gid_t) job_ptr->group_id);
+	if ( group_info && group_info->gr_name[ 0 ] ) {
+		snprintf(tmp_group_id, sizeof(tmp_group_id), "%s",
+			 group_info->gr_name);
+	} else {
+		snprintf(tmp_group_id, sizeof(tmp_group_id), "%u",
+			 job_ptr->group_id);
+	}
+
+	min_mem = job_ptr->pn_min_memory;
+	if (min_mem & MEM_PER_CPU)
+		min_mem &= (~MEM_PER_CPU);
+
+	if (min_mem > 0) {
+		int len;
+		convert_num_unit((float)min_mem,
+				 tmp_mem_min, sizeof(tmp_mem_min), UNIT_MEGA);
+		len = strlen(tmp_mem_min);
+		if (job_ptr->pn_min_memory & MEM_PER_CPU)
+			sprintf(tmp_mem_min+len, " Per CPU");
+		else
+			sprintf(tmp_mem_min+len, " Per Node");
+	} else
+		sprintf(tmp_mem_min, " ");
 
 	if (cluster_flags & CLUSTER_FLAG_BG)
-		gtk_tree_store_set(treestore, iter, SORTID_SMALL_BLOCK,
-				   sview_job_info_ptr->small_block, -1);
+		convert_num_unit((float)sview_job_info_ptr->node_cnt,
+				 tmp_node_cnt, sizeof(tmp_node_cnt), UNIT_NONE);
+	else
+		sprintf(tmp_node_cnt, "%u", sview_job_info_ptr->node_cnt);
+
+	sprintf(tmp_nodes_min, "%u", sview_job_info_ptr->node_cnt);
+
+	if (job_ptr->state_desc)
+		tmp_reason = job_ptr->state_desc;
+	else
+		tmp_reason = job_reason_string(job_ptr->state_reason);
+
+	if (job_ptr->requeue)
+		tmp_requeue = "yes";
+	else
+		tmp_requeue =  "no";
+
+	if (job_ptr->shared)
+		tmp_shared = "yes";
+	else
+		tmp_shared = "no";
+
+	sprintf(tmp_nice, "%u", job_ptr->nice - NICE_OFFSET);
 
-	gtk_tree_store_set(treestore, iter, SORTID_UPDATED, 1, -1);
 	if (!job_ptr->nodes || !strcasecmp(job_ptr->nodes,"waiting...")) {
-		sprintf(tmp_char,"00:00:00");
-		nodes = "waiting...";
+		sprintf(tmp_time_run,"00:00:00");
+		tmp_nodes = "waiting...";
 	} else {
 		if (IS_JOB_SUSPENDED(job_ptr))
 			now_time = job_ptr->pre_sus_time;
@@ -1716,336 +1940,211 @@ static void _update_job_record(sview_job_info_t *sview_job_info_ptr,
 					now_time, job_ptr->start_time);
 		}
 		suspend_secs = (time(NULL) - job_ptr->start_time) - now_time;
-		secs2time_str(now_time, tmp_char, sizeof(tmp_char));
-		nodes = sview_job_info_ptr->nodes;
+		secs2time_str(now_time, tmp_time_run, sizeof(tmp_time_run));
+		tmp_nodes = sview_job_info_ptr->nodes;
 	}
-	gtk_tree_store_set(treestore, iter, SORTID_COLOR,
-			   sview_colors[sview_job_info_ptr->color_inx], -1);
-	gtk_tree_store_set(treestore, iter, SORTID_COLOR_INX,
-			   sview_job_info_ptr->color_inx, -1);
 
-	gtk_tree_store_set(treestore, iter, SORTID_TIME_RUNNING, tmp_char, -1);
-	if (job_ptr->resize_time) {
-		slurm_make_time_str((time_t *)&job_ptr->resize_time, tmp_char,
-				    sizeof(tmp_char));
-	} else
-		sprintf(tmp_char, "N/A");
-	gtk_tree_store_set(treestore, iter, SORTID_TIME_RESIZE, tmp_char, -1);
-	slurm_make_time_str((time_t *)&job_ptr->submit_time, tmp_char,
-			    sizeof(tmp_char));
-	gtk_tree_store_set(treestore, iter, SORTID_TIME_SUBMIT, tmp_char, -1);
-	slurm_make_time_str((time_t *)&job_ptr->eligible_time, tmp_char,
-			    sizeof(tmp_char));
-	gtk_tree_store_set(treestore, iter, SORTID_TIME_ELIGIBLE, tmp_char, -1);
-	slurm_make_time_str((time_t *)&job_ptr->start_time, tmp_char,
-			    sizeof(tmp_char));
-	gtk_tree_store_set(treestore, iter, SORTID_TIME_START, tmp_char, -1);
+	if (job_ptr->max_nodes > 0)
+		sprintf(tmp_nodes_max, "%u", sview_job_info_ptr->node_cnt);
+	else
+		tmp_nodes_max[0] = '\0';
+
+	sprintf(tmp_prio, "%u", job_ptr->priority);
+
+	slurm_make_time_str((time_t *)&job_ptr->eligible_time, tmp_time_elig,
+			    sizeof(tmp_time_elig));
+
 	if ((job_ptr->time_limit == INFINITE) &&
 	    (job_ptr->end_time > time(NULL)))
-		sprintf(tmp_char, "Unknown");
+		sprintf(tmp_time_end, "Unknown");
 	else
-		slurm_make_time_str((time_t *)&job_ptr->end_time, tmp_char,
-				    sizeof(tmp_char));
-	gtk_tree_store_set(treestore, iter, SORTID_TIME_END, tmp_char, -1);
-	secs2time_str(suspend_secs, tmp_char, sizeof(tmp_char));
-	gtk_tree_store_set(treestore, iter, SORTID_TIME_SUSPEND, tmp_char, -1);
+		slurm_make_time_str((time_t *)&job_ptr->end_time, tmp_time_end,
+				    sizeof(tmp_time_end));
 
 	if (job_ptr->time_limit == NO_VAL)
-		sprintf(tmp_char, "Partition Limit");
+		sprintf(tmp_time_limit, "Partition Limit");
 	else if (job_ptr->time_limit == INFINITE)
-		sprintf(tmp_char, "Infinite");
+		sprintf(tmp_time_limit, "Infinite");
 	else
 		secs2time_str((job_ptr->time_limit * 60),
-			      tmp_char, sizeof(tmp_char));
-	gtk_tree_store_set(treestore, iter, SORTID_TIMELIMIT, tmp_char, -1);
+			      tmp_time_limit, sizeof(tmp_time_limit));
 
-	gtk_tree_store_set(treestore, iter, SORTID_ALLOC, 1, -1);
-	gtk_tree_store_set(treestore, iter, SORTID_JOBID, job_ptr->job_id, -1);
-	gtk_tree_store_set(treestore, iter,
-			   SORTID_PARTITION, job_ptr->partition, -1);
-	snprintf(tmp_char, sizeof(tmp_char), "%s:%u",
-		 job_ptr->alloc_node, job_ptr->alloc_sid);
-	gtk_tree_store_set(treestore, iter, SORTID_ALLOC_NODE, tmp_char, -1);
+	if (job_ptr->preempt_time) {
+		slurm_make_time_str((time_t *)&job_ptr->preempt_time,
+				    tmp_preempt_time, sizeof(tmp_time_resize));
+	} else
+		sprintf(tmp_preempt_time, "N/A");
 
-	group_info = getgrgid((gid_t) job_ptr->group_id );
-	if ( group_info && group_info->gr_name[ 0 ] )
-		snprintf(tmp_char, sizeof(tmp_char), "%s", group_info->gr_name);
-	else
-		snprintf(tmp_char, sizeof(tmp_char), "%u", job_ptr->group_id);
+	if (job_ptr->resize_time) {
+		slurm_make_time_str((time_t *)&job_ptr->resize_time,
+				    tmp_time_resize, sizeof(tmp_time_resize));
+	} else
+		sprintf(tmp_time_resize, "N/A");
 
-	gtk_tree_store_set(treestore, iter,
-			   SORTID_GROUP_ID,
-			   tmp_char, -1);
+	slurm_make_time_str((time_t *)&job_ptr->start_time, tmp_time_start,
+			    sizeof(tmp_time_start));
 
-	uname = uid_to_string((uid_t)job_ptr->user_id);
-	gtk_tree_store_set(treestore, iter,
-			   SORTID_USER_ID, uname, -1);
-	xfree(uname);
+	slurm_make_time_str((time_t *)&job_ptr->submit_time, tmp_time_submit,
+			    sizeof(tmp_time_submit));
 
-	if (cluster_flags & CLUSTER_FLAG_BG) {
-		gtk_tree_store_set(treestore, iter,
-				   SORTID_BLOCK,
-				   select_g_select_jobinfo_sprint(
-					   job_ptr->select_jobinfo,
-					   tmp_char,
-					   sizeof(tmp_char),
-					   SELECT_PRINT_BG_ID), -1);
-
-		gtk_tree_store_set(treestore, iter,
-				   SORTID_CONNECTION,
-				   select_g_select_jobinfo_sprint(
-					   job_ptr->select_jobinfo,
-					   tmp_char,
-					   sizeof(tmp_char),
-					   SELECT_PRINT_CONNECTION), -1);
-		gtk_tree_store_set(treestore, iter,
-				   SORTID_GEOMETRY,
-				   select_g_select_jobinfo_sprint(
-					   job_ptr->select_jobinfo,
-					   tmp_char,
-					   sizeof(tmp_char),
-					   SELECT_PRINT_GEOMETRY), -1);
-		gtk_tree_store_set(treestore, iter,
-				   SORTID_ROTATE,
-				   select_g_select_jobinfo_sprint(
-					   job_ptr->select_jobinfo,
-					   tmp_char,
-					   sizeof(tmp_char),
-					   SELECT_PRINT_ROTATE), -1);
-		if (cluster_flags & CLUSTER_FLAG_BGL)
-			gtk_tree_store_set(treestore, iter,
-					   SORTID_IMAGE_BLRTS,
-					   select_g_select_jobinfo_sprint(
-						   job_ptr->select_jobinfo,
-						   tmp_char,
-						   sizeof(tmp_char),
-						   SELECT_PRINT_BLRTS_IMAGE),
-					   -1);
-		gtk_tree_store_set(treestore, iter,
-				   SORTID_IMAGE_LINUX,
-				   select_g_select_jobinfo_sprint(
-					   job_ptr->select_jobinfo,
-					   tmp_char,
-					   sizeof(tmp_char),
-					   SELECT_PRINT_LINUX_IMAGE), -1);
+	secs2time_str(suspend_secs, tmp_time_sus, sizeof(tmp_time_sus));
 
-		gtk_tree_store_set(treestore, iter,
-				   SORTID_IMAGE_MLOADER,
-				   select_g_select_jobinfo_sprint(
-					   job_ptr->select_jobinfo,
-					   tmp_char,
-					   sizeof(tmp_char),
-					   SELECT_PRINT_MLOADER_IMAGE), -1);
+	if (job_ptr->req_switch != NO_VAL) {
+		if (job_ptr->wait4switch != NO_VAL) {
+			secs2time_str((time_t) job_ptr->wait4switch, time_buf,
+					sizeof(time_buf));
+			sprintf(tmp_rqswitch, "%u@%s",
+					job_ptr->req_switch, time_buf);
+		} else {
+			sprintf(tmp_rqswitch, "%u", job_ptr->req_switch);
+		}
 
-		gtk_tree_store_set(treestore, iter,
-				   SORTID_IMAGE_RAMDISK,
-				   select_g_select_jobinfo_sprint(
-					   job_ptr->select_jobinfo,
-					   tmp_char,
-					   sizeof(tmp_char),
-					   SELECT_PRINT_RAMDISK_IMAGE), -1);
+	} else {
+		sprintf(tmp_rqswitch, "N/A");
 	}
 
-	if (cluster_flags & CLUSTER_FLAG_CRAYXT)
-		gtk_tree_store_set(treestore, iter,
-				   SORTID_ALPS_RESV_ID,
-				   select_g_select_jobinfo_sprint(
-					   job_ptr->select_jobinfo,
-					   tmp_char,
-					   sizeof(tmp_char),
-					   SELECT_PRINT_DATA), -1);
-
-	gtk_tree_store_set(treestore, iter, SORTID_NAME, job_ptr->name, -1);
-	gtk_tree_store_set(treestore, iter, SORTID_WCKEY, job_ptr->wckey, -1);
-	gtk_tree_store_set(treestore, iter,
-			   SORTID_STATE,
-			   job_state_string(job_ptr->job_state), -1);
-	gtk_tree_store_set(treestore, iter,
-			   SORTID_STATE_NUM,
-			   job_ptr->job_state, -1);
-
-	if (cluster_flags & CLUSTER_FLAG_BG)
-		convert_num_unit((float)sview_job_info_ptr->node_cnt,
-				 tmp_char, sizeof(tmp_char), UNIT_NONE);
-	else
-		sprintf(tmp_char, "%u", sview_job_info_ptr->node_cnt);
-
-	gtk_tree_store_set(treestore, iter,
-			   SORTID_NODES, tmp_char, -1);
 
-	if (cluster_flags & CLUSTER_FLAG_BG)
-		convert_num_unit((float)job_ptr->num_cpus,
-				 tmp_char, sizeof(tmp_char),
-				 UNIT_NONE);
-	else
-		snprintf(tmp_char, sizeof(tmp_char), "%u", job_ptr->num_cpus);
+	tmp_uname = uid_to_string((uid_t)job_ptr->user_id);
 
 	gtk_tree_store_set(treestore, iter,
-			   SORTID_CPUS, tmp_char, -1);
-
-	if (cluster_flags & CLUSTER_FLAG_BG)
-		convert_num_unit((float)job_ptr->num_cpus,
-				 tmp_char, sizeof(tmp_char),
-				 UNIT_NONE);
-	else
-		snprintf(tmp_char, sizeof(tmp_char), "%u", job_ptr->num_cpus);
+			   SORTID_ACCOUNT,      job_ptr->account,
+			   SORTID_ALLOC,        1,
+			   SORTID_ALLOC_NODE,   tmp_alloc_node,
+			   SORTID_BATCH,        tmp_batch,
+			   SORTID_BATCH_HOST,   job_ptr->batch_host,
+			   SORTID_COLOR,
+				sview_colors[sview_job_info_ptr->color_inx],
+			   SORTID_COLOR_INX,    sview_job_info_ptr->color_inx,
+			   SORTID_COMMAND,      job_ptr->command,
+			   SORTID_COMMENT,      job_ptr->comment,
+			   SORTID_CONTIGUOUS,   tmp_cont,
+			   SORTID_CPUS,         tmp_cpu_cnt,
+			   SORTID_CPU_MAX,      tmp_cpus_max,
+			   SORTID_CPU_MIN,      tmp_cpu_cnt,
+			   SORTID_CPUS_PER_TASK,tmp_cpus_per_task,
+			   SORTID_CPU_REQ,      tmp_cpu_req,
+			   SORTID_DEPENDENCY,   job_ptr->dependency,
+			   SORTID_DERIVED_EC,   tmp_derived_ec,
+			   SORTID_EXIT_CODE,    tmp_exit,
+			   SORTID_FEATURES,     job_ptr->features,
+			   SORTID_GRES,         job_ptr->gres,
+			   SORTID_GROUP_ID,     tmp_group_id,
+			   SORTID_JOBID,        job_ptr->job_id,
+			   SORTID_LICENSES,     job_ptr->licenses,
+			   SORTID_MEM_MIN,      tmp_mem_min,
+			   SORTID_NAME,         job_ptr->name,
+			   SORTID_NICE,         tmp_nice,
+			   SORTID_NODE_INX,     job_ptr->node_inx,
+			   SORTID_NODELIST,     tmp_nodes,
+			   SORTID_NODELIST_EXC, job_ptr->exc_nodes,
+			   SORTID_NODELIST_REQ, job_ptr->req_nodes,
+			   SORTID_NODES,        tmp_node_cnt,
+			   SORTID_NODES_MAX,    tmp_nodes_max,
+			   SORTID_NODES_MIN,    tmp_nodes_min,
+			   SORTID_PARTITION,    job_ptr->partition,
+			   SORTID_PREEMPT_TIME, tmp_preempt_time,
+			   SORTID_PRIORITY,     tmp_prio,
+			   SORTID_QOS,          job_ptr->qos,
+			   SORTID_REASON,       tmp_reason,
+			   SORTID_REQUEUE,      tmp_requeue,
+			   SORTID_RESTARTS,     job_ptr->restart_cnt,
+			   SORTID_RESV_NAME,    job_ptr->resv_name,
+			   SORTID_SHARED,       tmp_shared,
+			   SORTID_STATE,
+				job_state_string(job_ptr->job_state),
+			   SORTID_STATE_NUM,    job_ptr->job_state,
+			   SORTID_SWITCHES,     tmp_rqswitch,
+			   SORTID_TIME_ELIGIBLE,tmp_time_elig,
+			   SORTID_TIME_END,     tmp_time_end,
+			   SORTID_TIME_RESIZE,  tmp_time_resize,
+			   SORTID_TIME_RUNNING, tmp_time_run,
+			   SORTID_TIME_START,   tmp_time_start,
+			   SORTID_TIME_SUBMIT,  tmp_time_submit,
+			   SORTID_TIME_SUSPEND, tmp_time_sus,
+			   SORTID_TIMELIMIT,    tmp_time_limit,
+			   SORTID_TMP_DISK,     tmp_disk,
+			   SORTID_UPDATED,      1,
+			   SORTID_USER_ID,      tmp_uname,
+			   SORTID_WCKEY,        job_ptr->wckey,
+			   SORTID_WORKDIR,      job_ptr->work_dir,
+			   -1);
 
-	gtk_tree_store_set(treestore, iter,
-			   SORTID_CPU_MIN, tmp_char, -1);
+	xfree(tmp_uname);
 
-	if (cluster_flags & CLUSTER_FLAG_BG)
-		convert_num_unit((float)job_ptr->max_cpus,
-				 tmp_char, sizeof(tmp_char),
-				 UNIT_NONE);
-	else
-		snprintf(tmp_char, sizeof(tmp_char), "%u", job_ptr->max_cpus);
-
-	gtk_tree_store_set(treestore, iter,
-			   SORTID_CPU_MAX, tmp_char, -1);
+	if (cluster_flags & CLUSTER_FLAG_AIX) {
+		gtk_tree_store_set(treestore, iter,
+				   SORTID_NETWORK, job_ptr->network, -1);
+	}
 
-	gtk_tree_store_set(treestore, iter, SORTID_NODELIST, nodes, -1);
+	if (cluster_flags & CLUSTER_FLAG_BG) {
+		char tmp_block[40], tmp_conn[40], tmp_geo[40], tmp_rotate[40];
+		char tmp_linux[40], tmp_ramdisk[40], tmp_mloader[40];
 
-	gtk_tree_store_set(treestore, iter,
-			   SORTID_NODE_INX, job_ptr->node_inx, -1);
+		select_g_select_jobinfo_sprint(job_ptr->select_jobinfo,
+					       tmp_block, sizeof(tmp_block),
+					       SELECT_PRINT_BG_ID);
 
-	gtk_tree_store_set(treestore, iter, SORTID_NODELIST_REQ,
-			   job_ptr->req_nodes, -1);
-	gtk_tree_store_set(treestore, iter, SORTID_NODELIST_EXC,
-			   job_ptr->exc_nodes, -1);
+		select_g_select_jobinfo_sprint(job_ptr->select_jobinfo,
+					       tmp_conn, sizeof(tmp_conn),
+					       SELECT_PRINT_CONNECTION);
 
-	if (job_ptr->contiguous)
-		sprintf(tmp_char, "yes");
-	else
-		sprintf(tmp_char, "no");
+		select_g_select_jobinfo_sprint(job_ptr->select_jobinfo,
+					       tmp_geo, sizeof(tmp_geo),
+					       SELECT_PRINT_GEOMETRY);
 
-	gtk_tree_store_set(treestore, iter,
-			   SORTID_CONTIGUOUS, tmp_char, -1);
-	if (job_ptr->shared)
-		sprintf(tmp_char, "yes");
-	else
-		sprintf(tmp_char, "no");
-	gtk_tree_store_set(treestore, iter,
-			   SORTID_SHARED, tmp_char, -1);
+		select_g_select_jobinfo_sprint(job_ptr->select_jobinfo,
+					       tmp_linux, sizeof(tmp_linux),
+					       SELECT_PRINT_LINUX_IMAGE);
 
-	if (job_ptr->batch_flag)
-		sprintf(tmp_char, "yes");
-	else
-		sprintf(tmp_char, "no");
-	gtk_tree_store_set(treestore, iter,
-			   SORTID_BATCH, tmp_char, -1);
+		select_g_select_jobinfo_sprint(job_ptr->select_jobinfo,
+					       tmp_mloader, sizeof(tmp_mloader),
+					       SELECT_PRINT_MLOADER_IMAGE);
 
-	if (job_ptr->requeue)
-		sprintf(tmp_char, "yes");
-	else
-		sprintf(tmp_char, "no");
-	gtk_tree_store_set(treestore, iter,
-			   SORTID_REQUEUE, tmp_char, -1);
+		select_g_select_jobinfo_sprint(job_ptr->select_jobinfo,
+					       tmp_ramdisk, sizeof(tmp_ramdisk),
+					       SELECT_PRINT_RAMDISK_IMAGE);
 
-	gtk_tree_store_set(treestore, iter,
-			   SORTID_RESTARTS, job_ptr->restart_cnt, -1);
+		select_g_select_jobinfo_sprint(job_ptr->select_jobinfo,
+					       tmp_rotate, sizeof(tmp_rotate),
+					       SELECT_PRINT_ROTATE);
 
-	sprintf(tmp_char, "%u", sview_job_info_ptr->node_cnt);
-	gtk_tree_store_set(treestore, iter,
-			   SORTID_NODES_MIN, tmp_char, -1);
-	if (job_ptr->max_nodes > 0) {
-		sprintf(tmp_char, "%u", sview_job_info_ptr->node_cnt);
 		gtk_tree_store_set(treestore, iter,
-				   SORTID_NODES_MAX, tmp_char, -1);
-	}
-	if (job_ptr->cpus_per_task > 0) {
-		sprintf(tmp_char, "%u", job_ptr->cpus_per_task);
-		gtk_tree_store_set(treestore, iter,
-				   SORTID_CPUS_PER_TASK, tmp_char, -1);
+				   SORTID_BLOCK,         tmp_block,
+				   SORTID_CONNECTION,    tmp_conn,
+				   SORTID_GEOMETRY,      tmp_geo,
+				   SORTID_IMAGE_LINUX,   tmp_linux,
+				   SORTID_IMAGE_MLOADER, tmp_mloader,
+				   SORTID_IMAGE_RAMDISK, tmp_ramdisk,
+				   SORTID_ROTATE,        tmp_rotate,
+				   SORTID_SMALL_BLOCK,
+					sview_job_info_ptr->small_block,
+				   -1);
 	}
-	convert_num_unit((float)job_ptr->pn_min_cpus,
-			 tmp_char, sizeof(tmp_char), UNIT_NONE);
-	gtk_tree_store_set(treestore, iter,
-			   SORTID_CPU_REQ, tmp_char, -1);
-
-	gtk_tree_store_set(treestore, iter,
-			   SORTID_RESV_NAME, job_ptr->resv_name, -1);
-
-/* 	sprintf(tmp_char, "%u", job_ptr->min_sockets); */
-/* 	gtk_tree_store_set(treestore, iter, */
-/* 			   SORTID_SOCKETS_MIN, tmp_char, -1); */
 
-/* 	sprintf(tmp_char, "%u", job_ptr->min_cores); */
-/* 	gtk_tree_store_set(treestore, iter, */
-/* 			   SORTID_CORES_MIN, tmp_char, -1); */
-
-/* 	sprintf(tmp_char, "%u", job_ptr->min_threads); */
-/* 	gtk_tree_store_set(treestore, iter, */
-/* 			   SORTID_THREADS_MIN, tmp_char, -1); */
-
-	convert_num_unit((float)job_ptr->pn_min_memory,
-			 tmp_char, sizeof(tmp_char), UNIT_MEGA);
-	gtk_tree_store_set(treestore, iter,
-			   SORTID_MEM_MIN, tmp_char, -1);
+	if (cluster_flags & CLUSTER_FLAG_BGL) {
+		char tmp_blrts[40];
 
-	convert_num_unit((float)job_ptr->pn_min_tmp_disk,
-			 tmp_char, sizeof(tmp_char), UNIT_MEGA);
-	gtk_tree_store_set(treestore, iter,
-			   SORTID_TMP_DISK, tmp_char, -1);
-
-	gtk_tree_store_set(treestore, iter,
-			   SORTID_ACCOUNT, job_ptr->account, -1);
-
-	gtk_tree_store_set(treestore, iter,
-			   SORTID_QOS, job_ptr->qos, -1);
+		select_g_select_jobinfo_sprint(job_ptr->select_jobinfo,
+					       tmp_blrts, sizeof(tmp_blrts),
+					       SELECT_PRINT_BLRTS_IMAGE);
 
-	gtk_tree_store_set(treestore, iter,
-			   SORTID_DEPENDENCY, job_ptr->dependency, -1);
-
-	sprintf(tmp_char, "%u", job_ptr->priority);
-	gtk_tree_store_set(treestore, iter,
-			   SORTID_PRIORITY, tmp_char, -1);
-
-	if (WIFSIGNALED(job_ptr->derived_ec))
-		term_sig = WTERMSIG(job_ptr->derived_ec);
-	snprintf(tmp_char, sizeof(tmp_char), "%u:%u",
-		 WEXITSTATUS(job_ptr->derived_ec), term_sig);
-	gtk_tree_store_set(treestore, iter,
-			   SORTID_DERIVED_EC, tmp_char, -1);
+		gtk_tree_store_set(treestore, iter,
+				   SORTID_IMAGE_BLRTS,   tmp_blrts,
+				   -1);
+	}
 
-	if (WIFSIGNALED(job_ptr->exit_code))
-		term_sig = WTERMSIG(job_ptr->exit_code);
-	else
-		term_sig = 0;
-	snprintf(tmp_char, sizeof(tmp_char), "%u:%u",
-		 WEXITSTATUS(job_ptr->exit_code), term_sig);
-	gtk_tree_store_set(treestore, iter,
-			   SORTID_EXIT_CODE, tmp_char, -1);
+	if (cluster_flags & CLUSTER_FLAG_CRAYXT) {
+		char tmp_resv_id[40];
 
-	gtk_tree_store_set(treestore, iter,
-			   SORTID_FEATURES, job_ptr->features, -1);
-	gtk_tree_store_set(treestore, iter,
-			   SORTID_GRES, job_ptr->gres, -1);
-	gtk_tree_store_set(treestore, iter,
-			   SORTID_LICENSES, job_ptr->licenses, -1);
-	if (job_ptr->state_desc)
-		reason = job_ptr->state_desc;
-	else
-		reason = job_reason_string(job_ptr->state_reason);
-	gtk_tree_store_set(treestore, iter,
-			   SORTID_REASON, reason, -1);
+		select_g_select_jobinfo_sprint(job_ptr->select_jobinfo,
+					       tmp_resv_id, sizeof(tmp_resv_id),
+					       SELECT_PRINT_DATA);
 
-	if (cluster_flags & CLUSTER_FLAG_AIX)
 		gtk_tree_store_set(treestore, iter,
-				   SORTID_NETWORK, job_ptr->network, -1);
-
-	sprintf(tmp_char, "%u", job_ptr->nice - NICE_OFFSET);
-	gtk_tree_store_set(treestore, iter,
-			   SORTID_NICE, tmp_char, -1);
-	gtk_tree_store_set(treestore, iter,
-			   SORTID_COMMAND, job_ptr->command, -1);
-	gtk_tree_store_set(treestore, iter,
-			   SORTID_COMMENT, job_ptr->comment, -1);
-	gtk_tree_store_set(treestore, iter,
-			   SORTID_WORKDIR, job_ptr->work_dir, -1);
-
+				   SORTID_ALPS_RESV_ID,  tmp_resv_id,
+				   -1);
+	}
 
-	childern = gtk_tree_model_iter_children(GTK_TREE_MODEL(treestore),
-						&step_iter, iter);
 	if (gtk_tree_model_iter_children(GTK_TREE_MODEL(treestore),
 					 &step_iter, iter))
 		_update_info_step(sview_job_info_ptr,
@@ -2057,13 +2156,33 @@ static void _update_job_record(sview_job_info_t *sview_job_info_ptr,
 	return;
 }
 
+static void _get_step_nodelist(job_step_info_t *step_ptr, char *buf, 
+			       int buf_size)
+{
+	char *ionodes = NULL;
+
+	if (cluster_flags & CLUSTER_FLAG_BG) {
+		select_g_select_jobinfo_get(step_ptr->select_jobinfo,
+					    SELECT_JOBDATA_IONODES,
+					    &ionodes);
+		if (step_ptr->nodes && ionodes) {
+			snprintf(buf, buf_size, "%s[%s]", step_ptr->nodes,
+				 ionodes);
+		} else {
+			snprintf(buf, buf_size, "%s", step_ptr->nodes);
+		}
+		xfree(ionodes);
+	} else {
+		snprintf(buf, buf_size, "%s", step_ptr->nodes);
+	}
+}
+
 static void _layout_step_record(GtkTreeView *treeview,
 				job_step_info_t *step_ptr,
 				int update)
 {
-	char *nodes = NULL, *uname;
-	char tmp_char[50];
-	char tmp_time[50];
+	char *uname;
+	char tmp_char[50], tmp_nodes[50], tmp_time[50];
 	time_t now_time = time(NULL);
 	GtkTreeIter iter;
 	enum job_states state;
@@ -2079,10 +2198,6 @@ static void _layout_step_record(GtkTreeView *treeview,
 				   find_col_name(display_data_job,
 						 SORTID_CPUS),
 				   tmp_char);
-	add_display_treestore_line(update, treestore, &iter,
-				   find_col_name(display_data_job,
-						 SORTID_CPU_MIN),
-				   tmp_char);
 
 	uname = uid_to_string((uid_t)step_ptr->user_id);
 	add_display_treestore_line(update, treestore, &iter,
@@ -2109,22 +2224,26 @@ static void _layout_step_record(GtkTreeView *treeview,
 				   step_ptr->name);
 
 	if (!step_ptr->nodes
-	    || !strcasecmp(step_ptr->nodes,"waiting...")) {
+	    || !strcasecmp(step_ptr->nodes, "waiting...")) {
 		sprintf(tmp_time,"00:00:00");
-		nodes = "waiting...";
+		snprintf(tmp_nodes, sizeof(tmp_nodes), "waiting...");
 		state = JOB_PENDING;
 	} else {
 		now_time -= step_ptr->start_time;
 		secs2time_str(now_time, tmp_time, sizeof(tmp_time));
-		nodes = step_ptr->nodes;
-		if (cluster_flags & CLUSTER_FLAG_BG)
+		_get_step_nodelist(step_ptr, tmp_nodes, sizeof(tmp_nodes));
+		if (cluster_flags & CLUSTER_FLAG_BGQ)
+			convert_num_unit(
+				(float)step_ptr->num_tasks,
+				tmp_char, sizeof(tmp_char), UNIT_NONE);
+		else if (cluster_flags & CLUSTER_FLAG_BG)
 			convert_num_unit(
 				(float)step_ptr->num_tasks / cpus_per_node,
 				tmp_char, sizeof(tmp_char), UNIT_NONE);
-		else
-			convert_num_unit((float)_nodes_in_list(nodes),
+		else {
+			convert_num_unit((float)_nodes_in_list(tmp_nodes),
 					 tmp_char, sizeof(tmp_char), UNIT_NONE);
-
+		}
 		add_display_treestore_line(update, treestore, &iter,
 					   find_col_name(display_data_job,
 							 SORTID_NODES),
@@ -2135,7 +2254,7 @@ static void _layout_step_record(GtkTreeView *treeview,
 	add_display_treestore_line(update, treestore, &iter,
 				   find_col_name(display_data_job,
 						 SORTID_NODELIST),
-				   nodes);
+				   tmp_nodes);
 
 	add_display_treestore_line(update, treestore, &iter,
 				   find_col_name(display_data_job,
@@ -2176,81 +2295,84 @@ static void _update_step_record(job_step_info_t *step_ptr,
 				GtkTreeStore *treestore,
 				GtkTreeIter *iter)
 {
-	char *nodes = NULL, *uname = NULL;
-	char tmp_char[50];
-	char tmp_time[50];
+	char *tmp_uname;
+	char tmp_nodes[50];
+	char tmp_cpu_min[40],  tmp_time_run[40],   tmp_time_limit[40];
+	char tmp_node_cnt[40], tmp_time_start[40], tmp_task_cnt[40];
 	time_t now_time = time(NULL);
 	enum job_states state;
+	int color_inx = step_ptr->step_id % sview_colors_cnt;
 
-	gtk_tree_store_set(treestore, iter, SORTID_UPDATED, 1, -1);
-	if (!step_ptr->nodes
-	    || !strcasecmp(step_ptr->nodes,"waiting...")) {
-		sprintf(tmp_time,"00:00:00");
-		nodes = "waiting...";
+	convert_num_unit((float)step_ptr->num_cpus, tmp_cpu_min,
+			 sizeof(tmp_cpu_min), UNIT_NONE);
+
+	if (!step_ptr->nodes ||
+	    !strcasecmp(step_ptr->nodes,"waiting...")) {
+		sprintf(tmp_time_run, "00:00:00");
+		snprintf(tmp_nodes, sizeof(tmp_nodes), "waiting...");
+		tmp_node_cnt[0] = '\0';
 		state = JOB_PENDING;
 	} else {
 		now_time -= step_ptr->start_time;
-		secs2time_str(now_time, tmp_time, sizeof(tmp_time));
-		nodes = step_ptr->nodes;
-		if (cluster_flags & CLUSTER_FLAG_BG)
+		secs2time_str(now_time, tmp_time_run, sizeof(tmp_time_run));
+		_get_step_nodelist(step_ptr, tmp_nodes, sizeof(tmp_nodes));
+		if (cluster_flags & CLUSTER_FLAG_BGQ) {
+			uint32_t nodes = 0;
+			select_g_select_jobinfo_get(step_ptr->select_jobinfo,
+						    SELECT_JOBDATA_NODE_CNT,
+						    &nodes);
+			convert_num_unit((float)nodes, tmp_node_cnt,
+					 sizeof(tmp_node_cnt), UNIT_NONE);
+		} else if (cluster_flags & CLUSTER_FLAG_BG) {
 			convert_num_unit(
 				(float)step_ptr->num_tasks / cpus_per_node,
-				tmp_char, sizeof(tmp_char), UNIT_NONE);
-		else
-			convert_num_unit((float)_nodes_in_list(nodes),
-					 tmp_char, sizeof(tmp_char), UNIT_NONE);
-
-		gtk_tree_store_set(treestore, iter,
-				   SORTID_NODES, tmp_char, -1);
+				tmp_node_cnt, sizeof(tmp_node_cnt), UNIT_NONE);
+		} else {
+			convert_num_unit((float)_nodes_in_list(tmp_nodes),
+					 tmp_node_cnt, sizeof(tmp_node_cnt),
+					 UNIT_NONE);
+		}
 		state = JOB_RUNNING;
 	}
 
-	gtk_tree_store_set(treestore, iter,
-			   SORTID_STATE,
-			   job_state_string(state), -1);
+	convert_num_unit((float)step_ptr->num_tasks, tmp_task_cnt,
+			 sizeof(tmp_task_cnt), UNIT_NONE);
 
-	gtk_tree_store_set(treestore, iter,
-			   SORTID_TIME_RUNNING, tmp_time, -1);
+	if ((step_ptr->time_limit == NO_VAL) ||
+	    (step_ptr->time_limit == INFINITE)) {
+		sprintf(tmp_time_limit, "Job Limit");
+	} else {
+		secs2time_str((step_ptr->time_limit * 60),
+			      tmp_time_limit, sizeof(tmp_time_limit));
+	}
 
-	gtk_tree_store_set(treestore, iter, SORTID_ALLOC, 0, -1);
-	gtk_tree_store_set(treestore, iter,
-			   SORTID_JOBID, step_ptr->step_id, -1);
-	gtk_tree_store_set(treestore, iter,
-			   SORTID_PARTITION, step_ptr->partition, -1);
-	gtk_tree_store_set(treestore, iter,
-			   SORTID_GRES, step_ptr->gres, -1);
-/* #ifdef HAVE_BG */
-/* 	gtk_tree_store_set(treestore, iter,  */
-/* 			   SORTID_BLOCK,  */
-/* 			   select_g_select_jobinfo_sprint( */
-/* 				   step_ptr->select_jobinfo,  */
-/* 				   tmp_char,  */
-/* 				   sizeof(tmp_char),  */
-/* 				   SELECT_PRINT_BG_ID), -1); */
-/* #endif */
-	uname = uid_to_string((uid_t)step_ptr->user_id);
-	gtk_tree_store_set(treestore, iter,
-			   SORTID_USER_ID, uname, -1);
-	xfree(uname);
-	gtk_tree_store_set(treestore, iter,
-			   SORTID_NAME, step_ptr->name, -1);
+	slurm_make_time_str((time_t *)&step_ptr->start_time, tmp_time_start,
+			    sizeof(tmp_time_start));
 
-	convert_num_unit((float)step_ptr->num_tasks, tmp_char, sizeof(tmp_char),
-			 UNIT_NONE);
-	gtk_tree_store_set(treestore, iter,
-			   SORTID_TASKS, tmp_char, -1);
+	tmp_uname = uid_to_string((uid_t)step_ptr->user_id);
 
-	convert_num_unit((float)step_ptr->num_cpus, tmp_char, sizeof(tmp_char),
-			 UNIT_NONE);
 	gtk_tree_store_set(treestore, iter,
-			   SORTID_CPUS, tmp_char, -1);
-	gtk_tree_store_set(treestore, iter,
-			   SORTID_CPU_MIN, tmp_char, -1);
-	gtk_tree_store_set(treestore, iter,
-			   SORTID_CPU_MAX, tmp_char, -1);
+			   SORTID_ALLOC,        0,
+			   SORTID_COLOR,	sview_colors[color_inx],
+			   SORTID_COLOR_INX,    color_inx,
+			   SORTID_CPUS,         tmp_cpu_min,
+			   SORTID_GRES,         step_ptr->gres,
+			   SORTID_JOBID,        step_ptr->step_id,
+			   SORTID_NAME,         step_ptr->name,
+			   SORTID_NODE_INX,     step_ptr->node_inx,
+			   SORTID_NODELIST,     tmp_nodes,
+			   SORTID_NODES,        tmp_node_cnt,
+			   SORTID_PARTITION,    step_ptr->partition,
+			   SORTID_STATE,        job_state_string(state),
+			   SORTID_TASKS,        tmp_task_cnt,
+			   SORTID_TIME_RUNNING, tmp_time_run,
+			   SORTID_TIME_START,   tmp_time_start,
+			   SORTID_TIMELIMIT,    tmp_time_limit,
+			   SORTID_UPDATED,      1,
+			   SORTID_USER_ID,      tmp_uname,
+			   -1);
 
-	gtk_tree_store_set(treestore, iter,
-			   SORTID_NODELIST, nodes, -1);
+	xfree(tmp_uname);
 
 	return;
 }
@@ -2495,7 +2617,6 @@ static List _create_job_info_list(job_info_msg_t *job_info_ptr,
 				sview_job_info_ptr->small_block = 1;
 				snprintf(tmp_char, sizeof(tmp_char), "%s[%s]",
 					 job_ptr->nodes, ionodes);
-				xfree(ionodes);
 				/* keep a different string here so we don't
 				   just keep tacking on ionodes to a
 				   node list */
@@ -2503,6 +2624,7 @@ static List _create_job_info_list(job_info_msg_t *job_info_ptr,
 			} else
 				sview_job_info_ptr->nodes =
 					xstrdup(job_ptr->nodes);
+			xfree(ionodes);
 		} else
 			sview_job_info_ptr->nodes = xstrdup(job_ptr->nodes);
 
@@ -2549,7 +2671,7 @@ void _display_info_job(List info_list, popup_info_t *popup_win)
 	int found = 0;
 	GtkTreeView *treeview = NULL;
 	int update = 0;
-	int j = 0;
+	int j, k;
 
 	if (spec_info->search_info->int_data == NO_VAL) {
 		/* 	info = xstrdup("No pointer given!"); */
@@ -2578,32 +2700,66 @@ need_refresh:
 	if (!sview_job_info) {
 		/* not found */
 	} else if (spec_info->search_info->int_data2 == NO_VAL) {
-		j=0;
+		int top_node_inx = 0;
+		int array_size = SVIEW_MAX_NODE_SPACE;
+		int  *color_inx = xmalloc(sizeof(int) * array_size);
+		bool *color_set_flag = xmalloc(sizeof(bool) * array_size);
+		j = 0;
 		while (sview_job_info->job_ptr->node_inx[j] >= 0) {
-			change_grid_color(
-				popup_win->grid_button_list,
-				sview_job_info->job_ptr->node_inx[j],
-				sview_job_info->job_ptr->node_inx[j+1],
-				sview_job_info->color_inx,
-				true, 0);
+			top_node_inx = MAX(top_node_inx,
+					   sview_job_info->job_ptr->
+					   node_inx[j+1]);
+			if (top_node_inx > SVIEW_MAX_NODE_SPACE)
+				fatal("Expand SVIEW_MAX_NODE_SPACE in sview");
+			for (k = sview_job_info->job_ptr->node_inx[j];
+			     k <= sview_job_info->job_ptr->node_inx[j+1];
+			     k++) {
+				color_set_flag[k] = true;
+				color_inx[k] = sview_job_info->
+					       color_inx;
+			}
 			j += 2;
 		}
+		change_grid_color_array(popup_win->grid_button_list,
+					top_node_inx+1, color_inx,
+					color_set_flag, true, 0);
+		xfree(color_inx);
+		xfree(color_set_flag);
 		_layout_job_record(treeview, sview_job_info, update);
 		found = 1;
 	} else {
+		int top_node_inx = 0;
+		int array_size = SVIEW_MAX_NODE_SPACE;
+		int  *color_inx = xmalloc(sizeof(int) * array_size);
+		bool *color_set_flag = xmalloc(sizeof(bool) * array_size);
 		itr = list_iterator_create(sview_job_info->step_list);
 		while ((step_ptr = list_next(itr))) {
 			if (step_ptr->step_id ==
 			    spec_info->search_info->int_data2) {
-				j=0;
+				j = 0;
 				while (step_ptr->node_inx[j] >= 0) {
-					change_grid_color(
-						popup_win->grid_button_list,
-						step_ptr->node_inx[j],
-						step_ptr->node_inx[j+1],
-						step_ptr->step_id, false, 0);
+					top_node_inx = MAX(top_node_inx,
+							   step_ptr->
+							   node_inx[j+1]);
+					if (top_node_inx > SVIEW_MAX_NODE_SPACE)
+						fatal("Expand "
+						      "SVIEW_MAX_NODE_SPACE "
+						      "in sview");
+					for (k = step_ptr->node_inx[j];
+					     k <= step_ptr->node_inx[j+1];
+					     k++) {
+						color_set_flag[k] = true;
+						color_inx[k] = step_ptr->step_id
+							% sview_colors_cnt;
+					}
 					j += 2;
 				}
+				change_grid_color_array(
+					popup_win->grid_button_list,
+					top_node_inx+1, color_inx,
+					color_set_flag, false, 0);
+				xfree(color_inx);
+				xfree(color_set_flag);
 				_layout_step_record(treeview,
 						    step_ptr, update);
 				found = 1;
@@ -2650,6 +2806,45 @@ finished:
 	return;
 }
 
+extern GtkWidget *create_job_entry(job_desc_msg_t *job_msg,
+				   GtkTreeModel *model, GtkTreeIter *iter)
+{
+	GtkScrolledWindow *window = create_scrolled_window();
+	GtkBin *bin = NULL;
+	GtkViewport *view = NULL;
+	GtkTable *table = NULL;
+	int row = 0;
+	display_data_t *display_data = create_data_job;
+
+	gtk_scrolled_window_set_policy(window,
+				       GTK_POLICY_NEVER,
+				       GTK_POLICY_AUTOMATIC);
+	bin = GTK_BIN(&window->container);
+	view = GTK_VIEWPORT(bin->child);
+	bin = GTK_BIN(&view->bin);
+	table = GTK_TABLE(bin->child);
+	gtk_table_resize(table, SORTID_CNT, 2);
+
+	gtk_table_set_homogeneous(table, FALSE);
+
+	/* NOTE: We build this in the order defined in the data structure
+	 * rather than in SORTID order for more flexibility. */
+	while (display_data++) {
+		if (display_data->id == -1)
+			break;
+		if (!display_data->name)
+			continue;
+		display_admin_edit(table, job_msg, &row, model, iter,
+				   display_data,
+				   G_CALLBACK(_admin_edit_combo_box_job),
+				   G_CALLBACK(_admin_focus_out_job),
+				   _set_active_combo_job);
+	}
+	gtk_table_resize(table, row, 2);
+
+	return GTK_WIDGET(window);
+}
+
 extern void refresh_job(GtkAction *action, gpointer user_data)
 {
 	popup_info_t *popup_win = (popup_info_t *)user_data;
@@ -2960,7 +3155,7 @@ extern void get_info_job(GtkTable *table, display_data_t *display_data)
 	static GtkWidget *display_widget = NULL;
 	List info_list = NULL;
 	int changed = 1;
-	int j=0;
+	int j, k;
 	sview_job_info_t *sview_job_info_ptr = NULL;
 	job_info_t *job_ptr = NULL;
 	ListIterator itr = NULL;
@@ -3051,30 +3246,45 @@ display_it:
 					 &path, &focus_column);
 	}
 	if (!path) {
+		int top_node_inx = 0;
+		int array_size = SVIEW_MAX_NODE_SPACE;
+		int  *color_inx = xmalloc(sizeof(int) * array_size);
+		bool *color_set_flag = xmalloc(sizeof(bool) * array_size);
 		itr = list_iterator_create(info_list);
 		while ((sview_job_info_ptr = list_next(itr))) {
+			uint16_t base_state;
 			job_ptr = sview_job_info_ptr->job_ptr;
-			j=0;
+			base_state = job_ptr->job_state & JOB_STATE_BASE;
+			if (base_state != JOB_RUNNING)
+				continue;
+			j = 0;
 			while (job_ptr->node_inx[j] >= 0) {
-				change_grid_color(grid_button_list,
-						  job_ptr->node_inx[j],
-						  job_ptr->node_inx[j+1],
-						  sview_job_info_ptr->color_inx,
-						  true, 0);
+				top_node_inx = MAX(top_node_inx,
+						   job_ptr->node_inx[j+1]);
+				if (top_node_inx > SVIEW_MAX_NODE_SPACE) {
+					fatal("Increase SVIEW_MAX_NODE_SPACE "
+					      "in sview");
+				}
+				for (k = job_ptr->node_inx[j];
+				     k <= job_ptr->node_inx[j+1]; k++) {
+					color_set_flag[k] = true;
+					color_inx[k] = sview_job_info_ptr->
+						       color_inx;
+				}
 				j += 2;
 			}
 		}
 		list_iterator_destroy(itr);
+		change_grid_color_array(grid_button_list, top_node_inx+1,
+					color_inx, color_set_flag, true, 0);
+		xfree(color_inx);
+		xfree(color_set_flag);
 		change_grid_color(grid_button_list, -1, -1,
 				  MAKE_WHITE, true, 0);
-	} else
+	} else {
 		highlight_grid(GTK_TREE_VIEW(display_widget),
 			       SORTID_NODE_INX, SORTID_COLOR_INX,
 			       grid_button_list);
-
-	if (working_sview_config.grid_speedup) {
-		gtk_widget_set_sensitive(GTK_WIDGET(main_grid_table), 0);
-		gtk_widget_set_sensitive(GTK_WIDGET(main_grid_table), 1);
 	}
 
 	if (view == ERROR_VIEW && display_widget) {
@@ -3100,6 +3310,13 @@ display_it:
 	}
 
 	view = INFO_VIEW;
+	/* If the system has a large number of jobs then not all lines
+	 * will be displayed. You can try different values for the third
+	 * argument of gtk_widget_set_size_request() in an attempt to
+	 * maximumize the data displayed in your environment. These are my
+	 * results: Y=1000 good for 43 lines, Y=-1 good for 1151 lines,
+	 *  Y=64000 good for 2781 lines, Y=99000 good for 1453 lines */
+	/* gtk_widget_set_size_request(display_widget, -1, -1); */
 	_update_info_job(info_list, GTK_TREE_VIEW(display_widget));
 end_it:
 	toggled = FALSE;
@@ -3125,13 +3342,15 @@ extern void specific_info_job(popup_info_t *popup_win)
 	List info_list = NULL;
 	List send_info_list = NULL;
 	int changed = 1;
-	int j=0, i=-1;
+	int i=-1, j, k;
 	sview_job_info_t *sview_job_info_ptr = NULL;
 	job_info_t *job_ptr = NULL;
 	ListIterator itr = NULL;
 	char name[30], *uname = NULL;
 	hostset_t hostset = NULL;
 	int name_diff;
+	int top_node_inx, array_size, *color_inx;
+	bool *color_set_flag;
 
 	if (!spec_info->display_widget)
 		setup_popup_info(popup_win, display_data_job, SORTID_CNT);
@@ -3291,8 +3510,9 @@ display_it:
 				continue;
 			break;
 		case RESV_PAGE:
-			if (strcmp(search_info->gchar_data,
-				   job_ptr->resv_name))
+			if (!job_ptr->resv_name
+			    || strcmp(search_info->gchar_data,
+				      job_ptr->resv_name))
 				continue;
 			break;
 		case BLOCK_PAGE:
@@ -3322,16 +3542,28 @@ display_it:
 		}
 
 		list_push(send_info_list, sview_job_info_ptr);
-		j=0;
+		top_node_inx = 0;
+		array_size = SVIEW_MAX_NODE_SPACE;
+		color_inx = xmalloc(sizeof(int) * array_size);
+		color_set_flag = xmalloc(sizeof(bool) * array_size);
+		j = 0;
 		while (job_ptr->node_inx[j] >= 0) {
-			change_grid_color(
-				popup_win->grid_button_list,
-				job_ptr->node_inx[j],
-				job_ptr->node_inx[j+1],
-				sview_job_info_ptr->color_inx,
-				true, 0);
+			top_node_inx = MAX(top_node_inx,
+					   job_ptr->node_inx[j+1]);
+			if (top_node_inx > SVIEW_MAX_NODE_SPACE)
+				fatal("Increase SVIEW_MAX_NODE_SPACE in sview");
+			for (k = job_ptr->node_inx[j];
+			     k <= job_ptr->node_inx[j+1]; k++) {
+				color_set_flag[k] = true;
+				color_inx[k] = sview_job_info_ptr->color_inx;
+			}
 			j += 2;
 		}
+		change_grid_color_array(popup_win->grid_button_list,
+					top_node_inx+1, color_inx,
+					color_set_flag, true, 0);
+		xfree(color_inx);
+		xfree(color_set_flag);
 	}
 	list_iterator_destroy(itr);
 	post_setup_popup_grid_list(popup_win);
@@ -3418,14 +3650,14 @@ extern void popup_all_job(GtkTreeModel *model, GtkTreeIter *iter, int id)
 		if (stepid == NO_VAL)
 			snprintf(title, 100, "Partition with job %d", jobid);
 		else
-			snprintf(title, 100, "Partition with job %d.%d",
+			snprintf(title, 100, "Partition with job step %d.%d",
 				 jobid, stepid);
 		break;
 	case RESV_PAGE:
 		if (stepid == NO_VAL)
 			snprintf(title, 100, "Reservation with job %d", jobid);
 		else
-			snprintf(title, 100, "Reservation with job %d.%d",
+			snprintf(title, 100, "Reservation with job step %d.%d",
 				 jobid, stepid);
 		break;
 	case NODE_PAGE:
@@ -3433,21 +3665,21 @@ extern void popup_all_job(GtkTreeModel *model, GtkTreeIter *iter, int id)
 			snprintf(title, 100,
 				 "%s(s) running job %d", type, jobid);
 		else
-			snprintf(title, 100, "%s(s) running job %d.%d",
+			snprintf(title, 100, "%s(s) running job step %d.%d",
 				 type, jobid, stepid);
 		break;
 	case BLOCK_PAGE:
 		if (stepid == NO_VAL)
 			snprintf(title, 100, "Block with job %d", jobid);
 		else
-			snprintf(title, 100, "Block with job %d.%d",
+			snprintf(title, 100, "Block with job step %d.%d",
 				 jobid, stepid);
 		break;
 	case INFO_PAGE:
 		if (stepid == NO_VAL)
 			snprintf(title, 100, "Full info for job %d", jobid);
 		else
-			snprintf(title, 100, "Full info for job %d.%d",
+			snprintf(title, 100, "Full info for job step %d.%d",
 				 jobid, stepid);
 		break;
 	default:
@@ -3823,7 +4055,6 @@ extern void admin_job(GtkTreeModel *model, GtkTreeIter *iter,
 	int response = 0;
 	char tmp_char[255];
 	int edit_type = 0;
-	int row_count=0;
 	job_desc_msg_t *job_msg;
 	GtkWidget *label = NULL;
 	GtkWidget *entry = NULL;
@@ -3840,10 +4071,6 @@ extern void admin_job(GtkTreeModel *model, GtkTreeIter *iter,
 			NULL);
 	gtk_window_set_transient_for(GTK_WINDOW(popup), NULL);
 
-	if (treeview)
-		row_count = gtk_tree_selection_count_selected_rows(
-			gtk_tree_view_get_selection(treeview));
-
 	gtk_tree_model_get(model, iter, SORTID_JOBID, &jobid, -1);
 	gtk_tree_model_get(model, iter, SORTID_ALLOC, &stepid, -1);
 	if (stepid)
@@ -3958,7 +4185,7 @@ extern void admin_job(GtkTreeModel *model, GtkTreeIter *iter,
 	return;
 }
 
-extern void cluster_change_job()
+extern void cluster_change_job(void)
 {
 	display_data_t *display_data = display_data_job;
 	while (display_data++) {
@@ -3982,7 +4209,7 @@ extern void cluster_change_job()
 		if (cluster_flags & CLUSTER_FLAG_CRAYXT) {
 			switch(display_data->id) {
 			case SORTID_ALPS_RESV_ID:
-				display_data->name = "ALPS Resv ID";
+				display_data->name = "ALPS";
 				break;
 			}
 		} else {
diff --git a/src/sview/node_info.c b/src/sview/node_info.c
index 0ae9209db..62f62efb0 100644
--- a/src/sview/node_info.c
+++ b/src/sview/node_info.c
@@ -10,7 +10,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -41,22 +41,25 @@ int g_node_scaling = 1;
 /* These need to be in alpha order (except POS and CNT) */
 enum {
 	SORTID_POS = POS_LOC,
+	SORTID_ARCH,
 	SORTID_BOOT_TIME,
 	SORTID_COLOR,
 	SORTID_CPUS,
 	SORTID_CORES,
-	SORTID_DISK,
 	SORTID_ERR_CPUS,
 	SORTID_FEATURES,
 	SORTID_GRES,
-	SORTID_MEMORY,
 	SORTID_NAME,
+	SORTID_NODE_ADDR,
+	SORTID_NODE_HOSTNAME,
+	SORTID_MEMORY,	/* RealMemory */
 	SORTID_REASON,
 	SORTID_SLURMD_START_TIME,
 	SORTID_SOCKETS,
 	SORTID_STATE,
 	SORTID_STATE_NUM,
 	SORTID_THREADS,
+	SORTID_DISK,	/* TmpDisk */
 	SORTID_UPDATED,
 	SORTID_USED_CPUS,
 	SORTID_WEIGHT,
@@ -80,6 +83,10 @@ static display_data_t display_data_node[] = {
 	 create_model_node, admin_edit_node},
 	{G_TYPE_STRING, SORTID_NAME, "Name", FALSE, EDIT_NONE, refresh_node,
 	 create_model_node, admin_edit_node},
+	{G_TYPE_STRING, SORTID_NODE_ADDR, "NodeAddr", FALSE, EDIT_NONE,
+	 refresh_node, create_model_node, admin_edit_node},
+	{G_TYPE_STRING, SORTID_NODE_HOSTNAME, "NodeHostName", FALSE, EDIT_NONE,
+	 refresh_node, create_model_node, admin_edit_node},
 	{G_TYPE_STRING, SORTID_COLOR, NULL, TRUE, EDIT_COLOR, refresh_node,
 	 create_model_node, admin_edit_node},
 	{G_TYPE_STRING, SORTID_STATE, "State", FALSE, EDIT_MODEL, refresh_node,
@@ -104,6 +111,8 @@ static display_data_t display_data_node[] = {
 	 create_model_node, admin_edit_node},
 	{G_TYPE_INT, SORTID_WEIGHT,"Weight", FALSE, EDIT_NONE, refresh_node,
 	 create_model_node, admin_edit_node},
+	{G_TYPE_STRING, SORTID_ARCH, "Arch", FALSE,
+	 EDIT_NONE, refresh_node, create_model_node, admin_edit_node},
 	{G_TYPE_STRING, SORTID_FEATURES, "Features", FALSE,
 	 EDIT_TEXTBOX, refresh_node, create_model_node, admin_edit_node},
 	{G_TYPE_STRING, SORTID_GRES, "Gres", FALSE,
@@ -125,14 +134,14 @@ static display_data_t options_data_node[] = {
 #ifdef HAVE_BG
 	{G_TYPE_STRING, NODE_PAGE, "Drain Base Partition", TRUE, ADMIN_PAGE},
 	{G_TYPE_STRING, NODE_PAGE, "Resume Base Partition", TRUE, ADMIN_PAGE},
-	{G_TYPE_STRING, NODE_PAGE, "Put Base Partition Down",
+	{G_TYPE_STRING, NODE_PAGE, "Set Base Partition Down",
 	 TRUE, ADMIN_PAGE},
 	{G_TYPE_STRING, NODE_PAGE, "Make Base Partition Idle",
 	 TRUE, ADMIN_PAGE},
 #else
 	{G_TYPE_STRING, NODE_PAGE, "Drain Node", TRUE, ADMIN_PAGE},
 	{G_TYPE_STRING, NODE_PAGE, "Resume Node", TRUE, ADMIN_PAGE},
-	{G_TYPE_STRING, NODE_PAGE, "Put Node(s) Down", TRUE, ADMIN_PAGE},
+	{G_TYPE_STRING, NODE_PAGE, "Set Node(s) Down", TRUE, ADMIN_PAGE},
 	{G_TYPE_STRING, NODE_PAGE, "Make Node(s) Idle", TRUE, ADMIN_PAGE},
 #endif
 	{G_TYPE_STRING, NODE_PAGE, "Update Features", TRUE, ADMIN_PAGE},
@@ -171,6 +180,16 @@ static void _layout_node_record(GtkTreeView *treeview,
 						 SORTID_NAME),
 				   node_ptr->name);
 
+	add_display_treestore_line(update, treestore, &iter,
+				   find_col_name(display_data_node,
+						 SORTID_NODE_ADDR),
+				   node_ptr->node_addr);
+
+	add_display_treestore_line(update, treestore, &iter,
+				   find_col_name(display_data_node,
+						 SORTID_NODE_HOSTNAME),
+				   node_ptr->node_hostname);
+
 	convert_num_unit((float)node_ptr->cpus, tmp_cnt, sizeof(tmp_cnt),
 			 UNIT_NONE);
 	add_display_treestore_line(update, treestore, &iter,
@@ -262,6 +281,10 @@ static void _layout_node_record(GtkTreeView *treeview,
 				   find_col_name(display_data_node,
 						 SORTID_WEIGHT),
 				   tmp_cnt);
+	add_display_treestore_line(update, treestore, &iter,
+				   find_col_name(display_data_node,
+						 SORTID_ARCH),
+				   node_ptr->arch);
 	add_display_treestore_line(update, treestore, &iter,
 				   find_col_name(display_data_node,
 						 SORTID_FEATURES),
@@ -288,92 +311,85 @@ static void _layout_node_record(GtkTreeView *treeview,
 static void _update_node_record(sview_node_info_t *sview_node_info_ptr,
 				GtkTreeStore *treestore, GtkTreeIter *iter)
 {
-	char tmp_cnt[17];
-	char *upper = NULL, *lower = NULL;
-	uint16_t err_cpus = 0, alloc_cpus = 0;
+	uint16_t alloc_cpus = 0, err_cpus = 0, idle_cpus;
 	node_info_t *node_ptr = sview_node_info_ptr->node_ptr;
-	int idle_cpus = node_ptr->cpus;
-
-	gtk_tree_store_set(treestore, iter, SORTID_COLOR,
-			   sview_colors[sview_node_info_ptr->pos
-					% sview_colors_cnt], -1);
+	char tmp_disk[20], tmp_err_cpus[20], tmp_mem[20], tmp_used_cpus[20];
+	char *tmp_state_lower, *tmp_state_upper;
 
-	gtk_tree_store_set(treestore, iter, SORTID_NAME, node_ptr->name, -1);
-
-	gtk_tree_store_set(treestore, iter, SORTID_STATE_NUM,
-			   node_ptr->node_state, -1);
-	gtk_tree_store_set(treestore, iter, SORTID_CPUS, node_ptr->cpus, -1);
 	select_g_select_nodeinfo_get(node_ptr->select_nodeinfo,
 				     SELECT_NODEDATA_SUBCNT,
 				     NODE_STATE_ALLOCATED,
 				     &alloc_cpus);
 	if (cluster_flags & CLUSTER_FLAG_BG) {
-		if (!alloc_cpus
-		    && (IS_NODE_ALLOCATED(node_ptr)
-			|| IS_NODE_COMPLETING(node_ptr)))
+		if (!alloc_cpus &&
+		    (IS_NODE_ALLOCATED(node_ptr) ||
+		     IS_NODE_COMPLETING(node_ptr)))
 			alloc_cpus = node_ptr->cpus;
 		else
 			alloc_cpus *= cpus_per_node;
 	}
-
-	idle_cpus -= alloc_cpus;
-	convert_num_unit((float)alloc_cpus, tmp_cnt,
-			 sizeof(tmp_cnt), UNIT_NONE);
-	gtk_tree_store_set(treestore, iter, SORTID_USED_CPUS,
-			   tmp_cnt, -1);
+	idle_cpus = node_ptr->cpus - alloc_cpus;
+	convert_num_unit((float)alloc_cpus, tmp_used_cpus,
+			 sizeof(tmp_used_cpus), UNIT_NONE);
 
 	select_g_select_nodeinfo_get(node_ptr->select_nodeinfo,
 				     SELECT_NODEDATA_SUBCNT,
 				     NODE_STATE_ERROR,
 				     &err_cpus);
-
 	if (cluster_flags & CLUSTER_FLAG_BG)
 		err_cpus *= cpus_per_node;
-
 	idle_cpus -= err_cpus;
-	convert_num_unit((float)err_cpus, tmp_cnt, sizeof(tmp_cnt), UNIT_NONE);
-	gtk_tree_store_set(treestore, iter, SORTID_ERR_CPUS,
-			   tmp_cnt, -1);
+	convert_num_unit((float)err_cpus, tmp_err_cpus, sizeof(tmp_err_cpus),
+			 UNIT_NONE);
 
 	if (IS_NODE_DRAIN(node_ptr)) {
 		/* don't worry about mixed since the
-		   whole node is being drained. */
-	} else if ((alloc_cpus && err_cpus)
-		   || (idle_cpus  && (idle_cpus != node_ptr->cpus))) {
+		 * whole node is being drained. */
+	} else if ((alloc_cpus && err_cpus) ||
+		   (idle_cpus  && (idle_cpus != node_ptr->cpus))) {
 		node_ptr->node_state &= NODE_STATE_FLAGS;
 		node_ptr->node_state |= NODE_STATE_MIXED;
 	}
+	tmp_state_upper = node_state_string(node_ptr->node_state);
+	tmp_state_lower = str_tolower(tmp_state_upper);
 
-	upper = node_state_string(node_ptr->node_state);
-	lower = str_tolower(upper);
-
-	gtk_tree_store_set(treestore, iter, SORTID_STATE, lower, -1);
-	xfree(lower);
-
-	gtk_tree_store_set(treestore, iter, SORTID_CORES, node_ptr->cpus, -1);
-	gtk_tree_store_set(treestore, iter, SORTID_SOCKETS,
-			   node_ptr->sockets, -1);
-	gtk_tree_store_set(treestore, iter, SORTID_THREADS,
-			   node_ptr->threads, -1);
-	convert_num_unit((float)node_ptr->real_memory, tmp_cnt, sizeof(tmp_cnt),
+	convert_num_unit((float)node_ptr->real_memory, tmp_mem, sizeof(tmp_mem),
 			 UNIT_MEGA);
-	gtk_tree_store_set(treestore, iter, SORTID_MEMORY, tmp_cnt, -1);
-	convert_num_unit((float)node_ptr->tmp_disk, tmp_cnt, sizeof(tmp_cnt),
+
+	convert_num_unit((float)node_ptr->tmp_disk, tmp_disk, sizeof(tmp_disk),
 			 UNIT_MEGA);
-	gtk_tree_store_set(treestore, iter, SORTID_DISK, tmp_cnt, -1);
-	gtk_tree_store_set(treestore, iter, SORTID_WEIGHT,
-			   node_ptr->weight, -1);
-	gtk_tree_store_set(treestore, iter, SORTID_FEATURES,
-			   node_ptr->features, -1);
-	gtk_tree_store_set(treestore, iter, SORTID_GRES,
-			   node_ptr->gres, -1);
-	gtk_tree_store_set(treestore, iter, SORTID_BOOT_TIME,
-			   sview_node_info_ptr->boot_time, -1);
-	gtk_tree_store_set(treestore, iter, SORTID_SLURMD_START_TIME,
-			   sview_node_info_ptr->slurmd_start_time, -1);
-	gtk_tree_store_set(treestore, iter, SORTID_REASON,
-			   sview_node_info_ptr->reason, -1);
-	gtk_tree_store_set(treestore, iter, SORTID_UPDATED, 1, -1);
+
+
+	/* Combining these records provides a slight performance improvement */
+	gtk_tree_store_set(treestore, iter,
+			   SORTID_ARCH,      node_ptr->arch,
+			   SORTID_BOOT_TIME, sview_node_info_ptr->boot_time,
+			   SORTID_COLOR,
+				sview_colors[sview_node_info_ptr->pos
+				% sview_colors_cnt],
+			   SORTID_CORES,     node_ptr->cpus,
+			   SORTID_CPUS,      node_ptr->cpus,
+			   SORTID_DISK,      tmp_disk,
+			   SORTID_ERR_CPUS,  tmp_err_cpus,
+			   SORTID_FEATURES,  node_ptr->features,
+			   SORTID_GRES,      node_ptr->gres,
+			   SORTID_MEMORY,    tmp_mem,
+			   SORTID_NAME,      node_ptr->name,
+			   SORTID_NODE_ADDR, node_ptr->node_addr,
+			   SORTID_NODE_HOSTNAME, node_ptr->node_hostname,
+			   SORTID_REASON,    sview_node_info_ptr->reason,
+			   SORTID_SLURMD_START_TIME,
+				sview_node_info_ptr->slurmd_start_time,
+			   SORTID_SOCKETS,   node_ptr->sockets,
+			   SORTID_STATE,     tmp_state_lower,
+			   SORTID_STATE_NUM, node_ptr->node_state,
+			   SORTID_THREADS,   node_ptr->threads,
+			   SORTID_USED_CPUS, tmp_used_cpus,
+			   SORTID_WEIGHT,    node_ptr->weight,
+			   SORTID_UPDATED,   1,
+			  -1);
+
+	xfree(tmp_state_lower);
 
 	return;
 }
@@ -387,34 +403,6 @@ static void _append_node_record(sview_node_info_t *sview_node_info,
 	_update_node_record(sview_node_info, treestore, iter);
 }
 
-static int _get_topo_color_ndx(int node_ndx)
-{
-	int i = 0;
-	int rdx = MAKE_TOPO_2;
-	switch_record_bitmaps_t *sw_nodes_bitmaps_ptr = g_switch_nodes_maps;
-
-	for (i=0; i<g_topo_info_msg_ptr->record_count;
-	     i++, sw_nodes_bitmaps_ptr++) {
-		if (g_topo_info_msg_ptr->topo_array[i].level)
-			continue;
-		if (bit_test(sw_nodes_bitmaps_ptr->node_bitmap, node_ndx)) {
-			rdx = i;
-			break;
-		}
-	}
-	if (rdx == MAKE_TOPO_2)
-		return rdx;
-//	if (rdx != _l_sw_color_ndx) {
-//		_l_sw_color_ndx = rdx;
-//		if (_l_topo_color_ndx == MAKE_TOPO_1)
-//			_l_topo_color_ndx = MAKE_TOPO_2;
-//		else
-//			_l_topo_color_ndx = MAKE_TOPO_1;
-//	}
-//	return _l_topo_color_ndx;
-	return MAKE_TOPO_1;
-}
-
 static void _update_info_node(List info_list, GtkTreeView *tree_view)
 {
 	GtkTreePath *path = gtk_tree_path_new_first();
@@ -436,38 +424,28 @@ static void _update_info_node(List info_list, GtkTreeView *tree_view)
 			}
 		}
 	}
+
 	itr = list_iterator_create(info_list);
 	while ((sview_node_info = (sview_node_info_t*) list_next(itr))) {
 		node_ptr = sview_node_info->node_ptr;
-		/* get the iter, or find out the list is empty goto add */
-		if (!gtk_tree_model_get_iter(model, &iter, path)) {
-			goto adding;
-		}
-
-		while (1) {
-			/* search for the node name and check to see if
-			   it is in the list */
-			gtk_tree_model_get(model, &iter, SORTID_NAME,
-					   &name, -1);
-			if (!strcmp(name, node_ptr->name)) {
-				/* update with new info */
-				g_free(name);
-				_update_node_record(sview_node_info,
-						    GTK_TREE_STORE(model),
-						    &iter);
-				goto found;
-			}
-			g_free(name);
-
-			if (!gtk_tree_model_iter_next(model, &iter)) {
-				break;
+		if (sview_node_info->iter_set) {
+			gtk_tree_model_get(model, &sview_node_info->iter_ptr,
+					   SORTID_NAME, &name, -1);
+			if (strcmp(name, node_ptr->name)) { /* Bad pointer */
+				sview_node_info->iter_set = false;
+				//g_print("bad node iter pointer\n");
 			}
 		}
-	adding:
-		_append_node_record(sview_node_info,
-				    GTK_TREE_STORE(model), &iter);
-	found:
-		;
+		if (sview_node_info->iter_set) {
+			_update_node_record(sview_node_info,
+					    GTK_TREE_STORE(model),
+					    &sview_node_info->iter_ptr);
+		} else {
+			_append_node_record(sview_node_info,
+					    GTK_TREE_STORE(model),
+					    &sview_node_info->iter_ptr);
+			sview_node_info->iter_set = true;
+		}
 	}
 	list_iterator_destroy(itr);
 
@@ -693,9 +671,7 @@ extern int get_new_info_node(node_info_msg_t **info_ptr, int force)
 		if (*info_ptr != g_node_info_ptr)
 			error_code = SLURM_SUCCESS;
 		*info_ptr = g_node_info_ptr;
-		if (changed)
-			error_code = SLURM_SUCCESS;
-		goto end_it;
+		return error_code;
 	}
 	last = now;
 
@@ -806,9 +782,9 @@ extern int get_new_info_node(node_info_msg_t **info_ptr, int force)
 
 	if (!g_topo_info_msg_ptr &&
 	    default_sview_config.grid_topological) {
-		get_topo_conf(); /*pull in topology NOW*/
+		get_topo_conf(); /* pull in topology NOW */
 	}
-end_it:
+
 	return error_code;
 }
 
@@ -1030,11 +1006,18 @@ extern int update_state_node(GtkDialog *dialog,
 			 nodelist);
 		label = gtk_label_new(tmp_char);
 		state = NODE_RESUME;
+	} else if (!strncasecmp("set", type, 3)) {
+		snprintf(tmp_char, sizeof(tmp_char),
+			 "Are you sure you want to down node(s) %s?\n\n"
+			 "Please put reason.",
+			 nodelist);
+		entry = create_entry();
+		label = gtk_label_new(tmp_char);
+		state = NODE_STATE_DOWN;
 	} else {
+
 		if (!strncasecmp("make", type, 4))
 			type = "idle";
-		else if (!strncasecmp("put", type, 3))
-			type = "down";
 		for(i = 0; i < NODE_STATE_END; i++) {
 			upper = node_state_string(i);
 			lower = str_tolower(upper);
@@ -1183,8 +1166,7 @@ extern void get_info_node(GtkTable *table, display_data_t *display_data)
 	static GtkWidget *display_widget = NULL;
 	List info_list = NULL;
 	int changed = 1;
-	int i = 0;
-	int b_color_ndx;
+	int i = 0, sort_key;
 	sview_node_info_t *sview_node_info_ptr = NULL;
 	ListIterator itr = NULL;
 	GtkTreePath *path = NULL;
@@ -1234,14 +1216,12 @@ extern void get_info_node(GtkTable *table, display_data_t *display_data)
 		gtk_widget_show(label);
 		goto end_it;
 	}
-display_it:
-
-	info_list = create_node_info_list(node_info_ptr, changed,
-					  FALSE);
 
+display_it:
+	info_list = create_node_info_list(node_info_ptr, changed, FALSE);
 	if (!info_list)
 		goto reset_curs;
-	i=0;
+	i = 0;
 	/* set up the grid */
 	if (display_widget && GTK_IS_TREE_VIEW(display_widget) &&
 	    gtk_tree_selection_count_selected_rows(
@@ -1252,35 +1232,25 @@ display_it:
 		gtk_tree_view_get_cursor(GTK_TREE_VIEW(display_widget),
 					 &path, &focus_column);
 	}
-	if (!path || working_sview_config.grid_topological) {
+	if (!path) {
+		int array_size = node_info_ptr->record_count;
+		int  *color_inx = xmalloc(sizeof(int) * array_size);
+		bool *color_set_flag = xmalloc(sizeof(bool) * array_size);
 		itr = list_iterator_create(info_list);
 		while ((sview_node_info_ptr = list_next(itr))) {
-			if (g_topo_info_msg_ptr) {
-				//derive topo_color
-				b_color_ndx = _get_topo_color_ndx(i);
-
-				if (b_color_ndx != MAKE_TOPO_2) {
-					/* node belongs to a switch */
-					if (sview_node_info_ptr->node_ptr->
-					    node_state != NODE_STATE_IDLE )
-						b_color_ndx = i;
-				}
-			} else
-				b_color_ndx = i;
-			change_grid_color(grid_button_list, i, i,
-					  b_color_ndx, true, 0);
+			color_set_flag[i] = true;
+			color_inx[i] = i;
 			i++;
 		}
 		list_iterator_destroy(itr);
+		change_grid_color_array(grid_button_list, array_size,
+					color_inx, color_set_flag, true, 0);
+		xfree(color_inx);
+		xfree(color_set_flag);
 	} else
 		highlight_grid(GTK_TREE_VIEW(display_widget),
 			       SORTID_POS, (int)NO_VAL, grid_button_list);
 
-	if (working_sview_config.grid_speedup) {
-		gtk_widget_set_sensitive(GTK_WIDGET(main_grid_table), 0);
-		gtk_widget_set_sensitive(GTK_WIDGET(main_grid_table), 1);
-	}
-
 	if (view == ERROR_VIEW && display_widget) {
 		gtk_widget_destroy(display_widget);
 		display_widget = NULL;
@@ -1296,13 +1266,26 @@ display_it:
 		gtk_table_attach_defaults(GTK_TABLE(table),
 					  GTK_WIDGET(tree_view),
 					  0, 1, 0, 1);
-		/* since this function sets the model of the tree_view
-		   to the treestore we don't really care about
-		   the return value */
+		/* Since this function sets the model of the tree_view to the
+		 * treestore we don't really care about the return value
+		 * On large clusters, sorting on the node name slows GTK down
+		 * by a large margin. */
+		if (node_info_ptr->record_count > 1000)
+			sort_key = -1;
+		else
+			sort_key = SORTID_NAME;
 		create_treestore(tree_view, display_data_node,
-				 SORTID_CNT, SORTID_NAME, SORTID_COLOR);
+				 SORTID_CNT, sort_key, SORTID_COLOR);
 	}
+
 	view = INFO_VIEW;
+	/* If the system has a large number of nodes then not all lines
+	 * will be displayed. You can try different values for the third
+	 * argument of gtk_widget_set_size_request() in an attempt to
+	 * maximumize the data displayed in your environment. These are my
+	 * results: Y=1000 good for 43 lines, Y=-1 good for 1151 lines,
+	 *  Y=64000 good for 2781 lines, Y=99000 good for 1453 lines */
+	/* gtk_widget_set_size_request(display_widget, -1, -1); */
 	_update_info_node(info_list, GTK_TREE_VIEW(display_widget));
 end_it:
 	toggled = FALSE;
@@ -1330,7 +1313,7 @@ extern void specific_info_node(popup_info_t *popup_win)
 	node_info_t *node_ptr = NULL;
 	hostlist_t hostlist = NULL;
 	hostlist_iterator_t host_itr = NULL;
-	int i = -1;
+	int i = -1, sort_key;
 	sview_search_info_t *search_info = spec_info->search_info;
 
 	if (!spec_info->display_widget)
@@ -1387,11 +1370,16 @@ display_it:
 		gtk_table_attach_defaults(popup_win->table,
 					  GTK_WIDGET(tree_view),
 					  0, 1, 0, 1);
-		/* since this function sets the model of the tree_view
-		   to the treestore we don't really care about
-		   the return value */
+		/* Since this function sets the model of the tree_view to the
+		 * treestore we don't really care about the return value
+		 * On large clusters, sorting on the node name slows GTK down
+		 * by a large margin. */
+		if (node_info_ptr->record_count > 1000)
+			sort_key = -1;
+		else
+			sort_key = SORTID_NAME;
 		create_treestore(tree_view, popup_win->display_data,
-				 SORTID_CNT, SORTID_NAME, SORTID_COLOR);
+				 SORTID_CNT, sort_key, SORTID_COLOR);
 	}
 
 	setup_popup_grid_list(popup_win);
@@ -1718,7 +1706,7 @@ extern void admin_node_name(char *name, char *old_value, char *type)
 	return;
 }
 
-extern void cluster_change_node()
+extern void cluster_change_node(void)
 {
 	display_data_t *display_data = options_data_node;
 	while (display_data++) {
diff --git a/src/sview/part_info.c b/src/sview/part_info.c
index 0f7b5770c..90136ec89 100644
--- a/src/sview/part_info.c
+++ b/src/sview/part_info.c
@@ -11,7 +11,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -35,6 +35,8 @@
 
 #define _DEBUG 0
 
+static GtkListStore *_create_model_part2(int type);
+
 typedef struct {
 	uint32_t cpu_alloc_cnt;
 	uint32_t cpu_error_cnt;
@@ -61,6 +63,7 @@ typedef struct {
 
 enum {
 	EDIT_PART_STATE = 1,
+	EDIT_REMOVE_PART,
 	EDIT_EDIT
 };
 
@@ -77,6 +80,7 @@ enum {
 	SORTID_CPUS,
 	SORTID_DEFAULT,
 	SORTID_FEATURES,
+	SORTID_GRACE_TIME,
 	SORTID_GROUPS,
 	SORTID_HIDDEN,
 	SORTID_JOB_SIZE,
@@ -120,23 +124,25 @@ static display_data_t display_data_part[] = {
 	 EDIT_TEXTBOX, refresh_part, create_model_part, admin_edit_part},
 	{G_TYPE_STRING, SORTID_DEFAULT, "Default", FALSE,
 	 EDIT_MODEL, refresh_part, create_model_part, admin_edit_part},
+	{G_TYPE_STRING, SORTID_GRACE_TIME, "GraceTime", FALSE,
+	 EDIT_TEXTBOX, refresh_part, create_model_part, admin_edit_part},
 	{G_TYPE_STRING, SORTID_HIDDEN, "Hidden", FALSE,
 	 EDIT_MODEL, refresh_part, create_model_part, admin_edit_part},
 	{G_TYPE_STRING, SORTID_PART_STATE, "Part State", FALSE,
 	 EDIT_MODEL, refresh_part, create_model_part, admin_edit_part},
-	{G_TYPE_STRING, SORTID_TIMELIMIT, "Time Limit",
-	 FALSE, EDIT_TEXTBOX, refresh_part, create_model_part, admin_edit_part},
-	{G_TYPE_STRING, SORTID_NODES, "Node Count",
-	 FALSE, EDIT_NONE, refresh_part, create_model_part, admin_edit_part},
-	{G_TYPE_STRING, SORTID_CPUS, "CPU Count",
-	 FALSE, EDIT_NONE, refresh_part, create_model_part, admin_edit_part},
-	{G_TYPE_STRING, SORTID_NODE_STATE, "Node State",
-	 FALSE, EDIT_MODEL, refresh_part,
+	{G_TYPE_STRING, SORTID_TIMELIMIT, "Time Limit", FALSE,
+	 EDIT_TEXTBOX, refresh_part, create_model_part, admin_edit_part},
+	{G_TYPE_STRING, SORTID_NODES, "Node Count", FALSE,
+	 EDIT_NONE, refresh_part, create_model_part, admin_edit_part},
+	{G_TYPE_STRING, SORTID_CPUS, "CPU Count", FALSE,
+	 EDIT_NONE, refresh_part, create_model_part, admin_edit_part},
+	{G_TYPE_STRING, SORTID_NODE_STATE, "Node State", FALSE,
+	 EDIT_MODEL, refresh_part,
 	 create_model_part, admin_edit_part},
 	{G_TYPE_STRING, SORTID_JOB_SIZE, "Job Size", FALSE,
 	 EDIT_NONE, refresh_part, create_model_part, admin_edit_part},
 	{G_TYPE_STRING, SORTID_PREEMPT_MODE, "PreemptMode", FALSE,
-	 EDIT_TEXTBOX, refresh_part, create_model_part, admin_edit_part},
+	 EDIT_MODEL, refresh_part, create_model_part, admin_edit_part},
 	{G_TYPE_STRING, SORTID_PRIORITY, "Priority", FALSE,
 	 EDIT_TEXTBOX, refresh_part, create_model_part, admin_edit_part},
 	{G_TYPE_STRING, SORTID_NODES_MIN, "Nodes Min", FALSE,
@@ -149,13 +155,8 @@ static display_data_t display_data_part[] = {
 	 create_model_part, admin_edit_part},
 	{G_TYPE_STRING, SORTID_GROUPS, "Groups Allowed", FALSE,
 	 EDIT_TEXTBOX, refresh_part, create_model_part, admin_edit_part},
-#ifdef HAVE_BG
-	{G_TYPE_STRING, SORTID_NODES_ALLOWED, "BPs Allowed Allocating", FALSE,
-	 EDIT_TEXTBOX, refresh_part, create_model_part, admin_edit_part},
-#else
 	{G_TYPE_STRING, SORTID_NODES_ALLOWED, "Nodes Allowed Allocating", FALSE,
 	 EDIT_TEXTBOX, refresh_part, create_model_part, admin_edit_part},
-#endif
 	{G_TYPE_STRING, SORTID_TMP_DISK, "Temp Disk", FALSE,
 	 EDIT_NONE, refresh_part, create_model_part, admin_edit_part},
 	{G_TYPE_STRING, SORTID_MEM, "Memory", FALSE, EDIT_NONE, refresh_part,
@@ -184,9 +185,57 @@ static display_data_t display_data_part[] = {
 	{G_TYPE_NONE, -1, NULL, FALSE, EDIT_NONE}
 };
 
+static display_data_t create_data_part[] = {
+	{G_TYPE_INT, SORTID_POS, NULL, FALSE, EDIT_NONE, refresh_part},
+	{G_TYPE_STRING, SORTID_NAME, "Name", FALSE,
+	 EDIT_TEXTBOX, refresh_part, _create_model_part2, admin_edit_part},
+	{G_TYPE_STRING, SORTID_ALTERNATE, "Alternate", FALSE,
+	 EDIT_TEXTBOX, refresh_part, _create_model_part2, admin_edit_part},
+	{G_TYPE_STRING, SORTID_DEFAULT, "Default", FALSE,
+	 EDIT_MODEL, refresh_part, _create_model_part2, admin_edit_part},
+	{G_TYPE_STRING, SORTID_GRACE_TIME, "GraceTime", FALSE,
+	 EDIT_TEXTBOX, refresh_part, _create_model_part2, admin_edit_part},
+	{G_TYPE_STRING, SORTID_HIDDEN, "Hidden", FALSE,
+	 EDIT_MODEL, refresh_part, _create_model_part2, admin_edit_part},
+	{G_TYPE_STRING, SORTID_PART_STATE, "State", FALSE,
+	 EDIT_MODEL, refresh_part, _create_model_part2, admin_edit_part},
+	{G_TYPE_STRING, SORTID_TIMELIMIT, "Time Limit", FALSE,
+	 EDIT_TEXTBOX, refresh_part, _create_model_part2, admin_edit_part},
+	{G_TYPE_STRING, SORTID_PREEMPT_MODE, "PreemptMode", FALSE,
+	 EDIT_MODEL, refresh_part, _create_model_part2, admin_edit_part},
+	{G_TYPE_STRING, SORTID_PRIORITY, "Priority", FALSE,
+	 EDIT_TEXTBOX, refresh_part, _create_model_part2, admin_edit_part},
+	{G_TYPE_STRING, SORTID_NODES_MIN, "Nodes Min", FALSE,
+	 EDIT_TEXTBOX, refresh_part, _create_model_part2, admin_edit_part},
+	{G_TYPE_STRING, SORTID_NODES_MAX, "Nodes Max", FALSE,
+	 EDIT_TEXTBOX, refresh_part, _create_model_part2, admin_edit_part},
+	{G_TYPE_STRING, SORTID_ROOT, "Root", FALSE,
+	 EDIT_MODEL, refresh_part, _create_model_part2, admin_edit_part},
+	{G_TYPE_STRING, SORTID_SHARE, "Share", FALSE,
+	 EDIT_MODEL, refresh_part, _create_model_part2, admin_edit_part},
+	{G_TYPE_STRING, SORTID_GROUPS, "Groups Allowed", FALSE,
+	 EDIT_TEXTBOX, refresh_part, _create_model_part2, admin_edit_part},
+	{G_TYPE_STRING, SORTID_NODES_ALLOWED, "Nodes Allowed Allocating", FALSE,
+	 EDIT_TEXTBOX, refresh_part, _create_model_part2, admin_edit_part},
+	{G_TYPE_STRING, SORTID_FEATURES, "Features", FALSE,
+	 EDIT_TEXTBOX, refresh_part, _create_model_part2, admin_edit_part},
+	{G_TYPE_STRING, SORTID_REASON, "Reason", FALSE,
+	 EDIT_TEXTBOX, refresh_part, _create_model_part2, admin_edit_part},
+#ifdef HAVE_BG
+	{G_TYPE_STRING, SORTID_NODELIST, "BP List", FALSE,
+	 EDIT_TEXTBOX, refresh_part, _create_model_part2, admin_edit_part},
+#else
+	{G_TYPE_STRING, SORTID_NODELIST, "NodeList", FALSE,
+	 EDIT_TEXTBOX, refresh_part, _create_model_part2, admin_edit_part},
+#endif
+	{G_TYPE_NONE, -1, NULL, FALSE, EDIT_NONE}
+};
+
 static display_data_t options_data_part[] = {
 	{G_TYPE_INT, SORTID_POS, NULL, FALSE, EDIT_NONE},
 	{G_TYPE_STRING, INFO_PAGE, "Full Info", TRUE, PART_PAGE},
+	{G_TYPE_STRING, PART_PAGE, "Edit Partition", TRUE, ADMIN_PAGE},
+	{G_TYPE_STRING, PART_PAGE, "Remove Partition", TRUE, ADMIN_PAGE},
 #ifdef HAVE_BG
 	{G_TYPE_STRING, PART_PAGE, "Drain Base Partitions",
 	 TRUE, ADMIN_PAGE | EXTRA_NODES},
@@ -210,9 +259,8 @@ static display_data_t options_data_part[] = {
 	{G_TYPE_STRING, PART_PAGE, "Update Node Features",
 	 TRUE, ADMIN_PAGE | EXTRA_NODES},
 #endif
-	{G_TYPE_STRING, PART_PAGE, "Change Part State",
+	{G_TYPE_STRING, PART_PAGE, "Change Partition State",
 	 TRUE, ADMIN_PAGE},
-	{G_TYPE_STRING, PART_PAGE, "Edit Part", TRUE, ADMIN_PAGE},
 	{G_TYPE_STRING, JOB_PAGE, "Jobs", TRUE, PART_PAGE},
 #ifdef HAVE_BG
 	{G_TYPE_STRING, BLOCK_PAGE, "Blocks", TRUE, PART_PAGE},
@@ -227,16 +275,15 @@ static display_data_t options_data_part[] = {
 };
 
 static display_data_t *local_display_data = NULL;
-
 static char *got_edit_signal = NULL;
 static char *got_features_edit_signal = NULL;
 
-static void _update_part_sub_record(sview_part_sub_t *sview_part_sub,
-				    GtkTreeStore *treestore,
-				    GtkTreeIter *iter);
 static void _append_part_sub_record(sview_part_sub_t *sview_part_sub,
 				    GtkTreeStore *treestore, GtkTreeIter *iter,
 				    int line);
+static void _update_part_sub_record(sview_part_sub_t *sview_part_sub,
+				    GtkTreeStore *treestore,
+				    GtkTreeIter *iter);
 
 static int _build_min_max_32_string(char *buffer, int buf_size,
 				    uint32_t min, uint32_t max, bool range)
@@ -268,7 +315,8 @@ static void _set_active_combo_part(GtkComboBox *combo,
 	int i = 0, unknown_found = 0;
 	char *upper = NULL;
 
-	gtk_tree_model_get(model, iter, type, &temp_char, -1);
+	if (model)
+		gtk_tree_model_get(model, iter, type, &temp_char, -1);
 	if (!temp_char)
 		goto end_it;
 	switch(type) {
@@ -327,6 +375,20 @@ static void _set_active_combo_part(GtkComboBox *combo,
 			}
 
 		break;
+	case SORTID_PREEMPT_MODE:
+		if (!strcasecmp(temp_char, "cancel"))
+			action = 0;
+		else if (!strcasecmp(temp_char, "checkpoint"))
+			action = 1;
+		else if (!strcasecmp(temp_char, "off"))
+			action = 2;
+		else if (!strcasecmp(temp_char, "requeue"))
+			action = 3;
+		else if (!strcasecmp(temp_char, "suspend"))
+			action = 4;
+		else
+			action = 2;	/* off */
+		break;
 	default:
 		break;
 	}
@@ -386,9 +448,8 @@ static const char *_set_part_msg(update_part_msg_t *part_msg,
 				 const char *new_text,
 				 int column)
 {
-	char *type = "";
+	char *type = "", *temp_char;
 	int temp_int = 0;
-	uint16_t temp_uint16 = 0;
 
 	global_edit_error = 0;
 
@@ -404,24 +465,32 @@ static const char *_set_part_msg(update_part_msg_t *part_msg,
 		if (!strcasecmp(new_text, "yes")) {
 			part_msg->flags |= PART_FLAG_DEFAULT;
 			part_msg->flags &= (~PART_FLAG_DEFAULT_CLR);
-		} else {
+		} else if (!strcasecmp(new_text, "no")) {
 			part_msg->flags &= (~PART_FLAG_DEFAULT);
 			part_msg->flags |= PART_FLAG_DEFAULT_CLR;
 		}
 		type = "default";
 		break;
+	case SORTID_GRACE_TIME:
+		temp_int = time_str2mins((char *)new_text);
+		type = "grace_time";
+		if (temp_int <= 0)
+			goto return_error;
+		/* convert to seconds */
+		part_msg->grace_time = (uint32_t)(temp_int * 60);
+		break;
 	case SORTID_HIDDEN:
 		if (!strcasecmp(new_text, "yes")) {
 			part_msg->flags |= PART_FLAG_HIDDEN;
 			part_msg->flags &= (~PART_FLAG_HIDDEN_CLR);
-		} else {
+		} else if (!strcasecmp(new_text, "no")) {
 			part_msg->flags &= (~PART_FLAG_HIDDEN);
 			part_msg->flags |= PART_FLAG_HIDDEN_CLR;
 		}
 		type = "hidden";
 		break;
 	case SORTID_TIMELIMIT:
-		if ((strcasecmp(new_text,"infinite") == 0))
+		if ((strcasecmp(new_text, "infinite") == 0))
 			temp_int = INFINITE;
 		else
 			temp_int = time_str2mins((char *)new_text);
@@ -432,17 +501,27 @@ static const char *_set_part_msg(update_part_msg_t *part_msg,
 		part_msg->max_time = (uint32_t)temp_int;
 		break;
 	case SORTID_PREEMPT_MODE:
-		temp_uint16 = preempt_mode_num(new_text);
+		if (!strcasecmp(new_text, "cancel"))
+			part_msg->preempt_mode = PREEMPT_MODE_CANCEL;
+		else if (!strcasecmp(new_text, "checkpoint"))
+			part_msg->preempt_mode = PREEMPT_MODE_CHECKPOINT;
+		else if (!strcasecmp(new_text, "off"))
+			part_msg->preempt_mode = PREEMPT_MODE_OFF;
+		else if (!strcasecmp(new_text, "requeue"))
+			part_msg->preempt_mode = PREEMPT_MODE_REQUEUE;
+		else if (!strcasecmp(new_text, "suspend"))
+			part_msg->preempt_mode = PREEMPT_MODE_SUSPEND;
 		type = "preempt_mode";
-		if (temp_uint16 == (uint16_t) NO_VAL)
-			goto return_error;
-		part_msg->preempt_mode = temp_uint16;
 		break;
 	case SORTID_PRIORITY:
 		temp_int = strtol(new_text, (char **)NULL, 10);
 		type = "priority";
 		part_msg->priority = (uint16_t)temp_int;
 		break;
+	case SORTID_NAME:
+		type = "name";
+		part_msg->name = xstrdup(new_text);
+		break;
 	case SORTID_NODES_MIN:
 		temp_int = strtol(new_text, (char **)NULL, 10);
 		type = "min_nodes";
@@ -455,11 +534,15 @@ static const char *_set_part_msg(update_part_msg_t *part_msg,
 		if (!strcasecmp(new_text, "infinite")) {
 			temp_int = INFINITE;
 		} else {
-			temp_int = strtol(new_text, (char **)NULL, 10);
+			temp_int = strtol(new_text, &temp_char, 10);
+			if ((temp_char[0] == 'k') || (temp_char[0] == 'K'))
+				temp_int *= 1024;
+			if ((temp_char[0] == 'm') || (temp_char[0] == 'M'))
+				temp_int *= (1024 * 1024);
 		}
 
 		type = "max_nodes";
-		if (temp_int <= 0 && temp_int != INFINITE)
+		if ((temp_int <= 0) && (temp_int != INFINITE))
 			goto return_error;
 		part_msg->max_nodes = (uint32_t)temp_int;
 		break;
@@ -467,7 +550,7 @@ static const char *_set_part_msg(update_part_msg_t *part_msg,
 		if (!strcasecmp(new_text, "yes")) {
 			part_msg->flags |= PART_FLAG_ROOT_ONLY;
 			part_msg->flags &= (~PART_FLAG_ROOT_ONLY_CLR);
-		} else {
+		} else if (!strcasecmp(new_text, "no")) {
 			part_msg->flags &= (~PART_FLAG_ROOT_ONLY);
 			part_msg->flags |= PART_FLAG_ROOT_ONLY_CLR;
 		}
@@ -545,7 +628,6 @@ static void _admin_edit_combo_box_part(GtkComboBox *combo,
 	GtkTreeIter iter;
 	int column = 0;
 	char *name = NULL;
-	const char *col_name = NULL;
 
 	if (!part_msg)
 		return;
@@ -563,7 +645,7 @@ static void _admin_edit_combo_box_part(GtkComboBox *combo,
 	gtk_tree_model_get(model, &iter, 0, &name, -1);
 	gtk_tree_model_get(model, &iter, 1, &column, -1);
 
-	col_name = _set_part_msg(part_msg, name, column);
+	(void) _set_part_msg(part_msg, name, column);
 	if (name)
 		g_free(name);
 }
@@ -811,8 +893,8 @@ static void _layout_part_record(GtkTreeView *treeview,
 	snprintf(ind_cnt, sizeof(ind_cnt), "%s/%s/%s",
 		 tmp_cnt, tmp_cnt1, tmp_cnt2);
 
-	for(i = 0; i < SORTID_CNT; i++) {
-		switch(i) {
+	for (i = 0; i < SORTID_CNT; i++) {
+		switch (i) {
 		case SORTID_PART_STATE:
 			switch(part_ptr->state_up) {
 			case PARTITION_UP:
@@ -851,8 +933,13 @@ static void _layout_part_record(GtkTreeView *treeview,
 				yes_no = 0;
 			break;
 		case SORTID_FEATURES:
-			sview_part_sub = list_peek(sview_part_info->sub_list);
-			temp_char = sview_part_sub->features;
+			if (sview_part_sub)
+				temp_char = sview_part_sub->features;
+			else
+				temp_char = "";
+			break;
+		case SORTID_GRACE_TIME:
+			limit_set = part_ptr->grace_time;
 			break;
 		case SORTID_GROUPS:
 			if (part_ptr->allow_groups)
@@ -916,7 +1003,10 @@ static void _layout_part_record(GtkTreeView *treeview,
 			break;
 		case SORTID_REASON:
 			sview_part_sub = list_peek(sview_part_info->sub_list);
-			temp_char = sview_part_sub->features;
+			if (sview_part_sub)
+				temp_char = sview_part_sub->reason;
+			else
+				temp_char = "";
 			break;
 		case SORTID_ROOT:
 			if (part_ptr->flags & PART_FLAG_ROOT_ONLY)
@@ -997,152 +1087,153 @@ static void _update_part_record(sview_part_info_t *sview_part_info,
 				GtkTreeStore *treestore,
 				GtkTreeIter *iter)
 {
-	char time_buf[20], tmp_buf[20];
-	char tmp_cnt[8];
-	char *temp_char = NULL;
-	uint16_t tmp_uint16 = 0;
+	char tmp_prio[40], tmp_size[40], tmp_share_buf[40], tmp_time[40];
+	char tmp_max_nodes[40], tmp_min_nodes[40], tmp_grace[40];
+	char tmp_cpu_cnt[40], tmp_node_cnt[40];
+	char *tmp_alt, *tmp_default, *tmp_groups, *tmp_hidden;
+	char *tmp_root, *tmp_share, *tmp_state;
+	uint16_t tmp_preempt;
 	partition_info_t *part_ptr = sview_part_info->part_ptr;
 	GtkTreeIter sub_iter;
-	int childern = 0;
 
-	gtk_tree_store_set(treestore, iter, SORTID_COLOR,
-			   sview_colors[sview_part_info->color_inx], -1);
-	gtk_tree_store_set(treestore, iter, SORTID_COLOR_INX,
-			   sview_part_info->color_inx, -1);
+	if (part_ptr->alternate)
+		tmp_alt = part_ptr->alternate;
+	else
+		tmp_alt = "";
 
-	gtk_tree_store_set(treestore, iter, SORTID_NAME, part_ptr->name, -1);
+	if (cluster_flags & CLUSTER_FLAG_BG)
+		convert_num_unit((float)part_ptr->total_cpus, tmp_cpu_cnt,
+				 sizeof(tmp_cpu_cnt), UNIT_NONE);
+	else
+		sprintf(tmp_cpu_cnt, "%u", part_ptr->total_cpus);
 
 	if (part_ptr->flags & PART_FLAG_DEFAULT)
-		temp_char = "yes";
+		tmp_default = "yes";
+	else
+		tmp_default = "no";
+
+	if (part_ptr->allow_groups)
+		tmp_groups = part_ptr->allow_groups;
 	else
-		temp_char = "no";
-	gtk_tree_store_set(treestore, iter, SORTID_DEFAULT, temp_char, -1);
+		tmp_groups = "all";
 
 	if (part_ptr->flags & PART_FLAG_HIDDEN)
-		temp_char = "yes";
+		tmp_hidden = "yes";
 	else
-		temp_char = "no";
-	gtk_tree_store_set(treestore, iter, SORTID_HIDDEN, temp_char, -1);
+		tmp_hidden = "no";
 
-	if (part_ptr->alternate)
-		temp_char = part_ptr->alternate;
+	if (part_ptr->grace_time == (uint32_t) NO_VAL)
+		snprintf(tmp_grace, sizeof(tmp_grace), "none");
+	else {
+		secs2time_str(part_ptr->grace_time,
+			      tmp_grace, sizeof(tmp_grace));
+	}
+
+	if (part_ptr->max_nodes == (uint32_t) INFINITE)
+		snprintf(tmp_max_nodes, sizeof(tmp_max_nodes), "infinite");
+	else {
+		convert_num_unit((float)part_ptr->max_nodes,
+				 tmp_max_nodes, sizeof(tmp_max_nodes),
+				 UNIT_NONE);
+	}
+
+	if (part_ptr->min_nodes == (uint32_t) INFINITE)
+		snprintf(tmp_min_nodes, sizeof(tmp_min_nodes), "infinite");
+	else {
+		convert_num_unit((float)part_ptr->min_nodes,
+				 tmp_min_nodes, sizeof(tmp_min_nodes), UNIT_NONE);
+	}
+
+	if (cluster_flags & CLUSTER_FLAG_BG)
+		convert_num_unit((float)part_ptr->total_nodes, tmp_node_cnt,
+				 sizeof(tmp_node_cnt), UNIT_NONE);
+	else
+		sprintf(tmp_node_cnt, "%u", part_ptr->total_nodes);
+
+	if (part_ptr->flags & PART_FLAG_ROOT_ONLY)
+		tmp_root = "yes";
 	else
-		temp_char = "";
-	gtk_tree_store_set(treestore, iter, SORTID_ALTERNATE, temp_char, -1);
+		tmp_root = "no";
 
 	if (part_ptr->state_up == PARTITION_UP)
-		temp_char = "up";
+		tmp_state = "up";
 	else if (part_ptr->state_up == PARTITION_DOWN)
-		temp_char = "down";
+		tmp_state = "down";
 	else if (part_ptr->state_up == PARTITION_INACTIVE)
-		temp_char = "inact";
+		tmp_state = "inact";
 	else if (part_ptr->state_up == PARTITION_DRAIN)
-		temp_char = "drain";
+		tmp_state = "drain";
 	else
-		temp_char = "unk";
-	gtk_tree_store_set(treestore, iter, SORTID_PART_STATE, temp_char, -1);
+		tmp_state = "unk";
 
-	if (part_ptr->max_time == INFINITE)
-		snprintf(time_buf, sizeof(time_buf), "infinite");
-	else {
-		secs2time_str((part_ptr->max_time * 60),
-			      time_buf, sizeof(time_buf));
-	}
-
-	gtk_tree_store_set(treestore, iter, SORTID_TIMELIMIT, time_buf, -1);
-
-	_build_min_max_32_string(time_buf, sizeof(time_buf),
+	_build_min_max_32_string(tmp_size, sizeof(tmp_size),
 				 part_ptr->min_nodes,
 				 part_ptr->max_nodes, true);
-	gtk_tree_store_set(treestore, iter, SORTID_JOB_SIZE, time_buf, -1);
 
-	tmp_uint16 = part_ptr->preempt_mode;
-	if (tmp_uint16 == (uint16_t) NO_VAL)
-		tmp_uint16 = slurm_get_preempt_mode();	/* use cluster param */
-	gtk_tree_store_set(treestore, iter, SORTID_PREEMPT_MODE,
-			   preempt_mode_string(tmp_uint16), -1);
+	tmp_preempt = part_ptr->preempt_mode;
+	if (tmp_preempt == (uint16_t) NO_VAL)
+		tmp_preempt = slurm_get_preempt_mode();	/* use cluster param */
 
 	convert_num_unit((float)part_ptr->priority,
-			 time_buf, sizeof(time_buf), UNIT_NONE);
-	gtk_tree_store_set(treestore, iter, SORTID_PRIORITY,
-			   time_buf, -1);
-
-	if (part_ptr->min_nodes == (uint32_t) INFINITE)
-		snprintf(time_buf, sizeof(time_buf), "infinite");
-	else {
-		convert_num_unit((float)part_ptr->min_nodes,
-				 time_buf, sizeof(time_buf), UNIT_NONE);
-	}
-	gtk_tree_store_set(treestore, iter, SORTID_NODES_MIN,
-			   time_buf, -1);
-	if (part_ptr->max_nodes == (uint32_t) INFINITE)
-		snprintf(time_buf, sizeof(time_buf), "infinite");
-	else {
-		convert_num_unit((float)part_ptr->max_nodes,
-				 time_buf, sizeof(time_buf), UNIT_NONE);
-	}
-	gtk_tree_store_set(treestore, iter, SORTID_NODES_MAX,
-			   time_buf, -1);
-
-	if (part_ptr->flags & PART_FLAG_ROOT_ONLY)
-		temp_char = "yes";
-	else
-		temp_char = "no";
-	gtk_tree_store_set(treestore, iter, SORTID_ROOT, temp_char, -1);
+			 tmp_prio, sizeof(tmp_prio), UNIT_NONE);
 
 	if (part_ptr->max_share & SHARED_FORCE) {
-		snprintf(tmp_buf, sizeof(tmp_buf), "force:%u",
+		snprintf(tmp_share_buf, sizeof(tmp_share_buf), "force:%u",
 			 (part_ptr->max_share & ~(SHARED_FORCE)));
-		temp_char = tmp_buf;
-	} else if (part_ptr->max_share == 0)
-		temp_char = "exclusive";
-	else if (part_ptr->max_share > 1) {
-		snprintf(tmp_buf, sizeof(tmp_buf), "yes:%u",
+		tmp_share = tmp_share_buf;
+	} else if (part_ptr->max_share == 0) {
+		tmp_share = "exclusive";
+	} else if (part_ptr->max_share > 1) {
+		snprintf(tmp_share_buf, sizeof(tmp_share_buf), "yes:%u",
 			 part_ptr->max_share);
-		temp_char = tmp_buf;
+		tmp_share = tmp_share_buf;
 	} else
-		temp_char = "no";
-	gtk_tree_store_set(treestore, iter, SORTID_SHARE, temp_char, -1);
+		tmp_share = "no";
 
-	if (part_ptr->allow_groups)
-		temp_char = part_ptr->allow_groups;
-	else
-		temp_char = "all";
-	gtk_tree_store_set(treestore, iter, SORTID_GROUPS, temp_char, -1);
-
-	if (cluster_flags & CLUSTER_FLAG_BG)
-		convert_num_unit((float)part_ptr->total_nodes, tmp_cnt,
-				 sizeof(tmp_cnt), UNIT_NONE);
-	else
-		sprintf(tmp_cnt, "%u", part_ptr->total_nodes);
-	gtk_tree_store_set(treestore, iter, SORTID_NODES, tmp_cnt, -1);
-
-	if (cluster_flags & CLUSTER_FLAG_BG)
-		convert_num_unit((float)part_ptr->total_cpus, tmp_cnt,
-				 sizeof(tmp_cnt), UNIT_NONE);
-	else
-		sprintf(tmp_cnt, "%u", part_ptr->total_cpus);
-
-	gtk_tree_store_set(treestore, iter, SORTID_CPUS, tmp_cnt, -1);
-
-	gtk_tree_store_set(treestore, iter, SORTID_NODELIST,
-			   part_ptr->nodes, -1);
+	if (part_ptr->max_time == INFINITE)
+		snprintf(tmp_time, sizeof(tmp_time), "infinite");
+	else {
+		secs2time_str((part_ptr->max_time * 60),
+			      tmp_time, sizeof(tmp_time));
+	}
 
+	/* Combining these records provides a slight performance improvement
+	 * NOTE: Some of these fields are cleared here and filled in based upon
+	 * the configuration of nodes within this partition. */
 	gtk_tree_store_set(treestore, iter,
-			   SORTID_NODE_INX, part_ptr->node_inx, -1);
-
-	gtk_tree_store_set(treestore, iter, SORTID_ONLY_LINE, 0, -1);
-	/* clear out info for the main listing */
-	gtk_tree_store_set(treestore, iter, SORTID_NODE_STATE, "", -1);
-	gtk_tree_store_set(treestore, iter, SORTID_NODE_STATE_NUM, -1, -1);
-	gtk_tree_store_set(treestore, iter, SORTID_TMP_DISK, "", -1);
-	gtk_tree_store_set(treestore, iter, SORTID_MEM, "", -1);
-	gtk_tree_store_set(treestore, iter, SORTID_UPDATED, 1, -1);
-	gtk_tree_store_set(treestore, iter, SORTID_FEATURES, "", -1);
-	gtk_tree_store_set(treestore, iter, SORTID_REASON, "", -1);
-
-	childern = gtk_tree_model_iter_children(GTK_TREE_MODEL(treestore),
-						&sub_iter, iter);
+			   SORTID_ALTERNATE,  tmp_alt,
+			   SORTID_COLOR,
+				sview_colors[sview_part_info->color_inx],
+			   SORTID_COLOR_INX,  sview_part_info->color_inx,
+			   SORTID_CPUS,       tmp_cpu_cnt,
+			   SORTID_DEFAULT,    tmp_default,
+			   SORTID_FEATURES,   "",
+			   SORTID_GRACE_TIME, tmp_grace,
+			   SORTID_GROUPS,     tmp_groups,
+			   SORTID_HIDDEN,     tmp_hidden,
+			   SORTID_JOB_SIZE,   tmp_size,
+			   SORTID_MEM,        "",
+			   SORTID_NAME,       part_ptr->name,
+			   SORTID_NODE_INX,   part_ptr->node_inx,
+			   SORTID_NODE_STATE, "",
+			   SORTID_NODE_STATE_NUM, -1,
+			   SORTID_NODES,      tmp_node_cnt,
+			   SORTID_NODES_MAX,  tmp_max_nodes,
+			   SORTID_NODES_MIN,  tmp_min_nodes,
+			   SORTID_NODELIST,   part_ptr->nodes,
+			   SORTID_ONLY_LINE,  0,
+			   SORTID_PART_STATE, tmp_state,
+			   SORTID_PREEMPT_MODE,
+				preempt_mode_string(tmp_preempt),
+			   SORTID_PRIORITY,   tmp_prio,
+			   SORTID_REASON,     "",
+			   SORTID_ROOT,       tmp_root,
+			   SORTID_SHARE,      tmp_share,
+			   SORTID_TIMELIMIT,  tmp_time,
+			   SORTID_TMP_DISK,   "",
+			   SORTID_UPDATED,    1,
+			   -1);
+
 	if (gtk_tree_model_iter_children(GTK_TREE_MODEL(treestore),
 					 &sub_iter, iter))
 		_subdivide_part(sview_part_info,
@@ -1157,23 +1248,13 @@ static void _update_part_record(sview_part_info_t *sview_part_info,
 static void _update_part_sub_record(sview_part_sub_t *sview_part_sub,
 				    GtkTreeStore *treestore, GtkTreeIter *iter)
 {
-	char tmp_cnt[20];
-	char *cpu_tmp = NULL;
-	char *node_tmp = NULL;
 	partition_info_t *part_ptr = sview_part_sub->part_ptr;
-	char *upper = NULL, *lower = NULL;
-	char *tmp;
+	char *tmp_cpus = NULL, *tmp_nodes = NULL, *tmp_nodelist;
+	char *tmp_state_lower, *tmp_state_upper;
+	char tmp_cnt[40], tmp_disk[40], tmp_mem[40];
 
-	gtk_tree_store_set(treestore, iter, SORTID_NAME, part_ptr->name, -1);
-
-	upper = node_state_string(sview_part_sub->node_state);
-	lower = str_tolower(upper);
-	gtk_tree_store_set(treestore, iter, SORTID_NODE_STATE,
-			   lower, -1);
-	xfree(lower);
-
-	gtk_tree_store_set(treestore, iter, SORTID_NODE_STATE_NUM,
-			   sview_part_sub->node_state, -1);
+	tmp_state_upper = node_state_string(sview_part_sub->node_state);
+	tmp_state_lower = str_tolower(tmp_state_upper);
 
 	if ((sview_part_sub->node_state & NODE_STATE_BASE)
 	    == NODE_STATE_MIXED) {
@@ -1181,85 +1262,90 @@ static void _update_part_sub_record(sview_part_sub_t *sview_part_sub,
 			convert_num_unit((float)sview_part_sub->cpu_alloc_cnt,
 					 tmp_cnt,
 					 sizeof(tmp_cnt), UNIT_NONE);
-			xstrfmtcat(cpu_tmp, "Alloc:%s", tmp_cnt);
+			xstrfmtcat(tmp_cpus, "Alloc:%s", tmp_cnt);
 			if (cluster_flags & CLUSTER_FLAG_BG) {
 				convert_num_unit(
 					(float)(sview_part_sub->cpu_alloc_cnt
 						/ cpus_per_node),
 					tmp_cnt,
 					sizeof(tmp_cnt), UNIT_NONE);
-				xstrfmtcat(node_tmp, "Alloc:%s", tmp_cnt);
+				xstrfmtcat(tmp_nodes, "Alloc:%s", tmp_cnt);
 			}
 		}
 		if (sview_part_sub->cpu_error_cnt) {
 			convert_num_unit((float)sview_part_sub->cpu_error_cnt,
 					 tmp_cnt,
 					 sizeof(tmp_cnt), UNIT_NONE);
-			if (cpu_tmp)
-				xstrcat(cpu_tmp, " ");
-			xstrfmtcat(cpu_tmp, "Err:%s", tmp_cnt);
+			if (tmp_cpus)
+				xstrcat(tmp_cpus, " ");
+			xstrfmtcat(tmp_cpus, "Err:%s", tmp_cnt);
 			if (cluster_flags & CLUSTER_FLAG_BG) {
 				convert_num_unit(
 					(float)(sview_part_sub->cpu_error_cnt
 						/ cpus_per_node),
 					tmp_cnt,
 					sizeof(tmp_cnt), UNIT_NONE);
-				if (node_tmp)
-					xstrcat(node_tmp, " ");
-				xstrfmtcat(node_tmp, "Err:%s", tmp_cnt);
+				if (tmp_nodes)
+					xstrcat(tmp_nodes, " ");
+				xstrfmtcat(tmp_nodes, "Err:%s", tmp_cnt);
 			}
 		}
 		if (sview_part_sub->cpu_idle_cnt) {
 			convert_num_unit((float)sview_part_sub->cpu_idle_cnt,
 					 tmp_cnt,
 					 sizeof(tmp_cnt), UNIT_NONE);
-			if (cpu_tmp)
-				xstrcat(cpu_tmp, " ");
-			xstrfmtcat(cpu_tmp, "Idle:%s", tmp_cnt);
+			if (tmp_cpus)
+				xstrcat(tmp_cpus, " ");
+			xstrfmtcat(tmp_cpus, "Idle:%s", tmp_cnt);
 			if (cluster_flags & CLUSTER_FLAG_BG) {
 				convert_num_unit(
 					(float)(sview_part_sub->cpu_idle_cnt
 						/ cpus_per_node),
 					tmp_cnt,
 					sizeof(tmp_cnt), UNIT_NONE);
-				if (node_tmp)
-					xstrcat(node_tmp, " ");
-				xstrfmtcat(node_tmp, "Idle:%s", tmp_cnt);
+				if (tmp_nodes)
+					xstrcat(tmp_nodes, " ");
+				xstrfmtcat(tmp_nodes, "Idle:%s", tmp_cnt);
 			}
 		}
 	} else {
-		cpu_tmp = xmalloc(20);
+		tmp_cpus = xmalloc(20);
 		convert_num_unit((float)sview_part_sub->cpu_idle_cnt,
-				 cpu_tmp, 20, UNIT_NONE);
+				 tmp_cpus, 20, UNIT_NONE);
 	}
-	gtk_tree_store_set(treestore, iter, SORTID_CPUS, cpu_tmp, -1);
-	xfree(cpu_tmp);
-
-	convert_num_unit((float)sview_part_sub->disk_total, tmp_cnt,
-			 sizeof(tmp_cnt), UNIT_NONE);
-	gtk_tree_store_set(treestore, iter, SORTID_TMP_DISK, tmp_cnt, -1);
 
-	convert_num_unit((float)sview_part_sub->mem_total, tmp_cnt,
-			 sizeof(tmp_cnt), UNIT_MEGA);
-	gtk_tree_store_set(treestore, iter, SORTID_MEM, tmp_cnt, -1);
-
-	if (!node_tmp) {
+	if (!tmp_nodes) {
 		convert_num_unit((float)sview_part_sub->node_cnt, tmp_cnt,
 				 sizeof(tmp_cnt), UNIT_NONE);
-		node_tmp = xstrdup(tmp_cnt);
+		tmp_nodes = xstrdup(tmp_cnt);
 	}
-	gtk_tree_store_set(treestore, iter, SORTID_NODES, node_tmp, -1);
-	xfree(node_tmp);
 
-	tmp = hostlist_ranged_string_xmalloc(sview_part_sub->hl);
-	gtk_tree_store_set(treestore, iter, SORTID_NODELIST, tmp, -1);
-	xfree(tmp);
-	gtk_tree_store_set(treestore, iter, SORTID_UPDATED, 1, -1);
+	convert_num_unit((float)sview_part_sub->disk_total, tmp_disk,
+			 sizeof(tmp_disk), UNIT_NONE);
+
+	convert_num_unit((float)sview_part_sub->mem_total, tmp_mem,
+			 sizeof(tmp_mem), UNIT_MEGA);
 
-	gtk_tree_store_set(treestore, iter, SORTID_FEATURES,
-			   sview_part_sub->features, -1);
-	gtk_tree_store_set(treestore, iter, SORTID_REASON,
-			   sview_part_sub->reason, -1);
+	tmp_nodelist = hostlist_ranged_string_xmalloc(sview_part_sub->hl);
+
+	gtk_tree_store_set(treestore, iter,
+			   SORTID_CPUS,           tmp_cpus,
+			   SORTID_FEATURES,       sview_part_sub->features,
+			   SORTID_MEM,            tmp_mem,
+			   SORTID_NAME,           part_ptr->name,
+			   SORTID_NODE_STATE_NUM, sview_part_sub->node_state,
+			   SORTID_NODELIST,       tmp_nodelist,
+			   SORTID_NODES,          tmp_nodes,
+			   SORTID_NODE_STATE,     tmp_state_lower,
+			   SORTID_REASON,         sview_part_sub->reason,
+			   SORTID_TMP_DISK,       tmp_disk,
+			   SORTID_UPDATED,        1,
+			   -1);
+
+	xfree(tmp_cpus);
+	xfree(tmp_nodelist);
+	xfree(tmp_nodes);
+	xfree(tmp_state_lower);
 
 	return;
 }
@@ -1618,14 +1704,11 @@ static void _display_info_part(List info_list,	popup_info_t *popup_win)
 	sview_part_info_t *sview_part_info = NULL;
 	int update = 0;
 	int j = 0;
-	int first_time = 0;
 
 	if (!spec_info->search_info->gchar_data) {
 		//info = xstrdup("No pointer given!");
 		goto finished;
 	}
-	if (!list_count(popup_win->grid_button_list))
-		first_time = 1;
 
 need_refresh:
 	if (!spec_info->display_widget) {
@@ -1642,7 +1725,7 @@ need_refresh:
 	while ((sview_part_info = (sview_part_info_t*) list_next(itr))) {
 		part_ptr = sview_part_info->part_ptr;
 		if (!strcmp(part_ptr->name, name)) {
-			j=0;
+			j = 0;
 			while (part_ptr->node_inx[j] >= 0) {
 				change_grid_color(
 					popup_win->grid_button_list,
@@ -1705,6 +1788,50 @@ static void _process_each_partition(GtkTreeModel  *model,
 }
 /*process_each_partition ^^^*/
 
+extern GtkWidget *create_part_entry(update_part_msg_t *part_msg,
+				    GtkTreeModel *model, GtkTreeIter *iter)
+{
+	GtkScrolledWindow *window = create_scrolled_window();
+	GtkBin *bin = NULL;
+	GtkViewport *view = NULL;
+	GtkTable *table = NULL;
+	int i = 0, row = 0;
+	display_data_t *display_data = create_data_part;
+
+	gtk_scrolled_window_set_policy(window,
+				       GTK_POLICY_NEVER,
+				       GTK_POLICY_AUTOMATIC);
+	bin = GTK_BIN(&window->container);
+	view = GTK_VIEWPORT(bin->child);
+	bin = GTK_BIN(&view->bin);
+	table = GTK_TABLE(bin->child);
+	gtk_table_resize(table, SORTID_CNT, 2);
+
+	gtk_table_set_homogeneous(table, FALSE);
+
+	for(i = 0; i < SORTID_CNT; i++) {
+		while (display_data++) {
+			if (display_data->id == -1)
+				break;
+			if (!display_data->name)
+				continue;
+			if (display_data->id != i)
+				continue;
+			display_admin_edit(
+				table, part_msg, &row, model, iter,
+				display_data,
+				G_CALLBACK(_admin_edit_combo_box_part),
+				G_CALLBACK(_admin_focus_out_part),
+				_set_active_combo_part);
+			break;
+		}
+		display_data = create_data_part;
+	}
+	gtk_table_resize(table, row, 2);
+
+	return GTK_WIDGET(window);
+}
+
 extern bool check_part_includes_node(int node_dx)
 {
 	partition_info_t *part_ptr = NULL;
@@ -1839,81 +1966,145 @@ end_it:
 	return error_code;
 }
 
-extern GtkListStore *create_model_part(int type)
+static GtkListStore *_create_model_part2(int type)
 {
 	GtkListStore *model = NULL;
 	GtkTreeIter iter;
-	char *upper = NULL, *lower = NULL;
-	int i=0;
-	switch(type) {
+
+	switch (type) {
 	case SORTID_DEFAULT:
+	case SORTID_HIDDEN:
 		model = gtk_list_store_new(2, G_TYPE_STRING, G_TYPE_INT);
 		gtk_list_store_append(model, &iter);
 		gtk_list_store_set(model, &iter,
-				   0, "yes",
-				   1, SORTID_DEFAULT,
-				   -1);
+				   0, "no (default)", 1, SORTID_DEFAULT, -1);
 		gtk_list_store_append(model, &iter);
 		gtk_list_store_set(model, &iter,
-				   0, "no",
-				   1, SORTID_DEFAULT,
-				   -1);
+				   0, "yes", 1, SORTID_DEFAULT, -1);
 		break;
-	case SORTID_HIDDEN:
+	case SORTID_ROOT:
 		model = gtk_list_store_new(2, G_TYPE_STRING, G_TYPE_INT);
 		gtk_list_store_append(model, &iter);
 		gtk_list_store_set(model, &iter,
-				   0, "yes",
-				   1, SORTID_HIDDEN,
-				   -1);
+				   0, "yes (default)", 1, SORTID_ROOT, -1);
 		gtk_list_store_append(model, &iter);
 		gtk_list_store_set(model, &iter,
-				   0, "no",
-				   1, SORTID_HIDDEN,
-				   -1);
-
+				   0, "no", 1, SORTID_ROOT, -1);
+		break;
+	case SORTID_SHARE:
+		model = gtk_list_store_new(2, G_TYPE_STRING, G_TYPE_INT);
+		gtk_list_store_append(model, &iter);
+		gtk_list_store_set(model, &iter,
+				   0, "no (default)", 1, SORTID_SHARE, -1);
+		gtk_list_store_append(model, &iter);
+		gtk_list_store_set(model, &iter,
+				   0, "yes", 1, SORTID_SHARE, -1);
+		gtk_list_store_append(model, &iter);
+		gtk_list_store_set(model, &iter,
+				   0, "force", 1, SORTID_SHARE, -1);
+		gtk_list_store_append(model, &iter);
+		gtk_list_store_set(model, &iter,
+				   0, "exclusive", 1, SORTID_SHARE, -1);
+		break;
+	case SORTID_PART_STATE:
+		model = gtk_list_store_new(2, G_TYPE_STRING, G_TYPE_INT);
+		gtk_list_store_append(model, &iter);
+		gtk_list_store_set(model, &iter,
+				   0, "up (default)", 1, SORTID_PART_STATE, -1);
+		gtk_list_store_append(model, &iter);
+		gtk_list_store_set(model, &iter,
+				   0, "down", 1, SORTID_PART_STATE, -1);
+		gtk_list_store_append(model, &iter);
+		gtk_list_store_set(model, &iter,
+				   0, "inactive", 1, SORTID_PART_STATE, -1);
+		gtk_list_store_append(model, &iter);
+		gtk_list_store_set(model, &iter,
+				   0, "drain", 1, SORTID_PART_STATE, -1);
 		break;
 	case SORTID_PREEMPT_MODE:
-	case SORTID_PRIORITY:
-	case SORTID_TIMELIMIT:
-	case SORTID_NODES_MIN:
-	case SORTID_NODES_MAX:
+		model = gtk_list_store_new(2, G_TYPE_STRING, G_TYPE_INT);
+		gtk_list_store_append(model, &iter);
+		gtk_list_store_set(model, &iter, 0,
+				preempt_mode_string(slurm_get_preempt_mode()),
+				1, SORTID_PREEMPT_MODE, -1);
+		gtk_list_store_append(model, &iter);
+		gtk_list_store_set(model, &iter,
+				   0, "cancel", 1, SORTID_PREEMPT_MODE, -1);
+		gtk_list_store_append(model, &iter);
+		gtk_list_store_set(model, &iter,
+				   0, "checkpoint", 1, SORTID_PREEMPT_MODE,-1);
+		gtk_list_store_append(model, &iter);
+		gtk_list_store_set(model, &iter,
+				   0, "off", 1, SORTID_PREEMPT_MODE, -1);
+		gtk_list_store_append(model, &iter);
+		gtk_list_store_set(model, &iter,
+				   0, "requeue", 1, SORTID_PREEMPT_MODE, -1);
+		gtk_list_store_append(model, &iter);
+		gtk_list_store_set(model, &iter,
+				   0, "suspend", 1, SORTID_PREEMPT_MODE, -1);
 		break;
+	}
+
+	return model;
+}
+
+extern GtkListStore *create_model_part(int type)
+{
+	GtkListStore *model = NULL;
+	GtkTreeIter iter;
+	char *upper = NULL, *lower = NULL;
+	int i = 0;
+
+	switch (type) {
+	case SORTID_DEFAULT:
+	case SORTID_HIDDEN:
 	case SORTID_ROOT:
 		model = gtk_list_store_new(2, G_TYPE_STRING, G_TYPE_INT);
 		gtk_list_store_append(model, &iter);
 		gtk_list_store_set(model, &iter,
-				   0, "yes",
-				   1, SORTID_ROOT,
-				   -1);
+				   0, "yes", 1, type, -1);
 		gtk_list_store_append(model, &iter);
 		gtk_list_store_set(model, &iter,
-				   0, "no",
-				   1, SORTID_ROOT,
-				   -1);
+				   0, "no", 1, type, -1);
+		break;
+	case SORTID_PREEMPT_MODE:
+		model = gtk_list_store_new(2, G_TYPE_STRING, G_TYPE_INT);
+		gtk_list_store_append(model, &iter);
+		gtk_list_store_set(model, &iter,
+				   0, "cancel", 1, SORTID_PREEMPT_MODE, -1);
+		gtk_list_store_append(model, &iter);
+		gtk_list_store_set(model, &iter,
+				   0, "checkpoint", 1, SORTID_PREEMPT_MODE,-1);
+		gtk_list_store_append(model, &iter);
+		gtk_list_store_set(model, &iter,
+				   0, "off", 1, SORTID_PREEMPT_MODE, -1);
+		gtk_list_store_append(model, &iter);
+		gtk_list_store_set(model, &iter,
+				   0, "requeue", 1, SORTID_PREEMPT_MODE, -1);
+		gtk_list_store_append(model, &iter);
+		gtk_list_store_set(model, &iter,
+				   0, "suspend", 1, SORTID_PREEMPT_MODE, -1);
+		break;
+	case SORTID_GRACE_TIME:
+	case SORTID_PRIORITY:
+	case SORTID_TIMELIMIT:
+	case SORTID_NODES_MIN:
+	case SORTID_NODES_MAX:
 		break;
 	case SORTID_SHARE:
 		model = gtk_list_store_new(2, G_TYPE_STRING, G_TYPE_INT);
 		gtk_list_store_append(model, &iter);
 		gtk_list_store_set(model, &iter,
-				   0, "force",
-				   1, SORTID_SHARE,
-				   -1);
+				   0, "force", 1, SORTID_SHARE, -1);
 		gtk_list_store_append(model, &iter);
 		gtk_list_store_set(model, &iter,
-				   0, "no",
-				   1, SORTID_SHARE,
-				   -1);
+				   0, "no", 1, SORTID_SHARE, -1);
 		gtk_list_store_append(model, &iter);
 		gtk_list_store_set(model, &iter,
-				   0, "yes",
-				   1, SORTID_SHARE,
-				   -1);
+				   0, "yes", 1, SORTID_SHARE, -1);
 		gtk_list_store_append(model, &iter);
 		gtk_list_store_set(model, &iter,
-				   0, "exclusive",
-				   1, SORTID_SHARE,
-				   -1);
+				   0, "exclusive", 1, SORTID_SHARE, -1);
 		break;
 	case SORTID_GROUPS:
 		break;
@@ -1923,37 +2114,25 @@ extern GtkListStore *create_model_part(int type)
 		model = gtk_list_store_new(2, G_TYPE_STRING, G_TYPE_INT);
 		gtk_list_store_append(model, &iter);
 		gtk_list_store_set(model, &iter,
-				   0, "up",
-				   1, SORTID_PART_STATE,
-				   -1);
+				   0, "up", 1, SORTID_PART_STATE, -1);
 		gtk_list_store_append(model, &iter);
 		gtk_list_store_set(model, &iter,
-				   0, "down",
-				   1, SORTID_PART_STATE,
-				   -1);
+				   0, "down", 1, SORTID_PART_STATE, -1);
 		gtk_list_store_append(model, &iter);
 		gtk_list_store_set(model, &iter,
-				   0, "inactive",
-				   1, SORTID_PART_STATE,
-				   -1);
+				   0, "inactive", 1, SORTID_PART_STATE, -1);
 		gtk_list_store_append(model, &iter);
 		gtk_list_store_set(model, &iter,
-				   0, "drain",
-				   1, SORTID_PART_STATE,
-				   -1);
+				   0, "drain", 1, SORTID_PART_STATE, -1);
 		break;
 	case SORTID_NODE_STATE:
 		model = gtk_list_store_new(2, G_TYPE_STRING, G_TYPE_INT);
 		gtk_list_store_append(model, &iter);
 		gtk_list_store_set(model, &iter,
-				   0, "drain",
-				   1, SORTID_NODE_STATE,
-				   -1);
+				   0, "drain", 1, SORTID_NODE_STATE, -1);
 		gtk_list_store_append(model, &iter);
 		gtk_list_store_set(model, &iter,
-				   0, "resume",
-				   1, SORTID_NODE_STATE,
-				   -1);
+				   0, "resume", 1, SORTID_NODE_STATE, -1);
 		for(i = 0; i < NODE_STATE_END; i++) {
 			upper = node_state_string(i);
 			if (!strcmp(upper, "UNKNOWN"))
@@ -1962,15 +2141,13 @@ extern GtkListStore *create_model_part(int type)
 			gtk_list_store_append(model, &iter);
 			lower = str_tolower(upper);
 			gtk_list_store_set(model, &iter,
-					   0, lower,
-					   1, SORTID_NODE_STATE,
-					   -1);
+					   0, lower, 1, SORTID_NODE_STATE, -1);
 			xfree(lower);
 		}
 
 		break;
-
 	}
+
 	return model;
 }
 
@@ -2063,7 +2240,7 @@ extern void get_info_part(GtkTable *table, display_data_t *display_data)
 	static GtkWidget *display_widget = NULL;
 	List info_list = NULL;
 	int changed = 1;
-	int j=0;
+	int j, k;
 	sview_part_info_t *sview_part_info = NULL;
 	partition_info_t *part_ptr = NULL;
 	ListIterator itr = NULL;
@@ -2155,33 +2332,35 @@ display_it:
 					 &path, &focus_column);
 	}
 	if (!path) {
+		int array_size = node_info_ptr->record_count;
+		int  *color_inx = xmalloc(sizeof(int) * array_size);
+		bool *color_set_flag = xmalloc(sizeof(bool) * array_size);
 		itr = list_iterator_create(info_list);
 		while ((sview_part_info = list_next(itr))) {
 			part_ptr = sview_part_info->part_ptr;
-			j=0;
+			j = 0;
 			while (part_ptr->node_inx[j] >= 0) {
-				change_grid_color(grid_button_list,
-						  part_ptr->node_inx[j],
-						  part_ptr->node_inx[j+1],
-						  sview_part_info->color_inx,
-						  true, 0);
+				for (k = part_ptr->node_inx[j];
+				     k <= part_ptr->node_inx[j+1]; k++) {
+					color_set_flag[k] = true;
+					color_inx[k] = sview_part_info->
+						       color_inx;
+				}
 				j += 2;
 			}
 		}
 		list_iterator_destroy(itr);
+		change_grid_color_array(grid_button_list, array_size,
+					color_inx, color_set_flag, true, 0);
 		change_grid_color(grid_button_list, -1, -1,
 				  MAKE_WHITE, true, 0);
+		xfree(color_inx);
+		xfree(color_set_flag);
 	} else
 		highlight_grid(GTK_TREE_VIEW(display_widget),
 			       SORTID_NODE_INX, SORTID_COLOR_INX,
 			       grid_button_list);
 
-	if (working_sview_config.grid_speedup) {
-		gtk_widget_set_sensitive(GTK_WIDGET(main_grid_table), 0);
-		gtk_widget_set_sensitive(GTK_WIDGET(main_grid_table), 1);
-	}
-
-
 	if (view == ERROR_VIEW && display_widget) {
 		gtk_widget_destroy(display_widget);
 		display_widget = NULL;
@@ -2631,7 +2810,7 @@ extern void admin_part(GtkTreeModel *model, GtkTreeIter *iter, char *type)
 
 	part_msg->name = xstrdup(partid);
 
-	if (!strcasecmp("Change Part State", type)) {
+	if (!strcasecmp("Change Partition State", type)) {
 		GtkCellRenderer *renderer = NULL;
 		GtkTreeModel *model2 = GTK_TREE_MODEL(
 			create_model_part(SORTID_PART_STATE));
@@ -2667,7 +2846,19 @@ extern void admin_part(GtkTreeModel *model, GtkTreeIter *iter, char *type)
 			 partid);
 		label = gtk_label_new(tmp_char);
 		edit_type = EDIT_PART_STATE;
-	} else if (!strcasecmp("Edit Part", type)) {
+	} else if (!strcasecmp("Remove Partition", type)) {
+		label = gtk_dialog_add_button(GTK_DIALOG(popup),
+					      GTK_STOCK_YES, GTK_RESPONSE_OK);
+		gtk_window_set_default(GTK_WINDOW(popup), label);
+		gtk_dialog_add_button(GTK_DIALOG(popup),
+				      GTK_STOCK_CANCEL, GTK_RESPONSE_CANCEL);
+
+		snprintf(tmp_char, sizeof(tmp_char),
+			 "Are you sure you want to remove partition %s?",
+			 partid);
+		label = gtk_label_new(tmp_char);
+		edit_type = EDIT_REMOVE_PART;
+	} else if (!strcasecmp("Edit Partition", type)) {
 		label = gtk_dialog_add_button(GTK_DIALOG(popup),
 					      GTK_STOCK_OK, GTK_RESPONSE_OK);
 		gtk_window_set_default(GTK_WINDOW(popup), label);
@@ -2714,7 +2905,21 @@ extern void admin_part(GtkTreeModel *model, GtkTreeIter *iter, char *type)
 		int rc;
 		if (global_edit_error)
 			temp = global_edit_error_msg;
-		else if (!global_send_update_msg) {
+		else if (edit_type == EDIT_REMOVE_PART) {
+			delete_part_msg_t part_del_msg;
+			part_del_msg.name = partid;
+			rc = slurm_delete_partition(&part_del_msg);
+			if (rc == SLURM_SUCCESS) {
+				temp = g_strdup_printf(
+					"Partition %s removed successfully",
+					partid);
+			} else {
+				temp = g_strdup_printf(
+					"Problem removing partition %s: %s",
+					partid, slurm_strerror(rc));
+				global_multi_error = TRUE;
+			}
+		} else if (!global_send_update_msg) {
 			temp = g_strdup_printf("No change detected.");
 		} else if ((rc = slurm_update_partition(part_msg))
 			   == SLURM_SUCCESS) {
@@ -2751,7 +2956,7 @@ end_it:
 }
 
 
-extern void cluster_change_part()
+extern void cluster_change_part(void)
 {
 	display_data_t *display_data = display_data_part;
 	while (display_data++) {
@@ -2759,9 +2964,6 @@ extern void cluster_change_part()
 			break;
 		if (cluster_flags & CLUSTER_FLAG_BG) {
 			switch(display_data->id) {
-			case SORTID_NODES_ALLOWED:
-				display_data->name = "BPs Allowed Allocating";
-				break;
 			case SORTID_NODELIST:
 				display_data->name = "BP List";
 				break;
@@ -2770,9 +2972,6 @@ extern void cluster_change_part()
 			}
 		} else {
 			switch(display_data->id) {
-			case SORTID_NODES_ALLOWED:
-				display_data->name = "Nodes Allowed Allocating";
-				break;
 			case SORTID_NODELIST:
 				display_data->name = "NodeList";
 				break;
diff --git a/src/sview/popups.c b/src/sview/popups.c
index 292b5c68d..fa75134a5 100644
--- a/src/sview/popups.c
+++ b/src/sview/popups.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -375,7 +375,6 @@ extern void create_config_popup(GtkAction *action, gpointer user_data)
 		GTK_STOCK_CLOSE,
 		GTK_RESPONSE_OK,
 		NULL);
-	int error_code;
 	GtkTreeStore *treestore =
 		_local_create_treestore_2cols(popup, 600, 400);
 	static slurm_ctl_conf_info_msg_t  *slurm_ctl_conf_ptr = NULL;
@@ -385,7 +384,7 @@ extern void create_config_popup(GtkAction *action, gpointer user_data)
 	g_signal_connect(G_OBJECT(popup), "response",
 			 G_CALLBACK(_delete_popup), NULL);
 
-	error_code = get_new_info_config(&slurm_ctl_conf_ptr);
+	(void) get_new_info_config(&slurm_ctl_conf_ptr);
 	_layout_conf_ctl(treestore, slurm_ctl_conf_ptr);
 
 	gtk_widget_show_all(popup);
@@ -483,6 +482,164 @@ extern void create_daemon_popup(GtkAction *action, gpointer user_data)
 	return;
 }
 
+extern void create_create_popup(GtkAction *action, gpointer user_data)
+{
+	GtkWidget *popup = gtk_dialog_new_with_buttons(
+		"Create",
+		GTK_WINDOW(user_data),
+		GTK_DIALOG_MODAL | GTK_DIALOG_DESTROY_WITH_PARENT,
+		NULL);
+	int i, response = 0;
+	GtkWidget *label = NULL;
+	GtkWidget *entry = NULL;
+	GtkTreeModel *model = NULL;
+	GtkTreeIter iter;
+	const gchar *name = gtk_action_get_name(action);
+	sview_search_info_t sview_search_info;
+	job_desc_msg_t *job_msg = NULL;
+	submit_response_msg_t *slurm_alloc_msg = NULL;
+	update_part_msg_t *part_msg = NULL;
+	resv_desc_msg_t *resv_msg = NULL;
+	char *res_name, *temp;
+
+	sview_search_info.gchar_data = NULL;
+	sview_search_info.int_data = NO_VAL;
+	sview_search_info.int_data2 = NO_VAL;
+
+	label = gtk_dialog_add_button(GTK_DIALOG(popup),
+				      GTK_STOCK_OK, GTK_RESPONSE_OK);
+	gtk_window_set_default(GTK_WINDOW(popup), label);
+	gtk_dialog_add_button(GTK_DIALOG(popup),
+			      GTK_STOCK_CANCEL, GTK_RESPONSE_CANCEL);
+	gtk_window_set_default_size(GTK_WINDOW(popup), 400, 600);
+
+	if (!strcmp(name, "batch_job")) {
+		sview_search_info.search_type = CREATE_BATCH_JOB;
+		entry = create_entry();
+		label = gtk_label_new(
+			"Batch job submission specifications\n\n"
+			"Specify size (task and/or node count) plus the\n"
+			"script. All other fields are optional.\n\n"
+			"More fields will be made available later.");
+		job_msg = xmalloc(sizeof(job_desc_msg_t));
+		slurm_init_job_desc_msg(job_msg);
+		job_msg->group_id = getgid();
+		job_msg->user_id  = getuid();
+		job_msg->work_dir = xmalloc(1024);
+		if (!getcwd(job_msg->work_dir, 1024))
+			goto end_it;
+		entry = create_job_entry(job_msg, model, &iter);
+	} else if (!strcmp(name, "partition")) {
+		sview_search_info.search_type = CREATE_PARTITION;
+		entry = create_entry();
+		label = gtk_label_new(
+			"Partition creation specifications\n\n"
+			"Specify Name. All other fields are optional.");
+		part_msg = xmalloc(sizeof(update_part_msg_t));
+		slurm_init_part_desc_msg(part_msg);
+		entry = create_part_entry(part_msg, model, &iter);
+	} else if (!strcmp(name, "reservation")) {
+		sview_search_info.search_type = CREATE_RESERVATION;
+		label = gtk_label_new(
+			"Reservation creation specifications\n\n"
+			"Specify Time_Start and either Duration or Time_End.\n"
+#ifdef HAVE_BG
+			"Specify either Node_Count or BP_List.\n"
+#else
+			"Specify either Node_Count or Node_List.\n"
+#endif
+			"Specify either Accounts or Users.\n\n"
+			"Supported Flags include: Maintenance, Overlap,\n"
+			"Ignore_Jobs, Daily and Weekly.\n"
+			"All other fields are optional.");
+		resv_msg = xmalloc(sizeof(resv_desc_msg_t));
+		slurm_init_resv_desc_msg(resv_msg);
+		entry = create_resv_entry(resv_msg, model, &iter);
+	} else {
+		sview_search_info.search_type = 0;
+		goto end_it;
+	}
+
+	gtk_box_pack_start(GTK_BOX(GTK_DIALOG(popup)->vbox),
+			   label, FALSE, FALSE, 0);
+	gtk_box_pack_start(GTK_BOX(GTK_DIALOG(popup)->vbox),
+			   entry, TRUE, TRUE, 0);
+
+	gtk_widget_show_all(popup);
+	response = gtk_dialog_run (GTK_DIALOG(popup));
+
+	if (response == GTK_RESPONSE_OK) {
+		if (!sview_search_info.search_type)
+			goto end_it;
+
+		switch(sview_search_info.search_type) {
+		case CREATE_BATCH_JOB:
+			response = slurm_submit_batch_job(job_msg,
+							  &slurm_alloc_msg);
+			if (response == SLURM_SUCCESS) {
+				temp = g_strdup_printf(
+					"Job %u submitted",
+					slurm_alloc_msg->job_id);
+			} else {
+				temp = g_strdup_printf(
+					"Problem submitting job: %s",
+					slurm_strerror(slurm_get_errno()));
+			}
+			display_edit_note(temp);
+			g_free(temp);
+			break;
+		case CREATE_PARTITION:
+			response = slurm_create_partition(part_msg);
+			if (response == SLURM_SUCCESS) {
+				temp = g_strdup_printf("Partition %s created",
+						       part_msg->name);
+			} else {
+				temp = g_strdup_printf(
+					"Problem creating partition: %s",
+					slurm_strerror(slurm_get_errno()));
+			}
+			display_edit_note(temp);
+			g_free(temp);
+			break;
+		case CREATE_RESERVATION:
+			res_name = slurm_create_reservation(resv_msg);
+			if (res_name) {
+				temp = g_strdup_printf(
+					"Reservation %s created",
+					res_name);
+				free(res_name);
+			} else {
+				temp = g_strdup_printf(
+					"Problem creating reservation: %s",
+					slurm_strerror(slurm_get_errno()));
+			}
+			display_edit_note(temp);
+			g_free(temp);
+			break;
+		default:
+			break;
+		}
+	}
+
+end_it:
+	gtk_widget_destroy(popup);
+	if (slurm_alloc_msg)
+		slurm_free_submit_response_response_msg(slurm_alloc_msg);
+	if (job_msg) {
+		for (i = 0; i < job_msg->argc; i++)
+			xfree(job_msg->argv[i]);
+		xfree(job_msg->argv);
+		xfree(job_msg->name);
+		xfree(job_msg->script);
+		xfree(job_msg->work_dir);
+		xfree(job_msg);
+	}
+	xfree(part_msg);
+	if (resv_msg)
+		slurm_free_resv_desc_msg(resv_msg);
+	return;
+}
+
 extern void create_search_popup(GtkAction *action, gpointer user_data)
 {
 	GtkWidget *popup = gtk_dialog_new_with_buttons(
@@ -528,6 +685,7 @@ extern void create_search_popup(GtkAction *action, gpointer user_data)
 			{G_TYPE_NONE, JOB_FAILED, "Failed", TRUE, -1},
 			{G_TYPE_NONE, JOB_TIMEOUT, "Timeout", TRUE, -1},
 			{G_TYPE_NONE, JOB_NODE_FAIL, "Node Failure", TRUE, -1},
+			{G_TYPE_NONE, JOB_PREEMPTED, "Preempted", TRUE, -1},
 			{G_TYPE_NONE, -1, NULL, FALSE, -1}
 		};
 
@@ -590,18 +748,17 @@ extern void create_search_popup(GtkAction *action, gpointer user_data)
 		sview_search_info.search_type = SEARCH_BLOCK_SIZE;
 		entry = create_entry();
 		label = gtk_label_new("Which block size?");
-	} else if ((cluster_flags & CLUSTER_FLAG_BG)
-		   && !strcmp(name, "bg_block_state")) {
+	} else if (!strcmp(name, "bg_block_state")) {
 		display_data_t pulldown_display_data[] = {
-			{G_TYPE_NONE, RM_PARTITION_FREE, "Free", TRUE, -1},
-			{G_TYPE_NONE, RM_PARTITION_CONFIGURING, "Configuring",
-			 TRUE, -1},
-			{G_TYPE_NONE, RM_PARTITION_READY, "Ready", TRUE, -1},
-			{G_TYPE_NONE, RM_PARTITION_BUSY, NULL, TRUE, -1},
-			{G_TYPE_NONE, RM_PARTITION_REBOOTING, NULL, TRUE, -1},
-			{G_TYPE_NONE, RM_PARTITION_DEALLOCATING,
-			 "Deallocating", TRUE, -1},
-			{G_TYPE_NONE, RM_PARTITION_ERROR, "Error", TRUE, -1},
+			{G_TYPE_NONE, BG_BLOCK_NAV, "Nav", TRUE, -1},
+			{G_TYPE_NONE, BG_BLOCK_FREE, "Free", TRUE, -1},
+			{G_TYPE_NONE, BG_BLOCK_BUSY, NULL, TRUE, -1},
+			{G_TYPE_NONE, BG_BLOCK_BOOTING, "Booting", TRUE, -1},
+			{G_TYPE_NONE, BG_BLOCK_REBOOTING, NULL, TRUE, -1},
+			{G_TYPE_NONE, BG_BLOCK_INITED, "Inited", TRUE, -1},
+			{G_TYPE_NONE, BG_BLOCK_ALLOCATED, NULL, TRUE, -1},
+			{G_TYPE_NONE, BG_BLOCK_TERM, "Terminating", TRUE, -1},
+			{G_TYPE_NONE, BG_BLOCK_ERROR_FLAG, "Error", TRUE, -1},
 			{G_TYPE_NONE, -1, NULL, FALSE, -1}
 		};
 		display_data_t *display_data = pulldown_display_data;
@@ -610,16 +767,22 @@ extern void create_search_popup(GtkAction *action, gpointer user_data)
 				break;
 			if (cluster_flags & CLUSTER_FLAG_BGL) {
 				switch(display_data->id) {
-				case RM_PARTITION_BUSY:
+				case BG_BLOCK_BUSY:
 					display_data->name = "Busy";
 					break;
 				}
-			} else {
+			} else if (cluster_flags & CLUSTER_FLAG_BGP){
 				switch(display_data->id) {
-				case RM_PARTITION_REBOOTING:
+				case BG_BLOCK_REBOOTING:
 					display_data->name = "Rebooting";
 					break;
 				}
+			} else {
+				switch(display_data->id) {
+				case BG_BLOCK_ALLOCATED:
+					display_data->name = "Allocated";
+					break;
+				}
 			}
 		}
 		sview_search_info.search_type = SEARCH_BLOCK_STATE;
@@ -887,7 +1050,6 @@ extern void about_popup(GtkAction *action, gpointer user_data)
 		GTK_WINDOW(user_data),
 		GTK_DIALOG_MODAL | GTK_DIALOG_DESTROY_WITH_PARENT,
 		NULL);
-	int response = 0;
 	char *version = NULL;
 
 	version = xstrdup_printf("SLURM Version: %s", SLURM_VERSION_STRING);
@@ -911,7 +1073,57 @@ extern void about_popup(GtkAction *action, gpointer user_data)
 	gtk_table_attach_defaults(GTK_TABLE(table), label, 0, 1, 0, 1);
 
 	gtk_widget_show_all(popup);
-	response = gtk_dialog_run (GTK_DIALOG(popup));
+	(void) gtk_dialog_run (GTK_DIALOG(popup));
+
+	gtk_widget_destroy(popup);
+
+	return;
+}
+
+extern void usage_popup(GtkAction *action, gpointer user_data)
+{
+	GtkWidget *table = gtk_table_new(1, 1, FALSE);
+	GtkWidget *label = NULL;
+
+	GtkWidget *popup = gtk_dialog_new_with_buttons(
+		"Usage",
+		GTK_WINDOW(user_data),
+		GTK_DIALOG_MODAL | GTK_DIALOG_DESTROY_WITH_PARENT,
+		NULL);
+	char *help_msg =
+		"sview can be used to view and modify many of SLURM's\n"
+		"records.\n\n"
+		"Tabs are used to select the data type to work with.\n"
+		"Right click on the tab to select it. Left click on\n"
+		"the tab to control the fields of the table to be\n"
+		"displayed. Those fields can then be re-ordered or used\n"
+		"for sorting the records.\n\n"
+		"Left click on a record to see the compute nodes\n"
+		"associated with it. Right click on a record to modify\n"
+		"it. The colored boxes represent compute nodes associated\n"
+		"with each job, partition, etc. and may also selected\n"
+		"with right and left buttons.\n\n"
+		"Select 'Option' then 'Admin mode' to enable editing\n"
+		"of the records.\n";
+
+	label = gtk_dialog_add_button(GTK_DIALOG(popup),
+				      GTK_STOCK_OK, GTK_RESPONSE_OK);
+
+	gtk_window_set_default(GTK_WINDOW(popup), label);
+
+	gtk_window_set_default_size(GTK_WINDOW(popup), 200, 50);
+
+	label = gtk_label_new(help_msg);
+
+	gtk_container_set_border_width(GTK_CONTAINER(table), 10);
+
+	gtk_box_pack_start(GTK_BOX(GTK_DIALOG(popup)->vbox),
+			   table, FALSE, FALSE, 0);
+
+	gtk_table_attach_defaults(GTK_TABLE(table), label, 0, 1, 0, 1);
+
+	gtk_widget_show_all(popup);
+	(void) gtk_dialog_run (GTK_DIALOG(popup));
 
 	gtk_widget_destroy(popup);
 
diff --git a/src/sview/resv_info.c b/src/sview/resv_info.c
index 3187bacba..39772c11e 100644
--- a/src/sview/resv_info.c
+++ b/src/sview/resv_info.c
@@ -2,13 +2,13 @@
  *  resv_info.c - Functions related to advanced reservation display
  *  mode of sview.
  *****************************************************************************
- *  Copyright (C) 2009 Lawrence Livermore National Security.
+ *  Copyright (C) 2009-2011 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette@llnl.gov>
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -92,7 +92,7 @@ static display_data_t display_data_resv[] = {
 #ifdef HAVE_BG
 	 "BP List",
 #else
-	 "NodeList",
+	 "Node List",
 #endif
 	 FALSE, EDIT_TEXTBOX, refresh_resv, create_model_resv, admin_edit_resv},
 	{G_TYPE_STRING, SORTID_TIME_START, "Time Start", FALSE, EDIT_TEXTBOX,
@@ -122,10 +122,45 @@ static display_data_t display_data_resv[] = {
 	{G_TYPE_NONE, -1, NULL, FALSE, EDIT_NONE}
 };
 
+static display_data_t create_data_resv[] = {
+	{G_TYPE_INT, SORTID_POS, NULL, FALSE, EDIT_NONE,
+	 refresh_resv, create_model_resv, admin_edit_resv},
+	{G_TYPE_STRING, SORTID_NAME,  "Name", FALSE, EDIT_TEXTBOX,
+	 refresh_resv, create_model_resv, admin_edit_resv},
+	{G_TYPE_STRING, SORTID_NODE_CNT,   "Node_Count", FALSE, EDIT_TEXTBOX,
+	 refresh_resv, create_model_resv, admin_edit_resv},
+	{G_TYPE_STRING, SORTID_NODELIST,
+#ifdef HAVE_BG
+	 "BP_List",
+#else
+	 "Node_List",
+#endif
+	 FALSE, EDIT_TEXTBOX,
+	 refresh_resv, create_model_resv, admin_edit_resv},
+	{G_TYPE_STRING, SORTID_TIME_START, "Time_Start",
+	 FALSE, EDIT_TEXTBOX,
+	 refresh_resv, create_model_resv, admin_edit_resv},
+	{G_TYPE_STRING, SORTID_TIME_END,   "Time_End", FALSE, EDIT_TEXTBOX,
+	 refresh_resv, create_model_resv, admin_edit_resv},
+	{G_TYPE_STRING, SORTID_DURATION,   "Duration", FALSE, EDIT_TEXTBOX,
+	 refresh_resv, create_model_resv, admin_edit_resv},
+	{G_TYPE_STRING, SORTID_ACCOUNTS,   "Accounts", FALSE, EDIT_TEXTBOX,
+	 refresh_resv, create_model_resv, admin_edit_resv},
+	{G_TYPE_STRING, SORTID_USERS,      "Users", FALSE, EDIT_TEXTBOX,
+	 refresh_resv, create_model_resv, admin_edit_resv},
+	{G_TYPE_STRING, SORTID_PARTITION,  "Partition", FALSE, EDIT_TEXTBOX,
+	 refresh_resv, create_model_resv, admin_edit_resv},
+	{G_TYPE_STRING, SORTID_FEATURES,   "Features", FALSE, EDIT_TEXTBOX,
+	 refresh_resv, create_model_resv, admin_edit_resv},
+	{G_TYPE_STRING, SORTID_FLAGS, "Flags", FALSE, EDIT_TEXTBOX,
+	 refresh_resv, create_model_resv, admin_edit_resv},
+	{G_TYPE_NONE, -1, NULL, FALSE, EDIT_NONE}
+};
+
 static display_data_t options_data_resv[] = {
 	{G_TYPE_INT, SORTID_POS, NULL, FALSE, EDIT_NONE},
 	{G_TYPE_STRING, INFO_PAGE, "Full Info", TRUE, RESV_PAGE},
-	{G_TYPE_STRING, RESV_PAGE, "Remove", TRUE, ADMIN_PAGE},
+	{G_TYPE_STRING, RESV_PAGE, "Remove Reservation", TRUE, ADMIN_PAGE},
 	{G_TYPE_STRING, RESV_PAGE, "Edit Reservation", TRUE, ADMIN_PAGE},
 	{G_TYPE_STRING, JOB_PAGE, "Jobs", TRUE, RESV_PAGE},
 	{G_TYPE_STRING, PART_PAGE, "Partitions", TRUE, RESV_PAGE},
@@ -201,6 +236,13 @@ static uint32_t _parse_flags(const char *flagstr)
 				outflags |= RESERVE_FLAG_NO_WEEKLY;
 			else
 				outflags |= RESERVE_FLAG_WEEKLY;
+		} else if (strncasecmp(curr, "License_Only", MAX(taglen,1))
+			    == 0) {
+			curr += taglen;
+			if (flip)
+				outflags |= RESERVE_FLAG_NO_LIC_ONLY;
+			else
+				outflags |= RESERVE_FLAG_LIC_ONLY;
 		} else {
 			char *temp = g_strdup_printf("Error parsing flags %s.",
 						     flagstr);
@@ -250,7 +292,7 @@ static const char *_set_resv_msg(resv_desc_msg_t *resv_msg,
 				 const char *new_text,
 				 int column)
 {
-	char *type = "";
+	char *type = "", *temp_str;
 	int temp_int = 0;
 	uint32_t f;
 
@@ -303,7 +345,11 @@ static const char *_set_resv_msg(resv_desc_msg_t *resv_msg,
 		type = "name";
 		break;
 	case SORTID_NODE_CNT:
-		temp_int = strtol(new_text, (char **)NULL, 10);
+		temp_int = strtol(new_text, &temp_str, 10);
+		if ((temp_str[0] == 'k') || (temp_str[0] == 'K'))
+			temp_int *= 1024;
+		if ((temp_str[0] == 'm') || (temp_str[0] == 'M'))
+			temp_int *= (1024 * 1024);
 
 		type = "Node Count";
 		if (temp_int <= 0)
@@ -531,61 +577,47 @@ static void _update_resv_record(sview_resv_info_t *sview_resv_info_ptr,
 				GtkTreeStore *treestore,
 				GtkTreeIter *iter)
 {
-	char *tmp_ptr = NULL;
-	char tmp_char[50];
+	char tmp_duration[40], tmp_end[40], tmp_nodes[40], tmp_start[40];
+	char *tmp_flags;
 	reserve_info_t *resv_ptr = sview_resv_info_ptr->resv_ptr;
 
-	gtk_tree_store_set(treestore, iter, SORTID_COLOR,
-			   sview_colors[sview_resv_info_ptr->color_inx], -1);
-	gtk_tree_store_set(treestore, iter, SORTID_COLOR_INX,
-			   sview_resv_info_ptr->color_inx, -1);
-	gtk_tree_store_set(treestore, iter, SORTID_UPDATED, 1, -1);
-
-	gtk_tree_store_set(treestore, iter,
-			   SORTID_ACCOUNTS, resv_ptr->accounts, -1);
-
 	secs2time_str((uint32_t)difftime(resv_ptr->end_time,
 					 resv_ptr->start_time),
-		      tmp_char, sizeof(tmp_char));
-	gtk_tree_store_set(treestore, iter, SORTID_DURATION, tmp_char, -1);
-
-	slurm_make_time_str((time_t *)&resv_ptr->end_time, tmp_char,
-			    sizeof(tmp_char));
-	gtk_tree_store_set(treestore, iter, SORTID_TIME_END, tmp_char, -1);
+		      tmp_duration, sizeof(tmp_duration));
 
-	gtk_tree_store_set(treestore, iter, SORTID_FEATURES,
-			   resv_ptr->features, -1);
+	slurm_make_time_str((time_t *)&resv_ptr->end_time, tmp_end,
+			    sizeof(tmp_end));
 
-	tmp_ptr = reservation_flags_string(resv_ptr->flags);
-	gtk_tree_store_set(treestore, iter, SORTID_FLAGS,
-			   tmp_ptr, -1);
-	xfree(tmp_ptr);
-
-	gtk_tree_store_set(treestore, iter,
-			   SORTID_LICENSES, resv_ptr->licenses, -1);
-
-	gtk_tree_store_set(treestore, iter, SORTID_NAME, resv_ptr->name, -1);
+	tmp_flags = reservation_flags_string(resv_ptr->flags);
 
 	convert_num_unit((float)resv_ptr->node_cnt,
-			 tmp_char, sizeof(tmp_char), UNIT_NONE);
-	gtk_tree_store_set(treestore, iter,
-			   SORTID_NODE_CNT, tmp_char, -1);
-
-	gtk_tree_store_set(treestore, iter,
-			   SORTID_NODELIST, resv_ptr->node_list, -1);
+			 tmp_nodes, sizeof(tmp_nodes), UNIT_NONE);
 
-	gtk_tree_store_set(treestore, iter,
-			   SORTID_NODE_INX, resv_ptr->node_inx, -1);
+	slurm_make_time_str((time_t *)&resv_ptr->start_time, tmp_start,
+			    sizeof(tmp_start));
 
+	/* Combining these records provides a slight performance improvement */
 	gtk_tree_store_set(treestore, iter,
-			   SORTID_PARTITION, resv_ptr->partition, -1);
-
-	slurm_make_time_str((time_t *)&resv_ptr->start_time, tmp_char,
-			    sizeof(tmp_char));
-	gtk_tree_store_set(treestore, iter, SORTID_TIME_START, tmp_char, -1);
+			   SORTID_ACCOUNTS,   resv_ptr->accounts,
+			   SORTID_COLOR,
+				sview_colors[sview_resv_info_ptr->color_inx],
+			   SORTID_COLOR_INX,  sview_resv_info_ptr->color_inx,
+			   SORTID_DURATION,   tmp_duration,
+			   SORTID_FEATURES,   resv_ptr->features,
+			   SORTID_FLAGS,      tmp_flags,
+			   SORTID_LICENSES,   resv_ptr->licenses,
+			   SORTID_NAME,       resv_ptr->name,
+			   SORTID_NODE_CNT,   tmp_nodes,
+			   SORTID_NODE_INX,   resv_ptr->node_inx,
+			   SORTID_NODELIST,   resv_ptr->node_list,
+			   SORTID_PARTITION,  resv_ptr->partition,
+			   SORTID_TIME_START, tmp_start,
+			   SORTID_TIME_END,   tmp_end,
+			   SORTID_UPDATED,    1,
+			   SORTID_USERS,      resv_ptr->users,
+			   -1);
 
-	gtk_tree_store_set(treestore, iter,
-			   SORTID_USERS, resv_ptr->users, -1);
+	xfree(tmp_flags);
 
 	return;
 }
@@ -726,7 +758,7 @@ update_color:
 	return info_list;
 }
 
-void _display_info_resv(List info_list,	popup_info_t *popup_win)
+static void _display_info_resv(List info_list, popup_info_t *popup_win)
 {
 	specific_info_t *spec_info = popup_win->spec_info;
 	char *name = (char *)spec_info->search_info->gchar_data;
@@ -805,6 +837,50 @@ finished:
 	return;
 }
 
+extern GtkWidget *create_resv_entry(resv_desc_msg_t *resv_msg,
+				    GtkTreeModel *model, GtkTreeIter *iter)
+{
+	GtkScrolledWindow *window = create_scrolled_window();
+	GtkBin *bin = NULL;
+	GtkViewport *view = NULL;
+	GtkTable *table = NULL;
+	int i = 0, row = 0;
+	display_data_t *display_data = create_data_resv;
+
+	gtk_scrolled_window_set_policy(window,
+				       GTK_POLICY_NEVER,
+				       GTK_POLICY_AUTOMATIC);
+	bin = GTK_BIN(&window->container);
+	view = GTK_VIEWPORT(bin->child);
+	bin = GTK_BIN(&view->bin);
+	table = GTK_TABLE(bin->child);
+	gtk_table_resize(table, SORTID_CNT, 2);
+
+	gtk_table_set_homogeneous(table, FALSE);
+
+	for (i = 0; i < SORTID_CNT; i++) {
+		while (display_data++) {
+			if (display_data->id == -1)
+				break;
+			if (!display_data->name)
+				continue;
+			if (display_data->id != i)
+				continue;
+			display_admin_edit(
+				table, resv_msg, &row, model, iter,
+				display_data,
+				G_CALLBACK(_admin_edit_combo_box_resv),
+				G_CALLBACK(_admin_focus_out_resv),
+				_set_active_combo_resv);
+			break;
+		}
+		display_data = create_data_resv;
+	}
+	gtk_table_resize(table, row, 2);
+
+	return GTK_WIDGET(window);
+}
+
 extern void refresh_resv(GtkAction *action, gpointer user_data)
 {
 	popup_info_t *popup_win = (popup_info_t *)user_data;
@@ -878,7 +954,7 @@ extern GtkListStore *create_model_resv(int type)
 		gtk_list_store_append(model, &iter);
 		gtk_list_store_set(model, &iter,
 				   1, SORTID_ACTION,
-				   0, "Remove",
+				   0, "Remove Reservation",
 				   -1);
 		break;
 	default:
@@ -1065,11 +1141,6 @@ display_it:
 			       SORTID_NODE_INX, SORTID_COLOR_INX,
 			       grid_button_list);
 
-	if (working_sview_config.grid_speedup) {
-		gtk_widget_set_sensitive(GTK_WIDGET(main_grid_table), 0);
-		gtk_widget_set_sensitive(GTK_WIDGET(main_grid_table), 1);
-	}
-
 	if (view == ERROR_VIEW && display_widget) {
 		gtk_widget_destroy(display_widget);
 		display_widget = NULL;
@@ -1452,7 +1523,7 @@ static void _admin_resv(GtkTreeModel *model, GtkTreeIter *iter, char *type)
 
 	resv_msg->name = xstrdup(resvid);
 
-	if (!strcasecmp("Remove", type)) {
+	if (!strcasecmp("Remove Reservation", type)) {
 		resv_name_msg.name = resvid;
 
 		label = gtk_dialog_add_button(GTK_DIALOG(popup),
@@ -1545,7 +1616,7 @@ end_it:
 	return;
 }
 
-extern void cluster_change_resv()
+extern void cluster_change_resv(void)
 {
 	display_data_t *display_data = display_data_resv;
 	while (display_data++) {
diff --git a/src/sview/submit_info.c b/src/sview/submit_info.c
index 3fd0fa3fb..c0ef20503 100644
--- a/src/sview/submit_info.c
+++ b/src/sview/submit_info.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/src/sview/sview.c b/src/sview/sview.c
index dbe5d5927..d0f2aac43 100644
--- a/src/sview/sview.c
+++ b/src/sview/sview.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -39,6 +39,7 @@
 
 #include "sview.h"
 
+#define _DEBUG 0
 #define MAX_RETRIES 3		/* g_thread_create retries */
 
 typedef struct {
@@ -86,6 +87,7 @@ switch_record_bitmaps_t *g_switch_nodes_maps = NULL;
 popup_pos_t popup_pos;
 
 block_info_msg_t *g_block_info_ptr = NULL;
+front_end_info_msg_t *g_front_end_info_ptr;
 job_info_msg_t *g_job_info_ptr = NULL;
 node_info_msg_t *g_node_info_ptr = NULL;
 partition_info_msg_t *g_part_info_ptr = NULL;
@@ -142,6 +144,10 @@ display_data_t main_display_data[] = {
 	 get_info_node, specific_info_node,
 	 set_menus_node, NULL},
 #endif
+	{G_TYPE_NONE, FRONT_END_PAGE, "Front End Nodes", FALSE, -1,
+	 refresh_main, create_model_front_end, admin_edit_front_end,
+	 get_info_front_end, specific_info_front_end,
+	 set_menus_front_end, NULL},
 	{G_TYPE_NONE, SUBMIT_PAGE, NULL, FALSE, -1,
 	 refresh_main, NULL, NULL, NULL,
 	 NULL, NULL, NULL},
@@ -167,9 +173,8 @@ void *_page_thr(void *arg)
 	display_data_t *display_data = &main_display_data[num];
 	static int thread_count = 0;
 	bool reset_highlight = true;
-	/* 	DEF_TIMERS; */
-	xfree(page);
 
+	xfree(page);
 	if (!grid_init) {
 		/* we need to signal any threads that are waiting */
 		g_mutex_lock(grid_mutex);
@@ -190,7 +195,10 @@ void *_page_thr(void *arg)
 	thread_count++;
 	g_static_mutex_unlock(&sview_mutex);
 	while (page_running == num) {
-/* 		START_TIMER; */
+#if _DEBUG
+		DEF_TIMERS;
+		START_TIMER;
+#endif
 //		g_static_mutex_lock(&sview_mutex);
 		gdk_threads_enter();
 		sview_init_grid(reset_highlight);
@@ -199,8 +207,10 @@ void *_page_thr(void *arg)
 		//gdk_flush();
 		gdk_threads_leave();
 //		g_static_mutex_unlock(&sview_mutex);
-/* 		END_TIMER; */
-/* 		g_print("got for initeration: %s\n", TIME_STR); */
+#if _DEBUG
+		END_TIMER;
+		g_print("got for iteration: %s\n", TIME_STR);
+#endif
 		sleep(working_sview_config.refresh_delay);
 		g_static_mutex_lock(&sview_mutex);
 		if (thread_count > 1) {
@@ -443,6 +453,7 @@ static void _set_ruled(GtkToggleAction *action)
 
 	/* get rid of each existing table */
 	cluster_change_block();
+	cluster_change_front_end();
 	cluster_change_resv();
 	cluster_change_part();
 	cluster_change_job();
@@ -488,6 +499,147 @@ static void _get_current_debug(GtkRadioAction *action)
 					     debug_level);
 }
 
+static void _get_current_debug_flags(GtkToggleAction *action)
+{
+	static uint32_t debug_flags = 0;
+	static slurm_ctl_conf_info_msg_t  *slurm_ctl_conf_ptr = NULL;
+	int err_code = get_new_info_config(&slurm_ctl_conf_ptr);
+	GtkAction *debug_action = NULL;
+	GtkToggleAction *toggle_action;
+	gboolean orig_state, new_state;
+
+	if (err_code != SLURM_ERROR)
+		debug_flags = slurm_ctl_conf_ptr->debug_flags;
+
+	debug_action = gtk_action_group_get_action(menu_action_group,
+						  "flags_backfill");
+	toggle_action = GTK_TOGGLE_ACTION(debug_action);
+	orig_state = gtk_toggle_action_get_active(toggle_action);
+	new_state = debug_flags & DEBUG_FLAG_BACKFILL;
+	if (orig_state != new_state)
+		gtk_toggle_action_set_active(toggle_action, new_state);
+
+	debug_action = gtk_action_group_get_action(menu_action_group,
+						  "flags_bg_algo");
+	toggle_action = GTK_TOGGLE_ACTION(debug_action);
+	orig_state = gtk_toggle_action_get_active(toggle_action);
+	new_state = debug_flags & DEBUG_FLAG_BG_ALGO;
+	if (orig_state != new_state)
+		gtk_toggle_action_set_active(toggle_action, new_state);
+
+	debug_action = gtk_action_group_get_action(menu_action_group,
+						  "flags_bg_algo_deep");
+	toggle_action = GTK_TOGGLE_ACTION(debug_action);
+	orig_state = gtk_toggle_action_get_active(toggle_action);
+	new_state = debug_flags & DEBUG_FLAG_BG_ALGO_DEEP;
+	if (orig_state != new_state)
+		gtk_toggle_action_set_active(toggle_action, new_state);
+
+	debug_action = gtk_action_group_get_action(menu_action_group,
+						  "flags_bg_pick");
+	toggle_action = GTK_TOGGLE_ACTION(debug_action);
+	orig_state = gtk_toggle_action_get_active(toggle_action);
+	new_state = debug_flags & DEBUG_FLAG_BG_PICK;
+	if (orig_state != new_state)
+		gtk_toggle_action_set_active(toggle_action, new_state);
+
+	debug_action = gtk_action_group_get_action(menu_action_group,
+						  "flags_bg_wires");
+	toggle_action = GTK_TOGGLE_ACTION(debug_action);
+	orig_state = gtk_toggle_action_get_active(toggle_action);
+	new_state = debug_flags & DEBUG_FLAG_BG_WIRES;
+	if (orig_state != new_state)
+		gtk_toggle_action_set_active(toggle_action, new_state);
+
+	debug_action = gtk_action_group_get_action(menu_action_group,
+						  "flags_cpu_bind");
+	toggle_action = GTK_TOGGLE_ACTION(debug_action);
+	orig_state = gtk_toggle_action_get_active(toggle_action);
+	new_state = debug_flags & DEBUG_FLAG_CPU_BIND;
+	if (orig_state != new_state)
+		gtk_toggle_action_set_active(toggle_action, new_state);
+
+	debug_action = gtk_action_group_get_action(menu_action_group,
+						  "flags_front_end");
+	toggle_action = GTK_TOGGLE_ACTION(debug_action);
+	orig_state = gtk_toggle_action_get_active(toggle_action);
+	new_state = debug_flags & DEBUG_FLAG_FRONT_END;
+	if (orig_state != new_state)
+		gtk_toggle_action_set_active(toggle_action, new_state);
+
+	debug_action = gtk_action_group_get_action(menu_action_group,
+						  "flags_gang");
+	toggle_action = GTK_TOGGLE_ACTION(debug_action);
+	orig_state = gtk_toggle_action_get_active(toggle_action);
+	new_state = debug_flags & DEBUG_FLAG_GANG;
+	if (orig_state != new_state)
+		gtk_toggle_action_set_active(toggle_action, new_state);
+
+	debug_action = gtk_action_group_get_action(menu_action_group,
+						  "flags_gres");
+	toggle_action = GTK_TOGGLE_ACTION(debug_action);
+	orig_state = gtk_toggle_action_get_active(toggle_action);
+	new_state = debug_flags & DEBUG_FLAG_GRES;
+	if (orig_state != new_state)
+		gtk_toggle_action_set_active(toggle_action, new_state);
+
+	debug_action = gtk_action_group_get_action(menu_action_group,
+						  "flags_no_conf_hash");
+	toggle_action = GTK_TOGGLE_ACTION(debug_action);
+	orig_state = gtk_toggle_action_get_active(toggle_action);
+	new_state = debug_flags & DEBUG_FLAG_NO_CONF_HASH;
+	if (orig_state != new_state)
+		gtk_toggle_action_set_active(toggle_action, new_state);
+
+	debug_action = gtk_action_group_get_action(menu_action_group,
+						  "flags_prio");
+	toggle_action = GTK_TOGGLE_ACTION(debug_action);
+	orig_state = gtk_toggle_action_get_active(toggle_action);
+	new_state = debug_flags & DEBUG_FLAG_PRIO;
+	if (orig_state != new_state)
+		gtk_toggle_action_set_active(toggle_action, new_state);
+
+	debug_action = gtk_action_group_get_action(menu_action_group,
+						  "flags_reservation");
+	toggle_action = GTK_TOGGLE_ACTION(debug_action);
+	orig_state = gtk_toggle_action_get_active(toggle_action);
+	new_state = debug_flags & DEBUG_FLAG_RESERVATION;
+	if (orig_state != new_state)
+		gtk_toggle_action_set_active(toggle_action, new_state);
+
+	debug_action = gtk_action_group_get_action(menu_action_group,
+						  "flags_select_type");
+	toggle_action = GTK_TOGGLE_ACTION(debug_action);
+	orig_state = gtk_toggle_action_get_active(toggle_action);
+	new_state = debug_flags & DEBUG_FLAG_SELECT_TYPE;
+	if (orig_state != new_state)
+		gtk_toggle_action_set_active(toggle_action, new_state);
+
+	debug_action = gtk_action_group_get_action(menu_action_group,
+						  "flags_steps");
+	toggle_action = GTK_TOGGLE_ACTION(debug_action);
+	orig_state = gtk_toggle_action_get_active(toggle_action);
+	new_state = debug_flags & DEBUG_FLAG_STEPS;
+	if (orig_state != new_state)
+		gtk_toggle_action_set_active(toggle_action, new_state);
+
+	debug_action = gtk_action_group_get_action(menu_action_group,
+						  "flags_triggers");
+	toggle_action = GTK_TOGGLE_ACTION(debug_action);
+	orig_state = gtk_toggle_action_get_active(toggle_action);
+	new_state = debug_flags & DEBUG_FLAG_TRIGGERS;
+	if (orig_state != new_state)
+		gtk_toggle_action_set_active(toggle_action, new_state);
+
+	debug_action = gtk_action_group_get_action(menu_action_group,
+						  "flags_wiki");
+	toggle_action = GTK_TOGGLE_ACTION(debug_action);
+	orig_state = gtk_toggle_action_get_active(toggle_action);
+	new_state = debug_flags & DEBUG_FLAG_WIKI;
+	if (orig_state != new_state)
+		gtk_toggle_action_set_active(toggle_action, new_state);
+}
+
 static void _set_debug(GtkRadioAction *action,
 		       GtkRadioAction *extra,
 		       GtkNotebook *notebook)
@@ -513,6 +665,89 @@ static void _set_debug(GtkRadioAction *action,
 	g_free(temp);
 }
 
+static void _set_flags(GtkToggleAction *action, uint32_t flag)
+{
+	char *temp = NULL;
+	uint32_t debug_flags_plus = 0, debug_flags_minus = 0;
+
+	if (action && gtk_toggle_action_get_active(action))
+		debug_flags_plus  |= flag;
+	else
+		debug_flags_minus |= flag;
+
+	if (!slurm_set_debugflags(debug_flags_plus, debug_flags_minus))
+		temp = g_strdup_printf("Slurmctld DebugFlags reset");
+	else
+		temp = g_strdup_printf("Problem with set DebugFlags request");
+	display_edit_note(temp);
+	g_free(temp);
+}
+
+static void _set_flags_backfill(GtkToggleAction *action)
+{
+	_set_flags(action, DEBUG_FLAG_BACKFILL);
+}
+static void _set_flags_bg_algo(GtkToggleAction *action)
+{
+	_set_flags(action, DEBUG_FLAG_BG_ALGO);
+}
+static void _set_flags_bg_algo_deep(GtkToggleAction *action)
+{
+	_set_flags(action, DEBUG_FLAG_BG_ALGO_DEEP);
+}
+static void _set_flags_bg_pick(GtkToggleAction *action)
+{
+	_set_flags(action, DEBUG_FLAG_BG_PICK);
+}
+static void _set_flags_bg_wires(GtkToggleAction *action)
+{
+	_set_flags(action, DEBUG_FLAG_BG_WIRES);
+}
+static void _set_flags_cpu_bind(GtkToggleAction *action)
+{
+	_set_flags(action, DEBUG_FLAG_CPU_BIND);
+}
+static void _set_flags_front_end(GtkToggleAction *action)
+{
+	_set_flags(action, DEBUG_FLAG_FRONT_END);
+}
+static void _set_flags_gang(GtkToggleAction *action)
+{
+	_set_flags(action, DEBUG_FLAG_GANG);
+}
+static void _set_flags_gres(GtkToggleAction *action)
+{
+	_set_flags(action, DEBUG_FLAG_GRES);
+}
+static void _set_flags_no_conf_hash(GtkToggleAction *action)
+{
+	_set_flags(action, DEBUG_FLAG_NO_CONF_HASH);
+}
+static void _set_flags_prio(GtkToggleAction *action)
+{
+	_set_flags(action, DEBUG_FLAG_PRIO);
+}
+static void _set_flags_reservation(GtkToggleAction *action)
+{
+	_set_flags(action, DEBUG_FLAG_RESERVATION);
+}
+static void _set_flags_select_type(GtkToggleAction *action)
+{
+	_set_flags(action, DEBUG_FLAG_SELECT_TYPE);
+}
+static void _set_flags_steps(GtkToggleAction *action)
+{
+	_set_flags(action, DEBUG_FLAG_STEPS);
+}
+static void _set_flags_triggers(GtkToggleAction *action)
+{
+	_set_flags(action, DEBUG_FLAG_TRIGGERS);
+}
+static void _set_flags_wiki(GtkToggleAction *action)
+{
+	_set_flags(action, DEBUG_FLAG_WIKI);
+}
+
 static void _tab_pos(GtkRadioAction *action,
 		     GtkRadioAction *extra,
 		     GtkNotebook *notebook)
@@ -522,7 +757,7 @@ static void _tab_pos(GtkRadioAction *action,
 	gtk_notebook_set_tab_pos(notebook, working_sview_config.tab_pos);
 }
 
-static void _init_pages()
+static void _init_pages(void)
 {
 	int i;
 	for(i=0; i<PAGE_CNT; i++) {
@@ -532,7 +767,7 @@ static void _init_pages()
 	}
 }
 
-static void _persist_dynamics()
+static void _persist_dynamics(void)
 {
 
 	gint g_x;
@@ -553,8 +788,10 @@ static gboolean _delete(GtkWidget *widget,
 	_persist_dynamics();
 	fini = 1;
 	gtk_main_quit();
-	ba_fini();
 
+	select_g_ba_fini();
+
+#ifdef MEMORY_LEAK_DEBUG
 	if (popup_list)
 		list_destroy(popup_list);
 	if (grid_button_list)
@@ -566,6 +803,7 @@ static gboolean _delete(GtkWidget *widget,
 	if (cluster_list)
 		list_destroy(cluster_list);
 	xfree(orig_cluster_name);
+#endif
 	return FALSE;
 }
 
@@ -578,6 +816,11 @@ static char *_get_ui_description()
 		"<ui>"
 		"  <menubar name='main'>"
 		"    <menu action='actions'>"
+		"      <menu action='create'>"
+		"        <menuitem action='batch_job'/>"
+		"        <menuitem action='partition'/>"
+		"        <menuitem action='reservation'/>"
+		"      </menu>"
 		"      <menu action='search'>"
 		"        <menuitem action='jobid'/>"
 		"        <menuitem action='user_jobs'/>"
@@ -620,6 +863,24 @@ static char *_get_ui_description()
 		"        <menuitem action='debug_debug4'/>"
 		"        <menuitem action='debug_debug5'/>"
 		"      </menu>"
+		"      <menu action='debugflags'>"
+		"        <menuitem action='flags_backfill'/>"
+		"        <menuitem action='flags_bg_algo'/>"
+		"        <menuitem action='flags_bg_algo_deep'/>"
+		"        <menuitem action='flags_bg_pick'/>"
+		"        <menuitem action='flags_bg_wires'/>"
+		"        <menuitem action='flags_cpu_bind'/>"
+		"        <menuitem action='flags_front_end'/>"
+		"        <menuitem action='flags_gang'/>"
+		"        <menuitem action='flags_gres'/>"
+		"        <menuitem action='flags_no_conf_hash'/>"
+		"        <menuitem action='flags_prio'/>"
+		"        <menuitem action='flags_reservation'/>"
+		"        <menuitem action='flags_select_type'/>"
+		"        <menuitem action='flags_steps'/>"
+		"        <menuitem action='flags_triggers'/>"
+		"        <menuitem action='flags_wiki'/>"
+		"      </menu>"
 		"      <separator/>"
 		"      <menuitem action='exit'/>"
 		"    </menu>"
@@ -655,6 +916,7 @@ static char *_get_ui_description()
 		"    </menu>"
 		"    <menu action='help'>"
 		"      <menuitem action='about'/>"
+		"      <menuitem action='usage'/>"
 		/* "      <menuitem action='manual'/>" */
 		"    </menu>"
 		"  </menubar>"
@@ -673,6 +935,12 @@ static GtkWidget *_get_menubar_menu(GtkWidget *window, GtkWidget *notebook)
 		{"actions", NULL, "_Actions", "<alt>a"},
 		{"options", NULL, "_Options", "<alt>o"},
 		{"displays", NULL, "_Query", "<alt>q"},
+		{"batch_job", NULL, "Batch Job", "", "Submit batch job",
+		 G_CALLBACK(create_create_popup)},
+		{"partition", NULL, "Partition", "", "Create partition",
+		 G_CALLBACK(create_create_popup)},
+		{"reservation", NULL, "Reservation", "", "Create reservation",
+		 G_CALLBACK(create_create_popup)},
 		{"search", GTK_STOCK_FIND, "Search", ""},
 		{"jobid", NULL, "Job ID",
 		 "", "Search for jobid",
@@ -692,7 +960,8 @@ static GtkWidget *_get_menubar_menu(GtkWidget *window, GtkWidget *notebook)
 		{"reservation_name", NULL, "Reservation Name",
 		 "", "Search for reservation",
 		 G_CALLBACK(create_search_popup)},
-		{"tab_pos", NULL, "_Tab Pos"},
+		{"tab_pos", NULL, "_Tab Position"},
+		{"create", GTK_STOCK_ADD, "Create"},
 		{"interval", GTK_STOCK_REFRESH, "Set Refresh _Interval",
 		 "<control>i", "Change Refresh Interval",
 		 G_CALLBACK(change_refresh_popup)},
@@ -711,6 +980,8 @@ static GtkWidget *_get_menubar_menu(GtkWidget *window, GtkWidget *notebook)
 		{"help", NULL, "_Help", "<alt>h"},
 		{"about", GTK_STOCK_ABOUT, "Ab_out", "<control>o",
 		 "About", G_CALLBACK(about_popup)},
+		{"usage", GTK_STOCK_HELP, "Usage", "",
+		 "Usage", G_CALLBACK(usage_popup)},
 		//{"manual", GTK_STOCK_HELP, "_Manual", "<control>m"},
 		{"grid_specs", GTK_STOCK_EDIT, "Set Grid _Properties",
 		 "<control>p", "Change Grid Properties",
@@ -757,6 +1028,10 @@ static GtkWidget *_get_menubar_menu(GtkWidget *window, GtkWidget *notebook)
 		{"reconfig", GTK_STOCK_REDO, "SLUR_M Reconfigure",
 		 "<control>m", "Reconfigures System",
 		 G_CALLBACK(_reconfigure)},
+		{"debugflags", GTK_STOCK_DIALOG_WARNING,
+		 "Slurmctld DebugFlags",
+		 "", "Set slurmctld DebugFlags",
+		 G_CALLBACK(_get_current_debug_flags)},
 		{"debuglevel", GTK_STOCK_DIALOG_WARNING,
 		 "Slurmctld Debug Level",
 		 "", "Set slurmctld debug level",
@@ -814,15 +1089,48 @@ static GtkWidget *_get_menubar_menu(GtkWidget *window, GtkWidget *notebook)
 		{"debug_debug5", NULL, "debug5(9)", "", "Debug5 level", 9},
 	};
 
+	GtkToggleActionEntry debug_flags[] = {
+		{"flags_backfill", NULL, "Backfill", NULL,
+		 "Backfill", G_CALLBACK(_set_flags_backfill), FALSE},
+		{"flags_bg_algo", NULL, "BgBlockAlgo", NULL,
+		 "BgBlockAlgo", G_CALLBACK(_set_flags_bg_algo), FALSE},
+		{"flags_bg_algo_deep", NULL, "BgBlockAlgoDeep", NULL,
+		 "BgBlockAlgoDeep", G_CALLBACK(_set_flags_bg_algo_deep),FALSE},
+		{"flags_bg_pick", NULL, "BgBlockPick", NULL,
+		 "BgBlockPick", G_CALLBACK(_set_flags_bg_pick), FALSE},
+		{"flags_bg_wires", NULL, "BgBlockWires", NULL,
+		 "BgBlockWires", G_CALLBACK(_set_flags_bg_wires), FALSE},
+		{"flags_cpu_bind", NULL, "CPU Bind", NULL,
+		 "CPU_Bind", G_CALLBACK(_set_flags_cpu_bind), FALSE},
+		{"flags_front_end", NULL, "FrontEnd", NULL,
+		 "FrontEnd", G_CALLBACK(_set_flags_front_end), FALSE},
+		{"flags_gang", NULL, "Gang", NULL,
+		 "Gang", G_CALLBACK(_set_flags_gang), FALSE},
+		{"flags_gres", NULL, "Gres", NULL,
+		 "Gres", G_CALLBACK(_set_flags_gres), FALSE},
+		{"flags_no_conf_hash", NULL, "NO CONF HASH", NULL,
+		 "NO_CONF_HASH", G_CALLBACK(_set_flags_no_conf_hash), FALSE},
+		{"flags_prio", NULL, "Priority", NULL,
+		 "Priority", G_CALLBACK(_set_flags_prio), FALSE},
+		{"flags_reservation", NULL, "Reservation", NULL,
+		 "Reservation", G_CALLBACK(_set_flags_reservation), FALSE},
+		{"flags_select_type", NULL, "SelectType", NULL,
+		 "SelectType", G_CALLBACK(_set_flags_select_type), FALSE},
+		{"flags_steps", NULL, "Steps", NULL,
+		 "Steps", G_CALLBACK(_set_flags_steps), FALSE},
+		{"flags_triggers", NULL, "Triggers", NULL,
+		 "Triggers", G_CALLBACK(_set_flags_triggers), FALSE},
+		{"flags_wiki", NULL, "Wiki", NULL,
+		 "Wiki", G_CALLBACK(_set_flags_wiki), FALSE},
+	};
+
 	/* Make an accelerator group (shortcut keys) */
 	menu_action_group = gtk_action_group_new ("MenuActions");
 	gtk_action_group_add_actions(menu_action_group, entries,
 				     G_N_ELEMENTS(entries), window);
 
-	//if (cluster_flags & CLUSTER_FLAG_BG)
 	gtk_action_group_add_actions(menu_action_group, bg_entries,
 				     G_N_ELEMENTS(bg_entries), window);
-	//else
 	gtk_action_group_add_actions(menu_action_group, nonbg_entries,
 				     G_N_ELEMENTS(nonbg_entries),
 				     window);
@@ -831,6 +1139,8 @@ static GtkWidget *_get_menubar_menu(GtkWidget *window, GtkWidget *notebook)
 					   G_N_ELEMENTS(radio_entries),
 					   working_sview_config.tab_pos,
 					   G_CALLBACK(_tab_pos), notebook);
+	gtk_action_group_add_toggle_actions(menu_action_group, debug_flags,
+					    G_N_ELEMENTS(debug_flags), NULL);
 	gtk_action_group_add_radio_actions(menu_action_group, debug_entries,
 					   G_N_ELEMENTS(debug_entries),
 					   -1, G_CALLBACK(_set_debug),
@@ -842,7 +1152,8 @@ static GtkWidget *_get_menubar_menu(GtkWidget *window, GtkWidget *notebook)
 	gtk_action_group_add_actions(admin_action_group, admin_entries,
 				     G_N_ELEMENTS(admin_entries),
 				     window);
-	gtk_action_group_set_sensitive(admin_action_group, FALSE);
+	gtk_action_group_set_sensitive(admin_action_group,
+				       working_sview_config.admin_mode);
 
 	g_ui_manager = gtk_ui_manager_new();
 	gtk_ui_manager_insert_action_group(g_ui_manager, menu_action_group, 0);
@@ -988,6 +1299,8 @@ extern void _change_cluster_main(GtkComboBox *combo, gpointer extra)
 	/* free old info under last cluster */
 	slurm_free_block_info_msg(g_block_info_ptr);
 	g_block_info_ptr = NULL;
+	slurm_free_front_end_info_msg(g_front_end_info_ptr);
+	g_front_end_info_ptr = NULL;
 	slurm_free_job_info_msg(g_job_info_ptr);
 	g_job_info_ptr = NULL;
 	slurm_free_node_info_msg(g_node_info_ptr);
@@ -1064,6 +1377,7 @@ extern void _change_cluster_main(GtkComboBox *combo, gpointer extra)
 
 	/* make changes for each object */
 	cluster_change_block();
+	cluster_change_front_end();
 	cluster_change_resv();
 	cluster_change_part();
 	cluster_change_job();
@@ -1075,7 +1389,8 @@ extern void _change_cluster_main(GtkComboBox *combo, gpointer extra)
 		grid_button_list = NULL;
 		got_grid = 1;
 	}
-	ba_fini();
+
+	select_g_ba_fini();
 
 	/* sorry popups can't survive a cluster change */
 	if (popup_list)
@@ -1315,7 +1630,9 @@ int main(int argc, char *argv[])
 	GtkBin *bin = NULL;
 	GtkViewport *view = NULL;
 	int i=0;
+	log_options_t lopts = LOG_OPTS_STDERR_ONLY;
 
+	log_init(argv[0], lopts, SYSLOG_FACILITY_USER, NULL);
 	load_defaults();
 	cluster_flags = slurmdb_setup_cluster_flags();
 	cluster_dims = slurmdb_setup_cluster_dims();
diff --git a/src/sview/sview.h b/src/sview/sview.h
index fcaf05539..976f5a716 100644
--- a/src/sview/sview.h
+++ b/src/sview/sview.h
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -50,15 +50,15 @@
 #  include "src/common/getopt.h"
 #endif
 
-#include <stdlib.h>
-#include <pwd.h>
 #include <ctype.h>
+#include <pwd.h>
+#include <stdlib.h>
 #include <stdio.h>
 #include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
 #include <unistd.h>
 
-#include <slurm/slurmdb.h>
-
 #if defined(HAVE_AIX)
 /* AIX defines a func_data macro which conflicts with func_data
  * variable names in the gtk.h headers */
@@ -68,21 +68,21 @@
 #  include <gtk/gtk.h>
 #endif
 
+#include "slurm/slurmdb.h"
+
 #include "src/common/bitstring.h"
 #include "src/common/hostlist.h"
 #include "src/common/list.h"
 #include "src/common/macros.h"
 #include "src/common/node_select.h"
-#include "src/common/uid.h"
+#include "src/common/parse_time.h"
 #include "src/common/slurmdb_defs.h"
-#include "src/plugins/select/bluegene/block_allocator/block_allocator.h"
-#include "src/plugins/select/bluegene/plugin/bluegene.h"
-//#include "src/plugins/select/bluegene/wrap_rm_api.h"
-
 #include "src/common/slurm_protocol_api.h"
 #include "src/common/slurm_protocol_defs.h"
+#include "src/common/uid.h"
+#include "src/common/xstring.h"
 
-#include "src/plugins/select/bluegene/wrap_rm_api.h"
+#include "src/plugins/select/bluegene/bg_enums.h"
 
 /* getopt_long options, integers but not characters */
 #define OPT_LONG_HELP	0x100
@@ -108,6 +108,7 @@ enum { JOB_PAGE,
        RESV_PAGE,
        BLOCK_PAGE,
        NODE_PAGE,
+       FRONT_END_PAGE,
        SUBMIT_PAGE,
        ADMIN_PAGE,
        INFO_PAGE,
@@ -148,7 +149,11 @@ typedef struct specific_info specific_info_t;
 typedef struct popup_info popup_info_t;
 typedef struct popup_positioner popup_positioner_t;
 
-typedef enum { SEARCH_JOB_ID = 1,
+typedef enum {
+	       CREATE_BATCH_JOB = 1,
+	       CREATE_PARTITION,
+	       CREATE_RESERVATION,
+	       SEARCH_JOB_ID = 10,
 	       SEARCH_JOB_USER,
 	       SEARCH_JOB_STATE,
 	       SEARCH_BLOCK_NAME,
@@ -190,12 +195,13 @@ typedef struct {
 	GtkToggleAction *action_gridtopo;
 	GtkToggleAction *action_ruled;
 	GtkRadioAction *action_tab;
+	uint16_t button_size;
+	uint16_t gap_size;
 	bool admin_mode;
 	uint16_t default_page;
 	uint32_t fi_popup_width;
 	uint32_t fi_popup_height;
 	uint32_t grid_hori;
-	bool grid_speedup;
 	bool grid_topological;
 	uint32_t grid_vert;
 	uint32_t grid_x_width;
@@ -303,6 +309,8 @@ typedef struct {
 	char *boot_time;
 	char *reason;
 	char *slurmd_start_time;
+	bool iter_set;
+	GtkTreeIter iter_ptr;
 } sview_node_info_t;
 
 typedef struct {
@@ -314,7 +322,6 @@ extern sview_config_t default_sview_config;
 extern sview_config_t working_sview_config;
 
 extern int fini;
-extern ba_system_t *ba_system_ptr;
 extern bool toggled;
 extern bool force_refresh;
 extern bool apply_hidden_change;
@@ -346,6 +353,7 @@ extern uint32_t cluster_flags;
 extern int cluster_dims;
 extern List cluster_list;
 extern block_info_msg_t *g_block_info_ptr;
+extern front_end_info_msg_t *g_front_end_info_ptr;
 extern job_info_msg_t *g_job_info_ptr;
 extern node_info_msg_t *g_node_info_ptr;
 extern partition_info_msg_t *g_part_info_ptr;
@@ -362,8 +370,6 @@ extern int set_grid(int start, int end, int count);
 extern int set_grid_bg(int *start, int *end, int count, int set);
 extern void print_grid(int dir);
 
-extern void print_date();
-
 //sview.c
 extern void refresh_main(GtkAction *action, gpointer user_data);
 extern void toggle_tab_visiblity(GtkToggleButton *toggle_button,
@@ -375,12 +381,14 @@ extern void close_tab(GtkWidget *widget, GdkEventButton *event,
 
 //popups.c
 extern void create_config_popup(GtkAction *action, gpointer user_data);
+extern void create_create_popup(GtkAction *action, gpointer user_data);
 extern void create_dbconfig_popup(GtkAction *action, gpointer user_data);
 extern void create_daemon_popup(GtkAction *action, gpointer user_data);
 extern void create_search_popup(GtkAction *action, gpointer user_data);
 extern void change_refresh_popup(GtkAction *action, gpointer user_data);
 extern void change_grid_popup(GtkAction *action, gpointer user_data);
 extern void about_popup(GtkAction *action, gpointer user_data);
+extern void usage_popup(GtkAction *action, gpointer user_data);
 
 //grid.c
 extern void destroy_grid_button(void *arg);
@@ -390,6 +398,10 @@ extern grid_button_t *create_grid_button_from_another(
 extern char *change_grid_color(List button_list, int start, int end,
 			       int color_inx, bool change_unused,
 			       enum node_states state_override);
+extern void change_grid_color_array(List button_list, int array_len,
+				    int *color_inx, bool *color_set_flag,
+				    bool only_change_unused,
+				    enum node_states state_override);
 extern void highlight_grid(GtkTreeView *tree_view,
 			   int node_inx_id, int color_inx_id, List button_list);
 extern void highlight_grid_range(int start, int end, List button_list);
@@ -410,6 +422,8 @@ extern void setup_popup_grid_list(popup_info_t *popup_win);
 extern void post_setup_popup_grid_list(popup_info_t *popup_win);
 
 // part_info.c
+extern GtkWidget *create_part_entry(update_part_msg_t *part_msg,
+				    GtkTreeModel *model, GtkTreeIter *iter);
 extern bool visible_part(char* part_name);
 extern bool check_part_includes_node(int node_dx);
 extern void refresh_part(GtkAction *action, gpointer user_data);
@@ -427,7 +441,7 @@ extern void select_admin_partitions(GtkTreeModel *model, GtkTreeIter *iter,
 				    display_data_t *display_data,
 				    GtkTreeView *treeview);
 extern void admin_part(GtkTreeModel *model, GtkTreeIter *iter, char *type);
-extern void cluster_change_part();
+extern void cluster_change_part(void);
 
 // accnt_info.c
 extern void refresh_accnt(GtkAction *action, gpointer user_data);
@@ -455,9 +469,28 @@ extern void popup_all_block(GtkTreeModel *model, GtkTreeIter *iter, int id);
 extern void select_admin_block(GtkTreeModel *model, GtkTreeIter *iter,
 			       display_data_t *display_data,
 			       GtkTreeView *treeview);
-extern void cluster_change_block();
+extern void cluster_change_block(void);
+
+// front_end_info.c
+extern void admin_edit_front_end(GtkCellRendererText *cell,
+				 const char *path_string,
+				 const char *new_text, gpointer data);
+extern void cluster_change_front_end(void);
+extern GtkListStore *create_model_front_end(int type);
+extern void get_info_front_end(GtkTable *table, display_data_t *display_data);
+extern int  get_new_info_front_end(front_end_info_msg_t **info_ptr, int force);
+extern void popup_all_front_end(GtkTreeModel *model, GtkTreeIter *iter, int id);
+extern void refresh_front_end(GtkAction *action, gpointer user_data);
+extern void select_admin_front_end(GtkTreeModel *model, GtkTreeIter *iter,
+				  display_data_t *display_data,
+				  GtkTreeView *treeview);
+extern void set_menus_front_end(void *arg, void *arg2, GtkTreePath *path,
+				int type);
+extern void specific_info_front_end(popup_info_t *popup_win);
 
 // job_info.c
+extern GtkWidget *create_job_entry(job_desc_msg_t *job_msg,
+				   GtkTreeModel *model, GtkTreeIter *iter);
 extern void refresh_job(GtkAction *action, gpointer user_data);
 extern GtkListStore *create_model_job(int type);
 extern void admin_edit_job(GtkCellRendererText *cell,
@@ -473,7 +506,7 @@ extern void set_menus_job(void *arg, void *arg2, GtkTreePath *path, int type);
 extern void popup_all_job(GtkTreeModel *model, GtkTreeIter *iter, int id);
 extern void admin_job(GtkTreeModel *model, GtkTreeIter *iter, char *type,
 		      GtkTreeView *treeview);
-extern void cluster_change_job();
+extern void cluster_change_job(void);
 
 // node_info.c
 extern void refresh_node(GtkAction *action, gpointer user_data);
@@ -501,9 +534,11 @@ extern void popup_all_node_name(char *name, int id);
 extern void admin_menu_node_name(char *name, GdkEventButton *event);
 extern void admin_node(GtkTreeModel *model, GtkTreeIter *iter, char *type);
 extern void admin_node_name(char *name, char *old_value, char *type);
-extern void cluster_change_node();
+extern void cluster_change_node(void);
 
 // resv_info.c
+extern GtkWidget *create_resv_entry(resv_desc_msg_t *resv_msg,
+				    GtkTreeModel *model, GtkTreeIter *iter);
 extern void refresh_resv(GtkAction *action, gpointer user_data);
 extern GtkListStore *create_model_resv(int type);
 extern void admin_edit_resv(GtkCellRendererText *cell,
@@ -518,7 +553,7 @@ extern void popup_all_resv(GtkTreeModel *model, GtkTreeIter *iter, int id);
 extern void select_admin_resv(GtkTreeModel *model, GtkTreeIter *iter,
 			      display_data_t *display_data,
 			      GtkTreeView *treeview);
-extern void cluster_change_resv();
+extern void cluster_change_resv(void);
 
 
 // submit_info.c
@@ -546,8 +581,8 @@ extern void make_fields_menu(popup_info_t *popup_win, GtkMenu *menu,
 			     display_data_t *display_data, int count);
 extern void make_options_menu(GtkTreeView *tree_view, GtkTreePath *path,
 			      GtkMenu *menu, display_data_t *display_data);
-extern GtkScrolledWindow *create_scrolled_window();
-extern GtkWidget *create_entry();
+extern GtkScrolledWindow *create_scrolled_window(void);
+extern GtkWidget *create_entry(void);
 extern void create_page(GtkNotebook *notebook, display_data_t *display_data);
 extern GtkTreeView *create_treeview(display_data_t *local, List *button_list);
 extern GtkTreeView *create_treeview_2cols_attach_to_table(GtkTable *table);
@@ -588,13 +623,13 @@ extern void destroy_popup_info(void *arg);
 extern void destroy_signal_params(void *arg);
 
 extern gboolean delete_popup(GtkWidget *widget, GtkWidget *event, char *title);
-extern gboolean delete_popups();
+extern gboolean delete_popups(void);
 extern void *popup_thr(popup_info_t *popup_win);
 extern void remove_old(GtkTreeModel *model, int updated);
 extern GtkWidget *create_pulldown_combo(display_data_t *display_data,
 					int count);
 extern char *str_tolower(char *upper_str);
-extern char *get_reason();
+extern char *get_reason(void);
 extern void display_admin_edit(GtkTable *table, void *type_msg, int *row,
 			       GtkTreeModel *model, GtkTreeIter *iter,
 			       display_data_t *display_data,
@@ -625,9 +660,9 @@ extern char *visible_to_str(sview_config_t *sview_config);
 extern gboolean entry_changed(GtkWidget *widget, void *msg);
 
 // defaults.c
-extern int load_defaults();
+extern int load_defaults(void);
 extern int save_defaults(bool final_save);
 extern GtkListStore *create_model_defaults(int type);
-extern int configure_defaults();
+extern int configure_defaults(void);
 
 #endif
diff --git a/testsuite/Makefile.in b/testsuite/Makefile.in
index 4feae32f1..d6f9445d7 100644
--- a/testsuite/Makefile.in
+++ b/testsuite/Makefile.in
@@ -62,6 +62,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -72,6 +73,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -138,7 +140,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -175,6 +180,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -232,6 +238,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -267,6 +274,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/testsuite/expect/Makefile.am b/testsuite/expect/Makefile.am
index 6bbc48ef0..2b40d4a70 100644
--- a/testsuite/expect/Makefile.am
+++ b/testsuite/expect/Makefile.am
@@ -111,6 +111,7 @@ EXTRA_DIST = \
 	test2.12			\
 	test2.13			\
 	test2.14			\
+	test2.15			\
 	test3.1				\
 	test3.2				\
 	test3.3				\
@@ -123,6 +124,7 @@ EXTRA_DIST = \
 	test3.9				\
 	test3.10			\
 	test3.11			\
+	test3.12			\
 	test4.1				\
 	test4.2				\
 	test4.3				\
@@ -200,6 +202,11 @@ EXTRA_DIST = \
 	test8.7.crypto.c		\
 	test8.7.prog.c			\
 	test8.8				\
+	test8.20			\
+	test8.21			\
+	test8.21.bash			\
+	test8.22			\
+	test8.23			\
 	test9.1				\
 	test9.2				\
 	test9.3				\
@@ -351,6 +358,8 @@ EXTRA_DIST = \
 	test24.1.prog.c			\
 	test24.2			\
 	test25.1			\
+	test26.1			\
+	test26.2			\
 	usleep
 
 distclean-local:
diff --git a/testsuite/expect/Makefile.in b/testsuite/expect/Makefile.in
index f7601d865..bb98c8481 100644
--- a/testsuite/expect/Makefile.in
+++ b/testsuite/expect/Makefile.in
@@ -60,6 +60,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -70,6 +71,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -91,7 +93,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -128,6 +133,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -185,6 +191,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -220,6 +227,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
@@ -383,6 +391,7 @@ EXTRA_DIST = \
 	test2.12			\
 	test2.13			\
 	test2.14			\
+	test2.15			\
 	test3.1				\
 	test3.2				\
 	test3.3				\
@@ -395,6 +404,7 @@ EXTRA_DIST = \
 	test3.9				\
 	test3.10			\
 	test3.11			\
+	test3.12			\
 	test4.1				\
 	test4.2				\
 	test4.3				\
@@ -472,6 +482,11 @@ EXTRA_DIST = \
 	test8.7.crypto.c		\
 	test8.7.prog.c			\
 	test8.8				\
+	test8.20			\
+	test8.21			\
+	test8.21.bash			\
+	test8.22			\
+	test8.23			\
 	test9.1				\
 	test9.2				\
 	test9.3				\
@@ -623,6 +638,8 @@ EXTRA_DIST = \
 	test24.1.prog.c			\
 	test24.2			\
 	test25.1			\
+	test26.1			\
+	test26.2			\
 	usleep
 
 all: all-am
diff --git a/testsuite/expect/README b/testsuite/expect/README
index 788606a21..0e20828eb 100644
--- a/testsuite/expect/README
+++ b/testsuite/expect/README
@@ -1,13 +1,14 @@
 ############################################################################
 # Copyright (C) 2002-2007 The Regents of the University of California.
-# Copyright (C) 2008-2010 Lawrence Livermore National Security.
+# Copyright (C) 2008-2011 Lawrence Livermore National Security.
+# Copyright (C) 2010-2011 SchedMD LLC
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
 # Additionals by Joseph Donaghy <donaghy1@llnl.gov>
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the supplied file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -201,7 +202,7 @@ test2.2    Validate scontrol help command.
 test2.3    Validate scontrol ping command.
 test2.4    Validate scontrol exit, quit, and !! commands.
 test2.5    Validate scontrol show commands for configuation, daemons,
-           nodes, and partitions.
+           nodes, frontends, and partitions.
 test2.6    Validate scontrol verbose and quite options.
 test2.7    Validate scontrol pidinfo command.
 test2.8    Validate scontrol show commands for jobs and steps.
@@ -211,6 +212,7 @@ test2.11   Validate scontrol listpids command.
 test2.12   Validate scontrol show topology option.
 test2.13   Validate scontrol update command for job steps.
 test2.14   Validate scontrol update size of running job.
+test2.15   Validate scontrol update size of running job with some running tasks.
 
 
 test3.#    Testing of scontrol options (best run as SlurmUser or root).
@@ -226,6 +228,7 @@ test3.8    Test of batch job requeue.
 test3.9    Test of "scontrol show slurmd"
 test3.10   Test of "scontrol notify <jobid> <message>"
 test3.11   Validate scontrol create, delete, and update of reservations.
+test3.12   Validate scontrol update command for front end nodes.
 UNTESTED   "scontrol abort"    would stop slurm
 UNTESTED   "scontrol shutdown" would stop slurm
 
@@ -325,6 +328,15 @@ test8.5    Test creation of all blocks 1 midplane and smaller.
 test8.6    Stress test Dynamic mode block creation.
 test8.7    Test of Blue Gene scheduling with sched/wik2 plugin.
 test8.8    Test result of marking smaller blocks in an error state.
+**NOTE**   The following tests are specificallly for Bluegene/Q systems
+test8.20   Bluegene/Q only: Test that job step allocations are a valid size
+           and within the job's allocation
+test8.21   Bluegene/Q only: Test that multple job step allocations are
+           properly packed within the job's allocation
+test8.22   Bluegene/Q only: Stress test of running many job step allocations
+           within the job's allocation
+test8.23   Bluegene/Q only: Test that multple jobs allocations are properly
+           packed within a midplane
 
 
 test9.#    System stress testing. Exercises all commands and daemons.
@@ -570,3 +582,8 @@ test24.2   sshare h, n, p, P, v, and V options.
 test25.#   Testing of sprio command and options.
 =================================================
 test25.1   sprio all options
+
+test26.#   Test of Cray specific functionality.
+================================================
+test26.1   Validate scontrol update command for nodes is disabled.
+test26.2   Test of srun/aprun wrapper use of --alps= option
diff --git a/testsuite/expect/globals b/testsuite/expect/globals
index 264ef42e5..219ba5127 100755
--- a/testsuite/expect/globals
+++ b/testsuite/expect/globals
@@ -10,6 +10,12 @@
 # set slurm_dir "/usr/local"
 # set mpicc     "/usr/local/bin/mpicc"
 #
+# If you want to have more than one test going at the same time for multiple
+# installs you can have multiple globals.local files and set the
+# SLURM_LOCAL_GLOBALS_FILE env var, and have that set to the correct
+# globals.local file for your various installs.  The file can be named anything,
+# not just globals.local.
+#
 ############################################################################
 # Copyright (C) 2002-2007 The Regents of the University of California.
 # Copyright (C) 2008-2010 Lawrence Livermore National Security.
@@ -19,7 +25,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the supplied file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -47,8 +53,14 @@ proc cset {name value} {
 	}
 }
 
-if [file exists globals.local] {
-	source globals.local
+cset local_globals_file "./globals.local"
+
+if {[info exists env(SLURM_LOCAL_GLOBALS_FILE)]} {
+	set local_globals_file $env(SLURM_LOCAL_GLOBALS_FILE)
+}
+
+if [file exists $local_globals_file] {
+	source $local_globals_file
 }
 
 #
@@ -100,9 +112,6 @@ cset poe	"/usr/bin/poe"
 cset mpirun	"mpirun"
 cset totalviewcli	"/usr/local/bin/totalviewcli"
 
-# Set if using "--enable_front_end" configuration option
-cset enable_front_end 0
-
 # Set if using "--enable-memory-leak-debug" configuration option
 cset enable_memory_leak_debug 0
 
@@ -128,7 +137,11 @@ cset bin_env	"env"
 cset bin_file	"file"
 cset bin_id	"id"
 cset bin_grep    "grep"
-#cset bin_hostname "hostname" #Don't user $bin_hostname use $bin_printenv SLURMD_NODENAME
+
+# Don't user $bin_hostname unless on a front-end system that
+# doesn't fully use the slurmd, use $bin_printenv SLURMD_NODENAME
+cset bin_hostname "hostname"
+
 cset bin_kill	"kill"
 cset bin_make	"make"
 cset bin_od     "od"
@@ -136,6 +149,7 @@ cset bin_pkill	"pkill"
 cset bin_ps	"ps"
 cset bin_pwd	"pwd"
 cset bin_rm	"rm"
+cset bin_sed	"sed"
 cset bin_sleep  "sleep"
 cset bin_sort   "sort"
 cset bin_touch  "touch"
@@ -177,13 +191,16 @@ cset sleep_error_message "(invalid time interval)|(bad character in argument)"
 set alpha                "\[a-zA-Z\]+"
 set alpha_cap            "\[A-Z\]+"
 set alpha_numeric        "\[a-zA-Z0-9\]+"
+set alpha_numeric_comma  "\[a-zA-Z0-9_,\-\]+"
 set alpha_numeric_under  "\[a-zA-Z0-9_\-\]+"
 set alpha_under          "\[A-Z_\]+"
+set digit                "\[0-9\]"
 set end_of_line          "\[\r\n\]"
 set number               "\[0-9\]+"
+set number_with_suffix   "\[0-9\]+\[KM\]*"
 set float                "\[0-9\]+\\.?\[0-9\]+"
 set whitespace		 "\[ \t\n\r\f\v\]+"
-set alpha_numeric_nodelist "$alpha_numeric_under\\\[?\[$alpha_numeric_under\]?\\\]?"
+set alpha_numeric_nodelist "$alpha_numeric_under\\\[?\[$alpha_numeric_comma\]?\\\]?"
 #
 # Cache SlurmUser to check for SuperUser requests
 #
@@ -597,6 +614,9 @@ proc wait_for_step { step_id } {
 		if {[regexp {Nodes=} $line foo] == 1} {
 			return 0
 		}
+		if {[regexp {BP_List=} $line foo] == 1} {
+			return 0
+		}
 		if { $my_delay > $max_job_state_delay } {
 			send_user "FAILURE: Timeout waiting for job step\n"
 			return 1
@@ -1029,8 +1049,7 @@ proc get_default_acct { user } {
 #
 # Purpose: Determine if the execution host is one in which the
 # slurmd daemon executes on a front-end node rather than the
-# compute hosts (e.g. Blue Gene systems). This is based upon
-# the value of SelectType in the slurm.conf.
+# compute hosts (e.g. Blue Gene systems).
 #
 # Returns 1 if the system uses a front-end, 0 otherwise
 #
@@ -1039,19 +1058,15 @@ proc get_default_acct { user } {
 proc test_front_end { } {
 	global enable_front_end scontrol
 
-	if {$enable_front_end != 0} {
-		return 1
-	}
-
 	log_user 0
 	set front_end 0
-	spawn $scontrol show config
+	spawn $scontrol show frontend
 	expect {
-		"HAVE_XCPU" {
+		"FrontendName=" {
 			set front_end 1
 			exp_continue
 		}
-		"select/bluegene" {
+		"select/cray" {
 			set front_end 1
 			exp_continue
 		}
@@ -1153,6 +1168,67 @@ proc test_bluegene { } {
 	return $bluegene
 }
 
+################################################################
+#
+# Proc: test_cray
+#
+# Purpose: Determine if the system is a cray system
+#
+# Returns 1 if the system is a cray, 0 otherwise
+#
+################################################################
+
+proc test_cray { } {
+	global scontrol bin_bash bin_grep
+
+	log_user 0
+	set cray 0
+	spawn -noecho $bin_bash -c "exec $scontrol show config | $bin_grep SelectType"
+	expect {
+		"select/cray" {
+			set cray 1
+			exp_continue
+		}
+		eof {
+			wait
+		}
+	}
+	log_user 1
+
+	return $cray
+}
+
+################################################################
+#
+# Proc: test_emulated
+#
+# Purpose: Determine if the system is emulated (not running on
+#          actual Cray or Bluegene hardware
+#
+# Returns 1 if the system is emulated otherwise
+#
+################################################################
+
+proc test_emulated { } {
+	global scontrol bin_bash
+
+	log_user 0
+	set emulated 0
+	spawn -noecho $bin_bash -c "exec $scontrol show config"
+	expect {
+		"Emulated * = yes" {
+			set emulated 1
+			exp_continue
+		}
+		eof {
+			wait
+		}
+	}
+	log_user 1
+
+	return $emulated
+}
+
 ################################################################
 #
 # Proc: get_cycle_count
@@ -1165,7 +1241,7 @@ proc test_bluegene { } {
 ################################################################
 
 proc get_cycle_count { } {
-	global enable_memory_leak_debug test_bluegene
+	global enable_memory_leak_debug
 
 	if {$enable_memory_leak_debug != 0} {
 		return 2
@@ -1498,7 +1574,16 @@ proc get_suffix { hostname } {
 		send_user "\nHostname lacks a suffix:$hostname\n"
 		return "-1"
 	}
-	return [string range $hostname $host_inx $host_len]
+
+#	Strip off leading zeros to avoid doing octal arithmetic
+	set suffix [string range $hostname $host_inx $host_len]
+	set suffix_len [string length $suffix]
+	for {set suffix_inx 0} {$suffix_inx < [expr $suffix_len - 1]} {incr suffix_inx} {
+		set suffix_char [string index $suffix $suffix_inx]
+		if {[string compare $suffix_char "0"] != 0} { break }
+	}
+
+	return [string range $suffix $suffix_inx $suffix_len]
 }
 
 ################################################################
@@ -1752,7 +1837,7 @@ proc get_bluegene_psets { } {
 	set psets 0
 	set scon_pid [spawn -noecho $scontrol show config]
 	expect {
-		-re "Numpsets *= ($number)" {
+		-re "IONodesPerMP *= ($number)" {
 			set psets $expect_out(1,string)
 			exp_continue
 		}
@@ -1845,7 +1930,7 @@ proc get_bluegene_procs_per_cnode { } {
 
 ################################################################
 #
-# Proc: get_bluegene_cnodes_per_np
+# Proc: get_bluegene_cnodes_per_mp
 #
 # Purpose: Determine how many cnodes are in a midplane
 #
@@ -1860,7 +1945,7 @@ proc get_bluegene_cnodes_per_mp { } {
 	set node_cnt 1
 	set scon_pid [spawn -noecho $scontrol show config]
 	expect {
-		-re "BasePartitionNodeCnt *= ($number)" {
+		-re "MidPlaneNodeCnt *= ($number)" {
 			set node_cnt $expect_out(1,string)
 			exp_continue
 		}
diff --git a/testsuite/expect/globals_accounting b/testsuite/expect/globals_accounting
index 3808e5de3..1025e95e9 100644
--- a/testsuite/expect/globals_accounting
+++ b/testsuite/expect/globals_accounting
@@ -20,7 +20,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
  # Please also read the supplied file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/pkill b/testsuite/expect/pkill
index d9843aa11..22759da76 100755
--- a/testsuite/expect/pkill
+++ b/testsuite/expect/pkill
@@ -10,7 +10,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
  # Please also read the supplied file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/regression b/testsuite/expect/regression
index 430e4d605..986d1be1d 100755
--- a/testsuite/expect/regression
+++ b/testsuite/expect/regression
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
  # Please also read the supplied file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/regression.py b/testsuite/expect/regression.py
index a40cba62c..82b18d86e 100755
--- a/testsuite/expect/regression.py
+++ b/testsuite/expect/regression.py
@@ -6,7 +6,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
  # Please also read the supplied file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test1.1 b/testsuite/expect/test1.1
index 377bf5ee4..f1048728f 100755
--- a/testsuite/expect/test1.1
+++ b/testsuite/expect/test1.1
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test1.10 b/testsuite/expect/test1.10
index e429ca15b..fab793a2e 100755
--- a/testsuite/expect/test1.10
+++ b/testsuite/expect/test1.10
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -39,6 +39,11 @@ set exit_code   0
 
 print_header $test_id
 
+if {[test_cray]} {
+	send_user "\nWARNING: This test is incompatible with Cray systems\n"
+	exit $exit_code
+}
+
 #
 # Submit a slurm job that will bogus executable
 # Debug debug level is 2, value set with --debug has that offset
diff --git a/testsuite/expect/test1.11 b/testsuite/expect/test1.11
index 89e05d7bc..a97192b00 100755
--- a/testsuite/expect/test1.11
+++ b/testsuite/expect/test1.11
@@ -17,7 +17,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -58,6 +58,11 @@ set job_id         0
 
 set srun_pid [spawn $srun --verbose --job-name=$job_name -t1 $bin_id]
 expect {
+	# needed for cray systems
+	-re "Granted job allocation ($number)" {
+		set job_id $expect_out(1,string)
+		exp_continue
+	}
 	-re "jobid ($number):" {
 		set job_id $expect_out(1,string)
 		exp_continue
diff --git a/testsuite/expect/test1.12 b/testsuite/expect/test1.12
index 014dc9b38..f4d9db3b2 100755
--- a/testsuite/expect/test1.12
+++ b/testsuite/expect/test1.12
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -41,6 +41,12 @@ set ckpt_out             -1
 
 print_header $test_id
 
+# if the srun --checkpoint option worked this test would work.
+if {[test_cray]} {
+	send_user "\nWARNING: This test is incompatible with Cray systems\n"
+	exit $exit_code
+}
+
 #
 # Submit a slurm job that print it's info from scontrol
 #
diff --git a/testsuite/expect/test1.13 b/testsuite/expect/test1.13
index e8546e8e0..b8eb0cbf8 100755
--- a/testsuite/expect/test1.13
+++ b/testsuite/expect/test1.13
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -77,6 +77,11 @@ expect {
 		incr matches
 		exp_continue
 	}
+	# needed for cray systems
+	-re "Granted job allocation ($number)" {
+		incr matches
+		exp_continue
+	}
 	-re "tasks started" {
 		incr matches
 		exp_continue
diff --git a/testsuite/expect/test1.14 b/testsuite/expect/test1.14
index fa8aa9450..151e90b09 100755
--- a/testsuite/expect/test1.14
+++ b/testsuite/expect/test1.14
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -39,10 +39,11 @@ set file_out        "test$test_id.output"
 set job_id           0
 set sleep_secs       10
 
+
 print_header $test_id
 
-if {[test_bluegene]} {
-        send_user "\nWARNING: This test is incompatible with bluegene systems\n"
+if {[test_front_end]} {
+        send_user "\nWARNING: This test is incompatible with front-end systems\n"
         exit $exit_code
 }
 
diff --git a/testsuite/expect/test1.15 b/testsuite/expect/test1.15
index e73204620..54461c98e 100755
--- a/testsuite/expect/test1.15
+++ b/testsuite/expect/test1.15
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -39,6 +39,15 @@ set matches     0
 
 print_header $test_id
 
+if {[test_bluegene]} {
+	send_user "\nWARNING: This test is incompatible with Bluegene systems\n"
+	exit $exit_code
+}
+if {[test_cray]} {
+	send_user "\nWARNING: This test is incompatible with Cray systems\n"
+	exit $exit_code
+}
+
 #
 # Delete left-over input script
 # Build input script file
diff --git a/testsuite/expect/test1.16 b/testsuite/expect/test1.16
index 0069b72d6..d24b671b7 100755
--- a/testsuite/expect/test1.16
+++ b/testsuite/expect/test1.16
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test1.17 b/testsuite/expect/test1.17
index 92176b238..4a6e58286 100755
--- a/testsuite/expect/test1.17
+++ b/testsuite/expect/test1.17
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test1.18 b/testsuite/expect/test1.18
index fd267fcd3..02be33df0 100755
--- a/testsuite/expect/test1.18
+++ b/testsuite/expect/test1.18
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test1.19 b/testsuite/expect/test1.19
index 7fb44c1ba..a78410d1b 100755
--- a/testsuite/expect/test1.19
+++ b/testsuite/expect/test1.19
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -42,10 +42,20 @@ set file_out_n       "test$test_id.n.%n.output"
 set file_out_s       "test$test_id.s.%s.output"
 set file_out_t       "test$test_id.t.%t.output"
 set job_id           0
-set task_cnt         5
 
 print_header $test_id
 
+if {[test_cray]} {
+	send_user "\nWARNING: This test is incompatible with Cray systems\n"
+	exit $exit_code
+}
+if {[test_bluegene]} {
+	# We never launch more than one task on emulated Bluegene
+	set task_cnt 1
+} else {
+	set task_cnt 5
+}
+
 #
 # Spawn a program that generates "task_id" (%t) in stdout file names
 # and confirm they are created
diff --git a/testsuite/expect/test1.2 b/testsuite/expect/test1.2
index 534318031..cce46c6f4 100755
--- a/testsuite/expect/test1.2
+++ b/testsuite/expect/test1.2
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -35,16 +35,29 @@ source ./globals
 
 set test_id     "1.2"
 set exit_code   0
-set task_cnt    10
 set tasks       0
 
 print_header $test_id
 
+# if the srun --overcommit option worked this test would work.
+if {[test_bluegene]} {
+	# We never launch more than one task on emulated Bluegene
+	set task_cnt 1
+} else {
+	set task_cnt 10
+}
+if {[test_cray]} {
+	send_user "\nWARNING: This test is incompatible with Cray systems\n"
+	exit $exit_code
+}
+
 #
 # Submit a slurm job that will execute 'id' on 1 node and over task_cnt tasks
 #
 set timeout $max_job_delay
+set srun_pid [spawn $srun -N1 -n$task_cnt -t1 $bin_id]
 set srun_pid [spawn $srun -N1 -n$task_cnt --overcommit -l -t1 $bin_id]
+
 expect {
 	-re "uid=" {
 		incr tasks
@@ -62,7 +75,7 @@ expect {
 
 if {$task_cnt != $tasks} {
 	send_user "\nFAILURE: Did not get proper number of tasks: "
-	send_user "$task_cnt, $tasks\n"
+	send_user "$task_cnt != $tasks\n"
 	set exit_code 1
 }
 if {$exit_code == 0} {
diff --git a/testsuite/expect/test1.20 b/testsuite/expect/test1.20
index 5ce335a23..37e04136d 100755
--- a/testsuite/expect/test1.20
+++ b/testsuite/expect/test1.20
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test1.21 b/testsuite/expect/test1.21
index e9ff518aa..631aad8bf 100755
--- a/testsuite/expect/test1.21
+++ b/testsuite/expect/test1.21
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -41,6 +41,15 @@ set task_id          3
 
 print_header $test_id
 
+if {[test_cray]} {
+	send_user "\nWARNING: This test is incompatible with Cray systems\n"
+	exit $exit_code
+}
+if {[test_bluegene]} {
+	send_user "\nWARNING: This test is incompatible with Bluegene systems\n"
+	exit $exit_code
+}
+
 #
 # Spawn a shell via srun with stdout forwarding disabled
 #
@@ -81,7 +90,8 @@ if {$matches != 1} {
 	set exit_code 1
 }
 
-if {[test_bluegene]} {
+if {[test_front_end]} {
+        send_user "\nWARNING: Additional portions of this test are incompatible with front-end systems\n"
 	if {$exit_code == 0} {
 		send_user "\nSUCCESS\n"
 	}
diff --git a/testsuite/expect/test1.22 b/testsuite/expect/test1.22
index 061bb4417..10203bfea 100755
--- a/testsuite/expect/test1.22
+++ b/testsuite/expect/test1.22
@@ -15,7 +15,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -39,6 +39,15 @@ set exit_code   0
 
 print_header $test_id
 
+if {[test_bluegene]} {
+	send_user "\nWARNING: This test is incompatible with Bluegene systems\n"
+	exit $exit_code
+}
+if {[test_cray]} {
+	send_user "\nWARNING: This test is incompatible with Cray systems\n"
+	exit $exit_code
+}
+
 #
 # Submit a job with one thread fanout
 #
diff --git a/testsuite/expect/test1.23 b/testsuite/expect/test1.23
index 5cd1e162f..cd3d05b92 100755
--- a/testsuite/expect/test1.23
+++ b/testsuite/expect/test1.23
@@ -15,7 +15,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -48,10 +48,10 @@ if {[test_wiki_sched] == 1} {
 #
 # Submit a job with invalid mincpus requirement
 #
-set err_msg     0
-set host_0      ""
-set timeout $max_job_delay
-set srun_pid [spawn $srun -N1 -l --mincpus=999999 -t1 $bin_printenv SLURMD_NODENAME]
+set err_msg	0
+set this_date	""
+set timeout	$max_job_delay
+set srun_pid [spawn $srun -N1 -l --mincpus=999999 -t1 $bin_date]
 expect {
 	-re "configuration is not available" {
 		send_user "This error is expected, no worries\n"
@@ -59,7 +59,7 @@ expect {
 		exp_continue
 	}
 	-re "0: ($alpha_numeric_under)" {
-		set host_0 $expect_out(1,string)
+		set this_date $expect_out(1,string)
 		exp_continue
 	}
 	timeout {
@@ -72,7 +72,7 @@ expect {
 	}
 }
 
-if {[string compare $host_0 ""] != 0} {
+if {[string compare $this_date ""] != 0} {
 	send_user "\nFAILURE: job ran with invalid mincpus option\n"
 	set exit_code   1
 }
@@ -84,12 +84,17 @@ if {$err_msg != 1} {
 #
 # Submit a job with valid mincpus requirement
 #
-set host_0      ""
-set timeout $max_job_delay
-set srun_pid [spawn $srun -N1 -l --mincpus=1 -t1 $bin_printenv SLURMD_NODENAME]
+set this_date	""
+set timeout	$max_job_delay
+set srun_pid [spawn $srun -N1 -l --mincpus=1 -t1 $bin_date]
 expect {
 	-re "0: ($alpha_numeric_under)" {
-		set host_0 $expect_out(1,string)
+		set this_date $expect_out(1,string)
+		exp_continue
+	}
+	#needed for cray systems that don't have a prefix
+	-re "($alpha_numeric_under)" {
+		set this_date $expect_out(1,string)
 		exp_continue
 	}
 	timeout {
@@ -102,7 +107,7 @@ expect {
 	}
 }
 
-if {[string compare $host_0 ""] == 0} {
+if {[string compare $this_date ""] == 0} {
 	send_user "\nFAILURE: job failed with valid mincpus option\n"
 	set exit_code   1
 }
@@ -110,10 +115,10 @@ if {[string compare $host_0 ""] == 0} {
 #
 # Submit a job with invalid mem requirement
 #
-set err_msg     0
-set host_0      ""
-set timeout $max_job_delay
-set srun_pid [spawn $srun -N1 -l --mem=999999 -t1 $bin_printenv SLURMD_NODENAME]
+set err_msg	0
+set this_date	""
+set timeout	$max_job_delay
+set srun_pid [spawn $srun -N1 -l --mem=999999 -t1 $bin_date]
 expect {
 	-re "not available" {
 		send_user "This error is expected, no worries\n"
@@ -121,7 +126,7 @@ expect {
 		exp_continue
 	}
 	-re "0: ($alpha_numeric_under)" {
-		set host_0 $expect_out(1,string)
+		set this_date $expect_out(1,string)
 		exp_continue
 	}
 	timeout {
@@ -134,7 +139,7 @@ expect {
 	}
 }
 
-if {[string compare $host_0 ""] != 0} {
+if {[string compare $this_date ""] != 0} {
 	send_user "\nFAILURE: job ran with invalid mem option\n"
 	set exit_code   1
 }
@@ -150,10 +155,10 @@ if {$err_msg != 1} {
 # so we just use a small value that should be valid if the node
 # is properly configured.
 #
-set err_msg     0
-set host_0      ""
-set timeout $max_job_delay
-set srun_pid [spawn $srun -N1 -l --mem=10 -t1 $bin_printenv SLURMD_NODENAME]
+set err_msg	0
+set this_date	""
+set timeout	$max_job_delay
+set srun_pid [spawn $srun -N1 -l --mem=10 -t1 $bin_date]
 expect {
 	-re "not available" {
 		send_user "This error is not unexpected, no worries\n"
@@ -161,7 +166,7 @@ expect {
 		exp_continue
 	}
 	-re "0: ($alpha_numeric_under)" {
-		set host_0 $expect_out(1,string)
+		set this_date $expect_out(1,string)
 		exp_continue
 	}
 	timeout {
@@ -174,7 +179,7 @@ expect {
 	}
 }
 
-if {$err_msg == 0 && [string compare $host_0 ""] == 0} {
+if {$err_msg == 0 && [string compare $this_date ""] == 0} {
 	send_user "\nFAILURE: job failed with valid mem option\n"
 	set exit_code   1
 }
@@ -182,10 +187,10 @@ if {$err_msg == 0 && [string compare $host_0 ""] == 0} {
 #
 # Submit a job with invalid tmp requirement
 #
-set err_msg     0
-set host_0      ""
-set timeout $max_job_delay
-set srun_pid [spawn $srun -N1 -l --tmp=999999999 -t1 $bin_printenv SLURMD_NODENAME]
+set err_msg	0
+set this_date	""
+set timeout	$max_job_delay
+set srun_pid [spawn $srun -N1 -l --tmp=999999999 -t1 $bin_date]
 expect {
 	-re "configuration is not available" {
 		send_user "This error is expected, no worries\n"
@@ -193,7 +198,7 @@ expect {
 		exp_continue
 	}
 	-re "0: ($alpha_numeric_under)" {
-		set host_0 $expect_out(1,string)
+		set this_date $expect_out(1,string)
 		exp_continue
 	}
 	timeout {
@@ -206,7 +211,7 @@ expect {
 	}
 }
 
-if {[string compare $host_0 ""] != 0} {
+if {[string compare $this_date ""] != 0} {
 	send_user "\nFAILURE: job ran with invalid tmp option\n"
 	set exit_code   1
 }
@@ -218,12 +223,17 @@ if {$err_msg != 1} {
 #
 # Submit a job with valid tmp requirement
 #
-set host_0      ""
-set timeout $max_job_delay
-set srun_pid [spawn $srun -N1 -l --tmp=0 -t1 $bin_printenv SLURMD_NODENAME]
+set this_date	""
+set timeout	$max_job_delay
+set srun_pid [spawn $srun -N1 -l --tmp=0 -t1 $bin_date]
 expect {
 	-re "0: ($alpha_numeric_under)" {
-		set host_0 $expect_out(1,string)
+		set this_date $expect_out(1,string)
+		exp_continue
+	}
+	#needed for cray systems that don't have a prefix
+	-re "($alpha_numeric_under)" {
+		set this_date $expect_out(1,string)
 		exp_continue
 	}
 	timeout {
@@ -236,7 +246,7 @@ expect {
 	}
 }
 
-if {[string compare $host_0 ""] == 0} {
+if {[string compare $this_date ""] == 0} {
 	send_user "\nFAILURE: job failed with valid tmp option\n"
 	set exit_code   1
 }
diff --git a/testsuite/expect/test1.24 b/testsuite/expect/test1.24
index 6b33af2ba..34373ed0c 100755
--- a/testsuite/expect/test1.24
+++ b/testsuite/expect/test1.24
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test1.25 b/testsuite/expect/test1.25
index 0bb46c8a7..b6ef79ea7 100755
--- a/testsuite/expect/test1.25
+++ b/testsuite/expect/test1.25
@@ -15,7 +15,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -47,12 +47,10 @@ set host_0      ""
 set timeout [expr $max_job_delay + $sleep_time]
 if { [test_bluegene] } {
 	set node_cnt 32-32k
+} elseif { [test_xcpu] } {
+	set node_cnt 1-1
 } else {
-	if { [test_xcpu] } {
-		set node_cnt 1-1
-	} else {
-		set node_cnt 1-64
-	}
+	set node_cnt 1-64
 }
 
 set srun_pid [spawn $srun -N$node_cnt --no-kill -t1 $bin_sleep $sleep_time]
diff --git a/testsuite/expect/test1.26 b/testsuite/expect/test1.26
index e2bb45228..12b0e162a 100755
--- a/testsuite/expect/test1.26
+++ b/testsuite/expect/test1.26
@@ -16,7 +16,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -46,9 +46,9 @@ if {[is_super_user] == 0} {
 	exit 0
 }
 
-if {[test_bluegene] == 1} {
-	send_user "\nWARNING: This test is incompatible with BlueGene systems\n"
-	exit 0
+if {[test_front_end]} {
+        send_user "\nWARNING: This test is incompatible with front-end systems\n"
+        exit $exit_code
 }
 
 #
@@ -93,8 +93,11 @@ if {[string compare $host_0 ""] == 0} {
 	exit 1
 }
 if {[string compare $nodelist_name ""] == 0} {
-    send_user "\nFAILURE: Did not get nodelist_name of task 0\n"
-    exit 1
+	send_user "\nFAILURE: Did not get nodelist_name of task 0\n"
+	exit 1
+}
+if {[string compare $host_0 $nodelist_name] != 0} {
+	send_user "\nWARNING: hostname inconsistency\n"
 }
 set include_node $host_0
 
@@ -160,7 +163,7 @@ for {set inx 0} {$inx < $iterations} {incr inx} {
 	exec $bin_usleep 250000
 
 	set failures 0
-	set srun_pid [spawn $srun -N1 --nodelist=$include_node -t1 -l $bin_printenv SLURMD_NODENAME]
+	set srun_pid [spawn $srun -N1 --nodelist=$nodelist_name -t1 -l $bin_printenv SLURMD_NODENAME]
 	set   alloc $spawn_id
 
 	set srun_pid1 [spawn $srun -N1 --nodelist=$include_node -Z $bin_usleep 500000]
@@ -219,12 +222,8 @@ for {set inx 0} {$inx < $iterations} {incr inx} {
 	expect {
 		-i $alloc
 		-re "Invalid node name specified" {
-			if {$front_end == 0} {
-				send_user "\nFAILURE: some error happened\n"
-				set failures 1
-			} else {
-				send_user "\nExpected error on front-end systems\n"
-			}
+			send_user "\nFAILURE: some error happened\n"
+			set failures 1
 			exp_continue
 		}
 		-re "error:.*configuring interconnect" {
diff --git a/testsuite/expect/test1.27 b/testsuite/expect/test1.27
index e7003cecd..6479df7ab 100755
--- a/testsuite/expect/test1.27
+++ b/testsuite/expect/test1.27
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -66,7 +66,7 @@ array set good_vars {
 # Spawn a job via srun to print environment variables
 #
 set timeout $max_job_delay
-set srun_pid [spawn $srun -N1 -n1 -t1 $bin_env]
+set srun_pid [spawn $srun -N1 -n1 --cpus-per-task=1 -t1 $bin_env]
 expect {
 	-re "(SLURM_$alpha_under)=($alpha_numeric_under)" {
 		set found_vars($expect_out(1,string)) "$expect_out(2,string)"
@@ -94,10 +94,15 @@ foreach {slurm_var check_flag} [array get good_vars] {
 	    incr good
 	}
     } else {
-	send_user "FAILURE: Variable $slurm_var not found\n"
+	send_user "\nFAILURE: Variable $slurm_var not found\n"
     }
 }
 
+if {[test_cray]} {
+	send_user "\nWARNING: For Cray system, reducing environment variables to verify from $total to \n"
+	set total 5
+}
+
 if {$good < $total} {
 	send_user "\nFAILURE: Only $good of $total SLURM environment variables set\n"
 	set exit_code 1
diff --git a/testsuite/expect/test1.28 b/testsuite/expect/test1.28
index ea99c4b91..0565c4886 100755
--- a/testsuite/expect/test1.28
+++ b/testsuite/expect/test1.28
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test1.29 b/testsuite/expect/test1.29
index 383106c85..35075a716 100755
--- a/testsuite/expect/test1.29
+++ b/testsuite/expect/test1.29
@@ -16,7 +16,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test1.29.prog.c b/testsuite/expect/test1.29.prog.c
index 228b1143c..0c0631657 100644
--- a/testsuite/expect/test1.29.prog.c
+++ b/testsuite/expect/test1.29.prog.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test1.3 b/testsuite/expect/test1.3
index 841a051a9..4fb390a6a 100755
--- a/testsuite/expect/test1.3
+++ b/testsuite/expect/test1.3
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test1.30 b/testsuite/expect/test1.30
index 6602437d3..2ec5f590b 100755
--- a/testsuite/expect/test1.30
+++ b/testsuite/expect/test1.30
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test1.31 b/testsuite/expect/test1.31
index 487ea79b2..68f229007 100755
--- a/testsuite/expect/test1.31
+++ b/testsuite/expect/test1.31
@@ -15,7 +15,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -43,7 +43,11 @@ set env_name_nnodes  "SLURM_NNODES"
 if { [test_bluegene] } {
 	set min_nodes        1
 	set max_nodes        1024
-	set env_valu_nprocs  5
+	if { [test_emulated] } {
+		set env_valu_nprocs  1
+	} else {
+		set env_valu_nprocs  32
+	}
 } else {
 	set min_nodes        1
 	set max_nodes        2
@@ -64,6 +68,12 @@ set env($env_name_debug) $env_valu_debug
 
 print_header $test_id
 
+# if the srun -o option worked this test would work.
+if {[test_cray]} {
+	send_user "\nWARNING: This test is incompatible with Cray systems\n"
+	exit $exit_code
+}
+
 #
 # Set target environment variables
 #
@@ -136,19 +146,22 @@ expect {
 
 if {$task_cnt != $env_valu_nprocs} {
 	send_user "\nFAILURE: did not process"
-	send_user " $env_name_nprocs environment variable\n"
+	send_user " $env_name_nprocs environment variable"
+	send_user " ($task_cnt != $env_valu_nprocs)\n"
 	set exit_code 1
 }
 # Note zero origin conversion
 incr max_node_val
 if {$max_node_val < $min_nodes} {
 	send_user "\nFAILURE: did not process"
-	send_user " $env_name_nnodes environment variable\n"
+	send_user " $env_name_nnodes environment variable"
+	send_user " ($max_node_val < $min_nodes)\n"
 	set exit_code 1
 }
 if {$max_node_val > $max_nodes} {
 	send_user "\nFAILURE: did not process"
-	send_user " $env_name_nnodes environment variable\n"
+	send_user " $env_name_nnodes environment variable"
+	send_user " ($max_node_val > $max_nodes)\n"
 	set exit_code 1
 }
 
diff --git a/testsuite/expect/test1.32 b/testsuite/expect/test1.32
index c845bf91c..36a6c036a 100755
--- a/testsuite/expect/test1.32
+++ b/testsuite/expect/test1.32
@@ -16,7 +16,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -45,12 +45,15 @@ set usr2cnt     0
 
 print_header $test_id
 
+if { [test_cray] } {
+	send_user "\nWARNING: This test is incompatible with Cray systems\n"
+	exit $exit_code
+}
 if { [test_xcpu] } {
 	send_user "\nWARNING: This test is incompatible with XCPU systems\n"
 	exit $exit_code
 }
 
-
 #
 # Delete left-over program and rebuild it
 #
diff --git a/testsuite/expect/test1.32.prog.c b/testsuite/expect/test1.32.prog.c
index 6cb2efe0b..762051980 100644
--- a/testsuite/expect/test1.32.prog.c
+++ b/testsuite/expect/test1.32.prog.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test1.33 b/testsuite/expect/test1.33
index 2d60e7e52..4d238044a 100755
--- a/testsuite/expect/test1.33
+++ b/testsuite/expect/test1.33
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -36,8 +36,18 @@ set test_id        "1.33"
 set exit_code      0
 set exit_script    "./test$test_id.exit.bash"
 set test_script    "./test$test_id.bash"
+
 print_header $test_id
 
+if {[test_bluegene]} {
+	send_user "\nWARNING: This test is incompatible with Bluegene systems\n"
+	exit $exit_code
+}
+if {[test_cray]} {
+	send_user "\nWARNING: This test is incompatible with Cray systems\n"
+	exit $exit_code
+}
+
 #
 # Delete left-over scripts and build new ones
 #
diff --git a/testsuite/expect/test1.34 b/testsuite/expect/test1.34
index bd45d6446..d3b497d92 100755
--- a/testsuite/expect/test1.34
+++ b/testsuite/expect/test1.34
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test1.34.prog.c b/testsuite/expect/test1.34.prog.c
index 981b6cc36..7ccf2770b 100644
--- a/testsuite/expect/test1.34.prog.c
+++ b/testsuite/expect/test1.34.prog.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test1.35 b/testsuite/expect/test1.35
index dfb2935c7..d9a8253fa 100755
--- a/testsuite/expect/test1.35
+++ b/testsuite/expect/test1.35
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -43,16 +43,45 @@ set steps_started        30
 
 print_header $test_id
 
+if {[test_bluegene]} {
+	send_user "\nWARNING: This test is incompatible with Bluegene systems\n"
+	exit $exit_code
+}
+if {[test_cray]} {
+	send_user "\nWARNING: This test is incompatible with Cray systems\n"
+	exit $exit_code
+}
+
+log_user 0
+set job_mem_opt  "--comment=no_mem"
+set step_mem_opt "--comment=no_mem"
+spawn -noecho $bin_bash -c "exec $scontrol show config | $bin_grep SelectTypeParameters"
+expect {
+	-re "MEMORY" {
+		set job_mem_opt  "--mem-per-cpu=32"
+		set step_mem_opt "--mem-per-cpu=1"
+		exp_continue
+	}
+	eof {
+		wait
+	}
+}
+log_user 1
+
 #
 # Delete left-over input script plus stdout/err files
 # Build input script file
 # Sleep between initiation of job steps to avoid overwhelming slurmd
 #
+# NOTE: Explicity set a small memory limit. Without explicitly setting the step
+#   memory limit, it will use the system default (same as the job) which may
+#   prevent the level of parallelism desired.
+#
 exec $bin_rm -f $file_in $file_out $file_err
 make_bash_script $file_in "
   for ((i = 0; i < $steps_started; i++)); do
     j=`expr $steps_started + 15 - \$i`
-    $srun $bin_sleep \$j &
+    $srun -N1 -n1 $step_mem_opt $bin_sleep \$j &
     $bin_sleep 1
   done
   $bin_sleep 2
@@ -64,19 +93,18 @@ make_bash_script $file_in "
 # Spawn a srun batch job that uses stdout/err and confirm their contents
 #
 set timeout $max_job_delay
-
-if { [test_bluegene] } {
-	set node_cnt 32-2048
+if { [test_xcpu] } {
+	set node_cnt 1-1
 } else {
-	if { [test_xcpu] } {
-		set node_cnt 1-1
-	} else {
-		set node_cnt 1-4
-	}
+	set node_cnt 1-4
 }
 
-set srun_pid [spawn $sbatch -N$node_cnt --output=$file_out --error=$file_err -t1 $file_in]
+set srun_pid [spawn $sbatch -N$node_cnt --output=$file_out --error=$file_err -t1 $job_mem_opt $file_in]
 expect {
+	-re "Requested node configuration is not available" {
+		send_user "\nFAILURE: Memory is allocated, but your nodes don't have even 32MB configured\n"
+		set exit_code 1
+	}
 	-re "Submitted batch job ($number)" {
 		set job_id $expect_out(1,string)
 		exp_continue
diff --git a/testsuite/expect/test1.36 b/testsuite/expect/test1.36
index 97374aca6..216a65e4b 100755
--- a/testsuite/expect/test1.36
+++ b/testsuite/expect/test1.36
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -42,21 +42,27 @@ if {[test_aix]} {
 } else {
     set task_cnt 4
 }
+
 print_header $test_id
 
+if {[test_bluegene]} {
+	send_user "\nWARNING: This test is incompatible with Bluegene systems\n"
+	exit $exit_code
+}
+if {[test_cray]} {
+	send_user "\nWARNING: This test is incompatible with Cray systems\n"
+	exit $exit_code
+}
+
 #
 # Spawn srun with $task_cnt tasks each of which runs a $mult way /bin/id
 #
 set timeout $max_job_delay
 
-if { [test_bluegene] } {
-	set node_cnt 1-2048
+if { [test_xcpu] } {
+	set node_cnt 1-1
 } else {
-	if { [test_xcpu] } {
-		set node_cnt 1-1
-	} else {
-		set node_cnt 1-4
-	}
+	set node_cnt 1-4
 }
 
 set srun_pid [spawn $srun -N$node_cnt -n$task_cnt -O -t1 $srun -l -n$mult -O $bin_id]
diff --git a/testsuite/expect/test1.37 b/testsuite/expect/test1.37
index e54af8b34..2d7c004d7 100755
--- a/testsuite/expect/test1.37
+++ b/testsuite/expect/test1.37
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -37,9 +37,9 @@ set exit_code    0
 
 print_header $test_id
 
-if { [test_bluegene] } {
-	send_user "\nWARNING: This test is incompatible with bluegene systems\n"
-	exit $exit_code
+if {[test_front_end]} {
+        send_user "\nWARNING: This test is incompatible with front-end systems\n"
+        exit $exit_code
 }
 
 #
diff --git a/testsuite/expect/test1.38 b/testsuite/expect/test1.38
index 427a9c5be..4eb743615 100755
--- a/testsuite/expect/test1.38
+++ b/testsuite/expect/test1.38
@@ -15,7 +15,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -45,6 +45,12 @@ if { [test_xcpu] } {
 	exit 0
 }
 
+# This test just uses options not applicable to cray's aprun
+if {[test_cray]} {
+	send_user "\nWARNING: This test is incompatible with Cray systems\n"
+	exit $exit_code
+}
+
 #
 # Build input script file
 #
diff --git a/testsuite/expect/test1.4 b/testsuite/expect/test1.4
index 10a12c7b2..2bf1d4e7a 100755
--- a/testsuite/expect/test1.4
+++ b/testsuite/expect/test1.4
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -43,7 +43,7 @@ print_header $test_id
 #
 set srun_pid [spawn $srun --usage]
 expect {
-	-re "Usage: .* executable .*" {
+	-re "srun .* executable .*" {
 		incr matches
 		exp_continue
 	}
diff --git a/testsuite/expect/test1.40 b/testsuite/expect/test1.40
index 4353ed23f..a58938be5 100755
--- a/testsuite/expect/test1.40
+++ b/testsuite/expect/test1.40
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -81,17 +81,45 @@ if {$job_id1 == 0} {
 	exit 1
 }
 
+#
+# Confirm account info within first job based SLURM_ACCOUNT
+#
+set match_acct 0
+spawn $scontrol show job $job_id1
+expect {
+	-re "Account=my_acct" {
+		set match_acct 1
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: scontrol not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+if {$match_acct == 0} {
+	send_user "\nFAILURE: Account information not processed\n"
+	set exit_code 1
+}
 #
 # Spawn an srun job that should get account from SLURM_ACCOUNT env var
 #
+make_bash_script $file_in "$bin_env | $bin_grep SLURM_ACCOUNT"
 set match_acct  0
-set srun_pid [spawn $srun -v $scontrol show job $job_id1]
+set srun_pid [spawn $srun -v $file_in]
 expect {
+	# needed for cray systems
+	-re "Granted job allocation ($number)" {
+		set job_id2 $expect_out(1,string)
+		exp_continue
+	}
 	-re "launching ($number).0" {
 		set job_id2 $expect_out(1,string)
 		exp_continue
 	}
-	-re "Account=my_acct" {
+	-re "SLURM_ACCOUNT=QA_ACCT" {
 		set match_acct 1
 		exp_continue
 	}
diff --git a/testsuite/expect/test1.41 b/testsuite/expect/test1.41
index 314195904..e8d6862ee 100755
--- a/testsuite/expect/test1.41
+++ b/testsuite/expect/test1.41
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -40,6 +40,11 @@ set timed_out   0
 
 print_header $test_id
 
+if {[test_cray]} {
+	send_user "\nWARNING: This test is incompatible with Cray systems\n"
+	exit $exit_code
+}
+
 #
 # Submit a slurm job that will execute 'id' on 1 node and over task_cnt tasks
 #
diff --git a/testsuite/expect/test1.42 b/testsuite/expect/test1.42
index 3286c3816..e8546c679 100755
--- a/testsuite/expect/test1.42
+++ b/testsuite/expect/test1.42
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -33,23 +33,30 @@
 source ./globals
 
 set test_id     "1.42"
-set file_in     "test$test_id.input"
+set file_in1    "test$test_id.input1"
 set exit_code   0
 set job_id1     0
 set job_id2     0
 
 print_header $test_id
 
+# if scontrol could be ran from the compute nodes this would work as well
+if {[test_cray]} {
+	send_user "\nWARNING: This test is incompatible with Cray systems\n"
+	exit $exit_code
+}
+
 #
-# Build input script file
+# Build input script files
 #
-make_bash_script $file_in "$bin_sleep 30"
+exec $bin_rm -f $file_in1
+make_bash_script $file_in1 "$bin_sleep 10"
 
 #
-# Spawn an sbatch job that just sleeps for a while
+# Spawn a batch job that just sleeps for a while
 #
 set timeout $max_job_delay
-set sbatch_pid [spawn $sbatch --output=/dev/null --error=/dev/null -t1 $file_in]
+set sbatch_pid [spawn $sbatch --output=/dev/null --error=/dev/null -t1 $file_in1]
 expect {
 	-re "Submitted batch job ($number)" {
 		set job_id1 $expect_out(1,string)
@@ -72,15 +79,11 @@ if {$job_id1 == 0} {
 #
 # Submit a dependent job
 #
-set match_state 0
-set srun_pid [spawn $srun -v --dependency=afterany:$job_id1 $scontrol show job $job_id1]
+set match_job_state 0
+set srun_pid [spawn $srun --dependency=afterany:$job_id1 $scontrol show job $job_id1]
 expect {
-	-re "launching ($number).0" {
-		set job_id2 $expect_out(1,string)
-		exp_continue
-	}
 	-re "JobState=COMPLETED|COMPLETING" {
-		set match_state 1
+		set match_job_state 1
 		exp_continue
 	}
 	timeout {
@@ -92,37 +95,13 @@ expect {
 		wait
 	}
 }
-if {$match_state == 0} {
+if {$match_job_state == 0} {
 	send_user "\nFAILURE: Dependent job not completed\n"
 	set exit_code 1
 }
 
-#
-# Confirm dependency info within second job
-#
-set match_jobid 0
-spawn $scontrol show job $job_id2
-expect {
-	-re "Dependency=afterany:($number)" {
-		set match_jobid $expect_out(1,string)
-		exp_continue
-	}
-	timeout {
-		send_user "\nFAILURE: scontrol not responding\n"
-		set exit_code 1
-	}
-	eof {
-		wait
-	}
-}
-if {$match_jobid != $job_id1} {
-	send_user "\nFAILURE: Dependency information not processed\n"
-	set exit_code 1
-}
-
-
 if {$exit_code == 0} {
-	exec $bin_rm -f $file_in
+	exec $bin_rm -f $file_in1
 	send_user "\nSUCCESS\n"
 }
 exit $exit_code
diff --git a/testsuite/expect/test1.43 b/testsuite/expect/test1.43
index c46b2d09f..cbd3e691d 100755
--- a/testsuite/expect/test1.43
+++ b/testsuite/expect/test1.43
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -38,6 +38,11 @@ set jobs_run    0
 
 print_header $test_id
 
+if {[test_cray]} {
+	send_user "\nWARNING: This test is incompatible with Cray systems\n"
+	exit $exit_code
+}
+
 #
 # Spawn a srun jobs with ever larger node counts
 #
diff --git a/testsuite/expect/test1.44 b/testsuite/expect/test1.44
index 7e46b6e90..1c93962ec 100755
--- a/testsuite/expect/test1.44
+++ b/testsuite/expect/test1.44
@@ -15,7 +15,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -98,11 +98,16 @@ for {set inx 0} {$inx < $cycle_count} {incr inx} {
 		exec $bin_sleep 1
 		set stdout_lines [get_line_cnt $file_out]
 	}
+	if {$stdout_lines != $stdout_target && [test_cray]} {
+		# The ALPS/Cray system is going to always throw an extra
+		# line on no matter what.
+		set stdout_target [expr $stdin_lines * $task_cnt + 1]
+	}
 	if {$stdout_lines != $stdout_target} {
 		if {$stdout_lines == 0} {
 			send_user "\nFAILURE: stdout is empty, is current working directory writable from compute nodes?\n"
 		} else {
-			send_user "\nFAILURE:stdout is incomplete\n"
+			send_user "\nFAILURE:stdout is incomplete ($stdout_lines != $stdout_target)\n"
 		}
 	        set exit_code 1
 	        break
diff --git a/testsuite/expect/test1.45 b/testsuite/expect/test1.45
index f763e5eea..208355e79 100755
--- a/testsuite/expect/test1.45
+++ b/testsuite/expect/test1.45
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -51,6 +51,11 @@ set num_procs_test3  ""
 
 print_header $test_id
 
+if {[test_cray]} {
+	send_user "\nWARNING: This test is incompatible with Cray systems\n"
+	exit $exit_code
+}
+
 if { [test_xcpu] } {
 	send_user "\nWARNING: This test is incompatible with XCPU systems\n"
 	exit 0
@@ -139,8 +144,12 @@ if {$num_procs_test1 != $num_procs_test2} {
 }
 
 if {$num_nodes_test3 != 1} {
-	send_user "\nFAILURE: SLURM_NNODES should be 1 ($num_nodes_test3 != 1)\n"
-	set exit_code 1
+	if { [test_bluegene] } {
+		send_user "\nWARNING: Can not confirm SLURM_NNODES value, likely expanded to smallest block size\n"
+	} else {
+		send_user "\nFAILURE: SLURM_NNODES should be 1 ($num_nodes_test3 != 1)\n"
+		set exit_code 1
+	}
 }
 
 if {$num_procs_test3 != 1} {
diff --git a/testsuite/expect/test1.46 b/testsuite/expect/test1.46
index af0b6d0c6..53e775ab0 100755
--- a/testsuite/expect/test1.46
+++ b/testsuite/expect/test1.46
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -39,6 +39,15 @@ set matches     0
 
 print_header $test_id
 
+if {[test_bluegene]} {
+	send_user "\nWARNING: This test is incompatible with Bluegene systems\n"
+	exit $exit_code
+}
+if {[test_cray]} {
+	send_user "\nWARNING: This test is incompatible with Cray systems\n"
+	exit $exit_code
+}
+
 #
 # Delete left-over input script
 # Build input script file
diff --git a/testsuite/expect/test1.47 b/testsuite/expect/test1.47
index 5d2537cfe..ffc03b721 100755
--- a/testsuite/expect/test1.47
+++ b/testsuite/expect/test1.47
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -41,6 +41,12 @@ set job_name	"JOB.$test_id"
 
 print_header $test_id
 
+# if scontrol could be ran from the compute nodes this would work as well
+if {[test_cray]} {
+	send_user "\nWARNING: This test is incompatible with Cray systems\n"
+	exit $exit_code
+}
+
 #
 # Build input script file
 #
diff --git a/testsuite/expect/test1.48 b/testsuite/expect/test1.48
index 0ccb69a08..d77cd1161 100755
--- a/testsuite/expect/test1.48
+++ b/testsuite/expect/test1.48
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test1.49 b/testsuite/expect/test1.49
index f2501f985..95f1b8bfd 100755
--- a/testsuite/expect/test1.49
+++ b/testsuite/expect/test1.49
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -35,7 +35,6 @@ source ./globals
 set test_id              "1.49"
 set exit_code            0
 set cwd                  "[$bin_pwd]"
-set tasks                4
 set file_in		 "$cwd/test$test_id.in"
 set task_prolog          "$cwd/test$test_id.prolog"
 set task_epilog          "$cwd/test$test_id.epilog"
@@ -44,6 +43,11 @@ set file_out_post        "$cwd/test$test_id.output_post"
 
 print_header $test_id
 
+if {[test_cray]} {
+	send_user "\nWARNING: This test is incompatible with Cray systems\n"
+	exit $exit_code
+}
+
 #
 # Delete left-over scripts and rebuild,
 # The sleep command just forces slurmd to kill the user's (long running) epilog
@@ -79,8 +83,14 @@ make_bash_script $file_in "
 #
 set matches 0
 set timeout [expr $max_job_delay + 30]
+set tasks   4
 if { [test_bluegene] } {
-	set node_cnt 1-1024
+	if { [test_emulated] } {
+		set node_cnt 1
+		set tasks    1
+	} else {
+		set node_cnt 1-1024
+	}
 } else {
 	set node_cnt 1-1
 }
diff --git a/testsuite/expect/test1.5 b/testsuite/expect/test1.5
index a97132c4d..d47356a13 100755
--- a/testsuite/expect/test1.5
+++ b/testsuite/expect/test1.5
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -43,7 +43,7 @@ print_header $test_id
 #
 set srun_pid [spawn $srun --help]
 expect {
-	-re "Usage:" {
+	-re "srun .* executable" {
 		incr matches
 		exp_continue
 	}
@@ -65,8 +65,14 @@ expect {
 	}
 }
 
-if {$matches != 3} {
-	send_user "\nFAILURE: srun failed to report help message\n"
+set target_matches 3
+if {$matches < $target_matches && [test_cray]} {
+#	NOTE: On emulated Cray system, the matches should be 3.
+#	On a real ALPS/BASIL Cray system, the matches should be 1.
+	set target_matches 1
+}
+if {$matches < $target_matches} {
+	send_user "\nFAILURE: srun failed to report help message ($matches of $target_matches)\n"
 	set exit_code 1
 }
 if {$exit_code == 0} {
diff --git a/testsuite/expect/test1.50 b/testsuite/expect/test1.50
index 4bb28ee5e..008f7d1dd 100755
--- a/testsuite/expect/test1.50
+++ b/testsuite/expect/test1.50
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test1.51 b/testsuite/expect/test1.51
index ba3ca6f5b..ccbdc3ff1 100755
--- a/testsuite/expect/test1.51
+++ b/testsuite/expect/test1.51
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test1.52 b/testsuite/expect/test1.52
index 9bb561c24..f08090048 100755
--- a/testsuite/expect/test1.52
+++ b/testsuite/expect/test1.52
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test1.53 b/testsuite/expect/test1.53
index c42ded219..cd564bb90 100755
--- a/testsuite/expect/test1.53
+++ b/testsuite/expect/test1.53
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -42,8 +42,8 @@ print_header $test_id
 #
 # Delete left-over programs and rebuild them.
 #
-exec rm -f $file_prog
-exec cc -o $file_prog $file_prog.c
+exec $bin_rm -f $file_prog
+exec $bin_cc -o $file_prog $file_prog.c
 
 #
 # submit job to run 4 mins, but get signaled 2 mins before time limit
diff --git a/testsuite/expect/test1.54 b/testsuite/expect/test1.54
index a7fae8705..b8b73a4e9 100755
--- a/testsuite/expect/test1.54
+++ b/testsuite/expect/test1.54
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -40,7 +40,7 @@ set exit_code   0
 print_header $test_id
 
 if {[test_front_end] != 0} {
-        send_user "\nThis test is incompatible with front-end systems\n"
+        send_user "\nWARNING: This test is incompatible with front-end systems\n"
 	exit 0
 }
 
diff --git a/testsuite/expect/test1.55 b/testsuite/expect/test1.55
index f00409db3..0c81a8132 100755
--- a/testsuite/expect/test1.55
+++ b/testsuite/expect/test1.55
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -38,6 +38,11 @@ set test_script          "test$test_id.sh"
 
 print_header $test_id
 
+# limited step knowledge on cray systems.
+if {[test_cray]} {
+	send_user "\nWARNING: This test is incompatible with Cray systems\n"
+	exit 0
+}
 #
 # Create a test script to be launch by srun
 #
@@ -58,7 +63,7 @@ make_bash_script $test_script {
 set timeout $max_job_delay
 set jobid  0
 set stepid 0
-set srun_pid [spawn $srun -u -v -n1 -t1 $test_script]
+set srun_pid [spawn $srun --unbuffered -v -n1 -t1 $test_script]
 expect {
 	-re "launching (($number)\.0)" {
 		set stepid $expect_out(1,string)
diff --git a/testsuite/expect/test1.56 b/testsuite/expect/test1.56
index 09d9b6b70..532a7d226 100755
--- a/testsuite/expect/test1.56
+++ b/testsuite/expect/test1.56
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test1.57 b/testsuite/expect/test1.57
index 1d8f45e68..b1096ac18 100755
--- a/testsuite/expect/test1.57
+++ b/testsuite/expect/test1.57
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test1.58 b/testsuite/expect/test1.58
index f3ee2068c..09f59f4b9 100755
--- a/testsuite/expect/test1.58
+++ b/testsuite/expect/test1.58
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -37,12 +37,17 @@ set exit_code            0
 set jobid                0
 
 print_header $test_id
-set timeout $max_job_delay
+
+if { [test_cray] } {
+	send_user "\nWARNING: This test is incompatible with Cray systems\n"
+	exit $exit_code
+}
 
 #
 # Run an srun to grab a single node allocation, but not start any
 # job steps.
 #
+set timeout $max_job_delay
 set srun_alloc_pid [spawn $salloc -v -N1 -n1 $bin_sleep 600]
 set srun_alloc_sid $spawn_id
 expect {
diff --git a/testsuite/expect/test1.59 b/testsuite/expect/test1.59
index be7d4f574..62a3237f0 100755
--- a/testsuite/expect/test1.59
+++ b/testsuite/expect/test1.59
@@ -15,7 +15,7 @@
 # UCRL-CODE-217948.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test1.6 b/testsuite/expect/test1.6
index 900e4fb2a..d90989257 100755
--- a/testsuite/expect/test1.6
+++ b/testsuite/expect/test1.6
@@ -16,7 +16,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test1.60 b/testsuite/expect/test1.60
index 312a14973..46af3e461 100755
--- a/testsuite/expect/test1.60
+++ b/testsuite/expect/test1.60
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -48,9 +48,9 @@ set file_out_n_glob  ""
 
 print_header $test_id
 
-if {[test_bluegene] != 0} {
-	send_user "\nWARNING: This test is incompatible with bluegene systems\n"
-	exit 0
+if {[test_front_end]} {
+        send_user "\nWARNING: This test is incompatible with front-end systems\n"
+        exit $exit_code
 }
 
 #
@@ -83,18 +83,14 @@ if {$job_id == 0} {
 }
 
 set node_count 0
-if { [test_bluegene] } {
-	set node_count 1
-} else {
-	spawn $squeue -tall -j $job_id -o "%i %D"
-	expect {
-		-re "$job_id ($number)" {
-			set node_count $expect_out(1,string)
-			exp_continue
-		}
-		eof {
-			wait
-		}
+spawn $squeue -tall -j $job_id -o "%i %D"
+expect {
+	-re "$job_id ($number)" {
+		set node_count $expect_out(1,string)
+		exp_continue
+	}
+	eof {
+		wait
 	}
 }
 if {$node_count == 0} {
diff --git a/testsuite/expect/test1.61 b/testsuite/expect/test1.61
index 0583f9cf3..bdeaa6458 100755
--- a/testsuite/expect/test1.61
+++ b/testsuite/expect/test1.61
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -40,9 +40,13 @@ set file_out    "test$test_id.output"
 set file_err    "test$test_id.error"
 set timeout 60
 
-
 print_header $test_id
 
+if {[test_cray]} {
+	send_user "\nWARNING: This test is incompatible with Cray systems\n"
+	exit $exit_code
+}
+
 exec $bin_rm -f $file_in file_out file_err
 make_bash_script $file_in "
   $bin_date
diff --git a/testsuite/expect/test1.62 b/testsuite/expect/test1.62
index ee2ba1e9f..6863526b8 100755
--- a/testsuite/expect/test1.62
+++ b/testsuite/expect/test1.62
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test1.63 b/testsuite/expect/test1.63
index fc52acd78..3ea27e16a 100755
--- a/testsuite/expect/test1.63
+++ b/testsuite/expect/test1.63
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -44,6 +44,11 @@ if { [test_xcpu] } {
 	exit 0
 }
 
+if {[test_cray]} {
+	send_user "\nWARNING: This test is incompatible with Cray systems\n"
+	exit $exit_code
+}
+
 #
 # Delete left-over programs and rebuild them.
 # We use our own program to get ulimit values since the output
diff --git a/testsuite/expect/test1.64 b/testsuite/expect/test1.64
index e49a71e73..dcfe3a572 100755
--- a/testsuite/expect/test1.64
+++ b/testsuite/expect/test1.64
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -44,6 +44,11 @@ if { [test_xcpu] } {
 	exit $exit_code
 }
 
+if {[test_cray]} {
+	send_user "\nWARNING: This test is incompatible with Cray systems\n"
+	exit $exit_code
+}
+
 #
 # Spawn initial program via srun
 # Send the signal while the srun is waiting for a resource allocation
diff --git a/testsuite/expect/test1.7 b/testsuite/expect/test1.7
index c58dae804..506135d75 100755
--- a/testsuite/expect/test1.7
+++ b/testsuite/expect/test1.7
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test1.8 b/testsuite/expect/test1.8
index 9d3c5bb2a..5291291f8 100755
--- a/testsuite/expect/test1.8
+++ b/testsuite/expect/test1.8
@@ -17,7 +17,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -50,6 +50,12 @@ set got_sleep_err        0
 
 print_header $test_id
 
+# if the srun -i option worked this test would work.
+if {[test_cray]} {
+	send_user "\nWARNING: This test is incompatible with Cray systems\n"
+	exit $exit_code
+}
+
 #
 # Delete left-over stdin/out/err files
 # Build stdin file
diff --git a/testsuite/expect/test1.80 b/testsuite/expect/test1.80
index 512c308dd..1233faacc 100755
--- a/testsuite/expect/test1.80
+++ b/testsuite/expect/test1.80
@@ -16,7 +16,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test1.81 b/testsuite/expect/test1.81
index a1e7982a1..8c4efa784 100755
--- a/testsuite/expect/test1.81
+++ b/testsuite/expect/test1.81
@@ -16,7 +16,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -40,6 +40,11 @@ set exit_code   0
 
 print_header $test_id
 
+if {[test_cray]} {
+	send_user "\nWARNING: This test is incompatible with Cray systems\n"
+	exit $exit_code
+}
+
 #
 # Submit a 1 node job and validate that we don't get more than one
 #
diff --git a/testsuite/expect/test1.82 b/testsuite/expect/test1.82
index 89d8750f4..94e3610ad 100755
--- a/testsuite/expect/test1.82
+++ b/testsuite/expect/test1.82
@@ -16,7 +16,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -45,6 +45,11 @@ if { [test_xcpu] } {
 	exit 0
 }
 
+if {[test_cray]} {
+	send_user "\nWARNING: This test is incompatible with Cray systems\n"
+	exit $exit_code
+}
+
 #
 # Submit a 3+ node job
 #
diff --git a/testsuite/expect/test1.83 b/testsuite/expect/test1.83
index d51929014..c46e62cfd 100755
--- a/testsuite/expect/test1.83
+++ b/testsuite/expect/test1.83
@@ -20,7 +20,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test1.84 b/testsuite/expect/test1.84
index 614cbce91..ba7f8c727 100755
--- a/testsuite/expect/test1.84
+++ b/testsuite/expect/test1.84
@@ -19,7 +19,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -53,6 +53,14 @@ if {[test_multiple_slurmd] != 0} {
 	exit 0
 }
 
+# Various configurations allocate nodes, sockets, cores, cpus or threads;
+# not all of which are compatible with this test
+set select_type [test_select_type]
+if {[string compare $select_type "cons_res"]} {
+	send_user "\nWARNING: This test is incompatible with select/$select_type\n"
+	exit 0
+}
+
 #
 # Submit a 1 node job to determine the node's CPU count
 #
diff --git a/testsuite/expect/test1.86 b/testsuite/expect/test1.86
index e1748bbe2..984fd031b 100755
--- a/testsuite/expect/test1.86
+++ b/testsuite/expect/test1.86
@@ -16,7 +16,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test1.87 b/testsuite/expect/test1.87
index 3fdc8320e..457ff29be 100755
--- a/testsuite/expect/test1.87
+++ b/testsuite/expect/test1.87
@@ -16,7 +16,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test1.88 b/testsuite/expect/test1.88
index 9b10b8614..5e25ce827 100755
--- a/testsuite/expect/test1.88
+++ b/testsuite/expect/test1.88
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test1.88.prog.c b/testsuite/expect/test1.88.prog.c
index a37e6a63c..78d8fce2d 100644
--- a/testsuite/expect/test1.88.prog.c
+++ b/testsuite/expect/test1.88.prog.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test1.89 b/testsuite/expect/test1.89
index 1c6c473e0..d8d8ae876 100755
--- a/testsuite/expect/test1.89
+++ b/testsuite/expect/test1.89
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -46,7 +46,7 @@ set affinity 0
 log_user 0
 spawn $scontrol show config
 expect {
-	-re "task/affinity" {
+	-re "affinity" {
 		set affinity 1
 		exp_continue
 	}
diff --git a/testsuite/expect/test1.89.prog.c b/testsuite/expect/test1.89.prog.c
index 49bbd2b0f..3246cb10a 100644
--- a/testsuite/expect/test1.89.prog.c
+++ b/testsuite/expect/test1.89.prog.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test1.9 b/testsuite/expect/test1.9
index c8202105d..7fc2c2f07 100755
--- a/testsuite/expect/test1.9
+++ b/testsuite/expect/test1.9
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -34,12 +34,22 @@ source ./globals
 
 set test_id     "1.9"
 set exit_code   0
-set task_cnt    5
 set tasks       0
 set verbosity   0
 
 print_header $test_id
 
+if {[test_bluegene]} {
+	# We never launch more than one task on emulated Bluegene
+	set task_cnt 1
+} else {
+	set task_cnt 5
+}
+if {[test_cray]} {
+	send_user "\nWARNING: This test is incompatible with Cray systems\n"
+	exit $exit_code
+}
+
 #
 # Submit a slurm job that will execute 'id' on 1 node and over task_cnt tasks
 #
diff --git a/testsuite/expect/test1.90 b/testsuite/expect/test1.90
index 440a6253b..ca7690299 100755
--- a/testsuite/expect/test1.90
+++ b/testsuite/expect/test1.90
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -51,7 +51,7 @@ expect {
 		set fast_sched $expect_out(1,string)
 		exp_continue
 	}
-	-re "task/affinity" {
+	-re "affinity" {
 		set affinity 1
 		exp_continue
 	}
diff --git a/testsuite/expect/test1.90.prog.c b/testsuite/expect/test1.90.prog.c
index 9f54c4873..022a712d9 100644
--- a/testsuite/expect/test1.90.prog.c
+++ b/testsuite/expect/test1.90.prog.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test1.91 b/testsuite/expect/test1.91
index 2f6dfd0f9..ae45ca0b5 100755
--- a/testsuite/expect/test1.91
+++ b/testsuite/expect/test1.91
@@ -15,7 +15,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -47,7 +47,7 @@ set affinity 0
 log_user 0
 spawn $scontrol show config
 expect {
-	-re "task/affinity" {
+	-re "affinity" {
 		set affinity 1
 		exp_continue
 	}
diff --git a/testsuite/expect/test1.91.prog.c b/testsuite/expect/test1.91.prog.c
index 2b80ac1ff..3c5566a0e 100644
--- a/testsuite/expect/test1.91.prog.c
+++ b/testsuite/expect/test1.91.prog.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test1.92 b/testsuite/expect/test1.92
index dc794328f..b116f28a6 100755
--- a/testsuite/expect/test1.92
+++ b/testsuite/expect/test1.92
@@ -15,7 +15,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -41,9 +41,9 @@ set job_id      0
 
 print_header $test_id
 
-if { [test_bluegene] } {
-	send_user "\nWARNING: This test is not compatible with bluegene systesm\n"
-	exit 0
+if {[test_front_end]} {
+        send_user "\nWARNING: This test is incompatible with front-end systems\n"
+        exit $exit_code
 }
 
 make_bash_script $file_bash {
diff --git a/testsuite/expect/test1.93 b/testsuite/expect/test1.93
index 69a1f1247..15bfde412 100755
--- a/testsuite/expect/test1.93
+++ b/testsuite/expect/test1.93
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test10.1 b/testsuite/expect/test10.1
index c440befd3..18ac76416 100755
--- a/testsuite/expect/test10.1
+++ b/testsuite/expect/test10.1
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test10.10 b/testsuite/expect/test10.10
index 619e05298..0bb3bd09f 100755
--- a/testsuite/expect/test10.10
+++ b/testsuite/expect/test10.10
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test10.11 b/testsuite/expect/test10.11
index 1d96d5b04..4247ad9e3 100755
--- a/testsuite/expect/test10.11
+++ b/testsuite/expect/test10.11
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test10.12 b/testsuite/expect/test10.12
index 1283f2adb..9454de715 100755
--- a/testsuite/expect/test10.12
+++ b/testsuite/expect/test10.12
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -54,7 +54,7 @@ if {[file exists $smap] == 0} {
 #
 spawn $smap --resolve 000
 expect {
-	-re "Must be physically on a BG" {
+	-re "Must be physically on a BlueGene" {
 		set non_bg 1
 		exp_continue;
 	}
@@ -72,7 +72,7 @@ expect {
 	}
 }
 if {$non_bg != 0} {
-	send_user "\nWARNING: This test is only valid on a Blue Gene system Service Node\n"
+	send_user "\nWARNING: This test is only valid on a BlueGene system Service Node\n"
 	exit 0
 }
 if {$check != 1} {
diff --git a/testsuite/expect/test10.13 b/testsuite/expect/test10.13
index e84986d8f..c8081d6f7 100755
--- a/testsuite/expect/test10.13
+++ b/testsuite/expect/test10.13
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -64,7 +64,7 @@ expect {
 		set non_bg 1
 		exp_continue
 	}
-	-re "TORUS" {
+	-re (Torus|T,T,T,T) {
 		set created 1
 		exp_continue
 	}
@@ -104,16 +104,37 @@ if {$created != 1} {
 	set exit_code 1
 }
 
+set type [get_bluegene_type]
+set match_target 0
+if {![string compare $type "L"]} {
+#	 Bluegene/L
+	set match_target 11
+} elseif {![string compare $type "P"]} {
+#	 Bluegene/P
+	set match_target 10
+} else {
+#	Bluegene/Q
+	set match_target 8
+}
+
 set matches       0
 if { $exit_code != 1 } {
-	spawn more $file
+	spawn grep -v \^\# $file
 	expect {
 		-re "BlrtsImage" {
+#			Absent from Bluegene/P and Bluegene/Q
+			set stuff [concat $stuff "1"]
+			incr matches
+			exp_continue
+		}
+		-re "CnloadImage" {
+#			Present only Bluegene/P
 			set stuff [concat $stuff "1"]
 			incr matches
 			exp_continue
 		}
 		-re "LinuxImage" {
+#			Absent from Bluegene/P and Bluegene/Q
 			set stuff [concat $stuff "2"]
 			incr matches
 			exp_continue
@@ -124,16 +145,23 @@ if { $exit_code != 1 } {
 			exp_continue
 		}
 		-re "RamDiskImage" {
+#			Absent from Bluegene/P and Bluegene/Q
 			set stuff [concat $stuff "4"]
 			incr matches
 			exp_continue
 		}
-		-re "BridgeAPILogFile" {
+		-re "IoloadImage" {
+#			Present only Bluegene/P
+			set stuff [concat $stuff "4"]
+			incr matches
+			exp_continue
+		}
+		-re "Numpsets" {
 			set stuff [concat $stuff "5"]
 			incr matches
 			exp_continue
 		}
-		-re "Numpsets.*io poor" {
+		-re "BridgeAPILogFile" {
 			set stuff [concat $stuff "6"]
 			incr matches
 			exp_continue
@@ -168,15 +196,16 @@ if { $exit_code != 1 } {
 		}
 	}
 
-	if {$matches != 11} {
-		send_user "$matches\n$stuff"
+	if {$matches != $match_target} {
+		send_user "match count: $matches != $match_target\n"
+		send_user "matches: $stuff"
 		send_user "\nFAILURE: smap bluegene.conf file was created but corrupt\n"
 		set exit_code 1
 	}
 
 }
-exec rm -f $file
 if {$exit_code == 0} {
+	exec rm -f $file
 	send_user "\nSUCCESS\n"
 }
 exit $exit_code
diff --git a/testsuite/expect/test10.2 b/testsuite/expect/test10.2
index e056e2219..012216627 100755
--- a/testsuite/expect/test10.2
+++ b/testsuite/expect/test10.2
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test10.3 b/testsuite/expect/test10.3
index 918cf04df..7efb50d21 100755
--- a/testsuite/expect/test10.3
+++ b/testsuite/expect/test10.3
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test10.4 b/testsuite/expect/test10.4
index 7ff958d3d..9e77a396a 100755
--- a/testsuite/expect/test10.4
+++ b/testsuite/expect/test10.4
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test10.5 b/testsuite/expect/test10.5
index bc6710112..5f47939a2 100755
--- a/testsuite/expect/test10.5
+++ b/testsuite/expect/test10.5
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test10.6 b/testsuite/expect/test10.6
index 0be8565ae..38a0493c7 100755
--- a/testsuite/expect/test10.6
+++ b/testsuite/expect/test10.6
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test10.7 b/testsuite/expect/test10.7
index 2b76b7437..fb5d160eb 100755
--- a/testsuite/expect/test10.7
+++ b/testsuite/expect/test10.7
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test10.8 b/testsuite/expect/test10.8
index f48cc4ae3..a384e9edc 100755
--- a/testsuite/expect/test10.8
+++ b/testsuite/expect/test10.8
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test10.9 b/testsuite/expect/test10.9
index 709b31f3a..7e854b4f9 100755
--- a/testsuite/expect/test10.9
+++ b/testsuite/expect/test10.9
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test11.1 b/testsuite/expect/test11.1
index 81fa38d98..c27f68b6c 100755
--- a/testsuite/expect/test11.1
+++ b/testsuite/expect/test11.1
@@ -16,7 +16,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test11.2 b/testsuite/expect/test11.2
index e5bdd1513..cc5c1ba34 100755
--- a/testsuite/expect/test11.2
+++ b/testsuite/expect/test11.2
@@ -16,7 +16,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test11.3 b/testsuite/expect/test11.3
index 6ed4cd50a..cc3e69b1a 100755
--- a/testsuite/expect/test11.3
+++ b/testsuite/expect/test11.3
@@ -16,7 +16,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test11.4 b/testsuite/expect/test11.4
index f68eaa65c..a636d167d 100755
--- a/testsuite/expect/test11.4
+++ b/testsuite/expect/test11.4
@@ -15,7 +15,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test11.5 b/testsuite/expect/test11.5
index 14f80b2bc..f6a249c28 100755
--- a/testsuite/expect/test11.5
+++ b/testsuite/expect/test11.5
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -39,6 +39,11 @@ set job_id      0
 
 print_header $test_id
 
+if {[test_cray]} {
+	send_user "\nWARNING: This test is incompatible with Cray systems\n"
+	exit $exit_code
+}
+
 #
 # Delete left-over input script
 # Build input script file
diff --git a/testsuite/expect/test11.6 b/testsuite/expect/test11.6
index f32637d1a..56ef37255 100755
--- a/testsuite/expect/test11.6
+++ b/testsuite/expect/test11.6
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test11.7 b/testsuite/expect/test11.7
index 8d4ffc14d..f48c9a295 100755
--- a/testsuite/expect/test11.7
+++ b/testsuite/expect/test11.7
@@ -15,7 +15,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test12.1 b/testsuite/expect/test12.1
index acef13dbf..1ad791f85 100755
--- a/testsuite/expect/test12.1
+++ b/testsuite/expect/test12.1
@@ -15,7 +15,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test12.2 b/testsuite/expect/test12.2
index 23ad11838..a12f8ad54 100755
--- a/testsuite/expect/test12.2
+++ b/testsuite/expect/test12.2
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -49,9 +49,9 @@ set ret_code    42
 
 print_header $test_id
 
-if {[test_bluegene] != 0} {
-	send_user "\nWARNING: This test is incompatible with bluegene systems\n"
-	exit 0
+if {[test_front_end]} {
+        send_user "\nWARNING: This test is incompatible with front-end systems\n"
+        exit $exit_code
 }
 
 proc _get_mem {prog} {
diff --git a/testsuite/expect/test12.2.prog.c b/testsuite/expect/test12.2.prog.c
index ed647ec1e..f7f738ca0 100644
--- a/testsuite/expect/test12.2.prog.c
+++ b/testsuite/expect/test12.2.prog.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test12.4 b/testsuite/expect/test12.4
index 0fdcf0145..800c82c34 100755
--- a/testsuite/expect/test12.4
+++ b/testsuite/expect/test12.4
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test12.5 b/testsuite/expect/test12.5
index 501959a4f..10508808e 100755
--- a/testsuite/expect/test12.5
+++ b/testsuite/expect/test12.5
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test13.1 b/testsuite/expect/test13.1
index 0fa4c467c..e57fd5b0a 100755
--- a/testsuite/expect/test13.1
+++ b/testsuite/expect/test13.1
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test14.1 b/testsuite/expect/test14.1
index 87f28c089..185dcd7e9 100755
--- a/testsuite/expect/test14.1
+++ b/testsuite/expect/test14.1
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test14.2 b/testsuite/expect/test14.2
index d69970260..3ec495385 100755
--- a/testsuite/expect/test14.2
+++ b/testsuite/expect/test14.2
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test14.3 b/testsuite/expect/test14.3
index 2deded0de..309524402 100755
--- a/testsuite/expect/test14.3
+++ b/testsuite/expect/test14.3
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test14.4 b/testsuite/expect/test14.4
index 76923af24..7bd045336 100755
--- a/testsuite/expect/test14.4
+++ b/testsuite/expect/test14.4
@@ -16,7 +16,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test14.5 b/testsuite/expect/test14.5
index d778ff362..7c0d7b8e0 100755
--- a/testsuite/expect/test14.5
+++ b/testsuite/expect/test14.5
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test14.6 b/testsuite/expect/test14.6
index f7ceb0bb0..7610742fd 100755
--- a/testsuite/expect/test14.6
+++ b/testsuite/expect/test14.6
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test14.7 b/testsuite/expect/test14.7
index 47a01ca05..4299069d0 100755
--- a/testsuite/expect/test14.7
+++ b/testsuite/expect/test14.7
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test14.8 b/testsuite/expect/test14.8
index ce311dad6..1dc45717d 100755
--- a/testsuite/expect/test14.8
+++ b/testsuite/expect/test14.8
@@ -17,7 +17,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test15.1 b/testsuite/expect/test15.1
index a2f2c9f9c..e35b32483 100755
--- a/testsuite/expect/test15.1
+++ b/testsuite/expect/test15.1
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test15.10 b/testsuite/expect/test15.10
index 4fc4d513f..0d939d2aa 100755
--- a/testsuite/expect/test15.10
+++ b/testsuite/expect/test15.10
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test15.11 b/testsuite/expect/test15.11
index 04f167fbf..75f4a7d22 100755
--- a/testsuite/expect/test15.11
+++ b/testsuite/expect/test15.11
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test15.12 b/testsuite/expect/test15.12
index 1fbec7c39..831ff5b69 100755
--- a/testsuite/expect/test15.12
+++ b/testsuite/expect/test15.12
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test15.13 b/testsuite/expect/test15.13
index ef64cba49..a83265326 100755
--- a/testsuite/expect/test15.13
+++ b/testsuite/expect/test15.13
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test15.14 b/testsuite/expect/test15.14
index c201ffe65..d570aadcf 100755
--- a/testsuite/expect/test15.14
+++ b/testsuite/expect/test15.14
@@ -1,7 +1,7 @@
 #!/usr/bin/expect
 ############################################################################
 # Purpose: Test of SLURM functionality
-#          Test of job dependencies (--depedency option).
+#          Test of job dependencies (--dependency option).
 #
 # Output:  "TEST: #.#" followed by "SUCCESS" if test was successful, OR
 #          "FAILURE: ..." otherwise with an explanation of the failure, OR
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -28,31 +28,33 @@
 #
 # You should have received a copy of the GNU General Public License along
 # with SLURM; if not, write to the Free Software Foundation, Inc.,
-# 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
 ############################################################################
 source ./globals
 
 set test_id     "15.14"
-set file_in     "test$test_id.input"
+set file_in1    "test$test_id.input1"
+set file_in2    "test$test_id.input2"
+set file_out2   "test$test_id.output2"
 set exit_code   0
 set job_id1     0
 set job_id2     0
-set job_acct    "TEST_ACCT"
 
 print_header $test_id
 
 #
-# Build input script file
+# Build input script files
 #
-make_bash_script $file_in "$bin_sleep 10"
+exec $bin_rm -f $file_in1 $file_in2 $file_out2
+make_bash_script $file_in1 "$bin_sleep 10"
 
 #
-# Spawn a srun batch job that just sleeps for a while
+# Spawn a batch job that just sleeps for a while
 #
 set timeout $max_job_delay
-set sbatch_pid [spawn $sbatch --output=/dev/null --error=/dev/null -t1 $file_in]
+set sbatch_pid [spawn $sbatch --output=/dev/null --error=/dev/null -t1 $file_in1]
 expect {
-	 -re "Submitted batch job ($number)" {
+	-re "Submitted batch job ($number)" {
 		set job_id1 $expect_out(1,string)
 		exp_continue
 	}
@@ -73,33 +75,26 @@ if {$job_id1 == 0} {
 #
 # Submit a dependent job
 #
+make_bash_script $file_in2 "$scontrol show job $job_id1"
 set match_state 0
-set salloc_pid [spawn $salloc --dependency=afterany:$job_id1 $srun $scontrol show job $job_id1]
+set sbatch_pid [spawn $sbatch --output=$file_out2 --dependency=afterany:$job_id1 $file_in2]
 expect {
-	-re "Granted job allocation ($number)" {
+	-re "Submitted batch job ($number)" {
 		set job_id2 $expect_out(1,string)
 		exp_continue
 	}
-#	COMPLETED or COMPLETING
-	-re "JobState=COMPLET" {
-		set match_state 1
-		exp_continue
-	}
 	timeout {
-		send_user "\nFAILURE: salloc not responding\n"
-		if {$job_id2 != 0} {
-			cancel_job $job_id
-		}
-		slow_kill [expr 0 - $salloc_pid]
+		send_user "\nFAILURE: sbatch not responding\n"
+		slow_kill $sbatch_pid
 		set exit_code 1
 	}
 	eof {
 		wait
 	}
 }
-if {$match_state == 0} {
-	send_user "\nFAILURE: Dependent job not completed\n"
-	set exit_code 1
+if {$job_id2 == 0} {
+	send_user "\nFAILURE: batch submit failure\n"
+	exit 1
 }
 
 #
@@ -125,8 +120,33 @@ if {$match_jobid != $job_id1} {
 	set exit_code 1
 }
 
+
+#
+# Confirm that first job is complete before second job starts
+#
+if {[wait_for_file $file_out2] != 0} {
+	send_user "\nFAILURE: file $file_out2 is missing\n"
+        set exit_code 1
+} else {
+	set match_job_state 0
+	spawn $bin_cat $file_out2
+	expect {
+		-re "JobState=COMPLETED|COMPLETING" {
+			set match_job_state 1
+			exp_continue
+		}
+		eof {
+			wait
+		}
+	}
+	if {$match_job_state == 0} {
+		send_user "\nFAILURE: Dependent job not completed\n"
+		set exit_code 1
+	}
+}
+
 if {$exit_code == 0} {
-	exec $bin_rm -f $file_in
+	exec $bin_rm -f $file_in1 $file_in2 $file_out2
 	send_user "\nSUCCESS\n"
 }
 exit $exit_code
diff --git a/testsuite/expect/test15.15 b/testsuite/expect/test15.15
index 515e55e62..29c8a2b7a 100755
--- a/testsuite/expect/test15.15
+++ b/testsuite/expect/test15.15
@@ -15,7 +15,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test15.16 b/testsuite/expect/test15.16
index bbf82eb40..59bae5bc8 100755
--- a/testsuite/expect/test15.16
+++ b/testsuite/expect/test15.16
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test15.17 b/testsuite/expect/test15.17
index e91d6632a..f4330e976 100755
--- a/testsuite/expect/test15.17
+++ b/testsuite/expect/test15.17
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -41,6 +41,11 @@ set job_id_2     0
 
 print_header $test_id
 
+if { [test_cray] } {
+	send_user "\nWARNING: This test is incompatible with Cray systems\n"
+	exit $exit_code
+}
+
 #
 # Build input script file
 #
diff --git a/testsuite/expect/test15.18 b/testsuite/expect/test15.18
index 5d31e9e62..83397b46d 100755
--- a/testsuite/expect/test15.18
+++ b/testsuite/expect/test15.18
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test15.19 b/testsuite/expect/test15.19
index 0a2b7482e..b7500ca3d 100755
--- a/testsuite/expect/test15.19
+++ b/testsuite/expect/test15.19
@@ -16,7 +16,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -40,6 +40,11 @@ set exit_code   0
 
 print_header $test_id
 
+if {[test_cray]} {
+	send_user "\nWARNING: This test is incompatible with Cray systems\n"
+	exit $exit_code
+}
+
 #
 # Submit a 1 node job and validate that we don't get more than one
 #
@@ -66,7 +71,7 @@ expect {
 	}
 	-re "($number): ($alpha_numeric_under)" {
 		if {$task_cnt == 0} {
-			set host_0 $expect_out(1,string)
+			set host_0 $expect_out(2,string)
 		}
 		incr task_cnt
 		exp_continue
@@ -87,12 +92,13 @@ expect {
 if {[string compare $host_0 ""] == 0} {
 	send_user "\nFAILURE: Did not get SLURMD_NODENAME of task 0\n"
 	set exit_code   1
+	exit $exit_code
 }
 
 set alloc_fail 0
 set job_id     0
 set task_cnt2  0
-set salloc_pid [spawn $salloc -N1-1 -w $host_0 -t1 $srun -n [expr $task_cnt + 1] -l $bin_printenv SLURMD_NODENAME]
+set salloc_pid [spawn $salloc -N1-1 -w DUMMY_HOSTNAME -t1 $srun -n [expr $task_cnt + 1] -l $bin_printenv SLURMD_NODENAME]
 expect {
 	-re "Granted job allocation ($number)" {
 		set job_id $expect_out(1,string)
@@ -127,7 +133,6 @@ if { $task_cnt2 != 0 } {
 	set exit_code   1
 }
 
-
 #
 # Submit a 1 node job
 #
diff --git a/testsuite/expect/test15.2 b/testsuite/expect/test15.2
index 9160d9458..a8a9886e7 100755
--- a/testsuite/expect/test15.2
+++ b/testsuite/expect/test15.2
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test15.20 b/testsuite/expect/test15.20
index af3bbe382..f7f32603b 100755
--- a/testsuite/expect/test15.20
+++ b/testsuite/expect/test15.20
@@ -16,7 +16,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -40,6 +40,10 @@ set exit_code   0
 
 print_header $test_id
 
+if {[test_cray]} {
+	send_user "\nWARNING: This test is incompatible with Cray systems\n"
+	exit $exit_code
+}
 if { [test_xcpu] } {
 	send_user "\nWARNING: This test is incompatible with XCPU systems\n"
 	exit 0
diff --git a/testsuite/expect/test15.21 b/testsuite/expect/test15.21
index 43ea422b7..799cdac86 100755
--- a/testsuite/expect/test15.21
+++ b/testsuite/expect/test15.21
@@ -19,7 +19,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test15.22 b/testsuite/expect/test15.22
index a3e88ff4f..74726d7c6 100755
--- a/testsuite/expect/test15.22
+++ b/testsuite/expect/test15.22
@@ -15,7 +15,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test15.23 b/testsuite/expect/test15.23
index 79163580f..e366e4a0b 100755
--- a/testsuite/expect/test15.23
+++ b/testsuite/expect/test15.23
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test15.24 b/testsuite/expect/test15.24
index dab6652ba..faa4addff 100755
--- a/testsuite/expect/test15.24
+++ b/testsuite/expect/test15.24
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -37,10 +37,23 @@ set exit_code   0
 set file_in     "test$test_id.input"
 set file_out    "test$test_id.output"
 set job_id      0
-set task_cnt    10
 
 print_header $test_id
 
+if {[test_cray]} {
+        send_user "\nWARNING: This test is incompatible with Cray systems\n"
+        exit $exit_code
+}
+if { [test_bluegene] } {
+	if { [test_emulated] } {
+		set task_cnt 1
+	} else {
+		set task_cnt 10
+	}
+} else {
+	set task_cnt 10
+}
+
 #
 # Submit a slurm job that will print slurm env vars and execute 'id'
 #
diff --git a/testsuite/expect/test15.25 b/testsuite/expect/test15.25
index fc8623de1..63d8aa5c7 100755
--- a/testsuite/expect/test15.25
+++ b/testsuite/expect/test15.25
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -42,6 +42,12 @@ set job_acct_lc "test_acct"
 
 print_header $test_id
 
+# if scontrol could be ran from the compute nodes this would work as well
+if {[test_cray]} {
+	send_user "\nWARNING: This test is incompatible with Cray systems\n"
+	exit $exit_code
+}
+
 if {[test_assoc_enforced]} {
 	send_user "\nWARNING: This test will not work when associations are enforced.\n"
 	exit $exit_code
diff --git a/testsuite/expect/test15.26 b/testsuite/expect/test15.26
index 571cde06e..23883a448 100755
--- a/testsuite/expect/test15.26
+++ b/testsuite/expect/test15.26
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -35,7 +35,6 @@ source ./globals
 
 set test_id     "15.26"
 set exit_code   0
-set matches     0
 
 print_header $test_id
 
@@ -48,21 +47,27 @@ if { [test_xcpu] } {
 # Spawn initial program via srun
 # Send the signal while the srun is waiting for a resource allocation
 #
+set wait_flag  0
+set abort_flag 0
 set salloc_pid [spawn $salloc -N1 -t1 --begin=noon -v $bin_sleep 1]
 expect {
 	-re "queued and waiting for resources" {
-		incr matches
+		set wait_flag 1
 		exec $bin_kill -INT $salloc_pid
 		send_user "\nSent SIGINT\n"
 		exp_continue
 	}
 	-re "Job allocation ($number) has been revoked" {
-		incr matches
+		set abort_flag 1
 		exp_continue
 	}
+	-re "Job aborted due to signal"  {
+		set abort_flag 1
+		exp_continue;
+	}
 	timeout {
 		send_user "\nFAILURE: salloc not responding\n"
-		set matches -999
+		set wait_flag -999
 		slow_kill $salloc_pid
 		set exit_code 1
 	}
@@ -71,7 +76,7 @@ expect {
 		wait
 	}
 }
-if {$matches != 2} {
+if {[expr $wait_flag + $abort_flag] != 2} {
 	send_user "\nFAILURE: salloc failed to process SIGINT while waiting for allocation\n"
 	set exit_code 1
 }
diff --git a/testsuite/expect/test15.3 b/testsuite/expect/test15.3
index 4eed88819..860264a23 100755
--- a/testsuite/expect/test15.3
+++ b/testsuite/expect/test15.3
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test15.4 b/testsuite/expect/test15.4
index 4c7d10df0..1e0e04054 100755
--- a/testsuite/expect/test15.4
+++ b/testsuite/expect/test15.4
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test15.5 b/testsuite/expect/test15.5
index 221b3518d..0a1cfa216 100755
--- a/testsuite/expect/test15.5
+++ b/testsuite/expect/test15.5
@@ -15,7 +15,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -120,20 +120,16 @@ set salloc_pid [spawn $salloc -t1 --kill-command=KILL ./$file_in]
 expect {
 	-re "Granted job allocation ($number)" {
 		set job_id $expect_out(1,string)
-		date
+		exec date
 		exp_continue
 	}
 	-re "Job allocation time limit to be reached at" {
-		date
-		exp_continue
-	}
-	-re "Job allocation ($number) has been revoked." {
-		date
+		exec date
 		exp_continue
 	}
 	-re "FINI" {
 		send_user "\nFAILURE: job time limit not honored\n"
-		date
+		exec date
 		set exit_code 1
 		exp_continue
 	}
@@ -156,19 +152,15 @@ set salloc_pid [spawn $salloc -t4 --kill-command=KILL ./$file_in]
 expect {
 	-re "Granted job allocation ($number)" {
 		set job_id $expect_out(1,string)
-		date
+		exec date
 		exp_continue
 	}
 	-re "Job allocation time limit to be reached at" {
-		date
-		exp_continue
-	}
-	-re "Job allocation ($number) has been revoked." {
-		date
+		exec date
 		exp_continue
 	}
 	-re "FINI" {
-		date
+		exec date
 		incr completions
 		exp_continue
 	}
@@ -185,7 +177,7 @@ expect {
 	}
 }
 if {$completions != 1} {
-	send_user "\nFAILURE: job failed to complete properly\n"
+	send_user "\nFAILURE: job failed to complete properly ($completions)\n"
 	set exit_code 1
 }
 
diff --git a/testsuite/expect/test15.6 b/testsuite/expect/test15.6
index dd6b23284..3022a6609 100755
--- a/testsuite/expect/test15.6
+++ b/testsuite/expect/test15.6
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test15.7 b/testsuite/expect/test15.7
index fc44f2037..48215c11f 100755
--- a/testsuite/expect/test15.7
+++ b/testsuite/expect/test15.7
@@ -16,7 +16,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test15.8 b/testsuite/expect/test15.8
index a7662dcbf..49df04e58 100755
--- a/testsuite/expect/test15.8
+++ b/testsuite/expect/test15.8
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test15.9 b/testsuite/expect/test15.9
index f4f445515..24569f19f 100755
--- a/testsuite/expect/test15.9
+++ b/testsuite/expect/test15.9
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test16.1 b/testsuite/expect/test16.1
index 33d0e56fa..6652a12b7 100755
--- a/testsuite/expect/test16.1
+++ b/testsuite/expect/test16.1
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test16.2 b/testsuite/expect/test16.2
index 7b04a664c..b9f55f581 100755
--- a/testsuite/expect/test16.2
+++ b/testsuite/expect/test16.2
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test16.3 b/testsuite/expect/test16.3
index 0b8d89986..bb0dbca4c 100755
--- a/testsuite/expect/test16.3
+++ b/testsuite/expect/test16.3
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test16.4 b/testsuite/expect/test16.4
index 5497f183c..71ba7ed92 100755
--- a/testsuite/expect/test16.4
+++ b/testsuite/expect/test16.4
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -41,10 +41,26 @@ set matches     0
 
 print_header $test_id
 
+if {[test_cray]} {
+	send_user "\nWARNING: This test is incompatible with Cray systems\n"
+	exit $exit_code
+}
 if { [test_xcpu] } {
 	send_user "\nWARNING: This test is incompatible with XCPU systems\n"
 	exit 0
 }
+if { [test_bluegene] } {
+	if { [test_emulated] } {
+		set node_cnt 1
+		set task_cnt 1
+	} else {
+		set node_cnt 1-4
+		set task_cnt 4
+	}
+} else {
+	set node_cnt 1-4
+	set task_cnt 4
+}
 
 #
 # Delete left-over program and rebuild it
@@ -57,7 +73,7 @@ exec $bin_chmod 700 $file_prog
 # Spawn initial program via srun
 #
 set timeout $max_job_delay
-set salloc_pid [spawn $salloc -N1-4 -t2 $srun -n4 --overcommit $file_prog]
+set salloc_pid [spawn $salloc -N $node_cnt -t2 $srun -n $task_cnt --overcommit $file_prog]
 set init_id $spawn_id
 expect {
         -i $init_id
@@ -67,7 +83,7 @@ expect {
 	}
 	-re "WAITING" {
 		incr matches
-		if {$matches != 4} {
+		if {$matches != $task_cnt} {
 			exp_continue
 		}
 	}
@@ -87,7 +103,7 @@ if {$job_id == 0} {
 	send_user "\nFAILURE: job submit failure\n"
 	exit 1
 }
-if {$matches != 4} {
+if {$matches != $task_cnt} {
 	send_user "\nFAILURE: job run time failure\n"
 	exit 1
 }
@@ -100,7 +116,7 @@ spawn $sattach --layout $job_id.0
 set attach_id $spawn_id
 expect {
 	-i $attach_id
-	-re "4 tasks, ($number) nodes" {
+	-re "($number) tasks, ($number) nodes" {
 		incr matches
 		exp_continue
 	}
@@ -122,12 +138,12 @@ if {$matches == 0} {
 #
 set matches     0
 set timeout     10
-set attach_pid [spawn $sattach -l --output-filter=2 $job_id.0]
+set attach_pid [spawn $sattach -l --output-filter=[expr $task_cnt - 1] $job_id.0]
 set attach_id $spawn_id
 expect {
         -i $attach_id
 	-re "($number): WAITING" {
-		if {$expect_out(1,string) != 2} {
+		if {$expect_out(1,string) != [expr $task_cnt - 1]} {
 			send_user "\nFAILURE: output filtering by task failed\n"
 			set exit_code 1
 		} else {
@@ -164,7 +180,7 @@ expect {
 	}
 	-re "($number): WAITING" {
 		incr matches
-		if {$matches == 4} {
+		if {$matches == [expr $task_cnt + 1]} {
 			send -i $attach_id "exit\r"
 		}
 		exp_continue
@@ -177,8 +193,8 @@ expect {
 		wait
 	}
 }
-if {$matches != 5} {
-	send_user "\nFAILURE: job run time failure\n"
+if {$matches != [expr $task_cnt + 1]} {
+	send_user "\nFAILURE: job run time failure ($matches != [expr $task_cnt + 1])\n"
 	set exit_code 1
 }
 
diff --git a/testsuite/expect/test16.4.prog.c b/testsuite/expect/test16.4.prog.c
index 92993bcfa..e66e84a8e 100644
--- a/testsuite/expect/test16.4.prog.c
+++ b/testsuite/expect/test16.4.prog.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test17.1 b/testsuite/expect/test17.1
index 0fea20af7..a67f65398 100755
--- a/testsuite/expect/test17.1
+++ b/testsuite/expect/test17.1
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test17.10 b/testsuite/expect/test17.10
index 7370cad60..965a92f3a 100755
--- a/testsuite/expect/test17.10
+++ b/testsuite/expect/test17.10
@@ -16,7 +16,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test17.11 b/testsuite/expect/test17.11
index c9bf0c6f4..9cd0972c0 100755
--- a/testsuite/expect/test17.11
+++ b/testsuite/expect/test17.11
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -41,6 +41,11 @@ set matches     0
 
 print_header $test_id
 
+if {[test_cray]} {
+        send_user "\nWARNING: This test is incompatible with Cray systems\n"
+        exit $exit_code
+}
+
 #
 # Delete left-over input script
 # Build input script file
diff --git a/testsuite/expect/test17.12 b/testsuite/expect/test17.12
index 4cd83e465..c48c8a477 100755
--- a/testsuite/expect/test17.12
+++ b/testsuite/expect/test17.12
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test17.13 b/testsuite/expect/test17.13
index 89478490b..e8a4e7d63 100755
--- a/testsuite/expect/test17.13
+++ b/testsuite/expect/test17.13
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test17.14 b/testsuite/expect/test17.14
index baa92b2bc..c99548340 100755
--- a/testsuite/expect/test17.14
+++ b/testsuite/expect/test17.14
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test17.15 b/testsuite/expect/test17.15
index 794aa838b..3b2fb61fc 100755
--- a/testsuite/expect/test17.15
+++ b/testsuite/expect/test17.15
@@ -16,7 +16,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test17.15.prog.c b/testsuite/expect/test17.15.prog.c
index 23ddff30e..f1a6ac5f2 100644
--- a/testsuite/expect/test17.15.prog.c
+++ b/testsuite/expect/test17.15.prog.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test17.16 b/testsuite/expect/test17.16
index 0d2803de4..f883cf56f 100755
--- a/testsuite/expect/test17.16
+++ b/testsuite/expect/test17.16
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test17.17 b/testsuite/expect/test17.17
index 7d897f002..265f556f7 100755
--- a/testsuite/expect/test17.17
+++ b/testsuite/expect/test17.17
@@ -15,7 +15,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -45,6 +45,11 @@ set nodelist_name ""
 
 print_header $test_id
 
+if {[test_cray]} {
+	send_user "\nWARNING: This test is incompatible with Cray systems\n"
+	exit $exit_code
+}
+
 #
 # Submit a job and get the node's NodeName from the nodelist
 #
diff --git a/testsuite/expect/test17.18 b/testsuite/expect/test17.18
index 78e11df70..afe8e5d11 100755
--- a/testsuite/expect/test17.18
+++ b/testsuite/expect/test17.18
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -94,39 +94,6 @@ if {$job_id2 == 0} {
 	exit 1
 }
 
-# Wait for job to complete
-#
-if {[wait_for_job $job_id2 "DONE"] != 0} {
-	send_user "\nFAILURE: waiting for job to complete\n"
-	cancel_job $job_id2
-	cancel_job $job_id1
-	exit 1
-}
-cancel_job $job_id1
-
-#
-# Inspect the job's output file
-#
-if {[wait_for_file $file_out] != 0} {
-	exit 1
-}
-
-spawn $bin_cat $file_out
-expect {
-#	Could be COMPLETED or COMPLETING
-	-re "JobState=COMPLET" {
-		set match_state 1
-		exp_continue
-	}
-	eof {
-		wait
-	}
-}
-if {$match_state == 0} {
-	send_user "\nFAILURE: Dependent job not completed\n"
-	set exit_code 1
-}
-
 #
 # Confirm dependency info within second job
 #
@@ -164,6 +131,40 @@ if {$match_jobid == 0} {
 	set exit_code 1
 }
 
+#
+# Wait for job to complete
+#
+if {[wait_for_job $job_id2 "DONE"] != 0} {
+	send_user "\nFAILURE: waiting for job to complete\n"
+	cancel_job $job_id2
+	cancel_job $job_id1
+	exit 1
+}
+cancel_job $job_id1
+
+#
+# Inspect the job's output file
+#
+if {[wait_for_file $file_out] != 0} {
+	exit 1
+}
+
+spawn $bin_cat $file_out
+expect {
+#	Could be COMPLETED or COMPLETING
+	-re "JobState=COMPLET" {
+		set match_state 1
+		exp_continue
+	}
+	eof {
+		wait
+	}
+}
+if {$match_state == 0} {
+	send_user "\nFAILURE: Dependent job not completed\n"
+	set exit_code 1
+}
+
 #
 # Submit a job to run at noon tomorrow
 #
diff --git a/testsuite/expect/test17.19 b/testsuite/expect/test17.19
index fbb68c3b8..b88696f7f 100755
--- a/testsuite/expect/test17.19
+++ b/testsuite/expect/test17.19
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test17.2 b/testsuite/expect/test17.2
index babdbb132..85ccef65a 100755
--- a/testsuite/expect/test17.2
+++ b/testsuite/expect/test17.2
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test17.20 b/testsuite/expect/test17.20
index 41067ba76..d48a0344d 100755
--- a/testsuite/expect/test17.20
+++ b/testsuite/expect/test17.20
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test17.21 b/testsuite/expect/test17.21
index d773cb5a8..5fc495f31 100755
--- a/testsuite/expect/test17.21
+++ b/testsuite/expect/test17.21
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test17.22 b/testsuite/expect/test17.22
index 3342eaa42..91eb5b4d6 100755
--- a/testsuite/expect/test17.22
+++ b/testsuite/expect/test17.22
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test17.23 b/testsuite/expect/test17.23
index a70eb47b6..0c59a5f7d 100755
--- a/testsuite/expect/test17.23
+++ b/testsuite/expect/test17.23
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test17.24 b/testsuite/expect/test17.24
index 9a816e069..e59cf166a 100755
--- a/testsuite/expect/test17.24
+++ b/testsuite/expect/test17.24
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test17.25 b/testsuite/expect/test17.25
index 1edfed261..15eb701e2 100755
--- a/testsuite/expect/test17.25
+++ b/testsuite/expect/test17.25
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test17.26 b/testsuite/expect/test17.26
index d928263f4..81b9d26b1 100755
--- a/testsuite/expect/test17.26
+++ b/testsuite/expect/test17.26
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test17.27 b/testsuite/expect/test17.27
index 5f0367d36..f7c541c65 100755
--- a/testsuite/expect/test17.27
+++ b/testsuite/expect/test17.27
@@ -15,7 +15,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -42,6 +42,10 @@ set job_id           0
 
 print_header $test_id
 
+if {[test_cray]} {
+	send_user "\nWARNING: This test is incompatible with Cray systems\n"
+	exit $exit_code
+}
 if { [test_xcpu] } {
 	send_user "\nWARNING: This test is incompatible with XCPU systems\n"
 	exit 0
diff --git a/testsuite/expect/test17.28 b/testsuite/expect/test17.28
index cdcdf279c..bd987018e 100755
--- a/testsuite/expect/test17.28
+++ b/testsuite/expect/test17.28
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test17.29 b/testsuite/expect/test17.29
index e6a21cc47..a9c65c238 100755
--- a/testsuite/expect/test17.29
+++ b/testsuite/expect/test17.29
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test17.3 b/testsuite/expect/test17.3
index 1875abbde..8c2109515 100755
--- a/testsuite/expect/test17.3
+++ b/testsuite/expect/test17.3
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test17.31 b/testsuite/expect/test17.31
index 4d8ca2545..6c24d5095 100755
--- a/testsuite/expect/test17.31
+++ b/testsuite/expect/test17.31
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test17.32 b/testsuite/expect/test17.32
index b2a5a44c6..6762483fb 100755
--- a/testsuite/expect/test17.32
+++ b/testsuite/expect/test17.32
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -37,10 +37,14 @@ set exit_code   0
 set file_in     "test$test_id.input"
 set file_out    "test$test_id.output"
 set job_id      0
-set task_cnt    10
 
 print_header $test_id
 
+if {[test_cray]} {
+        send_user "\nWARNING: This test is incompatible with Cray systems\n"
+        exit $exit_code
+}
+
 #
 # Submit a slurm job that will print slurm env vars and execute 'id'
 #
@@ -52,6 +56,15 @@ make_bash_script $file_in "
 #
 # Submit a slurm job that will execute 'id' on 1 node and over task_cnt tasks
 #
+if { [test_bluegene] } {
+	if { [test_emulated] } {
+		set task_cnt 1
+	} else {
+		set task_cnt 10
+	}
+} else {
+	set task_cnt 10
+}
 spawn $sbatch --ntasks=$task_cnt --overcommit -N1 --output=$file_out -t1 $file_in
 expect {
 	-re "Submitted batch job ($number)" {
@@ -108,7 +121,7 @@ if {$matches != 2} {
 }
 if {$task_cnt != $tasks} {
 	send_user "\nFAILURE: Did not get proper number of tasks: "
-	send_user "$task_cnt, $tasks\n"
+	send_user "($task_cnt != $tasks)\n"
 	set exit_code 1
 }
 
diff --git a/testsuite/expect/test17.33 b/testsuite/expect/test17.33
index d579a9020..775541d5f 100755
--- a/testsuite/expect/test17.33
+++ b/testsuite/expect/test17.33
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test17.4 b/testsuite/expect/test17.4
index 9bc2917c3..fbea18ac6 100755
--- a/testsuite/expect/test17.4
+++ b/testsuite/expect/test17.4
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test17.5 b/testsuite/expect/test17.5
index bd12d15df..745fb006e 100755
--- a/testsuite/expect/test17.5
+++ b/testsuite/expect/test17.5
@@ -15,7 +15,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test17.6 b/testsuite/expect/test17.6
index 78a809464..a65ccad69 100755
--- a/testsuite/expect/test17.6
+++ b/testsuite/expect/test17.6
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -37,11 +37,17 @@ set test_id     "17.6"
 set exit_code   0
 set file_in     "test$test_id.input"
 set file_out    "test$test_id.output"
-set task_cnt    2
 set tasks       0
 
 print_header $test_id
 
+if {[test_bluegene]} {
+	# We never launch more than one task on emulated Bluegene
+	set task_cnt    1
+} else {
+	set task_cnt    2
+}
+
 #
 # Submit a slurm job that will execute 'id' on $task_cnt tasks (or try anyway)
 #
@@ -107,6 +113,15 @@ if {$exit_code != 0} {
 	exit $exit_code
 }
 
+# Since we can't over commit the test ends here.
+if {[test_cray]} {
+	if {$exit_code == 0} {
+		file delete $file_in $file_out
+		send_user "\nSUCCESS\n"
+	}
+	exit $exit_code
+}
+
 #
 # Submit a slurm job that will execute 'id' on 1 node and over task_cnt tasks
 #
diff --git a/testsuite/expect/test17.7 b/testsuite/expect/test17.7
index 3fa971101..ae0952566 100755
--- a/testsuite/expect/test17.7
+++ b/testsuite/expect/test17.7
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test17.8 b/testsuite/expect/test17.8
index 08d253227..0bac5ba07 100755
--- a/testsuite/expect/test17.8
+++ b/testsuite/expect/test17.8
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test17.9 b/testsuite/expect/test17.9
index 5abcaa67f..dfa824183 100755
--- a/testsuite/expect/test17.9
+++ b/testsuite/expect/test17.9
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test19.1 b/testsuite/expect/test19.1
index 6c9525db4..095aabbe6 100755
--- a/testsuite/expect/test19.1
+++ b/testsuite/expect/test19.1
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test19.2 b/testsuite/expect/test19.2
index 65c440c32..f2f468598 100755
--- a/testsuite/expect/test19.2
+++ b/testsuite/expect/test19.2
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test19.3 b/testsuite/expect/test19.3
index e724e3832..e214ada41 100755
--- a/testsuite/expect/test19.3
+++ b/testsuite/expect/test19.3
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test19.4 b/testsuite/expect/test19.4
index 3ce2c0a51..4a8be54b7 100755
--- a/testsuite/expect/test19.4
+++ b/testsuite/expect/test19.4
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test19.5 b/testsuite/expect/test19.5
index 86ba99f6c..b6f8366dd 100755
--- a/testsuite/expect/test19.5
+++ b/testsuite/expect/test19.5
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test19.6 b/testsuite/expect/test19.6
index c243480a1..498e40d28 100755
--- a/testsuite/expect/test19.6
+++ b/testsuite/expect/test19.6
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test19.7 b/testsuite/expect/test19.7
index 8b9c0504b..f79fb881c 100755
--- a/testsuite/expect/test19.7
+++ b/testsuite/expect/test19.7
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test2.1 b/testsuite/expect/test2.1
index c3932c3c0..bc1dee2b3 100755
--- a/testsuite/expect/test2.1
+++ b/testsuite/expect/test2.1
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test2.10 b/testsuite/expect/test2.10
index 58b58a40a..00f06482b 100755
--- a/testsuite/expect/test2.10
+++ b/testsuite/expect/test2.10
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test2.11 b/testsuite/expect/test2.11
index 3d8eac25c..0e8738418 100755
--- a/testsuite/expect/test2.11
+++ b/testsuite/expect/test2.11
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -41,6 +41,11 @@ set scontrol_id 0
 
 print_header $test_id
 
+if {[test_cray]} {
+	send_user "\nWARNING: This test is incompatible with Cray systems\n"
+	exit $exit_code
+}
+
 # Delete left-over stdout file
 file delete $file_out
 
diff --git a/testsuite/expect/test2.12 b/testsuite/expect/test2.12
index 93cbc2639..59dc03454 100755
--- a/testsuite/expect/test2.12
+++ b/testsuite/expect/test2.12
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test2.13 b/testsuite/expect/test2.13
index d4f77fef3..fd2b95baf 100755
--- a/testsuite/expect/test2.13
+++ b/testsuite/expect/test2.13
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -41,11 +41,16 @@ set orig_time     0
 set new_time      0
 
 print_header $test_id
-exec $bin_rm -f $file_in $file_out
+
+if {[test_cray]} {
+	send_user "\nWARNING: This test is incompatible with Cray systems\n"
+	exit $exit_code
+}
 
 #
 # Build input script file
 #
+exec $bin_rm -f $file_in $file_out
 make_bash_script $file_in "
   $bin_echo TEST_BEGIN
   $srun -t5 $bin_sleep 120
diff --git a/testsuite/expect/test2.14 b/testsuite/expect/test2.14
index 2da1a8e6e..a873739fc 100755
--- a/testsuite/expect/test2.14
+++ b/testsuite/expect/test2.14
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -47,9 +47,9 @@ if { [test_xcpu] } {
 	exit 0
 }
 
-if { [test_bluegene] } {
-	send_user "\nWARNING: This test is incompatible with BlueGene systems\n"
-	exit 0
+if {[test_front_end]} {
+        send_user "\nWARNING: This test is incompatible with front-end systems\n"
+        exit $exit_code
 }
 
 set node_cnt 2
diff --git a/testsuite/expect/test2.15 b/testsuite/expect/test2.15
new file mode 100755
index 000000000..6631bd3e4
--- /dev/null
+++ b/testsuite/expect/test2.15
@@ -0,0 +1,152 @@
+#!/usr/bin/expect
+############################################################################
+# Purpose: Test of SLURM functionality
+#          Validate scontrol update size of running job with some running tasks.
+#
+# Output:  "TEST: #.#" followed by "SUCCESS" if test was successful, OR
+#          "WARNING: ..." with an explanation of why the test can't be made, OR
+#          "FAILURE: ..." otherwise with an explanation of the failure, OR
+#          anything else indicates a failure mode that must be investigated.
+############################################################################
+# Copyright (C) 2010 Lawrence Livermore National Security.
+# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+# Written by Morris Jette <jette1@llnl.gov>
+# CODE-OCEC-09-009. All rights reserved.
+#
+# This file is part of SLURM, a resource management program.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
+# Please also read the included file: DISCLAIMER.
+#
+# SLURM is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+#
+# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along
+# with SLURM; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+############################################################################
+source ./globals
+
+set test_id          "2.15"
+set exit_code        0
+set file_in          "test$test_id.bash"
+set job_id           0
+
+print_header $test_id
+
+if { [test_xcpu] } {
+	send_user "\nWARNING: This test is incompatible with XCPU systems\n"
+	exit 0
+}
+if {[test_front_end] != 0} {
+	send_user "\nWARNING: This test is incompatible with front-end systems\n"
+	exit 0
+}
+
+set node_cnt 2
+set def_part [default_partition]
+if {[string compare $def_part ""] == 0} {
+	send_user "\nWARNING: No default partition\n"
+	exit $exit_code
+}
+set available [available_nodes $def_part]
+if {$available < $node_cnt} {
+	send_user "\nWARNING: not enough nodes currently available ($available avail, $node_cnt needed)\n"
+	exit $exit_code
+}
+
+# NOTE: The second sleep command in task 1 is to deal with a possible race condition in which
+# the first sleep command is killed and the shell starts the second command before it is killed
+make_bash_script $file_in "
+if \[ \$SLURM_PROCID = 0 \]; then
+	$bin_echo JOBID=\$SLURM_JOB_ID
+	$bin_echo \"Proc \$SLURM_PROCID on HOST=\$SLURMD_NODENAME : I await the end of the other tasks\"
+	$bin_sleep 35
+	$squeue -o \"%i %10j %2t %N\"
+	$bin_echo \"Proc \$SLURM_PROCID terminated properly\"
+else
+	$bin_sleep 10
+	$bin_sleep 1
+	$bin_echo \"Proc \$SLURM_PROCID terminated improperly, not killed\"
+	exit 0
+fi
+"
+
+#
+# Run job with one task continuing
+#
+set host ""
+set timeout $max_job_delay
+set srun_pid [spawn $srun -N$node_cnt -t1 $file_in]
+set srun_id $spawn_id
+expect {
+	-re "JOBID=($number)" {
+		set job_id $expect_out(1,string)
+		exp_continue
+	}
+	-re "HOST=($alpha_numeric_under)" {
+		set host $expect_out(1,string)
+	}
+	timeout {
+		send_user "\nFAILURE: srun not responding\n"
+		slow_kill $srun_pid
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+if {$job_id == 0} {
+	send_user "\nFAILURE: job not submitted\n"
+	exit 1
+}
+
+spawn $scontrol update jobid=$job_id nodelist=$host
+set scontrol_id $spawn_id
+expect {
+	timeout {
+		send_user "\nFAILURE: scontrol not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
+set matches 0
+set timeout 40
+set spawn_id $srun_id
+expect {
+	-re "terminated properly" {
+		incr matches
+		exp_continue
+	}
+	-re "terminated improperly" {
+		set matches -9999
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: srun not responding\n"
+		slow_kill $srun_pid
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+if {$matches != 1} {
+	send_user "\nFAILURE: task 0 did not run to completion after other tasks killed\n"
+	set exit_code 1
+}
+
+if {$exit_code == 0} {
+	exec $bin_rm $file_in
+	send_user "\nSUCCESS\n"
+}
+exit $exit_code
diff --git a/testsuite/expect/test2.2 b/testsuite/expect/test2.2
index 744d43dab..637644cc5 100755
--- a/testsuite/expect/test2.2
+++ b/testsuite/expect/test2.2
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test2.3 b/testsuite/expect/test2.3
index 680e73ced..6393e7d57 100755
--- a/testsuite/expect/test2.3
+++ b/testsuite/expect/test2.3
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test2.4 b/testsuite/expect/test2.4
index f291a6463..8e6a17352 100755
--- a/testsuite/expect/test2.4
+++ b/testsuite/expect/test2.4
@@ -17,7 +17,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test2.5 b/testsuite/expect/test2.5
index 7bf685b21..4da5630ec 100755
--- a/testsuite/expect/test2.5
+++ b/testsuite/expect/test2.5
@@ -2,19 +2,20 @@
 ############################################################################
 # Purpose: Test of SLURM functionality
 #          Validate scontrol show commands for configuation, daemons,
-#          nodes, and partitions.
+#          nodes, frontends, and partitions.
 #
 # Output:  "TEST: #.#" followed by "SUCCESS" if test was successful, OR
 #          "FAILURE: ..." otherwise with an explanation of the failure, OR
 #          anything else indicates a failure mode that must be investigated.
 ############################################################################
-# Copyright (C) 2002 The Regents of the University of California.
+# Copyright (C) 2002-2007 The Regents of the University of California.
+# Copyright (C) 2008-2011 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -125,6 +126,55 @@ if {$matches != 1} {
 	set exit_code 1
 }
 
+#
+# Report all slurm front end nodes
+#
+set matches     0
+set node_name   ""
+spawn $scontrol show frontend
+expect {
+	-re "FrontendName=($alpha_numeric_under)" {
+		set node_name $expect_out(1,string)
+		incr matches
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: scontrol not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+if {$matches < 1} {
+	send_user "\nWARNING: no front end nodes found\n"
+} else {
+#
+#	Report one slurm front end node
+#
+	set matches     0
+	spawn $scontrol show frontend $node_name
+	expect {
+		-re "FrontendName=($alpha_numeric_under)" {
+			if {[string compare $expect_out(1,string) $node_name] == 0} {
+				incr matches
+			}
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: scontrol not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+	if {$matches != 1} {
+		send_user "\nFAILURE: scontrol show frontend command with name failed\n"
+		set exit_code 1
+	}
+}
+
 #
 # Report all slurm nodes
 #
diff --git a/testsuite/expect/test2.6 b/testsuite/expect/test2.6
index 83eb723cf..f1c32c56d 100755
--- a/testsuite/expect/test2.6
+++ b/testsuite/expect/test2.6
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test2.7 b/testsuite/expect/test2.7
index 935122c18..301df476b 100755
--- a/testsuite/expect/test2.7
+++ b/testsuite/expect/test2.7
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test2.8 b/testsuite/expect/test2.8
index 2948650f7..b06e6bb23 100755
--- a/testsuite/expect/test2.8
+++ b/testsuite/expect/test2.8
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -97,15 +97,28 @@ if {$job_id2 == 0} {
 exec $bin_rm -f $file_in
 
 #
-# Look at all jobs with scontrol
+# Look for these jobs with scontrol
+#
+# NOTE: Running "scontrol show job" and looking for these job IDs
+# may not work due to a bug in awk not scanning large output buffers
 #
 set matches 0
-spawn $scontrol show job
+spawn $scontrol show job $job_id1
 expect {
 	-re "JobId=$job_id1 " {
 		incr matches
 		exp_continue
 	}
+	timeout {
+		send_user "\nFAILURE: scontrol not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+spawn $scontrol show job $job_id2
+expect {
 	-re "JobId=$job_id2 " {
 		incr matches
 		exp_continue
@@ -119,7 +132,7 @@ expect {
 	}
 }
 if {$matches != 2} {
-	send_user "\nFAILURE: scontrol failed to find all jobs\n"
+	send_user "\nFAILURE: scontrol found $matches of 2 jobs\n"
 	set exit_code 1
 }
 
@@ -178,7 +191,7 @@ expect {
 		wait
 	}
 }
-if {$matches != 1} {
+if {[test_cray] == 0 && $matches != 1} {
 	send_user "\nFAILURE: scontrol failed to find all job steps\n"
 	set exit_code 1
 }
@@ -206,7 +219,7 @@ expect {
 		wait
 	}
 }
-if {$matches != 1} {
+if {[test_cray] == 0 && $matches != 1} {
 	send_user "\nFAILURE: scontrol failed to specific job step\n"
 	set exit_code 1
 }
@@ -242,7 +255,7 @@ expect {
 		wait
 	}
 }
-if {$matches != 1} {
+if {[test_cray] == 0 && $matches != 1} {
 	send_user "\nFAILURE: scontrol found unexpected job step\n"
 	set exit_code 1
 }
diff --git a/testsuite/expect/test2.9 b/testsuite/expect/test2.9
index cad05a400..2151e3129 100755
--- a/testsuite/expect/test2.9
+++ b/testsuite/expect/test2.9
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test20.1 b/testsuite/expect/test20.1
index 1c3cd515b..160efa085 100755
--- a/testsuite/expect/test20.1
+++ b/testsuite/expect/test20.1
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test20.2 b/testsuite/expect/test20.2
index 5b909fddf..531075276 100755
--- a/testsuite/expect/test20.2
+++ b/testsuite/expect/test20.2
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test20.3 b/testsuite/expect/test20.3
index c1a345ed3..52e77e0fd 100755
--- a/testsuite/expect/test20.3
+++ b/testsuite/expect/test20.3
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test20.4 b/testsuite/expect/test20.4
index e69acc536..c875050e4 100755
--- a/testsuite/expect/test20.4
+++ b/testsuite/expect/test20.4
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test21.1 b/testsuite/expect/test21.1
index 5a82cee79..63e3975fc 100755
--- a/testsuite/expect/test21.1
+++ b/testsuite/expect/test21.1
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test21.10 b/testsuite/expect/test21.10
index f64cb60ad..66567ba45 100755
--- a/testsuite/expect/test21.10
+++ b/testsuite/expect/test21.10
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test21.11 b/testsuite/expect/test21.11
index f8b1f98a4..b9a06d761 100755
--- a/testsuite/expect/test21.11
+++ b/testsuite/expect/test21.11
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test21.12 b/testsuite/expect/test21.12
index 7bd645468..171b60dc8 100755
--- a/testsuite/expect/test21.12
+++ b/testsuite/expect/test21.12
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test21.13 b/testsuite/expect/test21.13
index 60beda54d..369920c6d 100755
--- a/testsuite/expect/test21.13
+++ b/testsuite/expect/test21.13
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test21.14 b/testsuite/expect/test21.14
index a5e1aa612..70c1d01a9 100755
--- a/testsuite/expect/test21.14
+++ b/testsuite/expect/test21.14
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test21.15 b/testsuite/expect/test21.15
index 4bfef04b4..1e199c7a9 100755
--- a/testsuite/expect/test21.15
+++ b/testsuite/expect/test21.15
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test21.16 b/testsuite/expect/test21.16
index 7501fc174..2521f56c5 100755
--- a/testsuite/expect/test21.16
+++ b/testsuite/expect/test21.16
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test21.17 b/testsuite/expect/test21.17
index 159bc8683..7430109bf 100755
--- a/testsuite/expect/test21.17
+++ b/testsuite/expect/test21.17
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test21.18 b/testsuite/expect/test21.18
index 3f808a390..6bd5b886b 100755
--- a/testsuite/expect/test21.18
+++ b/testsuite/expect/test21.18
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test21.19 b/testsuite/expect/test21.19
index 7bb2a1232..264b10862 100755
--- a/testsuite/expect/test21.19
+++ b/testsuite/expect/test21.19
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test21.2 b/testsuite/expect/test21.2
index f214c038e..a7f0d02ee 100755
--- a/testsuite/expect/test21.2
+++ b/testsuite/expect/test21.2
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test21.20 b/testsuite/expect/test21.20
index bd383bc6a..5b9941404 100755
--- a/testsuite/expect/test21.20
+++ b/testsuite/expect/test21.20
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test21.21 b/testsuite/expect/test21.21
index f1d534031..b298fefa4 100755
--- a/testsuite/expect/test21.21
+++ b/testsuite/expect/test21.21
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test21.22 b/testsuite/expect/test21.22
index 1a1e28d71..1d58b4da6 100755
--- a/testsuite/expect/test21.22
+++ b/testsuite/expect/test21.22
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test21.23 b/testsuite/expect/test21.23
index 9dea1dbfa..c123fe00a 100755
--- a/testsuite/expect/test21.23
+++ b/testsuite/expect/test21.23
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test21.24 b/testsuite/expect/test21.24
index b33562ddc..bd74fb822 100755
--- a/testsuite/expect/test21.24
+++ b/testsuite/expect/test21.24
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test21.25 b/testsuite/expect/test21.25
index f0a8fe8dd..a9d4fc364 100755
--- a/testsuite/expect/test21.25
+++ b/testsuite/expect/test21.25
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test21.26 b/testsuite/expect/test21.26
index 75d00b3d0..1e6a6ff01 100755
--- a/testsuite/expect/test21.26
+++ b/testsuite/expect/test21.26
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test21.27 b/testsuite/expect/test21.27
index 6ce3a9858..aae3be595 100755
--- a/testsuite/expect/test21.27
+++ b/testsuite/expect/test21.27
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test21.28 b/testsuite/expect/test21.28
index 2205562ba..7ebf45104 100755
--- a/testsuite/expect/test21.28
+++ b/testsuite/expect/test21.28
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test21.29 b/testsuite/expect/test21.29
index c91585481..9c6ca4654 100755
--- a/testsuite/expect/test21.29
+++ b/testsuite/expect/test21.29
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test21.3 b/testsuite/expect/test21.3
index ea89a329b..83763e178 100755
--- a/testsuite/expect/test21.3
+++ b/testsuite/expect/test21.3
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test21.4 b/testsuite/expect/test21.4
index cec11a7e8..92defa194 100755
--- a/testsuite/expect/test21.4
+++ b/testsuite/expect/test21.4
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test21.5 b/testsuite/expect/test21.5
index 51dac6a59..6d97febe8 100755
--- a/testsuite/expect/test21.5
+++ b/testsuite/expect/test21.5
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test21.6 b/testsuite/expect/test21.6
index 2097bcb94..1c9f3d5f0 100755
--- a/testsuite/expect/test21.6
+++ b/testsuite/expect/test21.6
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test21.7 b/testsuite/expect/test21.7
index de609fb78..722abec58 100755
--- a/testsuite/expect/test21.7
+++ b/testsuite/expect/test21.7
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test21.8 b/testsuite/expect/test21.8
index 12345d0b4..f27abb2c0 100755
--- a/testsuite/expect/test21.8
+++ b/testsuite/expect/test21.8
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test21.9 b/testsuite/expect/test21.9
index b4eb1b9ca..7caf580df 100755
--- a/testsuite/expect/test21.9
+++ b/testsuite/expect/test21.9
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test22.1 b/testsuite/expect/test22.1
index 1619bf5a8..58d327f8c 100755
--- a/testsuite/expect/test22.1
+++ b/testsuite/expect/test22.1
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test22.2 b/testsuite/expect/test22.2
index 39df66bdb..a477da3d3 100755
--- a/testsuite/expect/test22.2
+++ b/testsuite/expect/test22.2
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test23.1 b/testsuite/expect/test23.1
index 822501d20..3c5f7e441 100755
--- a/testsuite/expect/test23.1
+++ b/testsuite/expect/test23.1
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test23.2 b/testsuite/expect/test23.2
index 3cb187ad3..67bf0a497 100755
--- a/testsuite/expect/test23.2
+++ b/testsuite/expect/test23.2
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test24.1 b/testsuite/expect/test24.1
index 347cc80a0..f101411a5 100755
--- a/testsuite/expect/test24.1
+++ b/testsuite/expect/test24.1
@@ -16,7 +16,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test24.1.prog.c b/testsuite/expect/test24.1.prog.c
index 351a4d19c..1779e3976 100644
--- a/testsuite/expect/test24.1.prog.c
+++ b/testsuite/expect/test24.1.prog.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -68,7 +68,7 @@ static void _list_delete_job(void *job_entry)
 	xfree(job_ptr);
 }
 
-int _setup_assoc_list()
+int _setup_assoc_list(void)
 {
 	slurmdb_update_object_t update;
 	slurmdb_association_rec_t *assoc = NULL;
@@ -84,7 +84,7 @@ int _setup_assoc_list()
 	/* we just want make it so we setup_childern so just pretend
 	   we are running off cache */
 	running_cache = 1;
-	assoc_mgr_init(NULL, NULL);
+	assoc_mgr_init(NULL, NULL, SLURM_SUCCESS);
 
 	/* Here we make the associations we want to add to the
 	   system.  We do this as an update to avoid having to do
diff --git a/testsuite/expect/test24.2 b/testsuite/expect/test24.2
index ab3c44d80..ac140d05d 100755
--- a/testsuite/expect/test24.2
+++ b/testsuite/expect/test24.2
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test25.1 b/testsuite/expect/test25.1
index 12a1f59d4..8dff69182 100755
--- a/testsuite/expect/test25.1
+++ b/testsuite/expect/test25.1
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test26.1 b/testsuite/expect/test26.1
new file mode 100755
index 000000000..2490034ca
--- /dev/null
+++ b/testsuite/expect/test26.1
@@ -0,0 +1,163 @@
+#!/usr/bin/expect
+############################################################################
+# Purpose: Test of SLURM functionality
+#          Validate scontrol update command for nodes is disabled.
+#
+# Output:  "TEST: #.#" followed by "SUCCESS" if test was successful, OR
+#          "FAILURE: ..." otherwise with an explanation of the failure, OR
+#          anything else indicates a failure mode that must be investigated.
+############################################################################
+# Copyright (C) 2002-2007 The Regents of the University of California.
+# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+# Written by Morris Jette <jette1@llnl.gov>
+# CODE-OCEC-09-009. All rights reserved.
+#
+# This file is part of SLURM, a resource management program.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
+# Please also read the included file: DISCLAIMER.
+#
+# SLURM is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+#
+# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along
+# with SLURM; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+############################################################################
+source ./globals
+
+set test_id        "26.1"
+set exit_code      0
+set node_list      ""
+set node_name      ""
+
+print_header $test_id
+
+if {![test_cray]} {
+        send_user "\nWARNING: This test is only compatible with Cray systems\n"
+        exit $exit_code
+}
+
+#
+# Identify a node and its state
+#
+spawn $sinfo --noheader -o  "NodeName=%N State=%t "
+expect {
+        -re "NodeName=($alpha_numeric_nodelist) State=idle " {
+                if {[string compare $node_list ""] == 0} {
+                        set node_list $expect_out(1,string)
+                }
+                exp_continue
+        }
+        -re "NodeName=($alpha_numeric_nodelist) State=allocated " {
+                if {[string compare $node_list ""] == 0} {
+                        set node_list $expect_out(1,string)
+                }
+                exp_continue
+        }
+	timeout {
+		send_user "\nFAILURE: sinfo not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+if {[string compare $node_list ""] == 0} {
+	send_user "\nWARNING: no nodes in usable state for this test\n"
+	exit 0
+}
+
+#
+# Convert node list to a single node name
+#
+log_user 0
+spawn $scontrol show hostnames $node_list
+expect {
+        -re "($alpha_numeric_nodelist)" {
+		set node_name $expect_out(1,string)
+        }
+	timeout {
+		send_user "\nFAILURE: scontrol not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+log_user 1
+if {[string compare $node_name ""] == 0} {
+	send_user "\nWARNING: no nodes in usable state for this test\n"
+	exit 0
+}
+
+#
+# Change that node's state
+#
+set mod_error 0
+spawn $scontrol update NodeName=$node_name State=DRAIN Reason=TESTING
+expect {
+	-re "can not be changed through SLURM" {
+		set mod_error 1
+		exp_continue
+	}
+	-re "slurm_update error: ($alpha_numeric_under) ($alpha_numeric_under)" {
+		set access_err 0
+		set err_msg1 $expect_out(1,string)
+		set err_msg2 $expect_out(2,string)
+		if {[string compare $err_msg1 "Invalid"] == 0} {
+			set access_err 1
+		}
+		if {[string compare $err_msg2 "user"] == 0} {
+			set access_err 1
+		}
+		if {$access_err == 1} {
+			send_user "\nWARNING: user not authorized\n"
+			exit $exit_code
+		} else {
+			set mod_error 1
+		}
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: scontrol not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+if {$mod_error == 1} {
+	send_user "\nThis error was expected, no worries\n"
+} else {
+	send_user "\nFAILURE: scontrol did not get an error trying to change node state\n"
+	set exit_code 1
+}
+
+#
+# Validate node's state
+#
+spawn $scontrol show node $node_name
+expect {
+        -re "($alpha_numeric_nodelist)" {
+		set node_name $expect_out(1,string)
+        }
+	timeout {
+		send_user "\nFAILURE: scontrol not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
+if {$exit_code == 0} {
+	send_user "\nSUCCESS\n"
+}
+exit $exit_code
diff --git a/testsuite/expect/test26.2 b/testsuite/expect/test26.2
new file mode 100755
index 000000000..28bc81cd0
--- /dev/null
+++ b/testsuite/expect/test26.2
@@ -0,0 +1,99 @@
+#!/usr/bin/expect
+############################################################################
+# Purpose: Test of SLURM functionality
+#          Test of srun/aprun wrapper use of --alps= option
+#
+# Output:  "TEST: #.#" followed by "SUCCESS" if test was successful, OR
+#          "FAILURE: ..." otherwise with an explanation of the failure, OR
+#          anything else indicates a failure mode that must be investigated.
+############################################################################
+# Copyright (C) 2002-2007 The Regents of the University of California.
+# Copyright (C) 2008 Lawrence Livermore National Security.
+# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+# Written by Morris Jette <jette1@llnl.gov>
+# CODE-OCEC-09-009. All rights reserved.
+#
+# This file is part of SLURM, a resource management program.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
+# Please also read the included file: DISCLAIMER.
+#
+# SLURM is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+#
+# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along
+# with SLURM; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+############################################################################
+source ./globals
+
+set test_id     "26.2"
+set exit_code   0
+set file_in     "test$test_id.input"
+set sleep_time  120
+
+# NOTE: If you increase sleep_time, change job time limits as well
+set sleep_time  180
+
+print_header $test_id
+
+if {![test_cray]} {
+        send_user "\nWARNING: This test is only compatible with Cray systems\n"
+        exit $exit_code
+}
+
+set is_aprun_wrapper 0
+log_user 0
+spawn $srun --help
+expect {
+	-re "--alps" {
+		set is_aprun_wrapper 1
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: srun not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+log_user 1
+if {$is_aprun_wrapper == 0} {
+	send_user "\nWARNING: srun/aprun wrapper not installed\n"
+	exit 0
+}
+
+#
+# Try to impose a job step time limit via ALPS using
+# srun wrapper's --alps option
+#
+make_bash_script $file_in "$bin_sleep $sleep_time; echo FINI"
+set timeout [expr $max_job_delay + $sleep_time]
+spawn $salloc -t4 $srun --alps=\"-t 1\" $file_in
+expect {
+	-re "FINI" {
+		send_user "\nFAILURE: srun --alps option ignored\n"
+		set exit_code 1
+	}
+	timeout {
+		send_user "\nFAILURE: srun not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+		incr completions
+	}
+}
+
+if {$exit_code == 0} {
+	exec $bin_rm -f $file_in
+	send_user "\nSUCCESS\n"
+}
+exit $exit_code
diff --git a/testsuite/expect/test3.1 b/testsuite/expect/test3.1
index 32cf90efc..d31e01938 100755
--- a/testsuite/expect/test3.1
+++ b/testsuite/expect/test3.1
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test3.10 b/testsuite/expect/test3.10
index 82df46964..e3a443c9a 100755
--- a/testsuite/expect/test3.10
+++ b/testsuite/expect/test3.10
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test3.11 b/testsuite/expect/test3.11
index a51336dcc..b5cdadc00 100755
--- a/testsuite/expect/test3.11
+++ b/testsuite/expect/test3.11
@@ -14,7 +14,7 @@
 #
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test3.12 b/testsuite/expect/test3.12
new file mode 100755
index 000000000..8cafc0426
--- /dev/null
+++ b/testsuite/expect/test3.12
@@ -0,0 +1,195 @@
+#!/usr/bin/expect
+############################################################################
+# Purpose: Test of SLURM functionality
+#          Validate scontrol update command for front end nodes.
+#
+# Output:  "TEST: #.#" followed by "SUCCESS" if test was successful, OR
+#          "FAILURE: ..." otherwise with an explanation of the failure, OR
+#          anything else indicates a failure mode that must be investigated.
+############################################################################
+# Copyright (C) 2002-2007 The Regents of the University of California.
+# Copyright (C) 2008-2011 Lawrence Livermore National Security.
+# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+# Written by Morris Jette <jette1@llnl.gov>
+# CODE-OCEC-09-009. All rights reserved.
+#
+# This file is part of SLURM, a resource management program.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
+# Please also read the included file: DISCLAIMER.
+#
+# SLURM is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+#
+# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along
+# with SLURM; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+############################################################################
+source ./globals
+
+set test_id        "3.12"
+set authorized     1
+set exit_code      0
+set node_list      ""
+set node_name      ""
+set node_old_state ""
+set node_new_state ""
+set read_state     ""
+
+print_header $test_id
+
+set matches     0
+set node_name   ""
+spawn $scontrol show FrontendName
+expect {
+	-re "FrontendName=($alpha_numeric_under) State=ALLOCATED " {
+		set node_name $expect_out(1,string)
+		incr matches
+		exp_continue
+	}
+	-re "FrontendName=($alpha_numeric_under) State=IDLE " {
+		set node_name $expect_out(1,string)
+		incr matches
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: scontrol not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+if {$matches < 1} {
+	send_user "\nWARNING: no suitable front end nodes found\n"
+	exit $exit_code
+}
+
+
+#
+# Change that node's state
+#
+spawn $scontrol update FrontendName=$node_name State=DRAIN Reason=TESTING
+expect {
+	-re "slurm_update error: ($alpha_numeric_under) ($alpha_numeric_under)" {
+		set access_err 0
+		set err_msg1 $expect_out(1,string)
+		set err_msg2 $expect_out(2,string)
+		if {[string compare $err_msg1 "Invalid"] == 0} {
+			set access_err 1
+		}
+		if {[string compare $err_msg2 "user"] == 0} {
+			set access_err 1
+		}
+		if {$access_err == 1} {
+			send_user "\nWARNING: user not authorized\n"
+			exit $exit_code
+		} else {
+			set authorized 0
+		}
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: scontrol not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
+#
+# Validate node's new state
+#
+set read_state  0
+set reason_code ""
+spawn $scontrol show FrontendName $node_name
+expect {
+	-re "State=($alpha_cap).DRAIN" {
+		set read_state 1
+		exp_continue
+	}
+	-re "Reason=($alpha_cap)" {
+		set reason_code $expect_out(1,string)
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: scontrol not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+if {$authorized == 1} {
+	if {$read_state != 1} {
+		send_user "\nFAILURE: scontrol state change error\n"
+		set exit_code 1
+	}
+
+	set reason_set 0
+	if {[string compare $reason_code "TESTING"] == 0} {
+		set reason_set 1
+	}
+
+	if {$reason_set != 1} {
+		send_user "\nFAILURE: scontrol reason change error\n"
+		set exit_code 1
+	}
+}
+
+#
+# Return that front end node's state to its old value
+#
+spawn $scontrol update FrontendName=$node_name State=RESUME
+expect {
+	-re "slurm_update error: Invalid user id" {
+		exp_continue
+	}
+	-re "slurm_update error:" {
+		send_user "\nFAILURE: scontrol update error\n"
+		set exit_code 1
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: scontrol not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
+#
+# Record that front end node's state
+#
+set read_state 0
+spawn  $scontrol show FrontendName $node_name
+expect {
+	-re "State=($alpha_cap).DRAIN" {
+		set read_state 1
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: scontrol not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+if {$read_state != 0} {
+	send_user "\nFAILURE: scontrol state change error\n"
+	set exit_code 1
+}
+
+if {$exit_code == 0} {
+	send_user "\nSUCCESS\n"
+}
+exit $exit_code
diff --git a/testsuite/expect/test3.2 b/testsuite/expect/test3.2
index fc0e3bd99..efa7d3daa 100755
--- a/testsuite/expect/test3.2
+++ b/testsuite/expect/test3.2
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test3.3 b/testsuite/expect/test3.3
index f9bc7ec50..65801ff82 100755
--- a/testsuite/expect/test3.3
+++ b/testsuite/expect/test3.3
@@ -7,13 +7,13 @@
 #          "FAILURE: ..." otherwise with an explanation of the failure, OR
 #          anything else indicates a failure mode that must be investigated.
 ############################################################################
-# Copyright (C) 2002 The Regents of the University of California.
+# Copyright (C) 2002-2007 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -35,34 +35,65 @@ source ./globals
 set test_id        "3.3"
 set authorized     1
 set exit_code      0
+set node_list      ""
 set node_name      ""
 set read_state     ""
 
 print_header $test_id
 
+if {[test_cray]} {
+        send_user "\nWARNING: This test is incompatible with Cray systems\n"
+        exit $exit_code
+}
+
 #
 # Identify a node and its state
 #
-spawn $scontrol show node
+spawn $sinfo --noheader -o  "NodeName=%N State=%t "
 expect {
-        -re "NodeName=($alpha_numeric_under) " {
-                if {[string compare $node_name ""] == 0} {
-                        set node_name $expect_out(1,string)
+        -re "NodeName=($alpha_numeric_nodelist) State=idle " {
+                if {[string compare $node_list ""] == 0} {
+                        set node_list $expect_out(1,string)
                 }
                 exp_continue
         }
- 	timeout {
-		send_user "\nFAILURE: scontrol not responding\n"
+        -re "NodeName=($alpha_numeric_nodelist) State=allocated " {
+                if {[string compare $node_list ""] == 0} {
+                        set node_list $expect_out(1,string)
+                }
+                exp_continue
+        }
+	timeout {
+		send_user "\nFAILURE: sinfo not responding\n"
 		set exit_code 1
 	}
 	eof {
 		wait
 	}
 }
+if {[string compare $node_list ""] == 0} {
+	send_user "\nWARNING: no nodes in usable state for this test\n"
+	exit 0
+}
 
 #
-# Validate current node information
+# Convert node list to a single node name
 #
+log_user 0
+spawn $scontrol show hostnames $node_list
+expect {
+        -re "($alpha_numeric_nodelist)" {
+		set node_name $expect_out(1,string)
+        }
+	timeout {
+		send_user "\nFAILURE: scontrol not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+log_user 1
 if {[string compare $node_name ""] == 0} {
 	send_user "\nWARNING: no nodes in usable state for this test\n"
 	exit 0
diff --git a/testsuite/expect/test3.4 b/testsuite/expect/test3.4
index 55c9b7de7..e2d69508c 100755
--- a/testsuite/expect/test3.4
+++ b/testsuite/expect/test3.4
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -39,7 +39,7 @@ set exit_code     0
 set file_in       "test$test_id.input"
 set job_id        0
 set new_prio      1
-set read_priority 0
+set read_priority -1
 
 print_header $test_id
 
@@ -77,7 +77,10 @@ if {$job_id == 0} {
 #
 spawn  $scontrol show job $job_id
 expect {
-
+	-re "Priority=($number)" {
+		set read_priority $expect_out(1,string)
+		exp_continue
+	}
 	timeout {
 		send_user "\nFAILURE: scontrol not responding\n"
 		set exit_code 1
@@ -86,6 +89,10 @@ expect {
 		wait
 	}
 }
+if {$read_priority != 0} {
+	send_user "\nFAILURE: hold priority error\n"
+	set exit_code 1
+}
 
 #
 # Change that job's priority
@@ -124,7 +131,7 @@ expect {
 		wait
 	}
 }
-if {$new_prio != $read_priority} {
+if {$read_priority == 0} {
 	send_user "\nFAILURE: scontrol priority change error\n"
 	set exit_code 1
 }
diff --git a/testsuite/expect/test3.5 b/testsuite/expect/test3.5
index c2a09d8ac..7621e5b72 100755
--- a/testsuite/expect/test3.5
+++ b/testsuite/expect/test3.5
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test3.6 b/testsuite/expect/test3.6
index 8edaa78ef..124a3f00c 100755
--- a/testsuite/expect/test3.6
+++ b/testsuite/expect/test3.6
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test3.7 b/testsuite/expect/test3.7
index bc452145c..f96628eb4 100755
--- a/testsuite/expect/test3.7
+++ b/testsuite/expect/test3.7
@@ -15,7 +15,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test3.7.prog.c b/testsuite/expect/test3.7.prog.c
index 599b7eb5a..683f81181 100644
--- a/testsuite/expect/test3.7.prog.c
+++ b/testsuite/expect/test3.7.prog.c
@@ -10,7 +10,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test3.8 b/testsuite/expect/test3.8
index 82c0a0d9e..51977ac4d 100755
--- a/testsuite/expect/test3.8
+++ b/testsuite/expect/test3.8
@@ -17,7 +17,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test3.9 b/testsuite/expect/test3.9
index 76a161f20..10607bdb2 100755
--- a/testsuite/expect/test3.9
+++ b/testsuite/expect/test3.9
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -37,6 +37,11 @@ set exit_code   0
 
 print_header $test_id
 
+if {[test_cray]} {
+	send_user "\nWARNING: This test is incompatible with Cray systems\n"
+	exit $exit_code
+}
+
 #
 # Issue scontrol reconfigure
 #
diff --git a/testsuite/expect/test4.1 b/testsuite/expect/test4.1
index e1cfed362..fb5f4c632 100755
--- a/testsuite/expect/test4.1
+++ b/testsuite/expect/test4.1
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test4.10 b/testsuite/expect/test4.10
index 360f3729d..3cf86cb57 100755
--- a/testsuite/expect/test4.10
+++ b/testsuite/expect/test4.10
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test4.11 b/testsuite/expect/test4.11
index bb5b76cad..60f6dbcbe 100755
--- a/testsuite/expect/test4.11
+++ b/testsuite/expect/test4.11
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test4.12 b/testsuite/expect/test4.12
index 9157c41a2..c6d832d0e 100755
--- a/testsuite/expect/test4.12
+++ b/testsuite/expect/test4.12
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -223,7 +223,7 @@ proc allocate_and_quit { node proc_cnt total_procs } {
 	set timeout 60
 	set idle_cpus [expr $total_procs - $proc_cnt]
 
-	set mypid [spawn $salloc -w $node -N1 -n $proc_cnt bash]
+	set mypid [spawn $salloc -w $node -N 1 -t 5 -n $proc_cnt bash]
 	expect {
 		-re "Granted job allocation ($number)" {
 			set job_id $expect_out(1,string)
@@ -287,10 +287,6 @@ set def_hostlist ""
 set part_exclusive 0
 spawn $scontrol show part $def_part
 expect {
-	-re " Shared=EXCLUSIVE" {
-		set part_exclusive 1
-		exp_continue
-	}
 	-re " Nodes=($alpha_numeric_nodelist)"  {
 		set def_hostlist $expect_out(1,string)
 		exp_continue
@@ -299,6 +295,10 @@ expect {
 		set def_hostlist $expect_out(1,string)
 		exp_continue
 	}
+	-re " Shared=EXCLUSIVE" {
+		set part_exclusive 1
+		exp_continue
+	}
 	timeout {
 		send_user "\nFAILURE: scontrol not responding\n"
 		set exit_code 1
@@ -382,7 +382,17 @@ if {![string compare $select_type "bluegene"]} {
 		exit 1
 	}
 
-	if {![string compare $type "P"]} {
+	if {![string compare $type "Q"]} {
+		if {$psets >= 32} {
+			set smallest 16
+		} elseif {$psets >= 16} {
+			set smallest 32
+		} elseif {$psets >= 8} {
+			set smallest 64
+		} else {
+			set smallest 128
+		}
+	} elseif {![string compare $type "P"]} {
 		if {$psets >= 32} {
 			set smallest 16
 		} elseif {$psets >= 16} {
@@ -404,8 +414,10 @@ if {![string compare $select_type "bluegene"]} {
 	}
 	set node_scaling [get_bluegene_procs_per_cnode]
 	set smallest [expr $smallest * $node_scaling]
-} elseif {![string compare $select_type "linear"]} {
+} elseif {![string compare $select_type "cray"]} {
 	set smallest $inode_procs
+} elseif {![string compare $select_type "linear"]} {
+	 set smallest $inode_procs
 } else {
 	set select_params [test_select_type_params]
 	if {$part_exclusive == 1} {
diff --git a/testsuite/expect/test4.2 b/testsuite/expect/test4.2
index acb8929bf..b33ef82c3 100755
--- a/testsuite/expect/test4.2
+++ b/testsuite/expect/test4.2
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test4.3 b/testsuite/expect/test4.3
index 32c68dcac..b569388d7 100755
--- a/testsuite/expect/test4.3
+++ b/testsuite/expect/test4.3
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test4.4 b/testsuite/expect/test4.4
index c0fdc0818..ad52d3c7e 100755
--- a/testsuite/expect/test4.4
+++ b/testsuite/expect/test4.4
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test4.5 b/testsuite/expect/test4.5
index f4f7a6059..5e2a3723f 100755
--- a/testsuite/expect/test4.5
+++ b/testsuite/expect/test4.5
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -49,7 +49,7 @@ print_header $test_id
 
 spawn $sinfo --Node --long --exact
 expect {
-	-re "($end_of_line)($name_string) *($number) *($name_string) *($alpha_cap)" {
+	-re "($end_of_line)($name_string) *($number_with_suffix) *($name_string) *($alpha)" {
 		if (![string compare $node_name ""]) {
 			set node_name $expect_out(2,string)
 		}
@@ -77,12 +77,16 @@ expect {
 
 spawn $sinfo --Node --long --exact --state=$node_state
 expect {
-	-re "($end_of_line)($name_string) *($number) *($name_string) *($alpha_cap)" {
+	-re "($end_of_line)($name_string) *($number_with_suffix) *($name_string) *($alpha)" {
 		if ([string compare $expect_out(5,string) $node_state]) {
 			incr mismatches
 		}
 		exp_continue
 	}
+	-re "error:" {
+		send_user "\nFAILURE: Unexpected error from sinfo\n"
+		set exit_code 1
+	}
 	-re "Unable to contact" {
 		send_user "\nFAILURE: slurm appears to be down\n"
 		exit 1
@@ -102,12 +106,16 @@ expect {
 
 spawn $sinfo --Node --long --exact --nodes=$node_name
 expect {
-	-re "($end_of_line)($name_string) *($number) *($name_string) *($alpha_cap)" {
+	-re "($end_of_line)($name_string) *($number_with_suffix) *($name_string) *($alpha)" {
 		if ([string compare $expect_out(2,string) $node_name]) {
 			incr mismatches
 		}
 		exp_continue
 	}
+	-re "error:" {
+		send_user "\nFAILURE: Unexpected error from sinfo\n"
+		set exit_code 1
+	}
 	-re "Unable to contact" {
 		send_user "\nFAILURE: slurm appears to be down\n"
 		exit 1
diff --git a/testsuite/expect/test4.6 b/testsuite/expect/test4.6
index 11005f8c6..ba64dd55e 100755
--- a/testsuite/expect/test4.6
+++ b/testsuite/expect/test4.6
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test4.7 b/testsuite/expect/test4.7
index ea18d8279..51a807f81 100755
--- a/testsuite/expect/test4.7
+++ b/testsuite/expect/test4.7
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test4.8 b/testsuite/expect/test4.8
index b49dc0078..1c778e8f8 100755
--- a/testsuite/expect/test4.8
+++ b/testsuite/expect/test4.8
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test4.9 b/testsuite/expect/test4.9
index 56f741238..d291c04bb 100755
--- a/testsuite/expect/test4.9
+++ b/testsuite/expect/test4.9
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test5.1 b/testsuite/expect/test5.1
index 4848ef435..3d5cec8ec 100755
--- a/testsuite/expect/test5.1
+++ b/testsuite/expect/test5.1
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test5.2 b/testsuite/expect/test5.2
index 27b217b17..b34d42bfb 100755
--- a/testsuite/expect/test5.2
+++ b/testsuite/expect/test5.2
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test5.3 b/testsuite/expect/test5.3
index b0a196d9c..a69349fd8 100755
--- a/testsuite/expect/test5.3
+++ b/testsuite/expect/test5.3
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test5.4 b/testsuite/expect/test5.4
index c502c09e1..c1b5b6f3c 100755
--- a/testsuite/expect/test5.4
+++ b/testsuite/expect/test5.4
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -229,7 +229,7 @@ expect {
 	}
 }
 
-if {$step_found == 0} {
+if {[test_cray] == 0 && $step_found == 0} {
 	send_user "\nFAILURE: squeue step format error\n"
 	set exit_code 1
 }
diff --git a/testsuite/expect/test5.5 b/testsuite/expect/test5.5
index 1834d68a8..b46716270 100755
--- a/testsuite/expect/test5.5
+++ b/testsuite/expect/test5.5
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test5.6 b/testsuite/expect/test5.6
index 4b1d1244a..495acad17 100755
--- a/testsuite/expect/test5.6
+++ b/testsuite/expect/test5.6
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -336,7 +336,7 @@ expect {
 		wait
 	}
 }
-if {$job_found == 0} {
+if {[test_cray] == 0 && $job_found == 0} {
 	send_user "\nFAILURE: squeue failed to locate desired job step\n"
 	set exit_code 1
 }
diff --git a/testsuite/expect/test5.7 b/testsuite/expect/test5.7
index 159c6970e..bb10c261b 100755
--- a/testsuite/expect/test5.7
+++ b/testsuite/expect/test5.7
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test5.8 b/testsuite/expect/test5.8
index 31208e56d..9c1c83f27 100755
--- a/testsuite/expect/test5.8
+++ b/testsuite/expect/test5.8
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test6.1 b/testsuite/expect/test6.1
index 2034917c1..1014f2455 100755
--- a/testsuite/expect/test6.1
+++ b/testsuite/expect/test6.1
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test6.10 b/testsuite/expect/test6.10
index e12a3bf66..8949c986d 100755
--- a/testsuite/expect/test6.10
+++ b/testsuite/expect/test6.10
@@ -17,7 +17,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test6.11 b/testsuite/expect/test6.11
index dd95c4b9e..b1ae1b056 100755
--- a/testsuite/expect/test6.11
+++ b/testsuite/expect/test6.11
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test6.12 b/testsuite/expect/test6.12
index 1cfec5351..78e0f1519 100755
--- a/testsuite/expect/test6.12
+++ b/testsuite/expect/test6.12
@@ -16,7 +16,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -50,6 +50,11 @@ if { [test_xcpu] } {
 	exit 0
 }
 
+if {[test_cray]} {
+	send_user "\nWARNING: This test is incompatible with Cray systems\n"
+	exit $exit_code
+}
+
 #
 # Delete left-over input script plus stdout/err files
 #
diff --git a/testsuite/expect/test6.13 b/testsuite/expect/test6.13
index eb7ceb41d..fab0af477 100755
--- a/testsuite/expect/test6.13
+++ b/testsuite/expect/test6.13
@@ -14,7 +14,7 @@
 # UCRL-CODE-217948.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test6.14 b/testsuite/expect/test6.14
index ed574a895..1fdedaaf3 100755
--- a/testsuite/expect/test6.14
+++ b/testsuite/expect/test6.14
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test6.2 b/testsuite/expect/test6.2
index 67666ffe5..432c25d33 100755
--- a/testsuite/expect/test6.2
+++ b/testsuite/expect/test6.2
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test6.3 b/testsuite/expect/test6.3
index 4b0b717ab..497dde7c2 100755
--- a/testsuite/expect/test6.3
+++ b/testsuite/expect/test6.3
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test6.4 b/testsuite/expect/test6.4
index c5c4650da..4bf5df314 100755
--- a/testsuite/expect/test6.4
+++ b/testsuite/expect/test6.4
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test6.5 b/testsuite/expect/test6.5
index 9d78850f9..d5c148023 100755
--- a/testsuite/expect/test6.5
+++ b/testsuite/expect/test6.5
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test6.6 b/testsuite/expect/test6.6
index d5cef1be3..2fbfc550a 100755
--- a/testsuite/expect/test6.6
+++ b/testsuite/expect/test6.6
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test6.7 b/testsuite/expect/test6.7
index e3298c1c7..48d9d9a3c 100755
--- a/testsuite/expect/test6.7
+++ b/testsuite/expect/test6.7
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -46,7 +46,7 @@ print_header $test_id
 # of the ulimit program is inconsistent across systems.
 #
 exec rm -f $file_prog $file_in $file_out
-exec cc -o $file_prog $file_prog.c
+exec $bin_cc -o $file_prog $file_prog.c
 make_bash_script $file_in "
  $srun ./$file_prog
 "
diff --git a/testsuite/expect/test6.8 b/testsuite/expect/test6.8
index 6ccdfc748..5624818e3 100755
--- a/testsuite/expect/test6.8
+++ b/testsuite/expect/test6.8
@@ -16,7 +16,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test6.9 b/testsuite/expect/test6.9
index 450711e93..f892c92e3 100755
--- a/testsuite/expect/test6.9
+++ b/testsuite/expect/test6.9
@@ -16,7 +16,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test7.1 b/testsuite/expect/test7.1
index adb880c02..13fb11d4e 100755
--- a/testsuite/expect/test7.1
+++ b/testsuite/expect/test7.1
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test7.10 b/testsuite/expect/test7.10
index ba730eb2d..038a5ee8c 100755
--- a/testsuite/expect/test7.10
+++ b/testsuite/expect/test7.10
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test7.11 b/testsuite/expect/test7.11
index 80aad26dc..331a714c3 100755
--- a/testsuite/expect/test7.11
+++ b/testsuite/expect/test7.11
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -48,13 +48,13 @@ if {[test_super_user] == 0} {
 	send_user "\nWARNING: This test must be run as SlurmUser\n"
 	exit 0
 }
-if {[test_aix] == 1} {
+if {[test_aix]} {
 	send_user "WARNING: Test is incompatible with AIX\n"
 	exit 0
 }
-if {[test_bluegene] == 1} {
-	send_user "WARNING: Test is incompatible with Blue Gene\n"
-	exit 0
+if {[test_front_end]} {
+        send_user "\nWARNING: This test is incompatible with front-end systems\n"
+        exit $exit_code
 }
 
 #
diff --git a/testsuite/expect/test7.11.prog.c b/testsuite/expect/test7.11.prog.c
index 27d01e583..721777c38 100644
--- a/testsuite/expect/test7.11.prog.c
+++ b/testsuite/expect/test7.11.prog.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test7.12 b/testsuite/expect/test7.12
index 41eb35f47..9ba6d7203 100755
--- a/testsuite/expect/test7.12
+++ b/testsuite/expect/test7.12
@@ -15,7 +15,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -138,6 +138,7 @@ if {$matches < 1} {
 } elseif {$matches != 2} {
 	send_user "\nWARNING: Failed to load PIDs associated with job step.\n"
 	send_user "         This is dependent upon the ProctrackType configured.\n"
+	send_user "         proctrack/pgid does NOT support this functionality.\n"
 }
 
 cancel_job $job_id
diff --git a/testsuite/expect/test7.12.prog.c b/testsuite/expect/test7.12.prog.c
index 578b8b2e7..00b800a77 100644
--- a/testsuite/expect/test7.12.prog.c
+++ b/testsuite/expect/test7.12.prog.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test7.13 b/testsuite/expect/test7.13
index 7e0480f16..d59c1329b 100755
--- a/testsuite/expect/test7.13
+++ b/testsuite/expect/test7.13
@@ -12,7 +12,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -122,7 +122,7 @@ if {$matches != 2} {
 #
 if { [test_account_storage] == 1 } {
 	set matches 0
-	spawn $sacct -n -P -j $job_id -o ExitCode,DerivedExitCode,DerivedExitStr
+	spawn $sacct -n -P -j $job_id -o ExitCode,DerivedExitCode,Comment
 	expect {
 		-re "0:0\\|123:0\\|" {	# Job record
 			incr matches
@@ -228,7 +228,7 @@ if {$matches != 2} {
 #
 if { [test_account_storage] == 1 } {
 	set matches 0
-	spawn $sacct -n -P -j $job_id -o ExitCode,DerivedExitCode,DerivedExitStr
+	spawn $sacct -n -P -j $job_id -o ExitCode,DerivedExitCode,Comment
 	expect {
 		-re "33:0\\|0:0\\|" {	# Job record
 			incr matches
diff --git a/testsuite/expect/test7.14 b/testsuite/expect/test7.14
index 3539822e3..401adada7 100755
--- a/testsuite/expect/test7.14
+++ b/testsuite/expect/test7.14
@@ -1,7 +1,7 @@
 #!/usr/bin/expect
 ############################################################################
-# Purpose: Verify the ability to modify the Derived Exit Code/String fields
-#          of a job record in the database.
+# Purpose: Verify the ability to modify the Derived Exit Code and Comment
+#          fields of a job record in the database.
 #
 # Output:  "TEST: #.#" followed by "SUCCESS" if test was successful, OR
 #          "FAILURE: ..." otherwise with an explanation of the failure, OR
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -125,9 +125,11 @@ if {$matches != 2} {
 
 #
 # Modify the DerivedExitCode and String of the job
+# (after waiting for the job to get written from slurmctld daemon to slurmdbd)
 #
+sleep 5
 set matches 0
-spawn $sacctmgr -i modify job job=$job_id set DerivedExitCode=22 DerivedExitString=hello
+spawn $sacctmgr -i modify job job=$job_id set DerivedExitCode=22 Comment=hello
 expect {
 	-re "$job_id" {
 		incr matches
@@ -143,7 +145,7 @@ expect {
 }
 
 if {$matches != 1} {
-	send_user "\nFAILURE: sacctmgr failed to change DerivedExitCode/String\n"
+	send_user "\nFAILURE: sacctmgr failed to change DerivedExitCode/Comment\n"
 	exit 1
 }
 
@@ -152,7 +154,7 @@ if {$matches != 1} {
 # matches the above modification and that ExitCode did not change.
 #
 set matches 0
-spawn $sacct -n -P -X -j $job_id -o ExitCode,DerivedExitCode,DerivedExitStr
+spawn $sacct -n -P -X -j $job_id -o ExitCode,DerivedExitCode,Comment
 expect {
 	-re "0:0\\|0:22\\|hello" {	# Job record
 		incr matches
diff --git a/testsuite/expect/test7.2 b/testsuite/expect/test7.2
index b808ce8ab..49c5e69f0 100755
--- a/testsuite/expect/test7.2
+++ b/testsuite/expect/test7.2
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -70,6 +70,12 @@ expect {
 		wait
 	}
 }
+
+if {[test_cray]} {
+	send_user "\nWARNING: Additional portions of this test are incompatible with Cray systems\n"
+	exit $exit_code
+}
+
 if { $exit_code == 0 } {
 	send_user "\n\nSo far, so good\nNow run PMI test under srun\n"
 } else {
@@ -81,8 +87,13 @@ if { $exit_code == 0 } {
 #
 set timeout $max_job_delay
 if { [test_bluegene] } {
-	set node_cnt 1-1024
-	set task_cnt 8
+	if { [test_emulated] } {
+		set node_cnt 1
+		set task_cnt 1
+	} else {
+		set node_cnt 1-1024
+		set task_cnt 8
+	}
 } else {
 	if { [test_xcpu] } {
 		set node_cnt 1-1
diff --git a/testsuite/expect/test7.2.prog.c b/testsuite/expect/test7.2.prog.c
index e77f71958..b2afd8a20 100644
--- a/testsuite/expect/test7.2.prog.c
+++ b/testsuite/expect/test7.2.prog.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test7.3 b/testsuite/expect/test7.3
index 2ee89a6c4..b341be5b1 100755
--- a/testsuite/expect/test7.3
+++ b/testsuite/expect/test7.3
@@ -15,7 +15,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test7.3.io.c b/testsuite/expect/test7.3.io.c
index 831e32372..0492aa90f 100644
--- a/testsuite/expect/test7.3.io.c
+++ b/testsuite/expect/test7.3.io.c
@@ -12,7 +12,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test7.3.prog.c b/testsuite/expect/test7.3.prog.c
index b47e98b78..1cdd992c6 100644
--- a/testsuite/expect/test7.3.prog.c
+++ b/testsuite/expect/test7.3.prog.c
@@ -11,7 +11,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test7.4 b/testsuite/expect/test7.4
index 38d40b022..b6957e37b 100755
--- a/testsuite/expect/test7.4
+++ b/testsuite/expect/test7.4
@@ -9,13 +9,14 @@
 #          "FAILURE: ..." otherwise with an explanation of the failure, OR
 #          anything else indicates a failure mode that must be investigated.
 ############################################################################
-# Copyright (C) 2004-2006 The Regents of the University of California.
+# Copyright (C) 2004-2007 The Regents of the University of California.
+# Copyright (C) 2008-2011 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov> and Dong Ang <dahn@llnl.gov>
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -103,6 +104,11 @@ send_user "=======================  Run without bulk transfer ==================
 send_user "======================================================================\n"
 spawn $totalviewcli -verbosity info -e $no_bulk $srun -a --input=none -n4 -N1-2 -O -t1 $test_prog
 expect {
+	-re "All licenses are currently in use" {
+		send_user "\nWARNING: We can not run this test now\n"
+		incr no_capability
+		exp_continue
+	}
 	-re "d1.<>" {
 		if {$matches == 0} {
 			incr matches
@@ -185,6 +191,11 @@ send_user "=====================  Run with bulk transfer =======================
 send_user "======================================================================\n"
 spawn $totalviewcli -verbosity info -e $bulk $srun -a --input=none -n4 -N1-2 -O -t1 $test_prog
 expect {
+	-re "All licenses are currently in use" {
+		send_user "\nWARNING: We can not run this test now\n"
+		incr no_capability
+		exp_continue
+	}
 	-re "d1.<>" {
 		if {$matches == 0} {
 			incr matches
diff --git a/testsuite/expect/test7.4.prog.c b/testsuite/expect/test7.4.prog.c
index 16fd9075b..a5cc2c7ce 100644
--- a/testsuite/expect/test7.4.prog.c
+++ b/testsuite/expect/test7.4.prog.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test7.5 b/testsuite/expect/test7.5
index 7abd86633..e3fd35950 100755
--- a/testsuite/expect/test7.5
+++ b/testsuite/expect/test7.5
@@ -16,7 +16,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -49,7 +49,10 @@ if { [test_xcpu] } {
 	send_user "\nWARNING: This test is incompatible with XCPU systems\n"
 	exit $exit_code
 }
-
+if {[test_cray]} {
+	send_user "\nWARNING: This test is incompatible with Cray systems\n"
+	exit $exit_code
+}
 
 #
 # Delete left-over program and rebuild it
diff --git a/testsuite/expect/test7.5.prog.c b/testsuite/expect/test7.5.prog.c
index e59046c31..80f4e5710 100644
--- a/testsuite/expect/test7.5.prog.c
+++ b/testsuite/expect/test7.5.prog.c
@@ -9,7 +9,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test7.6 b/testsuite/expect/test7.6
index bedf0e07d..0c095d611 100755
--- a/testsuite/expect/test7.6
+++ b/testsuite/expect/test7.6
@@ -8,13 +8,14 @@
 #          "FAILURE: ..." otherwise with an explanation of the failure, OR
 #          anything else indicates a failure mode that must be investigated.
 ############################################################################
-# Copyright (C) 2004-2006 The Regents of the University of California.
+# Copyright (C) 2004-2007 The Regents of the University of California.
+# Copyright (C) 2008-2011 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov> and Dong Ang <dahn@llnl.gov>
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -133,6 +134,11 @@ send_user "=======================  Run without bulk transfer ==================
 send_user "======================================================================\n"
 spawn $totalviewcli -verbosity info -e $no_bulk $srun -a --input=none -n4 -N1-2 -O -t1 $test_prog
 expect {
+	-re "All licenses are currently in use" {
+		send_user "\nWARNING: We can not run this test now\n"
+		incr no_capability
+		exp_continue
+	}
 	-re "d1.<>" {
 		if {$matches == 0} {
 			incr matches
diff --git a/testsuite/expect/test7.6.prog.c b/testsuite/expect/test7.6.prog.c
index 43018f804..878dc9ef2 100644
--- a/testsuite/expect/test7.6.prog.c
+++ b/testsuite/expect/test7.6.prog.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test7.7 b/testsuite/expect/test7.7
index a045de3c1..ca0637520 100755
--- a/testsuite/expect/test7.7
+++ b/testsuite/expect/test7.7
@@ -16,7 +16,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test7.7.prog.c b/testsuite/expect/test7.7.prog.c
index 3132e9f8c..0a2bb55b3 100644
--- a/testsuite/expect/test7.7.prog.c
+++ b/testsuite/expect/test7.7.prog.c
@@ -8,7 +8,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test7.8 b/testsuite/expect/test7.8
index 15450b6b7..f5b923886 100755
--- a/testsuite/expect/test7.8
+++ b/testsuite/expect/test7.8
@@ -15,7 +15,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test7.8.prog.c b/testsuite/expect/test7.8.prog.c
index f5cccdb82..173485eb3 100644
--- a/testsuite/expect/test7.8.prog.c
+++ b/testsuite/expect/test7.8.prog.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test7.9 b/testsuite/expect/test7.9
index 92108243a..b497260bd 100755
--- a/testsuite/expect/test7.9
+++ b/testsuite/expect/test7.9
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test7.9.prog.c b/testsuite/expect/test7.9.prog.c
index 3809f4dd4..3bf90eab0 100644
--- a/testsuite/expect/test7.9.prog.c
+++ b/testsuite/expect/test7.9.prog.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test8.1 b/testsuite/expect/test8.1
index d1135fde6..54a4e053e 100755
--- a/testsuite/expect/test8.1
+++ b/testsuite/expect/test8.1
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -38,7 +38,6 @@ set file_in     "test$test_id.input"
 set job_id      0
 set connection  "TORUS"
 set conn_letter  "Tt"
-set geometry    "1x1x1"
 set num_nodes   512-512
 set cycle_count 2
 
@@ -49,6 +48,18 @@ if {[test_bluegene] == 0} {
 	exit $exit_code
 }
 
+set type [get_bluegene_type]
+if {$type == 0} {
+	send_user "\nFAILURE: No bluegene type found \n"
+	exit 1
+}
+
+if {![string compare $type "Q"]} {
+	set geometry    "1x1x1x1"
+} else {
+	set geometry    "1x1x1"
+}
+
 #
 # Delete left-over input script files
 # Build input script file
@@ -64,7 +75,7 @@ exec $bin_chmod 700 $file_in
 set timeout $max_job_delay
 
 for {set inx 0} {$inx < $cycle_count} {incr inx} {
-	set sbatch_pid [spawn $sbatch -N$num_nodes --geometry=$geometry --no-rotate --conn-type=$connection --output=/dev/null --error=/dev/null $file_in]
+	set sbatch_pid [spawn $sbatch -N$num_nodes --geometry=$geometry --no-rotate --conn-type=$connection --output=/dev/null --error=/dev/null -t 4 $file_in]
 	expect {
 		-re "Submitted batch job ($number)" {
 			set job_id $expect_out(1,string)
diff --git a/testsuite/expect/test8.2 b/testsuite/expect/test8.2
index 0fc139ac3..3207fd343 100755
--- a/testsuite/expect/test8.2
+++ b/testsuite/expect/test8.2
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -45,13 +45,23 @@ if {[test_bluegene] == 0} {
 	 exit $exit_code
 }
 
+set type [get_bluegene_type]
+if {$type == 0} {
+	send_user "\nFAILURE: No bluegene type found \n"
+	exit 1
+}
+
 #
 # Set target environment variables
 #
 global env
 set env(SBATCH_CONN_TYPE)    torus
-set env(SBATCH_GEOMETRY)     1x1x1
 set env(SBATCH_NO_ROTATE)    1
+if {![string compare $type "Q"]} {
+	set env(SBATCH_GEOMETRY)     1x1x1x1
+} else {
+	set env(SBATCH_GEOMETRY)     1x1x1
+}
 
 #
 # Delete left-over input script files
diff --git a/testsuite/expect/test8.20 b/testsuite/expect/test8.20
new file mode 100755
index 000000000..28a243ee5
--- /dev/null
+++ b/testsuite/expect/test8.20
@@ -0,0 +1,258 @@
+#!/usr/bin/expect
+############################################################################
+# Purpose: Test of SLURM functionality
+#          Bluegene/Q only: Test that job step allocations are a valid size
+#          and within the job's allocation
+#
+# Output:  "TEST: #.#" followed by "SUCCESS" if test was successful, OR
+#          "FAILURE: ..." otherwise with an explanation of the failure, OR
+#          anything else indicates a failure mode that must be investigated.
+############################################################################
+# Copyright (C) 2011 SchedMD LLC
+# Written by Morris Jette <jette@schedmd.com>
+#
+# This file is part of SLURM, a resource management program.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
+# Please also read the included file: DISCLAIMER.
+#
+# SLURM is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+#
+# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along
+# with SLURM; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+############################################################################
+source ./globals
+
+set test_id          "8.20"
+set exit_code        0
+set job_id           0
+set job_size         32
+
+print_header $test_id
+
+if {([test_bluegene] == 0) || [string compare [get_bluegene_type] "Q"]} {
+	send_user "\nWARNING: This test is only compatible with Bluegene/Q systems\n"
+	exit $exit_code
+}
+
+#
+# Spawn a job via salloc
+#
+set matches 0
+set timeout $max_job_delay
+set salloc_pid [spawn $salloc -N$job_size -t1 $bin_bash]
+expect {
+	-re "Granted job allocation ($number)" {
+		set job_id $expect_out(1,string)
+		exp_continue
+	}
+	-re $prompt {
+		#send_user "Job initiated\n"
+	}
+	timeout {
+		send_user "\nFAILURE: salloc not responding\n"
+		if {$job_id != 0} {
+			cancel_job $job_id
+		}
+		slow_kill [expr 0 - $salloc_pid]
+		exit 1
+	}
+}
+
+if {$job_id == 0} {
+	send_user "\nFAILURE: did not get job_id\n"
+	exit 1
+}
+
+#
+# Determine the job's allocation dimensions
+#
+set timeout 5
+set job_start1 -1
+send "$scontrol show job $job_id\r"
+expect {
+	-re "BP_List=($alpha_numeric).($digit)($digit)($digit)($digit)($digit)x($digit)($digit)($digit)($digit)($digit)" {
+		set job_prefix $expect_out(1,string)
+		set job_start1 $expect_out(2,string)
+		set job_start2 $expect_out(3,string)
+		set job_start3 $expect_out(4,string)
+		set job_start4 $expect_out(5,string)
+		set job_start5 $expect_out(6,string)
+		set job_fini1 $expect_out(7,string)
+		set job_fini2 $expect_out(8,string)
+		set job_fini3 $expect_out(9,string)
+		set job_fini4 $expect_out(10,string)
+		set job_fini5 $expect_out(11,string)
+		exp_continue
+	}
+	-re $prompt {
+		#break
+	}
+	timeout {
+		send_user "\nFAILURE: scontrol not responding\n"
+		set exit_code 1
+	}
+}
+if {$job_start1 == -1} {
+	send_user "\nFAILURE: did not get job dimensions\n"
+	cancel_job $job_id
+	exit 1
+}
+send_user "\nJob allocation\n"
+send_user "prefix: $job_prefix\n"
+send_user "dim 1:  $job_start1 to $job_fini1  "
+send_user "dim 2:  $job_start2 to $job_fini2  "
+send_user "dim 3:  $job_start3 to $job_fini3  "
+send_user "dim 4:  $job_start4 to $job_fini4  "
+send_user "dim 5:  $job_start5 to $job_fini5\n"
+
+set job_dim1 [expr $job_fini1 - $job_start1 + 1]
+set job_dim2 [expr $job_fini2 - $job_start2 + 1]
+set job_dim3 [expr $job_fini3 - $job_start3 + 1]
+set job_dim4 [expr $job_fini4 - $job_start4 + 1]
+set job_dim5 [expr $job_fini5 - $job_start5 + 1]
+set actual_job_size [expr $job_dim1 * $job_dim2 * $job_dim3 * $job_dim4 * $job_dim5]
+send_user "size:   $actual_job_size c-nodes\n"
+if {$actual_job_size < $job_size} {
+	send_user "\nFAILURE: job allocation too small ($actual_job_size < $job_size)\n"
+	cancel_job $job_id
+	exit 1
+}
+if {$actual_job_size != $job_size} {
+#	This is a legitimate condition. A request for 5 c-nodes requires
+#	at least 6 c-nodes (3x2x1x1x1).
+	send_user "\nWARNING: job allocation too large ($actual_job_size != $job_size)\n"
+}
+
+#
+# Create job step allocations and insure they are the right size and
+# fall within the c-nodes allocated to the job
+#
+# NOTE: Change this to increment step size by one once SLURM logic can
+#       automatically increase step size as needed
+#
+set timeout 60
+set step_id 0
+set max_step_size 0
+for {set step_size 1 } {$step_size <= $job_size} {set step_size [expr $step_size + 1]} {
+	send_user "\nRunning step $job_id.$step_id at size $step_size\n"
+	set step_start1 -1
+	send "$srun -N$step_size --test-only $scontrol show step $job_id.$step_id\r"
+	expect {
+		-re "BP_List=($alpha_numeric).($digit)($digit)($digit)($digit)($digit)x($digit)($digit)($digit)($digit)($digit)" {
+			set step_prefix $expect_out(1,string)
+			set step_start1 $expect_out(2,string)
+			set step_start2 $expect_out(3,string)
+			set step_start3 $expect_out(4,string)
+			set step_start4 $expect_out(5,string)
+			set step_start5 $expect_out(6,string)
+			set step_fini1 $expect_out(7,string)
+			set step_fini2 $expect_out(8,string)
+			set step_fini3 $expect_out(9,string)
+			set step_fini4 $expect_out(10,string)
+			set step_fini5 $expect_out(11,string)
+			exp_continue
+		}
+		-re "BP_List=($alpha_numeric).($digit)($digit)($digit)($digit)($digit)" {
+			set step_prefix $expect_out(1,string)
+			set step_start1 $expect_out(2,string)
+			set step_start2 $expect_out(3,string)
+			set step_start3 $expect_out(4,string)
+			set step_start4 $expect_out(5,string)
+			set step_start5 $expect_out(6,string)
+			set step_fini1 $step_start1
+			set step_fini2 $step_start2
+			set step_fini3 $step_start3
+			set step_fini4 $step_start4
+			set step_fini5 $step_start5
+			exp_continue
+		}
+		-re $prompt {
+			#break
+		}
+		timeout {
+			send_user "\nFAILURE: job not responding\n"
+			set exit_code 1
+		}
+	}
+	if {$step_start1 == -1} {
+		send_user "\nFAILURE: did not get step dimensions\n"
+		set exit_code 1
+		break
+	}
+
+	send_user "\nStep allocation\n"
+	send_user "prefix: $step_prefix\n"
+	send_user "dim 1:  $step_start1 to $step_fini1  "
+	send_user "dim 2:  $step_start2 to $step_fini2  "
+	send_user "dim 3:  $step_start3 to $step_fini3  "
+	send_user "dim 4:  $step_start4 to $step_fini4  "
+	send_user "dim 5:  $step_start5 to $step_fini5\n"
+
+	set step_dim1 [expr $step_fini1 - $step_start1 + 1]
+	set step_dim2 [expr $step_fini2 - $step_start2 + 1]
+	set step_dim3 [expr $step_fini3 - $step_start3 + 1]
+	set step_dim4 [expr $step_fini4 - $step_start4 + 1]
+	set step_dim5 [expr $step_fini5 - $step_start5 + 1]
+	set actual_step_size [expr $step_dim1 * $step_dim2 * $step_dim3 * $step_dim4 * $step_dim5]
+	send_user "size:   $actual_step_size c-nodes\n"
+	if {$actual_step_size < $step_size} {
+		send_user "\nFAILURE: step allocation too small ($actual_step_size < $step_size)\n"
+		set exit_code 1
+		break
+	}
+	if {$actual_step_size != $step_size} {
+#		This is a legitimate condition. A request for 5 c-nodes requires
+#		at least 6 c-nodes (3x2x1x1x1). The actual size depends upon
+#		the geometry of the job allocation.
+		send_user "\nWARNING: step allocation too large ($actual_step_size != $step_size)\n"
+	}
+
+	if {$step_size < $max_step_size} {
+		send_user "\nFAILURE: step allocation getting smaller ($step_size < $max_step_size)\n"
+		set exit_code 1
+		break
+	}
+	if {$step_size > $max_step_size} {
+		set max_step_size $step_size
+	}
+
+	if {$step_start1 > $job_fini1 || $step_fini1 < $job_start1 ||
+	    $step_start2 > $job_fini2 || $step_fini2 < $job_start2 ||
+	    $step_start3 > $job_fini3 || $step_fini3 < $job_start3 ||
+	    $step_start4 > $job_fini4 || $step_fini4 < $job_start4 ||
+	    $step_start5 > $job_fini5 || $step_fini5 < $job_start5} {
+		send_user "\nFAILURE: step allocation outside of job's allocation\n"
+		set exit_code 1
+		break
+	}
+
+	incr step_id
+}
+
+send "exit\r"
+expect {
+	timeout {
+		send_user "\nFAILURE: job not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
+if {$exit_code == 0} {
+	send_user "\nSUCCESS\n"
+} else {
+	cancel_job $job_id
+}
+
+exit $exit_code
diff --git a/testsuite/expect/test8.21 b/testsuite/expect/test8.21
new file mode 100755
index 000000000..e11af4402
--- /dev/null
+++ b/testsuite/expect/test8.21
@@ -0,0 +1,339 @@
+#!/usr/bin/expect
+############################################################################
+# Purpose: Test of SLURM functionality
+#          Bluegene/Q only: Test that multiple job step allocations are
+#          properly packed within the job's allocation
+#
+# Output:  "TEST: #.#" followed by "SUCCESS" if test was successful, OR
+#          "FAILURE: ..." otherwise with an explanation of the failure, OR
+#          anything else indicates a failure mode that must be investigated.
+############################################################################
+# Copyright (C) 2011 SchedMD LLC
+# Written by Morris Jette <jette@schedmd.com>
+#
+# This file is part of SLURM, a resource management program.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
+# Please also read the included file: DISCLAIMER.
+#
+# SLURM is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+#
+# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along
+# with SLURM; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+############################################################################
+source ./globals
+
+set test_id          "8.21"
+set exit_code        0
+set file_prog        "test$test_id.bash"
+set job_id           0
+set job_size         32
+
+
+print_header $test_id
+
+if {([test_bluegene] == 0) || [string compare [get_bluegene_type] "Q"]} {
+	send_user "\nWARNING: This test is only compatible with Bluegene/Q systems\n"
+	exit $exit_code
+}
+
+#
+# Spawn a job via salloc
+#
+set matches 0
+set timeout $max_job_delay
+set salloc_pid [spawn $salloc -N$job_size -t1 $bin_bash]
+expect {
+	-re "Granted job allocation ($number)" {
+		set job_id $expect_out(1,string)
+		exp_continue
+	}
+	-re $prompt {
+		#send_user "Job initiated\n"
+	}
+	timeout {
+		send_user "\nFAILURE: salloc not responding\n"
+		if {$job_id != 0} {
+			cancel_job $job_id
+		}
+		slow_kill [expr 0 - $salloc_pid]
+		exit 1
+	}
+}
+
+if {$job_id == 0} {
+	send_user "\nFAILURE: did not get job_id\n"
+	exit 1
+}
+
+#
+# Determine the job's allocation dimensions
+#
+set timeout 5
+set job_start1 -1
+send "$scontrol show job $job_id\r"
+expect {
+	-re "BP_List=($alpha_numeric).($digit)($digit)($digit)($digit)($digit)x($digit)($digit)($digit)($digit)($digit)" {
+		set job_prefix $expect_out(1,string)
+		set job_start1 $expect_out(2,string)
+		set job_start2 $expect_out(3,string)
+		set job_start3 $expect_out(4,string)
+		set job_start4 $expect_out(5,string)
+		set job_start5 $expect_out(6,string)
+		set job_fini1 $expect_out(7,string)
+		set job_fini2 $expect_out(8,string)
+		set job_fini3 $expect_out(9,string)
+		set job_fini4 $expect_out(10,string)
+		set job_fini5 $expect_out(11,string)
+		exp_continue
+	}
+	-re $prompt {
+		#break
+	}
+	timeout {
+		send_user "\nFAILURE: scontrol not responding\n"
+		set exit_code 1
+	}
+}
+if {$job_start1 == -1} {
+	send_user "\nFAILURE: did not get job dimensions\n"
+	cancel_job $job_id
+	exit 1
+}
+send_user "\nJob allocation\n"
+send_user "prefix: $job_prefix\n"
+send_user "dim 1:  $job_start1 to $job_fini1  "
+send_user "dim 2:  $job_start2 to $job_fini2  "
+send_user "dim 3:  $job_start3 to $job_fini3  "
+send_user "dim 4:  $job_start4 to $job_fini4  "
+send_user "dim 5:  $job_start5 to $job_fini5\n"
+
+set job_dim1 [expr $job_fini1 - $job_start1 + 1]
+set job_dim2 [expr $job_fini2 - $job_start2 + 1]
+set job_dim3 [expr $job_fini3 - $job_start3 + 1]
+set job_dim4 [expr $job_fini4 - $job_start4 + 1]
+set job_dim5 [expr $job_fini5 - $job_start5 + 1]
+set actual_job_size [expr $job_dim1 * $job_dim2 * $job_dim3 * $job_dim4 * $job_dim5]
+send_user "size:   $actual_job_size c-nodes\n"
+if {$actual_job_size < $job_size} {
+	send_user "\nFAILURE: job allocation too small ($actual_job_size < $job_size)\n"
+	cancel_job $job_id
+	exit 1
+}
+if {$actual_job_size != $job_size} {
+#	This is a legitimate condition. A request for 5 c-nodes requires
+#	at least 6 c-nodes (3x2x1x1x1).
+	send_user "\nWARNING: job allocation too large ($actual_job_size != $job_size)\n"
+}
+
+#
+# Build an array to count the job's c-nodes which have been allocated to steps
+#
+for {set dim1 $job_start1} {$dim1 <= $job_fini1} {incr dim1} {
+	for {set dim2 $job_start2} {$dim2 <= $job_fini2} {incr dim2} {
+		for {set dim3 $job_start3} {$dim3 <= $job_fini3} {incr dim3} {
+			for {set dim4 $job_start4} {$dim4 <= $job_fini4} {incr dim4} {
+				for {set dim5 $job_start5} {$dim5 <= $job_fini5} {incr dim5} {
+					set use_cnt($dim1,$dim2,$dim3,$dim4,$dim5) 0
+				}
+			}
+		}
+	}
+}
+
+set timeout 60
+send "./$file_prog $srun $squeue $job_id $actual_job_size 1\r"
+expect {
+	-re "BP_List=($alpha_numeric).($digit)($digit)($digit)($digit)($digit)x($digit)($digit)($digit)($digit)($digit)" {
+		set step_prefix $expect_out(1,string)
+		set step_start1 $expect_out(2,string)
+		set step_start2 $expect_out(3,string)
+		set step_start3 $expect_out(4,string)
+		set step_start4 $expect_out(5,string)
+		set step_start5 $expect_out(6,string)
+		set step_fini1 $expect_out(7,string)
+		set step_fini2 $expect_out(8,string)
+		set step_fini3 $expect_out(9,string)
+		set step_fini4 $expect_out(10,string)
+		set step_fini5 $expect_out(11,string)
+		for {set dim1 $step_start1} {$dim1 <= $step_fini1} {incr dim1} {
+			for {set dim2 $step_start2} {$dim2 <= $step_fini2} {incr dim2} {
+				for {set dim3 $step_start3} {$dim3 <= $step_fini3} {incr dim3} {
+					for {set dim4 $step_start4} {$dim4 <= $step_fini4} {incr dim4} {
+						for {set dim5 $step_start5} {$dim5 <= $step_fini5} {incr dim5} {
+							if [info exists use_cnt($dim1,$dim2,$dim3,$dim4,$dim5)] {
+								incr use_cnt($dim1,$dim2,$dim3,$dim4,$dim5)
+							} else {
+								send_user "\nFAILURE: invalid step c-node allocation at "
+								send_user "\[$dim1,$dim2,$dim3,$dim4,$dim5\]/"
+								set exit_code 1
+							}
+						}
+					}
+				}
+			}
+		}
+		exp_continue
+	}
+	-re "BP_List=($alpha_numeric).($digit)($digit)($digit)($digit)($digit)" {
+		set step_prefix $expect_out(1,string)
+		set dim1 $expect_out(2,string)
+		set dim2 $expect_out(3,string)
+		set dim3 $expect_out(4,string)
+		set dim4 $expect_out(5,string)
+		set dim5 $expect_out(6,string)
+		if [info exists use_cnt($dim1,$dim2,$dim3,$dim4,$dim5)] {
+			incr use_cnt($dim1,$dim2,$dim3,$dim4,$dim5)
+		} else {
+			send_user "\nFAILURE: invalid step c-node allocation at "
+			send_user "\[$dim1,$dim2,$dim3,$dim4,$dim5\]\n"
+			set exit_code 1
+		}
+		exp_continue
+	}
+	-re $prompt {
+		#break
+	}
+	timeout {
+		send_user "\nFAILURE: job not responding\n"
+		set exit_code 1
+	}
+}
+
+#
+# Test that each of the job's c-nodes have been allocated once to some step
+#
+for {set dim1 $job_start1} {$dim1 <= $job_fini1} {incr dim1} {
+	for {set dim2 $job_start2} {$dim2 <= $job_fini2} {incr dim2} {
+		for {set dim3 $job_start3} {$dim3 <= $job_fini3} {incr dim3} {
+			for {set dim4 $job_start4} {$dim4 <= $job_fini4} {incr dim4} {
+				for {set dim5 $job_start5} {$dim5 <= $job_fini5} {incr dim5} {
+					if {$use_cnt($dim1,$dim2,$dim3,$dim4,$dim5) != 1} {
+						send_user "\nFAILURE: c-node at \[$dim1,$dim2,$dim3,$dim4,$dim5\] "
+						send_user "allocated $use_cnt($dim1,$dim2,$dim3,$dim4,$dim5) times\n"
+						set exit_code 1
+					}
+				}
+			}
+		}
+	}
+}
+
+if {$exit_code == 0} {
+	send_user "\nSo far, so good...\n\n"
+}
+
+#
+# Clear the count the job's c-nodes which have been allocated to steps
+#
+for {set dim1 $job_start1} {$dim1 <= $job_fini1} {incr dim1} {
+	for {set dim2 $job_start2} {$dim2 <= $job_fini2} {incr dim2} {
+		for {set dim3 $job_start3} {$dim3 <= $job_fini3} {incr dim3} {
+			for {set dim4 $job_start4} {$dim4 <= $job_fini4} {incr dim4} {
+				for {set dim5 $job_start5} {$dim5 <= $job_fini5} {incr dim5} {
+					set use_cnt($dim1,$dim2,$dim3,$dim4,$dim5) 0
+				}
+			}
+		}
+	}
+}
+
+#
+# This is a randomized variation on the above logic and includes a full
+# allocation job step. Some job steps may not start due to packing issues
+#
+set timeout 60
+send "./$file_prog $srun $squeue $job_id $actual_job_size 2\r"
+expect {
+	-re "BP_List=($alpha_numeric).($digit)($digit)($digit)($digit)($digit)x($digit)($digit)($digit)($digit)($digit)" {
+		set step_prefix $expect_out(1,string)
+		set step_start1 $expect_out(2,string)
+		set step_start2 $expect_out(3,string)
+		set step_start3 $expect_out(4,string)
+		set step_start4 $expect_out(5,string)
+		set step_start5 $expect_out(6,string)
+		set step_fini1 $expect_out(7,string)
+		set step_fini2 $expect_out(8,string)
+		set step_fini3 $expect_out(9,string)
+		set step_fini4 $expect_out(10,string)
+		set step_fini5 $expect_out(11,string)
+		for {set dim1 $step_start1} {$dim1 <= $step_fini1} {incr dim1} {
+			for {set dim2 $step_start2} {$dim2 <= $step_fini2} {incr dim2} {
+				for {set dim3 $step_start3} {$dim3 <= $step_fini3} {incr dim3} {
+					for {set dim4 $step_start4} {$dim4 <= $step_fini4} {incr dim4} {
+						for {set dim5 $step_start5} {$dim5 <= $step_fini5} {incr dim5} {
+							if [info exists use_cnt($dim1,$dim2,$dim3,$dim4,$dim5)] {
+								incr use_cnt($dim1,$dim2,$dim3,$dim4,$dim5)
+							} else {
+								send_user "\nFAILURE: invalid step c-node allocation at "
+								send_user "\[$dim1,$dim2,$dim3,$dim4,$dim5\]/"
+								set exit_code 1
+							}
+						}
+					}
+				}
+			}
+		}
+		exp_continue
+	}
+	-re "BP_List=($alpha_numeric).($digit)($digit)($digit)($digit)($digit)" {
+		set step_prefix $expect_out(1,string)
+		set dim1 $expect_out(2,string)
+		set dim2 $expect_out(3,string)
+		set dim3 $expect_out(4,string)
+		set dim4 $expect_out(5,string)
+		set dim5 $expect_out(6,string)
+		if [info exists use_cnt($dim1,$dim2,$dim3,$dim4,$dim5)] {
+			incr use_cnt($dim1,$dim2,$dim3,$dim4,$dim5)
+		} else {
+			send_user "\nFAILURE: invalid step c-node allocation at "
+			send_user "\[$dim1,$dim2,$dim3,$dim4,$dim5\]\n"
+			set exit_code 1
+		}
+		exp_continue
+	}
+	-re $prompt {
+		send_user "\nNOTE: Step create errors due to busy nodes are expected\n"
+		send "exit\r"
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: job not responding\n"
+		set exit_code 1
+	}
+}
+
+#
+# Test that each of the job's c-nodes have been allocated no more than once to some step
+#
+for {set dim1 $job_start1} {$dim1 <= $job_fini1} {incr dim1} {
+	for {set dim2 $job_start2} {$dim2 <= $job_fini2} {incr dim2} {
+		for {set dim3 $job_start3} {$dim3 <= $job_fini3} {incr dim3} {
+			for {set dim4 $job_start4} {$dim4 <= $job_fini4} {incr dim4} {
+				for {set dim5 $job_start5} {$dim5 <= $job_fini5} {incr dim5} {
+					if {$use_cnt($dim1,$dim2,$dim3,$dim4,$dim5) > 1} {
+						send_user "\nFAILURE: c-node at \[$dim1,$dim2,$dim3,$dim4,$dim5\] "
+						send_user "allocated $use_cnt($dim1,$dim2,$dim3,$dim4,$dim5) times\n"
+						set exit_code 1
+					}
+				}
+			}
+		}
+	}
+}
+
+if {$exit_code == 0} {
+	send_user "\nSUCCESS\n"
+} else {
+	cancel_job $job_id
+}
+
+exit $exit_code
diff --git a/testsuite/expect/test8.21.bash b/testsuite/expect/test8.21.bash
new file mode 100755
index 000000000..c6494bc5d
--- /dev/null
+++ b/testsuite/expect/test8.21.bash
@@ -0,0 +1,43 @@
+#!/bin/bash
+
+if [ $# -ne 5 ]; then
+	echo "test8.21.bash <srun_path> <squeue_path> <job_id> <job_size> <mode:1|2>"
+	exit 1
+fi
+srun=$1
+squeue=$2
+job_id=$3
+job_size=$4
+test_mode=$5
+
+delay_time=1
+while [ $delay_time -le 60 ]
+do
+	$srun -N1  --test-only --immediate /bin/true
+	rc=$?
+	if [ $rc -eq 0 ]
+	then
+		break
+	fi
+	sleep $delay_time
+	delay_time=`expr $delay_time + 1`
+done
+
+if [ $test_mode -gt 1 ]
+then
+	job_size=`expr $job_size + $job_size`
+	sleep_time=0
+else
+	sleep_time=1
+fi
+
+while [ $job_size -ge 2 ]
+do
+	job_size=`expr $job_size / 2`
+	$srun -N$job_size --test-only sleep 50 &
+	sleep $sleep_time
+done
+
+$srun -N1  --test-only sleep 50 &
+sleep 5
+$squeue --jobs=$job_id --steps --noheader --format='Step_ID=%i BP_List=%N'
diff --git a/testsuite/expect/test8.22 b/testsuite/expect/test8.22
new file mode 100755
index 000000000..e6dd5b469
--- /dev/null
+++ b/testsuite/expect/test8.22
@@ -0,0 +1,163 @@
+#!/usr/bin/expect
+############################################################################
+# Purpose: Test of SLURM functionality
+#          Bluegene/Q only: Stress test of running many job step allocations
+#          within the job's allocation
+#
+# Output:  "TEST: #.#" followed by "SUCCESS" if test was successful, OR
+#          "FAILURE: ..." otherwise with an explanation of the failure, OR
+#          anything else indicates a failure mode that must be investigated.
+############################################################################
+# Copyright (C) 2011 SchedMD LLC
+# Written by Morris Jette <jette@schedmd.com>
+#
+# This file is part of SLURM, a resource management program.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
+# Please also read the included file: DISCLAIMER.
+#
+# SLURM is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+#
+# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along
+# with SLURM; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+############################################################################
+source ./globals
+
+set test_id          "8.22"
+set exit_code        0
+set file_in          "test$test_id.in"
+set job_id           0
+set job_size         32
+
+
+print_header $test_id
+
+if {([test_bluegene] == 0) || [string compare [get_bluegene_type] "Q"]} {
+	send_user "\nWARNING: This test is only compatible with Bluegene/Q systems\n"
+	exit $exit_code
+}
+
+#
+# Spawn a job via salloc
+#
+set matches 0
+set timeout $max_job_delay
+set salloc_pid [spawn $salloc -N$job_size -t1 $bin_bash]
+expect {
+	-re "Granted job allocation ($number)" {
+		set job_id $expect_out(1,string)
+		exp_continue
+	}
+	-re $prompt {
+		#send_user "Job initiated\n"
+	}
+	timeout {
+		send_user "\nFAILURE: salloc not responding\n"
+		if {$job_id != 0} {
+			cancel_job $job_id
+		}
+		slow_kill [expr 0 - $salloc_pid]
+		exit 1
+	}
+}
+
+if {$job_id == 0} {
+	send_user "\nFAILURE: did not get job_id\n"
+	exit 1
+}
+
+#
+# Determine the job's allocation dimensions
+#
+set job_start1 -1
+send "$scontrol show job $job_id\r"
+expect {
+	-re "BP_List=($alpha_numeric).($digit)($digit)($digit)($digit)($digit)x($digit)($digit)($digit)($digit)($digit)" {
+		set job_prefix $expect_out(1,string)
+		set job_start1 $expect_out(2,string)
+		set job_start2 $expect_out(3,string)
+		set job_start3 $expect_out(4,string)
+		set job_start4 $expect_out(5,string)
+		set job_start5 $expect_out(6,string)
+		set job_fini1 $expect_out(7,string)
+		set job_fini2 $expect_out(8,string)
+		set job_fini3 $expect_out(9,string)
+		set job_fini4 $expect_out(10,string)
+		set job_fini5 $expect_out(11,string)
+		exp_continue
+	}
+	-re $prompt {
+		#break
+	}
+	timeout {
+		send_user "\nFAILURE: scontrol not responding\n"
+		set exit_code 1
+	}
+}
+if {$job_start1 == -1} {
+	send_user "\nFAILURE: did not get job dimensions\n"
+	cancel_job $job_id
+	exit 1
+}
+send_user "\nJob allocation\n"
+send_user "prefix: $job_prefix\n"
+send_user "dim 1:  $job_start1 to $job_fini1  "
+send_user "dim 2:  $job_start2 to $job_fini2  "
+send_user "dim 3:  $job_start3 to $job_fini3  "
+send_user "dim 4:  $job_start4 to $job_fini4  "
+send_user "dim 5:  $job_start5 to $job_fini5\n"
+
+set job_dim1 [expr $job_fini1 - $job_start1 + 1]
+set job_dim2 [expr $job_fini2 - $job_start2 + 1]
+set job_dim3 [expr $job_fini3 - $job_start3 + 1]
+set job_dim4 [expr $job_fini4 - $job_start4 + 1]
+set job_dim5 [expr $job_fini5 - $job_start5 + 1]
+set actual_job_size [expr $job_dim1 * $job_dim2 * $job_dim3 * $job_dim4 * $job_dim5]
+send_user "size:   $actual_job_size c-nodes\n"
+if {$actual_job_size < $job_size} {
+	send_user "\nFAILURE: job allocation too small ($actual_job_size < $job_size)\n"
+	cancel_job $job_id
+	exit 1
+}
+if {$actual_job_size != $job_size} {
+#	This is a legitimate condition. A request for 5 c-nodes requires
+#	at least 6 c-nodes (3x2x1x1x1).
+	send_user "\nWARNING: job allocation too large ($actual_job_size != $job_size)\n"
+}
+
+make_bash_script $file_in "
+for ((inx=0; inx<$actual_job_size; inx++)) ; do
+	$srun -N4 sleep 1 &
+	$srun -N2 sleep 1 &
+	$srun -N1 sleep 1 &
+done
+wait"
+
+send "./$file_in\r"
+expect {
+	-re $prompt {
+		send "exit\r"
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: job not responding\n"
+		set exit_code 1
+	}
+}
+
+if {$exit_code == 0} {
+	exec rm -f $file_in
+	send_user "\nSUCCESS\n"
+} else {
+	cancel_job $job_id
+}
+
+exit $exit_code
diff --git a/testsuite/expect/test8.23 b/testsuite/expect/test8.23
new file mode 100755
index 000000000..c361c0004
--- /dev/null
+++ b/testsuite/expect/test8.23
@@ -0,0 +1,283 @@
+#!/usr/bin/expect
+############################################################################
+# Purpose: Test of SLURM functionality
+#          Bluegene/Q only: Test that multiple jobs allocations are properly
+#          packed within a midplane
+#
+# Output:  "TEST: #.#" followed by "SUCCESS" if test was successful, OR
+#          "FAILURE: ..." otherwise with an explanation of the failure, OR
+#          anything else indicates a failure mode that must be investigated.
+############################################################################
+# Copyright (C) 2011 SchedMD LLC
+# Written by Morris Jette <jette@schedmd.com>
+#
+# This file is part of SLURM, a resource management program.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
+# Please also read the included file: DISCLAIMER.
+#
+# SLURM is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+#
+# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along
+# with SLURM; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+############################################################################
+source ./globals
+
+set test_id          "8.23"
+set exit_code        0
+set file_in          "test$test_id.in"
+set job_size         32
+
+
+print_header $test_id
+
+if {([test_bluegene] == 0) || [string compare [get_bluegene_type] "Q"]} {
+	send_user "\nWARNING: This test is only compatible with Bluegene/Q systems\n"
+	exit $exit_code
+}
+
+make_bash_script $file_in "sleep 300"
+
+#
+# Submit a sub-midplane job
+#
+set matches    0
+set timeout    $max_job_delay
+set job_id(1)  0
+set sbatch_pid [spawn $sbatch -N$job_size -t2 --output=/dev/null $file_in]
+expect {
+	-re "Submitted batch job ($number)" {
+		set job_id(1) $expect_out(1,string)
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: sbatch not responding\n"
+		if {$job_id(1) != 0} {
+			cancel_job $job_id(1)
+		}
+		slow_kill [expr 0 - $sbatch_pid]
+		exit 1
+	}
+	eof {
+		wait
+	}
+}
+if {$job_id(1) == 0} {
+	send_user "\nFAILURE: did not get job_id\n"
+	exit 1
+}
+
+#
+# Determine the job's midplane name and allocation dimensions
+#
+if {[wait_for_job $job_id(1) RUNNING] != 0} {
+	send_user "\nFAILURE: error starting job $job_id(1)\n"
+	cancel_job $job_id(1)
+	exit 1
+}
+set job_start(1,1) -1
+spawn $squeue --jobs=$job_id(1) --noheader -o "%i BP_List=%N"
+expect {
+	-re "BP_List=($alpha_numeric).($digit)($digit)($digit)($digit)($digit)x($digit)($digit)($digit)($digit)($digit)" {
+		set job_prefix(1) $expect_out(1,string)
+		set job_start(1,1) $expect_out(2,string)
+		set job_start(1,2) $expect_out(3,string)
+		set job_start(1,3) $expect_out(4,string)
+		set job_start(1,4) $expect_out(5,string)
+		set job_start(1,5) $expect_out(6,string)
+		set job_fini(1,1) $expect_out(7,string)
+		set job_fini(1,2) $expect_out(8,string)
+		set job_fini(1,3) $expect_out(9,string)
+		set job_fini(1,4) $expect_out(10,string)
+		set job_fini(1,5) $expect_out(11,string)
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: squeue not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+if {$job_start(1,1) == -1} {
+	send_user "\nFAILURE: did not get job dimensions\n"
+	cancel_job $job_id(1)
+	exit 1
+}
+send_user "\nJob allocation\n"
+send_user "prefix: $job_prefix(1)\n"
+send_user "dim 1:  $job_start(1,1) to $job_fini(1,1)  "
+send_user "dim 2:  $job_start(1,2) to $job_fini(1,2)  "
+send_user "dim 3:  $job_start(1,3) to $job_fini(1,3)  "
+send_user "dim 4:  $job_start(1,4) to $job_fini(1,4)  "
+send_user "dim 5:  $job_start(1,5) to $job_fini(1,5)\n"
+
+set job_dim(1,1) [expr $job_fini(1,1) - $job_start(1,1) + 1]
+set job_dim(1,2) [expr $job_fini(1,2) - $job_start(1,2) + 1]
+set job_dim(1,3) [expr $job_fini(1,3) - $job_start(1,3) + 1]
+set job_dim(1,4) [expr $job_fini(1,4) - $job_start(1,4) + 1]
+set job_dim(1,5) [expr $job_fini(1,5) - $job_start(1,5) + 1]
+set actual_job_size [expr $job_dim(1,1) * $job_dim(1,2) * $job_dim(1,3) * $job_dim(1,4) * $job_dim(1,5)]
+send_user "size:   $actual_job_size c-nodes\n"
+if {$actual_job_size < $job_size} {
+	send_user "\nFAILURE: job allocation too small ($actual_job_size < $job_size)\n"
+	cancel_job $job_id(1)
+	exit 1
+}
+if {$actual_job_size != $job_size} {
+#	This is a legitimate condition. A request for 5 c-nodes requires
+#	at least 6 c-nodes (3x2x1x1x1).
+	send_user "\nWARNING: job allocation too large ($actual_job_size != $job_size)\n"
+}
+
+#
+# Submit more jobs to fill that midplane
+#
+set job_count [expr 512 / $actual_job_size]
+for {set inx 2} {$inx <= $job_count} {incr inx} {
+	set sbatch_pid [spawn $sbatch -N$job_size -w $job_prefix(1) -t2 --output=/dev/null $file_in]
+	expect {
+		-re "Submitted batch job ($number)" {
+			set job_id($inx) $expect_out(1,string)
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sbatch not responding\n"
+			if {$job_id($inx) != 0} {
+				cancel_job $job_id($inx)
+			}
+			slow_kill [expr 0 - $sbatch_pid]
+			exit 1
+		}
+		eof {
+			wait
+		}
+	}
+	if {$job_id($inx) == 0} {
+		send_user "\nFAILURE: did not get job_id\n"
+		exit 1
+	}
+}
+
+#
+# Give the new jobs a chance to start and see what resources they are allocated
+#
+sleep 15
+for {set inx 2} {$inx <= $job_count} {incr inx} {
+	set job_start($inx,1) -1
+	spawn $squeue --jobs=$job_id($inx) --noheader -o "%i BP_List=%N"
+	expect {
+		-re "BP_List=($alpha_numeric).($digit)($digit)($digit)($digit)($digit)x($digit)($digit)($digit)($digit)($digit)" {
+			set job_prefix($inx) $expect_out(1,string)
+			set job_start($inx,1) $expect_out(2,string)
+			set job_start($inx,2) $expect_out(3,string)
+			set job_start($inx,3) $expect_out(4,string)
+			set job_start($inx,4) $expect_out(5,string)
+			set job_start($inx,5) $expect_out(6,string)
+			set job_fini($inx,1) $expect_out(7,string)
+			set job_fini($inx,2) $expect_out(8,string)
+			set job_fini($inx,3) $expect_out(9,string)
+			set job_fini($inx,4) $expect_out(10,string)
+			set job_fini($inx,5) $expect_out(11,string)
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: squeue not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+}
+
+#
+# Build an array to count the job's c-nodes which have been allocated to our jobs
+#
+for {set dim1 0} {$dim1 <= 3} {incr dim1} {
+	for {set dim2 0} {$dim2 <= 3} {incr dim2} {
+		for {set dim3 0} {$dim3 <= 3} {incr dim3} {
+			for {set dim4 0} {$dim4 <= 3} {incr dim4} {
+				for {set dim5 0} {$dim5 <= 1} {incr dim5} {
+					set use_cnt($dim1,$dim2,$dim3,$dim4,$dim5) 0
+				}
+			}
+		}
+	}
+}
+
+#
+# Validate the allocated midplane name and count specific allocated c-nodes
+#
+for {set inx 1} {$inx <= $job_count} {incr inx} {
+	if {$job_start($inx,1) == -1} {
+		continue
+	}
+	if {[string compare $job_prefix(1) $job_prefix($inx)]} {
+		send_user "\nFAILURE: job $job_id($inx) is running on "
+	send_user "midplane $job_prefix($inx) instead of $job_prefix(1)\n"
+		set exit_code 1
+		continue
+	}
+	for {set dim1 $job_start($inx,1)} {$dim1 <= $job_fini($inx,1)} {incr dim1} {
+		for {set dim2 $job_start($inx,2)} {$dim2 <= $job_fini($inx,2)} {incr dim2} {
+			for {set dim3 $job_start($inx,3)} {$dim3 <= $job_fini($inx,3)} {incr dim3} {
+				for {set dim4 $job_start($inx,4)} {$dim4 <= $job_fini($inx,4)} {incr dim4} {
+					for {set dim5 $job_start($inx,5)} {$dim5 <= $job_fini($inx,5)} {incr dim5} {
+						if [info exists use_cnt($dim1,$dim2,$dim3,$dim4,$dim5)] {
+							incr use_cnt($dim1,$dim2,$dim3,$dim4,$dim5)
+						} else {
+							send_user "\nFAILURE: invalid c-node allocation for job "
+							send_user "$job_id($inx) at \[$dim1,$dim2,$dim3,$dim4,$dim5\]/"
+							set exit_code 1
+						}
+					}
+				}
+			}
+		}
+	}
+}
+
+#
+# Test that no c-nodes have been allocated more than once to our jobs
+# This is an independent loop so that we might check for unallocated c-nodes
+#
+set unused_count 0
+for {set dim1 0} {$dim1 <= 3} {incr dim1} {
+	for {set dim2 0} {$dim2 <= 3} {incr dim2} {
+		for {set dim3 0} {$dim3 <= 3} {incr dim3} {
+			for {set dim4 0} {$dim4 <= 3} {incr dim4} {
+				for {set dim5 0} {$dim5 <= 1} {incr dim5} {
+					if {$use_cnt($dim1,$dim2,$dim3,$dim4,$dim5) > 1} {
+						send_user "\nFAILURE: c-node at \[$dim1,$dim2,$dim3,$dim4,$dim5\] "
+						send_user "allocated $use_cnt($dim1,$dim2,$dim3,$dim4,$dim5) times\n"
+						set exit_code 1
+					} elseif {$use_cnt($dim1,$dim2,$dim3,$dim4,$dim5) == 0} {
+						incr unused_count
+					}
+				}
+			}
+		}
+	}
+}
+send_user "\nNOTE: $unused_count c-nodes on midplane $job_prefix(1) were unused by our jobs\n"
+
+for {set inx 1} {$inx <= $job_count} {incr inx} {
+	cancel_job $job_id($inx)
+}
+
+if {$exit_code == 0} {
+	exec rm -f $file_in
+	send_user "\nSUCCESS\n"
+}
+
+exit $exit_code
diff --git a/testsuite/expect/test8.3 b/testsuite/expect/test8.3
index dea5f9159..71e8328e9 100755
--- a/testsuite/expect/test8.3
+++ b/testsuite/expect/test8.3
@@ -16,7 +16,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test8.4 b/testsuite/expect/test8.4
index 6cc1bc048..689175f3b 100755
--- a/testsuite/expect/test8.4
+++ b/testsuite/expect/test8.4
@@ -17,7 +17,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test8.4.prog.c b/testsuite/expect/test8.4.prog.c
index e6633b46f..8342d7b05 100644
--- a/testsuite/expect/test8.4.prog.c
+++ b/testsuite/expect/test8.4.prog.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test8.5 b/testsuite/expect/test8.5
index 7eca73ff3..76cee2b5a 100755
--- a/testsuite/expect/test8.5
+++ b/testsuite/expect/test8.5
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -215,7 +215,9 @@ exec $bin_chmod 700 $file_in
 #
 set timeout [expr $max_job_delay + $sleep_time]
 
-if {![string compare $type "P"]} {
+if {![string compare $type "Q"]} {
+	set exit_code [run_bgp_test]
+} elseif {![string compare $type "P"]} {
 	set exit_code [run_bgp_test]
 } elseif {![string compare $type "L"]} {
 	set exit_code [run_bgl_test]
diff --git a/testsuite/expect/test8.6 b/testsuite/expect/test8.6
index e476e9bfb..f5077f585 100755
--- a/testsuite/expect/test8.6
+++ b/testsuite/expect/test8.6
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -296,7 +296,7 @@ make_bash_script $file_in "$bin_sleep $sleep_time"
 set timeout [expr $max_job_delay + $sleep_time]
 
 
-if {![string compare $type "P"]} {
+if {![string compare $type "P"] || ![string compare $type "Q"]} {
 	set started [run_bgp_test]
 } elseif {![string compare $type "L"]} {
 	set started [run_bgl_test]
diff --git a/testsuite/expect/test8.7 b/testsuite/expect/test8.7
index 817d184d9..aeb51545f 100755
--- a/testsuite/expect/test8.7
+++ b/testsuite/expect/test8.7
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test8.7.prog.c b/testsuite/expect/test8.7.prog.c
index a710c99d8..225e8909d 100644
--- a/testsuite/expect/test8.7.prog.c
+++ b/testsuite/expect/test8.7.prog.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test8.8 b/testsuite/expect/test8.8
index 6115d24fe..963aad87e 100755
--- a/testsuite/expect/test8.8
+++ b/testsuite/expect/test8.8
@@ -14,7 +14,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -44,10 +44,10 @@ set sleep_time  5
 
 # allocate a set of nodes (node_cnt) and the quit right after
 proc allocate_and_quit { node_cnt node error_cnt check} {
-	global salloc number alpha_numeric_under max_job_delay prompt
+	global salloc number alpha_numeric_under bin_bash max_job_delay prompt
 	set job_id 0
 	set block ""
-	set line "-N$node_cnt-$node_cnt"
+	set line "-N$node_cnt-$node_cnt -t5 $bin_bash"
 	if {[string compare $node ""]} {
 		set line "$line -w $node"
 	}
@@ -333,7 +333,7 @@ proc check_node { node error_cnt alloc_cnt } {
 }
 
 # set a block in a error state
-proc change_block_state { block state} {
+proc change_block_state { block state } {
 	global scontrol
 
 	set exit_code 0
@@ -473,9 +473,17 @@ if {$type == 0} {
 	exit 1
 }
 
-
-
-if {![string compare $type "P"]} {
+if {![string compare $type "Q"]} {
+	if {$psets >= 32} {
+		set smallest 16
+	} elseif {$psets >= 16} {
+		set smallest 32
+	} elseif {$psets >= 8} {
+		set smallest 64
+	} else {
+		set smallest 128
+	}
+} elseif {![string compare $type "P"]} {
 	if {$psets >= 32} {
 		set smallest 16
 	} elseif {$psets >= 16} {
diff --git a/testsuite/expect/test9.1 b/testsuite/expect/test9.1
index a55ca422d..e3bf2689e 100755
--- a/testsuite/expect/test9.1
+++ b/testsuite/expect/test9.1
@@ -16,7 +16,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -40,11 +40,16 @@ set exit_code    0
 set file_in      "test$test_id.input"
 set file_out     "test$test_id.output"
 set job_name     "test$test_id"
-set task_cnt     $max_stress_tasks
 
 set cycle_count [get_cycle_count]
+set task_cnt    $max_stress_tasks
 if { [test_bluegene] } {
-	set node_cnt 1-2048
+	if { [test_emulated] } {
+		set node_cnt 1
+		set task_cnt 1
+	} else {
+		set node_cnt 1-2048
+	}
 } else {
 	if { [test_xcpu] } {
 		set node_cnt 1-1
@@ -57,6 +62,11 @@ set other_opts   "-O"
 
 print_header $test_id
 
+if {[test_cray]} {
+	send_user "\nWARNING: This test is incompatible with Cray systems\n"
+	exit $exit_code
+}
+
 # Execute an srun job to cat input_file to output_file, wait for completion
 # Returns 0 on successful completion, returns 1 otherwise
 proc run_cat_job { input_file output_file } {
diff --git a/testsuite/expect/test9.2 b/testsuite/expect/test9.2
index 52fd0a751..c7b9535d6 100755
--- a/testsuite/expect/test9.2
+++ b/testsuite/expect/test9.2
@@ -16,7 +16,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -41,8 +41,14 @@ set file_out     "test$test_id.output"
 set job_name     "test$test_id"
 
 set cycle_count [get_cycle_count]
+set task_cnt    $max_stress_tasks
 if { [test_bluegene] } {
-	set node_cnt 1-2048
+	if { [test_emulated] } {
+		set node_cnt 1
+		set task_cnt 1
+	} else {
+		set node_cnt 1-2048
+	}
 } else {
 	if { [test_xcpu] } {
 		set node_cnt 1-1
@@ -51,10 +57,14 @@ if { [test_bluegene] } {
 	}
 }
 set other_opts   "-O"
-set task_cnt     $max_stress_tasks
 
 print_header $test_id
 
+if {[test_cray]} {
+	send_user "\nWARNING: This test is incompatible with Cray systems\n"
+	exit $exit_code
+}
+
 # Execute an srun job to print hostname to output_file with task_cnt tasks per node,
 # wait for completion
 # Returns 0 on successful completion, returns 1 otherwise
diff --git a/testsuite/expect/test9.3 b/testsuite/expect/test9.3
index fc41afe26..7e41c4db7 100755
--- a/testsuite/expect/test9.3
+++ b/testsuite/expect/test9.3
@@ -16,7 +16,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -42,8 +42,14 @@ set file_out     "test$test_id.output"
 set job_name     "test$test_id"
 
 set cycle_count [get_cycle_count]
+set task_cnt    $max_stress_tasks
 if { [test_bluegene] } {
-	set node_cnt 1-2048
+	if { [test_emulated] } {
+		set node_cnt 1
+		set task_cnt 1
+	} else {
+		set node_cnt 1-2048
+	}
 } else {
 	if { [test_xcpu] } {
 		set node_cnt 1-1
@@ -52,10 +58,14 @@ if { [test_bluegene] } {
 	}
 }
 set other_opts   "-O"
-set task_cnt      $max_stress_tasks
 
 print_header $test_id
 
+if {[test_cray]} {
+	send_user "\nWARNING: This test is incompatible with Cray systems\n"
+	exit $exit_code
+}
+
 # Execute an srun job to cat input_file to output_file with task_cnt tasks
 # per node, wait for completion
 # Returns 0 on successful completion, returns 1 otherwise
diff --git a/testsuite/expect/test9.4 b/testsuite/expect/test9.4
index 684040b54..6f6f4635c 100755
--- a/testsuite/expect/test9.4
+++ b/testsuite/expect/test9.4
@@ -21,7 +21,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -48,8 +48,14 @@ set file_out_task "test$test_id.%t.output"
 set job_name      "test$test_id"
 
 set cycle_count [get_cycle_count]
+set task_cnt    $max_stress_tasks
 if { [test_bluegene] } {
-	set node_cnt 1-2048
+	if { [test_emulated] } {
+		set node_cnt 1
+		set task_cnt 1
+	} else {
+		set node_cnt 1-2048
+	}
 } else {
 	if { [test_xcpu] } {
 		set node_cnt 1-1
@@ -57,12 +63,15 @@ if { [test_bluegene] } {
 		set node_cnt 1-4
 	}
 }
-
 set other_opts    "-O"
-set task_cnt      $max_stress_tasks
 
 print_header $test_id
 
+if {[test_cray]} {
+	send_user "\nWARNING: This test is incompatible with Cray systems\n"
+	exit $exit_code
+}
+
 # Execute an srun job to print hostname to output_file with task_cnt tasks
 # per node, wait for completion
 # Returns 0 on successful completion, returns 1 otherwise
@@ -143,6 +152,7 @@ for {set inx 0} {$inx < $cycle_count} {incr inx} {
 				send_user "\nFAILURE:stdout is incomplete\n"
 			}
 			set exit_code 1
+			break
 		} else {
 			incr success_cnt
 		}
diff --git a/testsuite/expect/test9.5 b/testsuite/expect/test9.5
index df7a1ed99..b8f2936c2 100755
--- a/testsuite/expect/test9.5
+++ b/testsuite/expect/test9.5
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -40,8 +40,14 @@ set file_out      "test$test_id.output"
 set job_name      "test$test_id"
 
 set cycle_count [get_cycle_count]
+set task_cnt    $max_stress_tasks
 if { [test_bluegene] } {
-	set node_cnt 1-2048
+	if { [test_emulated] } {
+		set node_cnt 1
+		set task_cnt 1
+	} else {
+		set node_cnt 1-2048
+	}
 } else {
 	if { [test_xcpu] } {
 		set node_cnt 1-1
@@ -50,10 +56,14 @@ if { [test_bluegene] } {
 	}
 }
 set other_opts    "-O"
-set task_cnt      $max_stress_tasks
 
 print_header $test_id
 
+if {[test_cray]} {
+	send_user "\nWARNING: This test is incompatible with Cray systems\n"
+	exit $exit_code
+}
+
 # Execute an srun job to print hostname to output_file with task_cnt tasks
 # per node, wait for completion
 # Returns 0 on successful completion, returns 1 otherwise
@@ -127,6 +137,7 @@ for {set inx 0} {$inx < $cycle_count} {incr inx} {
 			send_user "\nFAILURE:stdout is incomplete\n"
 		}
 		set exit_code 1
+		break
 	} else {
 		incr success_cnt
 	}
diff --git a/testsuite/expect/test9.6 b/testsuite/expect/test9.6
index df0fe3ed7..9c50cbc9a 100755
--- a/testsuite/expect/test9.6
+++ b/testsuite/expect/test9.6
@@ -16,7 +16,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -42,8 +42,14 @@ set file_out     "test$test_id.output"
 set job_name     "test$test_id"
 
 set cycle_count [get_cycle_count]
+set task_cnt    $max_stress_tasks
 if { [test_bluegene] } {
-	set node_cnt 1-2048
+	if { [test_emulated] } {
+		set node_cnt 1
+		set task_cnt 1
+	} else {
+		set node_cnt 1-2048
+	}
 } else {
 	if { [test_xcpu] } {
 		set node_cnt 1-1
@@ -51,12 +57,15 @@ if { [test_bluegene] } {
 		set node_cnt 1-4
 	}
 }
-
 set other_opts   "-O"
-set task_cnt      $max_stress_tasks
 
 print_header $test_id
 
+if {[test_cray]} {
+	send_user "\nWARNING: This test is incompatible with Cray systems\n"
+	exit $exit_code
+}
+
 # Execute an srun job to cat input_file to output_file with task_cnt tasks
 # per node, wait for completion
 # Returns 0 on successful completion, returns 1 otherwise
diff --git a/testsuite/expect/test9.7 b/testsuite/expect/test9.7
index 190500291..64a0df0f7 100755
--- a/testsuite/expect/test9.7
+++ b/testsuite/expect/test9.7
@@ -13,7 +13,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test9.7.bash b/testsuite/expect/test9.7.bash
index bcf025b66..9a99808bc 100755
--- a/testsuite/expect/test9.7.bash
+++ b/testsuite/expect/test9.7.bash
@@ -10,7 +10,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
  # Please also read the supplied file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/expect/test9.8 b/testsuite/expect/test9.8
index f2259e736..874ec9bfc 100755
--- a/testsuite/expect/test9.8
+++ b/testsuite/expect/test9.8
@@ -16,7 +16,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
 # Please also read the included file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
@@ -46,6 +46,11 @@ set task_cnt     60
 
 print_header $test_id
 
+if {[test_cray]} {
+	send_user "\nWARNING: This test is incompatible with Cray systems\n"
+	exit $exit_code
+}
+
 #
 # A single slurmd can't handle a large task count without
 # running out of memory and pthreads
@@ -59,14 +64,17 @@ if {[test_front_end] != 0 || $enable_memory_leak_debug != 0} {
 # NOTE: The initial sleep is so that all of the submissions have time
 #   to occur before contending with a multitude of job step creations.
 #   This is especially important on very slow systems (e.g. AIX).
+# NOTE: Explicity set a small memory limit. Without explicitly setting the step
+#   memory limit, it will use the system default (same as the job) which may
+#   prevent the level of parallelism desired.
 #
 make_bash_script $file_in "
 $bin_sleep 5
 ulimit -u `ulimit -u -H`
 for ((inx=0; inx < $task_cnt; inx++)) ; do
-        $srun -N1 -n1 $bin_sleep $sleep_time &
+        $srun -N1 -n1 --mem-per-cpu=1 $bin_sleep $sleep_time &
 done
-$srun -N1 -n1 $bin_sleep $sleep_time
+$srun -N1 -n1 --mem-per-cpu=1 $bin_sleep $sleep_time
 "
 
 #
diff --git a/testsuite/expect/usleep b/testsuite/expect/usleep
index 54b448ccd..1cce57330 100755
--- a/testsuite/expect/usleep
+++ b/testsuite/expect/usleep
@@ -10,7 +10,7 @@
 # CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# For details, see <http://www.schedmd.com/slurmdocs/>.
  # Please also read the supplied file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/slurm_unit/Makefile.in b/testsuite/slurm_unit/Makefile.in
index db6bbfdaa..12619012e 100644
--- a/testsuite/slurm_unit/Makefile.in
+++ b/testsuite/slurm_unit/Makefile.in
@@ -60,6 +60,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -70,6 +71,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -131,7 +133,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -168,6 +173,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -225,6 +231,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -260,6 +267,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/testsuite/slurm_unit/api/Makefile.in b/testsuite/slurm_unit/api/Makefile.in
index dce5355ef..beb935513 100644
--- a/testsuite/slurm_unit/api/Makefile.in
+++ b/testsuite/slurm_unit/api/Makefile.in
@@ -62,6 +62,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -72,6 +73,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -154,7 +156,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -191,6 +196,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -248,6 +254,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -283,6 +290,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/testsuite/slurm_unit/api/manual/Makefile.in b/testsuite/slurm_unit/api/manual/Makefile.in
index ca430f72a..4dd26f13f 100644
--- a/testsuite/slurm_unit/api/manual/Makefile.in
+++ b/testsuite/slurm_unit/api/manual/Makefile.in
@@ -64,6 +64,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -74,6 +75,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -146,7 +148,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -183,6 +188,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -240,6 +246,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -275,6 +282,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
diff --git a/testsuite/slurm_unit/api/manual/cancel-tst.c b/testsuite/slurm_unit/api/manual/cancel-tst.c
index 82b0da815..9dc4b9ecf 100644
--- a/testsuite/slurm_unit/api/manual/cancel-tst.c
+++ b/testsuite/slurm_unit/api/manual/cancel-tst.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/slurm_unit/api/manual/complete-tst.c b/testsuite/slurm_unit/api/manual/complete-tst.c
index 8503202e8..a9e359ad0 100644
--- a/testsuite/slurm_unit/api/manual/complete-tst.c
+++ b/testsuite/slurm_unit/api/manual/complete-tst.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/slurm_unit/api/manual/job_info-tst.c b/testsuite/slurm_unit/api/manual/job_info-tst.c
index e799cd4a5..a5cebc8c1 100644
--- a/testsuite/slurm_unit/api/manual/job_info-tst.c
+++ b/testsuite/slurm_unit/api/manual/job_info-tst.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/slurm_unit/api/manual/node_info-tst.c b/testsuite/slurm_unit/api/manual/node_info-tst.c
index 788f8f7d5..386b14c16 100644
--- a/testsuite/slurm_unit/api/manual/node_info-tst.c
+++ b/testsuite/slurm_unit/api/manual/node_info-tst.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/slurm_unit/api/manual/partition_info-tst.c b/testsuite/slurm_unit/api/manual/partition_info-tst.c
index a83a3bbf4..6a29748fc 100644
--- a/testsuite/slurm_unit/api/manual/partition_info-tst.c
+++ b/testsuite/slurm_unit/api/manual/partition_info-tst.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/slurm_unit/api/manual/reconfigure-tst.c b/testsuite/slurm_unit/api/manual/reconfigure-tst.c
index d3a39d7b3..24caab949 100644
--- a/testsuite/slurm_unit/api/manual/reconfigure-tst.c
+++ b/testsuite/slurm_unit/api/manual/reconfigure-tst.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/slurm_unit/api/manual/submit-tst.c b/testsuite/slurm_unit/api/manual/submit-tst.c
index ac3410880..512ba1fac 100644
--- a/testsuite/slurm_unit/api/manual/submit-tst.c
+++ b/testsuite/slurm_unit/api/manual/submit-tst.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/slurm_unit/api/manual/update_config-tst.c b/testsuite/slurm_unit/api/manual/update_config-tst.c
index dbb09de28..65555746d 100644
--- a/testsuite/slurm_unit/api/manual/update_config-tst.c
+++ b/testsuite/slurm_unit/api/manual/update_config-tst.c
@@ -7,7 +7,7 @@
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
diff --git a/testsuite/slurm_unit/common/Makefile.in b/testsuite/slurm_unit/common/Makefile.in
index 48d79a84c..fa9b50af1 100644
--- a/testsuite/slurm_unit/common/Makefile.in
+++ b/testsuite/slurm_unit/common/Makefile.in
@@ -62,6 +62,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -72,6 +73,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_srun.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
 	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
@@ -134,7 +136,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
 BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
 BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
 BLCR_HOME = @BLCR_HOME@
 BLCR_LDFLAGS = @BLCR_LDFLAGS@
@@ -171,6 +176,7 @@ HAVEPGCONFIG = @HAVEPGCONFIG@
 HAVE_AIX = @HAVE_AIX@
 HAVE_ELAN = @HAVE_ELAN@
 HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
 HAVE_OPENSSL = @HAVE_OPENSSL@
 HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
 HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
@@ -228,6 +234,7 @@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
 PTHREAD_LIBS = @PTHREAD_LIBS@
 RANLIB = @RANLIB@
 READLINE_LIBS = @READLINE_LIBS@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
 RELEASE = @RELEASE@
 SED = @SED@
 SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
@@ -263,6 +270,7 @@ abs_top_srcdir = @abs_top_srcdir@
 ac_ct_CC = @ac_ct_CC@
 ac_ct_CXX = @ac_ct_CXX@
 ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
 am__include = @am__include@
 am__leading_dot = @am__leading_dot@
 am__quote = @am__quote@
-- 
GitLab